source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
catchmind_ver2.py | # Not solved yet: LED output, speech recognition (original note was mojibake'd Korean)
# colorlistcnt: number of color blocks earned from the dinosaur mini-game
# colorlist: the colors themselves (originally a rainbow palette)
#colorlist=["red","orange","yellow","green","blue","purple","gray"]
# Colors available for drawing; index order must match colorlistcnt below.
colorlist=["black","red","green","yellow","blue","purple","skyblue"]
#colorlistcnt=[0,2,3,0,4,2,3]
# How many blocks of each color the player owns (same index as colorlist).
# A color appears in the legend / is selectable only while its count is > 0.
colorlistcnt=[1,2,3,1,4,2,3]
from turtle import*
import time
import led_display as led
import threading
# Ask the player what to draw; the guess loop later compares answers to this.
# NOTE(review): the prompt text is mojibake from a source-encoding issue and was
# split across two lines in the dump (a syntax error); rejoined here unchanged.
kkk = input("๊ทธ๋ฆด ๊ฒ์ ์๋ ฅํ์ธ์")
def LED_init():
    """Start the LED matrix driver (led.main) on a background thread.

    The thread is a daemon so it dies automatically when the main program
    (the turtle window) exits. Uses the ``daemon=`` constructor argument
    instead of the deprecated ``Thread.setDaemon()``.
    """
    t = threading.Thread(target=led.main, args=(), daemon=True)
    t.start()
# Record the game start time so the guess loop can report elapsed seconds.
start=time.time()
pencolor("black")
title("Catch my drawing")
# Screen setup: 1600x800 window.
setup(1600,800)
hideturtle()
speed(100000)  # any value outside 0.5..10 is treated as 0 (fastest) by turtle
pensize(5)
# Horizontal grid lines: 15 rows, 50 px apart, spanning the full 1600 px width.
h=-350
for i in range(15):
    up()
    goto(-800,h)
    down()
    forward(1600)
    h+=50
# Vertical grid lines: 31 columns, 50 px apart, drawn downward over 800 px.
v=-750
setheading(-90)
for i in range(31):
    up()
    goto(v,-400)
    down()
    backward(800)
    v+=50
#์๊น ๋ณ๋ก ํ๋ฉด์ ์์น ํด์ฃผ๊ธฐ
def drawColor(color, b):
    """Draw one legend tick: a 30-px-wide, 15-px-tall mark at x=725, top y=b.

    Called once per color the player currently owns, to show which pens
    are available.
    """
    up()
    pensize(30)
    pencolor(color)
    goto(725, b)
    down()
    goto(725, b - 15)
# Render the legend: one colored tick per color the player owns (count > 0),
# stacked vertically 50 px apart starting at y=335.
for i in range(0,7,1):
    if colorlistcnt[i]>0:
        drawColor(colorlist[i],335-i*50)
# Terminate the program: close the turtle window. Bound to the space key below.
def endP():
    bye()
# Guess loop: runs at import time, BEFORE the "def answer" below has executed,
# so the original call to answer() raised NameError on the first guess. The
# check is inlined here instead; messages and timing output are unchanged.
# (The prompt/result strings are mojibake from a source-encoding issue and are
# reproduced as-is; the prompt was also rejoined after being split in the dump.)
while True:
    mmm = input("์ ๋ต์ ์๋ ฅํ์์ค : ")
    if mmm == kkk:
        print("๊ฑธ๋ฆฐ ์๊ฐ:", time.time() - start)
        break
    print("์ ๋ต์ด ์๋๋๋ค.")
# Answer checking: compare a guess against the target word kkk (module global).
def answer(mmm):
    # Correct guess: report seconds elapsed since `start` was recorded.
    if mmm==kkk:
        print("๊ฑธ๋ฆฐ ์๊ฐ:",time.time()-start)
    else:
        # Wrong guess message (mojibake string preserved verbatim).
        print("์ ๋ต์ด ์๋๋๋ค.")
# Click handler: paint the clicked 50x50 grid cell and mirror it on the LED matrix.
def drawShape(x, y):
    # Default LED palette index used when the click did not land on a legend
    # entry. NOTE(review): 2 looks arbitrary — confirm against led_display.
    ledcolor = 2
    # Clicks in the legend column (x in [700, 750]) select a pen color:
    # legend entry k occupies the y band (300-50k, 350-50k].
    if 700<=x<=750:
        for k in range(0,7,1):
            if 300-50*k<y<=350-50*k:
                if colorlistcnt[k]>0:
                    pencolor(colorlist[k])
                    ledcolor = k
    # Snap the click to its grid cell: a = cell center x, b = cell top y.
    a=x-x%50+25
    b=(y//50+1)*50
    # Fill the cell with a short stroke of the current (30 px wide) pen.
    up()
    goto(a,b-15)
    down()
    goto(a,b-30)
    # Mirror the cell on the LED matrix, mapping screen coords to LED col/row.
    # NOTE(review): a legend click also falls through and paints a cell here —
    # confirm whether that is intended.
    led.set_pixel(int((a+775)/50), int((400-b)/50), ledcolor)
# Event bindings and main loop. The original wrapped onscreenclick() in a
# "while True:" loop, which busy-spins re-registering the same handler and
# never reaches mainloop(); registering the handler once is sufficient —
# turtle's event loop (mainloop) then dispatches clicks to drawShape.
onkey(endP, "space")   # space closes the window via endP
listen()               # give the window keyboard focus for onkey
LED_init()             # start the LED driver thread
onscreenclick(drawShape)
mainloop()
# TODO: handle the case where the answer is given via speech recognition (unfinished note)
|
test_fx.py | import builtins
import contextlib
import copy
import functools
import inspect
import math
import numbers
import operator
import os
import pickle
import sys
import torch
import traceback
import warnings
import unittest
from math import sqrt
from torch.multiprocessing import Process
from torch.testing import FileCheck
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_device_type import ops, onlyCPU, instantiate_device_type_tests
import torch.utils._pytree as pytree
import torch.fx._pytree as fx_pytree
from torch.fx import symbolic_trace, Proxy, Node, GraphModule, Interpreter, Tracer, Transformer, Graph, wrap, PH
import torch._C._fx
from torch.fx.node import Target, Argument
from torch.fx.passes import shape_prop
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.fx.experimental.rewriter import RewritingTracer
from torch.fx.operator_schemas import get_signature_for_torch_op
from copy import deepcopy
from collections import namedtuple
from torch.fx.proxy import TraceError
from fx.test_subgraph_rewriter import TestSubgraphRewriter # noqa: F401
from fx.test_dce_pass import TestDCE # noqa: F401
from fx.test_fx_const_fold import TestConstFold # noqa: F401
from fx.test_fx_param_shape_control_flow import TestConstParamShapeInControlFlow # noqa: F401
if sys.version_info >= (3, 7):
from fx.test_gradual_type import AnnotationsTest # noqa: F401
if sys.version_info >= (3, 7):
from fx.test_gradual_type import TypeCheckerTest # noqa: F401
from typing import Any, Callable, Dict, NamedTuple, List, Optional, Tuple, Union
from torch.testing._internal.common_utils import (
IS_FBCODE,
IS_MACOS,
IS_WINDOWS,
TEST_WITH_ROCM,
find_library_location,
run_tests,
)
from torch.testing._internal.jit_utils import JitTestCase
from fx.named_tup import MyNamedTup
# Probe for the optional torchvision dependency; tests that need it are
# skipped via the decorator below when it is not installed.
try:
    from torchvision import models as torchvision_models
    HAS_TORCHVISION = True
except ImportError:
    HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
class SimpleTest(torch.nn.Module):
    """Minimal module used by several tests: computes relu(x + 3.0)."""

    def forward(self, x):
        shifted = x + 3.0
        return torch.relu(shifted)
def a_non_torch_leaf(a, b):
    """Plain Python addition; intentionally not a torch op, so tracing tests
    can exercise call_function nodes targeting a non-torch callable."""
    total = a + b
    return total
# Used for test_autowrap_function. Autowrapped functions need to be global
def fx_int(x: float) -> int:
    """Truncate *x* toward zero and return it as an int."""
    return int(x)
def fx_int_x2(x: float) -> int:
    """Truncate *x* toward zero, then double the result."""
    truncated = int(x)
    return truncated * 2
# used in test_pytree. It's all the way out here because pickling a GraphModule
# that uses Point errors out if Point is local to the function
Point = namedtuple('Point', ['x', 'y'])  # simple 2-field pytree container
# Test wrap() passing both a function name as well as a function
# directly
def a_lifted_leaf(a, b):
return a[0] + a[1] + b
wrap('a_lifted_leaf')
# Test wrapping twice doesn't break anything
wrap('a_lifted_leaf')
def a_lifted_leaf2(a, b):
return a[0] + a[1] + b
wrap(a_lifted_leaf2)
wrap('len')
@wrap
def wrapped_via_decorator(a):
return a + 1
wrap('wrapped_with_submodule')
def wrapped_with_submodule(x: torch.Tensor, batchnorm1d: torch.nn.BatchNorm1d):
return batchnorm1d(x)
real_wrapped_via_decorator = wrapped_via_decorator
real_a_lifed_leaf = a_lifted_leaf
real_a_lifed_leaf2 = a_lifted_leaf2
_sqrt = sqrt
wrap('wrapper_fn')
def wrapper_fn(x):
return torch.foo(x)
class Pair(NamedTuple):
    # Two-tensor record used to exercise NamedTuple handling in tracing.
    x : torch.Tensor
    y : torch.Tensor
# for testing pytrees
class Foo(object):  # noqa: B209
    """Opaque two-field container (deliberately not a registered pytree)."""

    def __init__(self, a, b):
        self.b = b
        self.a = a
class TestFX(JitTestCase):
def setUp(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
return
lib_file_path = find_library_location('libtorchbind_test.so')
torch.ops.load_library(str(lib_file_path))
def checkGraphModule(self, m: torch.nn.Module, args, kwargs=None):
"""Check that an nn.Module's results match the GraphModule version
for a given set of args/kwargs.
"""
kwargs = kwargs if kwargs else {}
ref_outs = m(*args, **kwargs)
gm = symbolic_trace(m)
gm.graph.lint()
test_outs = gm(*args, **kwargs)
self.assertEqual(ref_outs, test_outs)
def test_graph_module(self):
class MySub(torch.nn.Module):
def __init__(self):
super().__init__()
self.w = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.w + x
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.lin = torch.nn.Linear(4, 3)
self.sub_mod = MySub()
self.w = torch.nn.Parameter(torch.rand(3))
def forward(self, A, B, c):
t = torch.sigmoid(A) + self.lin(c)
return self.sub_mod(t.data + self.w + t + 1 - A + B // A + -A + A.add(B, alpha=3))
m = MyModule()
gm = symbolic_trace(m)
ms = torch.jit.script(gm)
class M2(torch.nn.Module):
def forward(self, A):
m, idx = torch.max(A, 0)
return m + 1, idx + 1
m2 = M2()
gm2 = symbolic_trace(m2)
class T(torch.nn.Module):
def forward(self, A, b=4, *args, c=5, **kwargs):
x = A + 1 + args[0] + kwargs['3']
return x
t = T()
symbolic_trace(t)
def test_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(torch.sin(x + y), gm(x, y))
def test_args_kwargs(self):
class T(torch.nn.Module):
def forward(self, *args, **kwargs):
x = args[0] + kwargs['foo']
return x
t = T()
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_args_kwargs_no_self(self):
class T(torch.nn.Module):
def forward(*args, **kwargs): # noqa: B902
self = args[0]
return torch.relu(args[1])
t = T()
with self.assertRaisesRegex(RuntimeError, r'cannot be part of \*args expansion'):
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_fx_shifts(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x << 3, x >> 3
input = torch.LongTensor(10).random_(0, 1024)
m = MyModule()
self.checkGraphModule(m, (input,))
def test_dict(self):
class MyDictMod(torch.nn.Module):
def forward(self, d):
return d['3'].relu(), {'4' : d['3'].neg()}
input_dict = {'3': torch.rand(3, 4)}
m = MyDictMod()
self.checkGraphModule(m, (input_dict,))
def test_disallow_override(self):
# Custom delegate to disallow in-place tensor operations
class NoMutableCallTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
name = target if isinstance(target, str) else torch.typename(target)
if name[-1] == '_':
raise RuntimeError('In-place operations are not supported')
return super().create_node(kind, target, args, kwargs, name)
# Test method
class MyInplaceMod(torch.nn.Module):
def forward(self, x):
x.add_(3.0)
return x
m = MyInplaceMod()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m)
# Test free function
class MyInplaceMod2(torch.nn.Module):
def forward(self, x):
torch.log_(x)
return x
m2 = MyInplaceMod2()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m2)
# Test symbolic node as an arg
class MyInplaceMod3(torch.nn.Module):
def forward(self, x):
y = torch.ones(3, 4)
y.add_(x)
return x
m3 = MyInplaceMod3()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m3)
def test_leaf_module(self):
# Custom delegate to make it so that there are no leaf modules, everything
# should get traced through
class NoLeafModulesTracer(Tracer):
def is_leaf_module(self, m, qualname):
return False
class MyReluMod(torch.nn.Module):
def __init__(self):
super().__init__()
self.relu = torch.nn.ReLU()
def forward(self, x):
return self.relu(x)
mrm = MyReluMod()
sym = NoLeafModulesTracer().trace(mrm)
for node in sym.nodes:
self.assertNotEqual(node.op, 'call_module')
sym.lint()
def test_wrap(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
def to_trace(y):
return a_lifted_leaf((4, y), 3) + a_lifted_leaf((3, 4), 5) + a_lifted_leaf((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
def test_wrap_fn_directly(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
def to_trace(y):
return a_lifted_leaf2((4, y), 3) + a_lifted_leaf2((3, 4), 5) + a_lifted_leaf2((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf2', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
def test_wrapped_via_decorator(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_wrapped_via_decorator_and_transformed(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
transformed = torch.fx.Transformer(m).transform()
self.assertIn('wrapped_via_decorator', transformed.code)
self.assertEqual(transformed(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_wrap_with_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
def forward(self, x: torch.Tensor):
return wrapped_with_submodule(x, self.batchnorm1d)
m = symbolic_trace(M())
self.assertIn("wrapped_with_submodule", m.code)
input = torch.rand(3, 2)
ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
self.assertEqual(ref_batchnorm1d(input), m(input))
def test_wrapped_retrace(self):
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
retraced = symbolic_trace(m)
self.assertIn('wrapped_via_decorator', retraced.code)
self.assertEqual(retraced(0), 1)
def test_graph_edit_with_proxy(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
gm.graph.lint()
self.assertEqual(gm(3, 4), 14)
def test_graph_unique_names(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
seen_names : Set[str] = set()
for node in gm.graph.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_stack_traces(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
tracer = torch.fx.Tracer()
tracer.record_stack_traces = True
graph = tracer.trace(M())
for node in graph.nodes:
if node.op == 'output':
continue
self.assertTrue(node.stack_trace is not None)
assert 'test_fx.py' in node.stack_trace
def test_graph_unique_names_manual(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'linear_mod', args=(a,), name='foo_1_1')
c : torch.fx.Node = graph.create_node('get_attr', 'y_attr', name='foo_1')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
graph2 = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
graph2.graph_copy(graph, val_map)
seen_names : Set[str] = set()
for node in graph2.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_unpack(self):
class M(torch.nn.Module):
def forward(self, a, b):
c, d = a
return c + d + b
a = (torch.rand(1), torch.rand(1))
b = torch.rand(1)
m = M()
self.checkGraphModule(m, (a, b))
def test_native_callable(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
raise unittest.SkipTest("non-portable load_library call used in test")
# This test exercises the case where we use FX to translate from Python
# code to some native callable object
#
# For the purposes of testing, we use ElementwiseInterpreter defined
# in test_custom_class.cpp.
#
# We test that we can
# 1) Construct a native callable from FX IR
# 2) Construct a drop-in replacement module that delegates to the
# native callable rather than the original code
# 3) Run both the original code and native callable wrapper with
# equivalent results
# 4) TorchScript compile the native callable wrapper and confirm
# equivalent results with the reference
# 5) TorchScript serialize and deserialize the native callable
# and confirm equivalent results with the reference
# We use this simple Module as a reference computation
class MySimpleMod(torch.nn.Module):
def forward(self, x):
return 3.0 * x + x
msm = MySimpleMod()
# This is what a lowering pass might look like: a function that takes
# a valid nn.Module, symbolically traces it, lowers the Module to some
# representation, and wraps that representation up into another
# nn.Module instance that handles dispatch to the compiled/lowered code.
def lower_to_elementwise_interpreter(orig_mod : torch.nn.Module) -> torch.nn.Module:
# ===== Stage 1: Symbolic trace the module =====
mod = symbolic_trace(orig_mod)
# ===== Stage 2: Lower GraphModule representation to the C++
# interpreter's instruction format ======
instructions = []
constant_idx = 0
constants = {}
fn_input_names = []
target_to_name = {
operator.add : "add",
operator.mul : "mul"
}
output_node : Optional[Node] = None
# For each instruction, create a triple
# (instruction_name : str, inputs : List[str], output : str)
# to feed into the C++ interpreter
for n in mod.graph.nodes:
target, args, out_name = n.target, n.args, n.name
assert len(n.kwargs) == 0, "kwargs currently not supported"
if n.op == 'placeholder':
# Placeholders specify function argument names. Save these
# for later when we generate the wrapper GraphModule
fn_input_names.append(target)
elif n.op == 'call_function':
assert target in target_to_name, "Unsupported call target " + target
arg_names = []
for arg in args:
if not isinstance(arg, Node):
# Pull out constants. These constants will later be
# fed to the interpreter C++ object via add_constant()
arg_name = f'constant_{constant_idx}'
constants[arg_name] = torch.tensor(
[arg] if isinstance(arg, numbers.Number) else arg)
arg_names.append(arg_name)
constant_idx += 1
else:
arg_names.append(arg.name)
instructions.append((target_to_name[target], arg_names, out_name))
elif n.op == 'output':
if output_node is not None:
raise RuntimeError('Multiple output nodes!')
output_node = n
else:
raise RuntimeError('Unsupported opcode ' + n.op)
interpreter = torch.classes._TorchScriptTesting._ElementwiseInterpreter()
# Load constants
for k, v in constants.items():
interpreter.add_constant(k, v)
# Specify names for positional input arguments
interpreter.set_input_names(fn_input_names)
# Load instructions
interpreter.set_instructions(instructions)
# Specify name for single output
assert isinstance(output_node.args[0], torch.fx.Node)
interpreter.set_output_name(output_node.args[0].name)
# ===== Stage 3: Create a wrapper GraphModule around the interpreter =====
class WrapperModule(torch.nn.Module):
def __init__(self, interpreter):
super().__init__()
self.interpreter = interpreter
wrapper = WrapperModule(interpreter)
# Create a graph that: 1) Takes function arguments 2) Invokes the interpreter
# 3) Returns the speficied return value
# FIXME: The following code could be greatly simplified by symbolic_trace'ing
# the wrapper with a Tracer that considers the Wrapper instance a root
# module, however, I can't get `__call__` exposed on TorchBind classes
# without it messing up Python `hasattr` for some reason. More digging
# into CPython's implementation of hasattr is probably in order...
graph = torch.fx.Graph()
# Add placeholders for fn inputs
placeholder_nodes = []
for name in fn_input_names:
placeholder_nodes.append(graph.create_node('placeholder', name))
# Get the interpreter object
interpreter_node = graph.create_node('get_attr', 'interpreter')
# Add a node to call the interpreter instance
output_node = graph.create_node(
op='call_method', target='__call__', args=(interpreter_node, placeholder_nodes))
# Register output
graph.output(output_node)
graph.lint()
# Return final GraphModule!!!
return GraphModule(wrapper, graph)
# Lower GraphModule to C++ interpreter
lowered = lower_to_elementwise_interpreter(msm)
# Compare correctness with original module
x = torch.rand(3, 4)
ref_out = msm(x)
test_out = lowered(x)
torch.testing.assert_allclose(test_out, ref_out)
# Test TorchScript compilation
scripted_lowered = torch.jit.script(lowered)
script_out = scripted_lowered(x)
torch.testing.assert_allclose(script_out, ref_out)
# Test TorchScript ser/de
import_copy = self.getExportImportCopy(scripted_lowered)
imported_out = import_copy(x)
torch.testing.assert_allclose(imported_out, ref_out)
def test_reserved_getattr(self):
"""Ensure that we do not name any nodes with a reserved builtin like `getattr`"""
class M(torch.nn.Module):
def forward(self, a):
return a.foo.bar.baz
m = M()
m_g = symbolic_trace(m)
m_g.graph.lint()
for node in m_g.graph.nodes:
self.assertTrue(node.name != "getattr")
def test_node_tagging(self):
class TaggingTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
n = super().create_node(kind, target, args, kwargs, name)
n.tag = 'foo'
return n
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = TaggingTracer().trace(m)
g.lint()
for n in g.nodes:
self.assertTrue(hasattr(n, 'tag'))
self.assertEqual(n.tag, 'foo')
def test_tensor_attribute(self):
class TensorAttribute(torch.nn.Module):
def __init__(self):
super().__init__()
self.tensor = torch.rand(3, 4)
def forward(self, x):
return torch.nn.functional.linear(x, self.tensor)
ta = TensorAttribute()
traced = symbolic_trace(ta)
traced(torch.rand(4, 4))
class WrapperForQualname(torch.nn.Module):
def __init__(self):
super().__init__()
self.ta = TensorAttribute()
def forward(self, x):
return torch.nn.functional.linear(x, self.ta.tensor)
wfq = WrapperForQualname()
traced2 = symbolic_trace(wfq)
traced2.graph.lint()
traced2(torch.rand(4, 4))
def test_symbolic_trace_sequential(self):
class Simple(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
seq = torch.nn.Sequential(
Simple(),
Simple(),
Simple()
)
traced = symbolic_trace(seq)
traced.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(traced(x), seq(x))
def test_tensor_constant(self):
class ConstTensor(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.linear(x, torch.zeros(3, 4))
ct = ConstTensor()
traced = symbolic_trace(ct)
traced.graph.lint()
traced(torch.rand(4, 4))
def test_pickle_graphmodule(self):
class Nested(torch.nn.Module):
def __init__(self):
super().__init__()
self.st = torch.nn.Linear(4, 4)
def forward(self, x):
return self.st(x)
n = Nested()
traced = symbolic_trace(n)
traced.graph.lint()
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(loaded(x), traced(x))
def test_pickle_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(loaded(x, y), gm(x, y))
def test_all_input_nodes(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.placeholder('x')
b : torch.fx.Node = graph.call_module('linear_mod', args=(a,))
c : torch.fx.Node = graph.get_attr('y_attr')
d : torch.fx.Node = graph.call_function(operator.add, args=(b, c))
e : torch.fx.Node = graph.call_function(torch.unsqueeze, args=(d, 0))
graph.output(e)
graph.lint()
self.assertEqual(b.all_input_nodes, [a])
self.assertEqual(c.all_input_nodes, [])
self.assertEqual(d.all_input_nodes, [b, c])
self.assertEqual(e.all_input_nodes, [d])
def test_deepcopy_graphmodule_with_transform(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
def transform(traced):
new_graph = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_value = new_graph.graph_copy(traced.graph, val_map)
relu_out = new_graph.create_node(
op='call_method', target='neg', args=(output_value,), kwargs={})
new_graph.output(relu_out)
return GraphModule(traced, new_graph)
transformed = transform(traced)
transformed.graph.lint()
copied = copy.deepcopy(transformed)
self.assertNotEqual(id(type(transformed)), id(type(copied)))
x = torch.randn(3, 4)
self.assertEqual(copied(x), transformed(x))
def test_deepcopy_with_submods_params(self):
class Bar(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
def forward(self, x):
return torch.relu(x) + self.param
class Baz(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.bar = Bar()
def forward(self, x):
return self.bar(x) - self.param
baz = Baz()
traced = symbolic_trace(baz)
traced.graph.lint()
copied = copy.deepcopy(traced)
copied.graph.lint()
def test_deepcopy_graph_with_tracer_cls(self):
class TestTracer(Tracer):
def is_leaf_module(self, module, name):
return True
g = Graph(tracer_cls=TestTracer)
x = g.placeholder("x")
g.output(x)
h = copy.deepcopy(g)
self.assertIsNotNone(h._tracer_cls)
self.assertTrue(g._tracer_cls == h._tracer_cls)
def test_unpack_list_better_error(self):
class SomeArgs(torch.nn.Module):
def forward(self, a, b):
return torch.rand(3, 4)
class UnpacksList(torch.nn.Module):
def __init__(self):
super().__init__()
self.sa = SomeArgs()
def forward(self, x : list):
return self.sa(*x)
ul = UnpacksList()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ul)
def test_unpack_dict_better_error(self):
class SomeKwargs(torch.nn.Module):
def forward(self, x=3, y=4):
return torch.rand(3, 4)
class UnpacksDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.sk = SomeKwargs()
def forward(self, x : dict):
return self.sk(**x)
ud = UnpacksDict()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ud)
def test_pretty_print_targets(self):
# Test that Graph pretty-print prints friendly name for targets
# in `operator` and `builtins`
class SomeMod(torch.nn.Module):
def forward(self, x):
return torch.add(x.foo + x.bar, 3.0)
traced = symbolic_trace(SomeMod())
graph_str = str(traced.graph)
self.assertIn('builtins.getattr', graph_str)
self.assertIn('operator.add', graph_str)
self.assertIn('torch.add', graph_str)
def test_pretty_print_node(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.param: torch.nn.Parameter = torch.nn.Parameter(
torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x: torch.Tensor, y: int = 2):
return self.linear(x[y] + self.param).clamp(min=0.0, max=1.0)
traced = symbolic_trace(M())
all_formatted = "\n".join([n.format_node() for n in traced.graph.nodes])
FileCheck().check("x").check("placeholder") \
.check("y").check("placeholder") \
.check("getitem").check("call_function") \
.check("param").check("get_attr") \
.check("add").check("call_function") \
.check("linear").check("call_module") \
.check("clamp").check("call_method") \
.run(all_formatted)
def test_script_tensor_constant(self):
# TorchScript seems to ignore attributes that start with `__`.
# We used to call anonymous Tensor values `__tensor_constant*`, but
# they were getting ignored by script. Now they're called
# `_tensor_constant*`
class IHaveATensorConstant(torch.nn.Module):
def forward(self, x):
return x + torch.rand(3, 4)
traced = torch.fx.symbolic_trace(IHaveATensorConstant())
torch.jit.script(traced)
def test_autowrap_functions(self):
class AutowrapFnTest(torch.nn.Module):
def forward(self, x):
return fx_int(x.shape[0] / 2)
class AutowrapFnTest2(torch.nn.Module):
def forward(self, x):
return fx_int(x.shape[0] / 2) + fx_int_x2(x.shape[0] / 2)
# Check function(s) are wrapped
# `int` would normally throw a TypeError as argument can't be `Proxy`
tracer = Tracer(autowrap_functions=(fx_int,))
graph = tracer.trace(AutowrapFnTest())
traced = GraphModule(tracer.root, graph, 'test')
tracer_2 = Tracer(autowrap_functions=(fx_int, fx_int_x2))
tracer_2.trace(AutowrapFnTest2())
# Test scriptability
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(4)), 2)
def test_torch_fx_len(self):
class FXLenTest(torch.nn.Module):
def forward(self, x):
return len(x)
traced = symbolic_trace(FXLenTest())
self.assertEqual(traced(torch.rand(3, 4)), 3)
# Test scriptability
scripted = torch.jit.script(FXLenTest())
self.assertEqual(scripted(torch.rand(3)), 3)
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(3)), 3)
# Test non-proxy len
class FXLenTest2(torch.nn.Module):
def __init__(self):
super().__init__()
self.l = [3, 4, 5]
def forward(self, x):
return x + len(self.l)
traced2 = symbolic_trace(FXLenTest2())
inp = torch.rand(3, 4)
self.assertEqual(traced2(inp), inp + 3.0)
self.assertIs(len, builtins.len)
def test_sqrt(self):
class Sqrt1(torch.nn.Module):
def forward(self, x):
return sqrt(x.size(0))
class Sqrt2(torch.nn.Module):
def forward(self, x):
return math.sqrt(x.size(0))
class Sqrt3(torch.nn.Module):
def forward(self, x):
return x + math.sqrt(2) + sqrt(2)
self.checkGraphModule(Sqrt1(), [torch.zeros(8)])
self.checkGraphModule(Sqrt2(), [torch.zeros(8)])
self.checkGraphModule(Sqrt3(), [torch.zeros(8)])
self.assertIs(sqrt, _sqrt)
self.assertIs(math.sqrt, _sqrt)
def test_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
out = gm(input)
self.assertEqual(out, ref_out)
def test_pickle_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
self.assertEqual(loaded(input), gm(input))
def test_pretty_print(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
printed = str(traced)
assert 'SimpleTest()' in printed
assert 'torch.relu' in printed
def test_pretty_print_graph(self):
class KwargPrintTest(torch.nn.Module):
def forward(self, x):
return torch.squeeze(x + 3.0, dim=2)
st = KwargPrintTest()
traced = symbolic_trace(st)
traced.graph.lint()
stringed = str(traced.graph)
for s in ['args', 'kwargs', '#users']:
assert s in stringed
def test_custom_proxy_type(self):
class TensorPair:
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair(x : TensorPair, y : TensorPair):
s = x.add(y)
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
y = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
ref_out = use_tensor_pair(x, y)
traced = symbolic_trace(use_tensor_pair)
traced_out = traced(x, y)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_type_literal(self):
class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair_literal(x : TensorPair):
s = x.add(TensorPair(torch.zeros(5, 3), torch.zeros(5, 3)))
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
ref_out = use_tensor_pair_literal(x)
traced = symbolic_trace(use_tensor_pair_literal)
traced_out = traced(x)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_dynamic_value(self):
class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair_ctor(x : TensorPair, y : torch.Tensor):
s = x.add(TensorPair(y, y))
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
y = torch.randn(5, 3)
ref_out = use_tensor_pair_ctor(x, y)
traced = symbolic_trace(use_tensor_pair_ctor)
traced_out = traced(x, y)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_input_dependent_control_flow(self):
    """ProxyableClassMeta: __init__ control flow that depends on the input's data."""
    class ZeroTensor(metaclass=torch.fx.ProxyableClassMeta):
        def __init__(self, inp):
            # Data-dependent branch: randn input makes this effectively
            # always take the else-branch in this test.
            if inp.sum() == 0:
                self.is_zero = True
                self.tensor = torch.tensor([])
            else:
                self.is_zero = False
                self.tensor = inp

        def add(self, other):
            if self.is_zero:
                return ZeroTensor(other.tensor)
            elif other.is_zero:
                return self
            # NOTE(review): implicitly returns None when neither operand is
            # zero — presumably unreached by this test; confirm intent.

    def use_zero_tensor(x : torch.Tensor, y : torch.Tensor):
        return ZeroTensor(x + y)

    x, y = torch.randn(5, 3), torch.randn(5, 3)

    ref_out = use_zero_tensor(x, y)

    traced = symbolic_trace(use_zero_tensor)

    traced_out = traced(x, y)

    self.assertEqual(traced_out.is_zero, ref_out.is_zero)
    self.assertEqual(traced_out.tensor, ref_out.tensor)
def test_graph_fns(self):
g = Graph()
a = g.placeholder('a')
b = g.call_module('linear', (a,))
c = g.get_attr('bias')
d = g.call_method('add', (b, c))
e = g.call_function(torch.sin, (d,))
g.output(e)
mod = torch.nn.Module()
mod.linear = torch.nn.Linear(3, 4)
mod.bias = torch.rand(4)
gm = GraphModule(mod, g)
gm.graph.lint()
input = torch.rand(3)
r = gm(input)
ref = torch.sin(mod.linear(input) + mod.bias)
self.assertEqual(r, ref)
def test_remove_uses(self):
g : torch.fx.Graph = Graph()
x : torch.fx.Node = g.placeholder('x')
relu : torch.fx.Node = g.call_function(torch.relu, (x,))
neg : torch.fx.Node = g.call_function(torch.neg, (relu,))
g.output(neg)
neg.replace_all_uses_with(relu)
g.erase_node(neg)
self.assertTrue(neg not in relu.users)
def test_nonetype_annotation(self):
    """Smoke test: symbolic_trace handles modules with NoneType-annotated args (EmbeddingBag)."""
    eb = torch.nn.EmbeddingBag(3, 4)
    symbolic_trace(eb)
def test_pickle_nonetype_annotation(self):
    """A traced EmbeddingBag survives a pickle round-trip and still computes the same result."""
    eb = torch.nn.EmbeddingBag(10, 3, mode='sum')
    traced = symbolic_trace(eb)
    pickled = pickle.dumps(traced)
    loaded = pickle.loads(pickled)
    loaded.graph.lint()
    input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
    offsets = torch.LongTensor([0, 4])
    self.assertEqual(loaded(input, offsets), traced(input, offsets))
def test_return_tuple(self):
    """Tracing preserves a tuple return value."""
    class M(torch.nn.Module):
        def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
            return (x, x + x)

    original = M()
    traced = symbolic_trace(original)
    self.assertEqual(traced(torch.ones(1)), original.forward(torch.ones(1)))
def test_construct_root_dict(self):
    """GraphModule can be built from a dict root mapping qualified names to modules/tensors."""
    graph : torch.fx.Graph = torch.fx.Graph()
    a : torch.fx.Node = graph.create_node('placeholder', 'x')
    b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
    c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
    d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
    graph.output(d)

    linear_mod : torch.nn.Module = torch.nn.Linear(3, 4)
    add_param : torch.Tensor = torch.rand(3, 4)
    # Dict root: dotted keys are installed as nested submodules/attributes.
    gm : torch.fx.GraphModule = torch.fx.GraphModule(
        {'foo.bar.baz': linear_mod, 'zip.zap.zam' : add_param}, graph)
    gm.graph.lint()

    assert 'self.foo.bar.baz' in gm.code

    x : torch.Tensor = torch.rand(3, 3)
    out : torch.Tensor = gm(x)
    ref_out : torch.Tensor = linear_mod(x) + add_param
    self.assertEqual(out, ref_out)
def test_symbolic_trace_assert(self):
    """torch._assert is traceable, fires at runtime, and survives scripting."""
    class AssertsTensorShape(torch.nn.Module):
        def forward(self, x):
            torch._assert(x.shape[1] > 4, "assert_foobar")
            return x

    m = AssertsTensorShape()
    # verify traceability
    traced = symbolic_trace(m)
    # verify assertion on traced model works correctly at runtime
    traced(torch.rand(4, 5))
    with self.assertRaisesRegex(AssertionError, "assert_foobar"):
        traced(torch.rand(4, 3))
    # verify the symbolically traced module is scriptable
    ms = torch.jit.script(m)
    with self.assertRaisesRegex(torch.jit.Error, "assert_foobar"):
        ms(torch.rand(4, 3))
def test_fx_create_arg(self):
    """__fx_create_arg__ lets a custom object control how it is lowered into the graph."""
    class CustomArgObject:
        def __init__(self, x, y):
            self.x = x
            self.y = y

        def __fx_create_arg__(self, tracer: torch.fx.Tracer):
            # Lower this object as a call_function node that reconstructs it.
            return tracer.create_node(
                "call_function",
                CustomArgObject,
                args=(
                    tracer.create_arg(self.x),
                    tracer.create_arg(self.y),
                ),
                kwargs={},
            )

    class HasCustomArgObjectWhenLeaf(torch.nn.Module):
        def forward(self, o: CustomArgObject):
            # Not normally traceable; good reason to make
            # this module a leaf.
            for x in o.x:
                o.y += x
            return o.y

    class Root(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.inner = HasCustomArgObjectWhenLeaf()

        def forward(self, x, y):
            o = CustomArgObject(x, y)
            return self.inner(o)

    class CreateArgTracer(torch.fx.Tracer):
        def is_leaf_module(self, m, module_qualified_name):
            return type(m) is HasCustomArgObjectWhenLeaf

    m = Root()
    graph = CreateArgTracer().trace(m)
    gm = torch.fx.GraphModule(m, graph)
    # The generated code must reconstruct the custom object by name.
    assert "CustomArgObject(" in gm.code
def test_trace_fn_constant(self):
    """A tensor closed over by a traced function becomes a constant in the graph."""
    some_constant = torch.rand(3, 4)

    def add_const(x):
        return some_constant + x

    traced = symbolic_trace(add_const)

    input = torch.rand(3, 4)
    self.assertEqual(traced(input), add_const(input))
def test_copy_no_remap(self):
    """node_copy without a val_map leaves args pointing at the old graph, which lint rejects."""
    traced = symbolic_trace(SimpleTest())
    g = traced.graph
    copied = torch.fx.Graph()
    for node in g.nodes:
        copied.node_copy(node)
    with self.assertRaisesRegex(RuntimeError, 'does not belong to this Graph'):
        copied.lint()
def test_wrong_topo(self):
    """lint() rejects a graph whose node order violates def-before-use."""
    graph : torch.fx.Graph = torch.fx.Graph()
    a : torch.fx.Node = graph.create_node('placeholder', 'x')
    b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
    c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
    d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
    graph.output(d)
    nodes = list(graph.nodes)
    # Move the get_attr node after the add node that consumes it.
    nodes[3].append(nodes[2])
    with self.assertRaisesRegex(RuntimeError, 'was used before it has been defined'):
        graph.lint()
def test_example_shape_prop(self):
    """ShapeProp records tensor_meta (shape/stride) on every node's meta."""
    class TestCase(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.attr = torch.randn(3, 4)
            self.submod = torch.nn.Linear(4, 4)

        def forward(self, x):
            return torch.neg(self.submod(x.relu() + self.attr))

    tc = TestCase()
    tc_traced = symbolic_trace(tc)
    ref_out = tc_traced(torch.rand(3, 4))
    shape_prop.ShapeProp(tc_traced).propagate(torch.rand(3, 4))

    # Make sure we're testing all opcodes
    opcodes = set()
    output_shape : Optional[torch.Size] = None
    output_stride : Optional[Tuple[int, ...]] = None
    for node in tc_traced.graph.nodes:
        opcodes.add(node.op)
        if node.op == 'output':
            output_shape = node.args[0].meta['tensor_meta'].shape
            output_stride = node.args[0].meta['tensor_meta'].stride
    self.assertEqual(opcodes, set(['placeholder', 'get_attr', 'call_function', 'call_method',
                                   'call_module', 'output']))

    # Test shape propagation and make sure results match actual
    self.assertEqual(output_shape, ref_out.shape)
    self.assertEqual(output_stride, ref_out.stride())
def test_shape_prop_layout(self):
    """ShapeProp records memory_format for contiguous and channels-last inputs."""
    class ConvTest(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.conv_mod = torch.nn.Conv2d(5, 5, 3)

        def forward(self, x):
            return self.conv_mod(x)

    # contiguous layout
    test_mod = ConvTest()
    traced = symbolic_trace(test_mod)
    x = torch.randn(5, 5, 224, 224)
    shape_prop.ShapeProp(traced).propagate(x)

    assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
               for node in traced.graph.nodes))

    x_channels_last = x.contiguous(memory_format=torch.channels_last)
    traced.to(memory_format=torch.channels_last)
    shape_prop.ShapeProp(traced).propagate(x_channels_last)
    for node in traced.graph.nodes:
        # NB: the implementation of conv may not preserve the memory format,
        # unfortunately. The best we can do is just check that the placeholder
        # node is channels-last
        if node.op in {'placeholder'}:
            self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last)
def test_shape_prop_aggregate(self):
    """ShapeProp handles an aggregate (tuple) return from a leaf module."""
    class ReturnTwo(torch.nn.Module):
        def forward(self, x):
            return (3, torch.sum(x))

    class UnderTest(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.rt = ReturnTwo()

        def forward(self, x):
            return self.rt(x)

    ut = UnderTest()

    class RTTracer(torch.fx.Tracer):
        def is_leaf_module(self, m, module_qualified_name):
            return type(m) is ReturnTwo

    graph = RTTracer().trace(ut)
    mod = torch.fx.GraphModule(ut, graph)

    shape_prop.ShapeProp(mod).propagate(torch.rand(3, 4))

    for node in mod.graph.nodes:
        if node.op == 'call_module':
            assert 'tensor_meta' in node.meta
            tensor_meta = node.meta['tensor_meta']
            # Non-tensor element is kept as-is; tensor element gets metadata.
            assert tensor_meta[0] == 3
            assert tensor_meta[1].shape == torch.Size([])
def test_shape_prop_layout_3d(self):
    """3-D variant of test_shape_prop_layout using Conv3d and channels_last_3d."""
    class ConvTest3d(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.conv_mod = torch.nn.Conv3d(5, 5, 3)

        def forward(self, x):
            return self.conv_mod(x)

    test_mod_3d = ConvTest3d()
    traced_3d = symbolic_trace(test_mod_3d)
    x_3d = torch.randn(5, 5, 224, 224, 15)
    shape_prop.ShapeProp(traced_3d).propagate(x_3d)
    assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
               for node in traced_3d.graph.nodes))

    x_channels_last_3d = x_3d.contiguous(memory_format=torch.channels_last_3d)
    traced_3d.to(memory_format=torch.channels_last_3d)
    shape_prop.ShapeProp(traced_3d).propagate(x_channels_last_3d)
    for node in traced_3d.graph.nodes:
        # NB: the implementation of conv may not preserve the memory format,
        # unfortunately. The best we can do is just check that the placeholder
        # node is channels-last
        if node.op in {'placeholder'}:
            self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last_3d)
def test_interpreter(self):
    """Interpreter.run matches both the GraphModule and the eager module."""
    class MyModule(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.param = torch.nn.Parameter(torch.rand(3, 4))
            self.linear = torch.nn.Linear(4, 5)

        def forward(self, x):
            return self.linear(x + self.param).clamp(min=0.0, max=1.0)

    m = MyModule()
    gm = torch.fx.symbolic_trace(m)

    interpreter = Interpreter(gm)
    input = torch.randn(3, 4)
    self.assertEqual(interpreter.run(input), gm(input))
    self.assertEqual(interpreter.run(input), m(input))
def test_interpreter_run_node_override(self):
    """Overriding Interpreter.run_node can observe/stash every node's result."""
    class MyModule(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.param = torch.nn.Parameter(torch.rand(3, 4))
            self.linear = torch.nn.Linear(4, 5)

        def forward(self, x):
            return self.linear(x + self.param).clamp(min=0.0, max=1.0)

    m = MyModule()
    gm = torch.fx.symbolic_trace(m)

    class RunNodeInterpreter(Interpreter):
        def __init__(self, module):
            super().__init__(module)

        def run_node(self, n : Node) -> Any:
            result = super().run_node(n)
            # Stash each node's runtime value on the node itself.
            n.cached_value = result
            return result

    input = torch.randn(3, 4)
    RunNodeInterpreter(gm).run(input)
    for node in gm.graph.nodes:
        assert hasattr(node, 'cached_value')
def test_interpreter_onthefly_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapInterpreter(Interpreter):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(n)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(n)
input = torch.randn(3, 4)
result = NegSigmSwapInterpreter(gm).run(input)
self.assertEqual(result, torch.neg(input).sigmoid())
def test_interpreter_partial_eval(self):
    """initial_env lets the Interpreter start from precomputed node values."""
    class MyModule(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.param = torch.nn.Parameter(torch.rand(3, 4))
            self.linear = torch.nn.Linear(4, 5)

        def forward(self, x):
            return self.linear(x + self.param).clamp(min=0.0, max=1.0)

    gm = torch.fx.symbolic_trace(MyModule())
    interp = Interpreter(gm)
    env = {}
    for node in gm.graph.nodes:
        if node.op == 'call_module' and node.target == 'linear':
            # Pin the linear node's output so everything before it is skipped.
            env[node] = torch.arange(0, 12, 1).reshape(3, 4) - 6.0
            break
    assert len(env) == 1
    x = torch.randn(3, 4)
    result = interp.run(x, initial_env=env)
    self.assertEqual(result, (torch.arange(0, 12, 1).reshape(3, 4) - 6.0).clamp(0.0, 1.0))
def test_interpreter_star_args(self):
    """Interpreter handles a traced function with *args."""
    def with_star_args(x, *args):
        return x + args[0]

    gm = torch.fx.symbolic_trace(with_star_args)
    interp = Interpreter(gm)
    result = interp.run(torch.ones(3, 4), torch.ones(3, 4), torch.rand(3, 4))
    self.assertEqual(result, torch.ones(3, 4) * 2.0)
@skipIfNoTorchVision
def test_interpreter_noop_resnet18(self):
    """An identity Transformer pass over resnet18 preserves its output."""
    rn18 = torchvision_models.resnet18()
    transformed = torch.fx.Transformer(symbolic_trace(rn18)).transform()
    inp = torch.randn(5, 3, 224, 224)
    self.assertEqual(transformed(inp), rn18(inp))
@skipIfNoTorchVision
def test_interpreter_gc_values(self):
    """Interpreter frees intermediate values; only the output node remains in env."""
    rn18 = torchvision_models.resnet18()
    interp = Interpreter(symbolic_trace(rn18))
    inp = torch.rand(5, 3, 224, 224)
    out = interp.run(inp)
    env_key_names = set(n.name for n in interp.env.keys())
    self.assertEqual(env_key_names, set(['output']))
def test_transformer_noop(self):
    """A vanilla Transformer pass produces an equivalent GraphModule."""
    class MyModule(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.param = torch.nn.Parameter(torch.rand(3, 4))
            self.linear = torch.nn.Linear(4, 5)

        def forward(self, x):
            return self.linear(x + self.param).clamp(min=0.0, max=1.0)

    m = MyModule()
    gm = torch.fx.symbolic_trace(m)

    new_gm = Transformer(gm).transform()

    input = torch.randn(3, 4)
    self.assertEqual(new_gm(input), gm(input))
def test_transformer_op_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapXformer(Transformer):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(n)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(n)
transformed = NegSigmSwapXformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(transformed(input), torch.neg(input).sigmoid())
def test_transformer_multi_outputs(self):
    """Transformer preserves a module that returns multiple outputs."""
    class MyModule(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.param = torch.nn.Parameter(torch.rand(3, 4))
            self.linear = torch.nn.Linear(4, 5)

        def forward(self, x):
            x = x + self.param
            out = self.linear(x)
            return x, out

    m = MyModule()
    gm = torch.fx.symbolic_trace(m)

    new_gm = Transformer(gm).transform()

    input = torch.randn(3, 4)
    self.assertEqual(new_gm(input), gm(input))
def test_fn_type_annotations(self):
    """Traced modules keep forward() type annotations so TorchScript can compile them."""
    class Foo(torch.nn.Module):
        def forward(self, p : Pair, z : torch.Tensor, i : int) -> Dict[str, torch.Tensor]:
            return {'a': p.x + p.y + z + i}

    foo_scripted = torch.jit.script(Foo())
    foo_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)

    fxed = symbolic_trace(Foo())
    fxed_scripted = torch.jit.script(fxed)
    fxed_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
def test_fn_type_annotation_empty(self):
    """A traced function with a List annotation remains scriptable."""
    def forward(a : List[torch.Tensor]):
        return a[0]
    torch.jit.script(symbolic_trace(forward))
def test_wrapped_method(self):
def wrap_with_relu(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
return torch.relu(fn(*args, **kwargs))
return wrapper
class Foo(torch.nn.Module):
@wrap_with_relu
def forward(self, x, w):
return torch.matmul(x, w)
f = Foo()
traced = symbolic_trace(f)
x, w = torch.rand(3, 4), torch.rand(4, 4)
self.assertTrue(any(n.target == torch.relu for n in traced.graph.nodes))
def test_empty_graph_codegen(self):
    """A GraphModule over an empty graph runs and returns None."""
    graph = torch.fx.Graph()
    gm = torch.fx.GraphModule(torch.nn.Module(), graph)
    self.assertEqual(gm(), None)
def test_sequential(self):
    """Smoke test: a traced nn.Sequential can be deep-copied."""
    m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1))
    gm = torch.fx.symbolic_trace(m)
    # Result intentionally unused — deepcopy succeeding is the assertion.
    gm_copy = copy.deepcopy(gm)
def test_ctx_mgr(self):
    """forward() decorated with a context-manager-based decorator is traceable."""
    @contextlib.contextmanager
    def do_nothing():
        yield

    class M(torch.nn.Module):
        def __init__(self):
            super().__init__()

        @do_nothing()
        def forward(self, x):
            return torch.relu(x)

    m = M()
    self.checkGraphModule(m, (torch.rand(3, 4),))
def test_typename_print(self):
    """A node's type_expr is rendered with its full typing name in str(graph)."""
    graph : torch.fx.Graph = torch.fx.Graph()
    x : torch.fx.Node = graph.create_node('placeholder', 'x')
    b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,),
                                          type_expr=List[float])
    output : torch.fx.Node = graph.output(b)

    self.assertTrue('typing.List[float]' in str(graph))
def test_ellipsis(self):
    """Slicing with Ellipsis (...) is traceable."""
    class M(torch.nn.Module):
        def __init__(self):
            super().__init__()

        def forward(self, x, y):
            return x + y[:, 1:10, ...]

    traced = symbolic_trace(M())
    x, y = torch.rand(5, 9, 3, 4), torch.rand(5, 15, 3, 4)
    self.assertEqual(traced(x, y), x + y[:, 1:10, ...])
def test_inf_nan(self):
    """inf/-inf/nan float literals survive code generation."""
    class FooMod(torch.nn.Module):
        def forward(self, x):
            return x + float('inf'), x + float('-inf'), x + float('nan')

    fm = FooMod()
    self.checkGraphModule(fm, (torch.rand(3, 4),))
def test_inf_nan_kwds(self):
    """inf/nan as literal node args (with explicit node names) survive codegen."""
    graph : torch.fx.Graph = torch.fx.Graph()
    x : torch.fx.Node = graph.create_node('placeholder', 'x')
    b : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('inf')), {}, name='inf')
    c : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('nan')), {}, name='nan')
    graph.output((b, c))

    gm = torch.fx.GraphModule(torch.nn.Module(), graph)
    x = torch.rand(3, 4)
    self.assertEqual(gm(x), (x + float('inf'), x + float('nan')))
def test_deepcopy_recursion_depth(self):
    """deepcopy of a very deep graph must not hit Python's recursion limit,
    and must preserve the users relation node-for-node."""
    depth = sys.getrecursionlimit() + 20

    g = torch.fx.Graph()
    x = g.placeholder('x')
    for i in range(depth):
        x = g.call_function(torch.relu, (x,))
    g.output(x)

    copied_graph = copy.deepcopy(g)

    # Map original nodes to their copies, then check users sets correspond.
    val_map = {}
    for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
        val_map[orig_node] = new_node

    for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
        orig_users = set(orig_node.users.keys())
        orig_users_equiv = set(val_map[u] for u in orig_users)
        new_users = set(new_node.users.keys())
        self.assertEqual(orig_users_equiv, new_users)
@skipIfNoTorchVision
def test_replace_uses(self):
    """Replace every functional relu in a traced resnet18 with torch.neg,
    then erase the original relu nodes."""
    rn18 = torchvision_models.resnet18()

    class LowerReluTracer(torch.fx.Tracer):
        def is_leaf_module(self, m : torch.nn.Module, qualname : str):
            # Trace *into* ReLU modules so they appear as call_function nodes.
            if isinstance(m, torch.nn.ReLU):
                return False
            return super().is_leaf_module(m, qualname)

    rn18_traced = GraphModule(rn18, LowerReluTracer().trace(rn18))

    to_erase = []
    for node in rn18_traced.graph.nodes:
        if node.op == 'call_function' and node.target in [torch.relu, torch.nn.functional.relu]:
            kwargs = node.kwargs.copy()
            # Neg doesn't have in-place
            kwargs.pop('inplace')
            with rn18_traced.graph.inserting_before(node):
                # BUG FIX: pass the copy with 'inplace' removed. Previously
                # `kwargs=node.kwargs` was forwarded, so the pop above had no
                # effect and torch.neg received an unsupported 'inplace' kwarg.
                new_node = rn18_traced.graph.call_function(
                    the_function=torch.neg, args=node.args, kwargs=kwargs)
            node.replace_all_uses_with(replace_with=new_node)
            to_erase.append(node)

    for node in to_erase:
        rn18_traced.graph.erase_node(node)
def test_replace_input(self):
    """Node.replace_input_with swaps one input for another."""
    graph : torch.fx.Graph = torch.fx.Graph()
    x : torch.fx.Node = graph.create_node('placeholder', 'x')
    y : torch.fx.Node = graph.create_node('placeholder', 'y')
    b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
    output : torch.fx.Node = graph.output(b)

    b.replace_input_with(x, y)

    gm = torch.fx.GraphModule(torch.nn.Module(), graph)

    input_x = torch.randn(33, 44)
    input_y = torch.randn(11, 22)
    # relu now consumes y, so the result must track input_y.
    self.assertEqual(gm(input_x, input_y), torch.relu(input_y))
def test_insertion_point(self):
    """inserting_before places a new node ahead of an existing one; rewiring
    args makes the existing node consume it."""
    graph : torch.fx.Graph = torch.fx.Graph()
    x : torch.fx.Node = graph.create_node('placeholder', 'x')
    b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
    output : torch.fx.Node = graph.output(b)

    with graph.inserting_before(b):
        neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
        _, *relu_args = b.args
        b.args = (neg, *relu_args)

    gm = torch.fx.GraphModule(torch.nn.Module(), graph)

    input = torch.randn(33, 44)
    self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_update_args_api(self):
    """Node.update_arg rewires a positional argument in place."""
    graph : torch.fx.Graph = torch.fx.Graph()
    x : torch.fx.Node = graph.create_node('placeholder', 'x')
    y : torch.fx.Node = graph.create_node('placeholder', 'y')
    b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
    output : torch.fx.Node = graph.output(b)

    orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
    inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
    self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))

    b.update_arg(0, y)
    new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
    self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
def test_update_kwargs_api(self):
    """Node.update_kwarg rewires a keyword argument in place."""
    graph : torch.fx.Graph = torch.fx.Graph()
    x : torch.fx.Node = graph.create_node('placeholder', 'x')
    y : torch.fx.Node = graph.create_node('placeholder', 'y')
    b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, kwargs={'input': x})
    output : torch.fx.Node = graph.output(b)

    orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
    inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
    self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))

    b.update_kwarg('input', y)
    new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
    self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
def test_move_before(self):
    """Node.prepend moves an appended node so it executes before its consumer."""
    graph : torch.fx.Graph = torch.fx.Graph()
    x : torch.fx.Node = graph.create_node('placeholder', 'x')
    b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
    output : torch.fx.Node = graph.output(b)

    # neg is appended at the end; prepend() must hoist it before relu.
    neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
    _, *relu_args = b.args
    b.args = (neg, *relu_args)
    b.prepend(neg)

    gm = torch.fx.GraphModule(torch.nn.Module(), graph)

    input = torch.randn(33, 44)
    self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_erase_node_error(self):
    """erase_node refuses to delete a node that still has users."""
    st = SimpleTest()
    traced = symbolic_trace(st)

    for node in traced.graph.nodes:
        # Test deleting with uses both in another Node and at the output
        if node.target in [operator.add, torch.relu]:
            with self.assertRaisesRegex(RuntimeError, 'but it still had .* users in the graph'):
                traced.graph.erase_node(node)
def test_copy_it(self):
    """immutable_dict / immutable_list survive deepcopy with equality intact."""
    d = immutable_dict([(3, 4), (5, 6)])
    l = immutable_list([(3, 4), (5, 6)])

    self.assertEqual(d, deepcopy(d))
    self.assertEqual(l, deepcopy(l))
def test_get_torch_func_signature(self):
    """Smoke test: get_signature_for_torch_op runs on every callable in torch.*."""
    for key in dir(torch):
        obj = getattr(torch, key)
        if callable(obj):
            # Result intentionally unused — not raising is the assertion.
            schemas = get_signature_for_torch_op(obj)
def test_find_uses(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
y = torch.relu(x)
z = x + x
u = torch.neg(x)
graph.output((y + z + u).node)
graph.lint()
users_of_x = x.node.users
self.assertEqual(len(users_of_x), 3)
expected_ops = set(['relu', 'add', 'neg'])
for use in users_of_x:
assert any(use.name.startswith(prefix) for prefix in expected_ops)
def test_inline_graph(self):
    """graph_copy stitches two traced graphs together end-to-end."""
    class InlineInto(torch.nn.Module):
        def forward(self, x):
            return torch.relu(x)

    class ToInline(torch.nn.Module):
        def forward(self, x):
            return torch.neg(x)

    inline_into = symbolic_trace(InlineInto())
    to_inline = symbolic_trace(ToInline())

    combined_graph = torch.fx.Graph()
    # graph_copy returns the value that fed the copied graph's output.
    output_node = combined_graph.graph_copy(inline_into.graph, {})

    input_node = list(to_inline.graph.nodes)[0]
    assert input_node and input_node.op == 'placeholder'

    # Feed the first graph's output into the second graph's placeholder.
    val_map = {input_node : output_node}
    output = combined_graph.graph_copy(to_inline.graph, val_map)

    combined_graph.output(output)

    combined_module = torch.fx.GraphModule(torch.nn.Module(), combined_graph)

    input = torch.rand(3, 4)
    self.assertEqual(combined_module(input), input.relu().neg())
def test_multi_insert_point(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
relu = torch.relu(x)
with graph.inserting_before(relu.node):
y = torch.neg(x)
z = torch.tanh(y)
graph.output((relu.node, z.node))
graph.lint()
expected_ops = ['x', 'neg', 'tanh', 'relu']
for node, expected in zip(graph.nodes, expected_ops):
assert expected in node.name
def test_reassign_args_kwargs_uses(self):
    """Reassigning node.args keeps the users bookkeeping consistent."""
    graph = torch.fx.Graph()
    x, y = Proxy(graph.placeholder('x')), Proxy(graph.placeholder('y'))
    z = x + y
    zed = z + z + z
    graph.output(zed.node)
    graph.lint()

    # zed = z + z + z -> zed = z + z + x
    zed.node.args = (zed.node.args[0], x.node)
    # NOTE(review): compares a dict keys view with a list — presumably relies
    # on this TestCase's assertEqual doing sequence comparison; confirm.
    self.assertEqual(x.node.users.keys(), [z.node, zed.node])

    # z = x + y -> z = y + y
    z.node.args = (y.node, y.node)
    self.assertEqual(x.node.users.keys(), [zed.node])
def test_trace_function(self):
    """Free functions (not just nn.Modules) are traceable."""
    def foo(x, y):
        return torch.relu(x) + y

    x, y = torch.randn(3, 4), torch.randn(3, 4)
    self.checkGraphModule(foo, (x, y))
def test_trace_dict_int_keys(self):
    """Dict arguments with concrete (int) keys can be passed to leaf modules."""
    class ModWithDictArg(torch.nn.Module):
        def forward(self, d : Dict[int, torch.Tensor]):
            return d[42]

    class CallsModWithDict(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.m = ModWithDictArg()

        def forward(self, x):
            return self.m({42: x})

    class MyTracer(torch.fx.Tracer):
        def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
            return isinstance(m, ModWithDictArg)

    traced_graph = MyTracer().trace(CallsModWithDict())
def test_trace_dict_proxy_keys(self):
    """Dict arguments keyed by Proxy values are rejected during tracing."""
    class ModWithDictArg(torch.nn.Module):
        def forward(self, d : Dict[torch.Tensor, torch.Tensor]):
            return d[42]

    class CallsModWithDict(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.m = ModWithDictArg()

        def forward(self, x):
            # x is a Proxy during tracing; Proxy dict keys are unsupported.
            return self.m({x: x})

    class MyTracer(torch.fx.Tracer):
        def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
            return isinstance(m, ModWithDictArg)

    with self.assertRaisesRegex(RuntimeError, 'cannot contain a Node'):
        traced_graph = MyTracer().trace(CallsModWithDict())
def test_direct_param_use(self):
    """Direct submodule-parameter access traces to get_attr, not baked constants."""
    class TransposeTest(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.b = torch.nn.Parameter(torch.rand(4, 3))

        def forward(self, x):
            return self.b

    class Foo(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.a = TransposeTest()

        def forward(self, x):
            return self.a.b, self.a.b.t(), self.a.b.view(12)

    traced = torch.fx.symbolic_trace(Foo())
    # No '_tensor_constant*' targets should be generated for parameter access.
    assert(all('constant' not in node.target for node in traced.graph.nodes))
def test_single_default_arg(self):
    """Tracing handles a forward() with one defaulted parameter."""
    class M(torch.nn.Module):
        def __init__(self):
            super().__init__()

        def forward(self, y=1):
            return y

    m = M()
    self.checkGraphModule(m, ())
    self.checkGraphModule(m, (3,))
def test_multiple_default_args(self):
    """Tracing handles a forward() with multiple defaulted parameters."""
    class M(torch.nn.Module):
        def __init__(self):
            super().__init__()

        def forward(self, y=1, z=2):
            return y + z

    m = M()
    self.checkGraphModule(m, ())
    self.checkGraphModule(m, (3,))
    self.checkGraphModule(m, (3, 4))
def test_regular_and_default_args(self):
    """Tracing handles a mix of required and defaulted parameters."""
    class M(torch.nn.Module):
        def __init__(self):
            super().__init__()

        def forward(self, x, y=1):
            return x + y

    m = M()
    self.checkGraphModule(m, (2,))
    self.checkGraphModule(m, (2, 3))
def test_string_literal_return(self):
    """A forward() returning a bare string literal is traceable."""
    class M(torch.nn.Module):
        def __init__(self):
            super().__init__()

        def forward(self):
            return "foo"

    m = M()
    self.checkGraphModule(m, ())
def test_namedtuple_return_qualname(self):
    """Returning a module-level NamedTuple (referenced by qualname) is traceable."""
    class NamedTupReturn(torch.nn.Module):
        def forward(self, x):
            return MyNamedTup(x, x)

    traced = symbolic_trace(NamedTupReturn())
    input = torch.rand(3, 4)
    self.assertEqual(traced(input), MyNamedTup(input, input))
def test_update_args_kwargs_yells_at_you(self):
    """The name-mangled __update_args_kwargs must not be callable externally."""
    symtraced = symbolic_trace(SimpleTest())
    node = next(iter(symtraced.graph.nodes))
    with self.assertRaisesRegex(AttributeError, '__update_args_kwargs'):
        node.__update_args_kwargs((), {})
def test_torchbind_class_attribute_in_fx(self):
    """A TorchBind custom-class attribute is handled by tracing."""
    if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
        self.skipTest("torch.classes._TorchScriptTesting._StackString is registered, skipping")

    class FooBar1234(torch.nn.Module):
        def __init__(self):
            super(FooBar1234, self).__init__()
            self.f = torch.classes._TorchScriptTesting._StackString(["3", "4"])

        def forward(self):
            return self.f.top()

    m = FooBar1234()
    self.checkGraphModule(m, ())
def test_torchbind_class_attribute_in_fx_tensor_arg(self):
    """A TorchBind class method taking a tensor traces to a call_method node."""
    if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
        self.skipTest("torch.classes._TorchScriptTesting._ReLUClass is registered, skipping")

    class FooBar2341(torch.nn.Module):
        def __init__(self):
            super(FooBar2341, self).__init__()
            self.f = torch.classes._TorchScriptTesting._ReLUClass()

        def forward(self, x):
            return self.f.run(x)

    m = FooBar2341()

    traced = symbolic_trace(m)
    input = torch.randn(3, 4)
    self.assertEqual(traced(input), m(input))

    self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_script_method_trace(self):
    """Calling a scripted submodule traces to a call_method node."""
    class Scripted(torch.nn.Module):
        def forward(self, x):
            return torch.relu(x)

    class Holder(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.s = torch.jit.script(Scripted())

        def forward(self, x):
            return self.s(x)

    h = Holder()
    traced = symbolic_trace(h)
    input = torch.randn(3, 4)
    self.assertEqual(traced(input), h(input))

    self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_namedtuple_return_trace(self):
    """Returning a NamedTuple from forward() round-trips through tracing."""
    class NamedTupReturn(torch.nn.Module):
        def forward(self, x):
            return Pair(x, x)

    traced = symbolic_trace(NamedTupReturn())
    input = torch.rand(3, 4)
    self.assertEqual(traced(input), Pair(input, input))
def test_return_type_exists(self):
    """The return annotation is preserved in generated code and in scripting."""
    class ReturnTypeModule(torch.nn.Module):
        def other(self, x: List[str]) -> List[str]:
            return x

        def forward(self, x: List[str]) -> List[str]:
            return self.other(x)

    traced = symbolic_trace(ReturnTypeModule())
    # Generated code mangles the typing name with an underscore.
    self.assertIn("-> typing_List[str]", traced._code)
    scripted = torch.jit.script(traced)
    self.assertIn("-> List[str]", scripted.code)
def getitem_inner(self):
    """Helper (not a test itself): exercises buffer __getitem__ tracing.

    Invoked from test_getitem / test_getitem_subproc.
    """
    class GetItemBase(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.register_buffer('pe', torch.randn(8, 8))

    class GetItem1(GetItemBase):
        def forward(self, x):
            return self.pe[:, :x.size(0)]

    class GetItem2(GetItemBase):
        def forward(self, x):
            return self.pe[x.size(0)]

    class GetItem3(GetItemBase):
        def forward(self, x):
            return self.pe[4]  # fx creates `self._tensor_constant0` here

    self.checkGraphModule(GetItem1(), [torch.zeros(4)])
    self.checkGraphModule(GetItem2(), [torch.zeros(4)])
    self.checkGraphModule(GetItem3(), [torch.zeros(4)])
@unittest.skipUnless(os.environ.get("FX_PATCH_GETITEM") == "1",
                     "Will be checked in test_getitem_subproc")
def test_getitem(self):
    """Runs getitem_inner directly; only enabled when FX_PATCH_GETITEM=1."""
    self.getitem_inner()
def test_getitem_subproc(self):
    """Runs the getitem checks in a subprocess (exit code 0 = success)."""
    # need to run this test in a subproc to work around:
    # https://github.com/pytorch/pytorch/issues/50710
    proc = Process(target=run_getitem_target)
    proc.start()
    proc.join()
    self.assertEqual(proc.exitcode, 0)
def test_user_friendly_call_provenance_with_function(self):
    """Scripting errors for traced functions mention the original call site."""
    def fn(x):
        return wrapper_fn(x)

    traced = torch.fx.symbolic_trace(fn)

    with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
                                "being compiled since it was called"
                                " from 'fn.forward'"):
        scripted = torch.jit.script(traced)
def test_user_friendly_call_provenance_with_module(self):
    """Scripting errors for traced modules mention the original call site."""
    class M(torch.nn.Module):
        def forward(self, x):
            return wrapper_fn(x)

    traced = torch.fx.symbolic_trace(M())

    with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
                                "being compiled since it was called"
                                " from 'M.forward'"):
        scripted = torch.jit.script(traced)
def test_snake_case(self):
    """Generated node names are snake_cased while targets keep original casing."""
    class M(torch.nn.Module):
        def __init__(self):
            super(M, self).__init__()
            self.activations = torch.nn.ModuleDict([
                ["snake_case", torch.nn.ReLU()],
                ["PascalCase", torch.nn.LeakyReLU()],
                ["ALL_CAPS", torch.nn.PReLU()]
            ])

        def forward(self, x):
            a = self.activations["snake_case"](x)
            b = self.activations["PascalCase"](x)
            c = self.activations["ALL_CAPS"](x)
            return a, b, c

    traced = symbolic_trace(M())

    # (expected node name, expected node target) per call_module node.
    check = [
        ("activations_snake_case", "activations.snake_case"),
        ("activations_pascal_case", "activations.PascalCase"),
        ("activations_all_caps", "activations.ALL_CAPS")
    ]

    i = 0
    for node in traced.graph.nodes:
        if node.op == "placeholder" or node.op == "output":
            continue
        name = check[i][0]
        target = check[i][1]
        self.assertEqual(name, node.name)
        self.assertEqual(target, node.target)
        i += 1
    self.assertEqual(i, 3)
def test_no_mutation(self):
    """immutable_list rejects item assignment."""
    from torch.fx.immutable_collections import immutable_list
    x = immutable_list([3, 4])
    with self.assertRaisesRegex(NotImplementedError, "new_args"):
        x[0] = 4
def test_partial_trace(self):
class Foo(torch.nn.Module):
def forward(self, x, y):
if y:
return 2 * x
else:
return x
mod = Foo()
mod_true = symbolic_trace(mod, concrete_args={'y': True})
mod_false = symbolic_trace(mod, concrete_args={'y': False})
self.assertEqual(mod_true(3, True), 6)
print(mod_true.code)
assert(any([i.target == torch._assert for i in mod_true.graph.nodes]))
with self.assertRaises(AssertionError):
mod_true(3, False)
self.assertEqual(mod_false(3, False), 3)
with self.assertRaises(AssertionError):
mod_false(3, True)
def f_higher(a, f):
return f(a)
nf = symbolic_trace(f_higher, concrete_args={'f': lambda x: x * 2})
self.assertEqual(nf(3, lambda x: x * 2), 6)
def test_custom_traceback_raised_when_exception_source_is_graphmodule(self):
    """Errors originating in generated forward() code print the FX banner."""
    class M(torch.nn.Module):
        def __init__(self):
            super(M, self).__init__()
            self.W = torch.nn.Parameter(torch.randn(5))

        def forward(self, x):
            return torch.dot(self.W, x)

    traced = torch.fx.symbolic_trace(M())

    out = [n for n in traced.graph.nodes if n.op == "output"][-1]
    with traced.graph.inserting_before(out):
        relu_out = traced.graph.call_method(method_name='relu',
                                            args=(out.args[0],))
    out.args = (relu_out,)

    traced.recompile()

    with self.capture_stderr() as captured:
        with self.assertRaises(TypeError):
            # int argument triggers a TypeError inside the generated code.
            traced(5)

    self.assertRegex(captured[0],
                     r"Call using an FX-traced Module, line .* of the "
                     r"traced Module's generated forward function:")
    def test_custom_traceback_not_raised_when_exception_source_is_submodule(self):
        """When the failure happens inside a real submodule (not generated
        code), the custom FX traceback banner must NOT appear."""
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.linear = torch.nn.Linear(3, 4)
            def forward(self, x):
                return self.linear(x)
        traced = torch.fx.symbolic_trace(M())
        # Do not change this to `capture_stderr` or another context
        # manager without ensuring that the output is as expected
        try:
            # Shape mismatch: (5, 5) input into Linear(3, 4) raises inside
            # the submodule itself.
            traced(torch.rand(5, 5))
        except RuntimeError:
            captured = traceback.format_exc()
        self.assertNotRegex(captured,
                            r"Call using an FX-traced Module, line .* of the "
                            r"traced Module's generated forward function:")
def test_ast_rewriter_rewrites_assert(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_rewrites_assert_with_message(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z, "msg"
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_reassigns_submodules(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.bn = torch.nn.BatchNorm2d(100)
def forward(self, x: torch.Tensor):
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
    def test_submodule_manipulation_API(self):
        """Exercise the GraphModule submodule/parameter/buffer APIs:
        add_submodule, delete_submodule, get_submodule, get_parameter,
        get_buffer, and delete_all_unused_submodules — including the
        warnings and exceptions each is expected to raise.

        The steps below are order-dependent: graph edits must be
        recompiled before subsequent lookups are meaningful.
        """
        class C(torch.nn.Module):
            def __init__(self):
                super(C, self).__init__()
                self.conv = torch.nn.Conv2d(16, 33, 3, stride=2)
                self.param = torch.nn.Parameter(torch.rand(2, 3))
            def forward(self, x):
                return self.conv(torch.cat([self.param, x]))
        class B(torch.nn.Module):
            def __init__(self):
                super(B, self).__init__()
                self.linear = torch.nn.Linear(100, 200)
                self.register_buffer("buf", torch.randn(2, 3))
                self.net_c = C()
            def forward(self, x):
                return self.linear(torch.cat([self.buf, self.net_c(x)]))
        class A(torch.nn.Module):
            def __init__(self):
                super(A, self).__init__()
                self.net_b = B()
                self.param = torch.nn.Parameter(torch.rand(2, 3))
            def forward(self, x):
                return self.net_b(x) + self.param
        a = symbolic_trace(A())
        # Register a new nested submodule, then swap the conv node in the
        # graph for a call to it. No warning expected: the target exists.
        a.add_submodule("net_b.net_c.dropout", torch.nn.Dropout(p=0.2))
        conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"][-1]
        with a.graph.inserting_before(conv):
            with warnings.catch_warnings(record=True) as w:
                dropout = a.graph.call_module(module_name="net_b.net_c.dropout",
                                              args=conv.args)
                self.assertEqual(len(w), 0)
        conv.replace_all_uses_with(dropout)
        a.graph.erase_node(conv)
        a.recompile()
        def module_exists(gm: GraphModule, path: str) -> bool:
            return any(path == name for name, _ in gm.named_modules())
        def parameter_exists(gm: GraphModule, path: str) -> bool:
            return (any(path == name for name, _ in gm.named_parameters())
                    and any(path == name for name in gm.state_dict().keys()))
        def buffer_exists(gm: GraphModule, path: str) -> bool:
            return (any(path == name for name, _ in gm.named_buffers())
                    and any(path == name for name in gm.state_dict().keys()))
        # Test that we added the "dropout" submodule
        self.assertTrue(module_exists(a, "net_b.net_c.dropout"))
        # Test `get_submodule` with an added submodule
        self.assertIsNotNone(a.get_submodule("net_b.net_c.dropout"))
        # Test that the "conv" submodule is still there
        self.assertTrue(module_exists(a, "net_b.net_c.conv"))
        # Test `get_submodule` with an original module
        self.assertIsNotNone(a.get_submodule("net_b.net_c.conv"))
        # Test that the "conv" node is NOT still there
        conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"]
        self.assertEqual(conv, [])
        a.delete_submodule("net_b.net_c.conv")
        # Test that the "conv" submodule is now gone
        self.assertFalse(module_exists(a, "net_b.net_c.conv"))
        # Test `get_submodule` with a deleted submodule
        with self.assertRaisesRegex(AttributeError, "has no attribute "
                                    "`conv`"):
            self.assertIsNone(a.get_submodule("net_b.net_c.conv"))
        # Test `get_attr` warnings: an existing target is silent, a missing
        # one warns about a dangling reference.
        cat = [n for n in a.graph.nodes if n.target == torch.cat][-1]
        with a.graph.inserting_before(cat):
            with warnings.catch_warnings(record=True) as w:
                param = a.graph.get_attr(qualified_name="net_b.net_c.param")
                self.assertEqual(len(w), 0)
            with self.assertWarnsRegex(UserWarning, "Attempted to "
                                       "insert a get_attr Node with no "
                                       "underlying reference in the "
                                       "owning GraphModule"):
                bad_param = a.graph.get_attr(qualified_name="net_b.param")
                a.graph.erase_node(bad_param)
        cat.args = (*cat.args, param)
        a.recompile()
        a.graph.lint()
        # Test `get_parameter`
        a.get_parameter("net_b.net_c.param")
        with self.assertRaisesRegex(AttributeError, "is not an "
                                    "nn.Parameter"):
            a.get_parameter("net_b.buf")
        with self.assertRaisesRegex(AttributeError, "has no attribute "
                                    "`param`"):
            a.get_parameter("net_b.param")
        # Test `get_buffer`
        a.get_buffer("net_b.buf")
        with self.assertRaisesRegex(AttributeError, "is not a "
                                    "buffer"):
            a.get_buffer("net_b.net_c.param")
        with self.assertRaisesRegex(AttributeError, "has no attribute "
                                    "`buf`"):
            a.get_buffer("net_b.net_c.buf")
        # Test non-nested attributes ("" resolves to the root module)
        a.get_submodule("")
        a.get_parameter("param")
        # Insert some unused submodules
        a.add_submodule("net_b.embedding", torch.nn.Embedding(10, 3))
        a.add_submodule("net_b.net_c.embedding", torch.nn.Embedding(10, 3))
        a.add_submodule("net_b.net_c.rnn", torch.nn.RNN(10, 20, 2))
        a.add_submodule("batch_norm_2d", torch.nn.BatchNorm2d(100))
        # Garbage collection
        a.delete_all_unused_submodules()
        # Test that all the unused submodules are gone
        self.assertFalse(module_exists(a, "net_b.embedding"))
        self.assertFalse(module_exists(a, "net_b.net_c.embedding"))
        self.assertFalse(module_exists(a, "net_b.net_c.rnn"))
        self.assertFalse(module_exists(a, "batch_norm_2d"))
        # Test that we didn't delete any unused Parameters or buffers
        self.assertTrue(parameter_exists(a, "net_b.net_c.param"))
        self.assertTrue(buffer_exists(a, "net_b.buf"))
        a.graph.lint()
    def test_tracing_graphmodules_as_leaf_submodules(self):
        """Three tracing scenarios for a submodule with a custom __call__:

        1. non-leaf: the submodule is inlined and its __call__ is honored,
        2. nn.Module leaf: __call__ still runs (only Module.__call__ is
           patched by tracing),
        3. GraphModule leaf: __call__ is bypassed since tracing invokes
           forward() directly.
        """
        class A(torch.nn.Module):
            def forward(self, t):
                return t + t
        class B(torch.nn.Module):
            def __init__(self):
                super(type(self), self).__init__()
                # `calling` flips forward() between t-t and t+t; `called`
                # records whether __call__ ever ran.
                self.calling = False
                self.called = False
            def forward(self, t):
                if self.calling:
                    return t - t
                else:
                    return t + t
            def __call__(self, *args):
                self.called = True
                self.calling = True
                return super(type(self), self).__call__(*args)
                # NOTE(review): this line is unreachable (dead code after
                # `return`), so `calling` stays True after the first call —
                # confirm whether resetting it was ever intended.
                self.calling = False
        class M(torch.nn.Module):
            def __init__(self, a, b):
                super().__init__()
                self.a = a
                self.b = b
            def forward(self, t):
                x = self.a(t)
                y = self.b(t)
                return x + y
        # Tracer that treats every submodule as a leaf.
        class LeafTracer(Tracer):
            def is_leaf_module(self, module, name):
                return True
        # Tracer that treats everything except paths containing "b" as leaf.
        class LeafTracerNotB(Tracer):
            def is_leaf_module(self, module, name):
                return False if "b" in name else True
        # Recompile calls added "for fun", since they
        # chain __call__ wrappers.
        #
        # Test: B as a regular, non-leaf module
        #
        a = symbolic_trace(A())
        a.recompile()
        m = M(a, B())
        graph = LeafTracerNotB().trace(m)
        gm = GraphModule(m, graph)
        gm.recompile()
        # Test graphmodule/submodule a is not inlined.
        self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
        match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
        self.assertTrue(len(match) == 1)
        # Test submodule b is not treated as leaf.
        self.assertFalse(hasattr(gm, "b"))
        # Test assert custom __call__ on submodule b was honored.
        # (b's __call__ set calling=True, so the inlined body is t - t.)
        match = [
            n
            for n in gm.graph.nodes
            if n.op == "call_function" and n.target == operator.sub
        ]
        self.assertTrue(len(match) == 1)
        #
        # Test: B as a regular, leaf module
        # symbolic_trace should only patch torch.nn.Module.__call__,
        # which means B.__call__ should still execute
        #
        a = symbolic_trace(A())
        a.recompile()
        b = B()
        m = M(a, b)
        graph = LeafTracer().trace(m)
        gm = GraphModule(m, graph)
        gm.recompile()
        # Test graphmodule/submodule a is not inlined.
        self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
        match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
        self.assertTrue(len(match) == 1)
        # Test submodule b is leaf:
        self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
        match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
        self.assertTrue(len(match) == 1)
        # Test b.__call__ was run
        self.assertTrue(b.called)
        self.assertTrue(gm.get_submodule("b").called)
        #
        # Test: B as GraphModule leaf
        # __call__ not honored since symbolic_trace directly invokes forward()
        #
        a = symbolic_trace(A())
        a.recompile()
        b = symbolic_trace(B())
        b.recompile()
        m = M(a, b)
        graph = LeafTracer().trace(m)
        gm = GraphModule(m, graph)
        gm.recompile()
        self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
        match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
        self.assertTrue(len(match) == 1)
        self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
        match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
        self.assertTrue(len(match) == 1)
    def _test_graph_module_init_buffer_param_copied(self, use_dict_init: bool):
        """Shared helper: a GraphModule rebuilt from a traced graph must
        carry over the original buffers and parameters, whether its root is
        given as a plain dict (``use_dict_init=True``) or as the original
        module, and must produce identical outputs."""
        class MyModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.register_buffer("my_buff", torch.rand(3, 4))
                self.register_parameter(
                    "my_param", torch.nn.Parameter(torch.rand(3, 4))
                )
            def forward(self, x):
                return x + self.my_buff + self.my_param
        mod = MyModule()
        mod_traced = symbolic_trace(mod)
        # Create new GraphModule based on original, either w/ dict or root module.
        orig_buff = mod_traced.get_buffer("my_buff")
        orig_param = mod_traced.get_parameter("my_param")
        mod_traced_new = GraphModule(
            {"my_buff": orig_buff, "my_param": orig_param} if use_dict_init else mod,
            mod_traced.graph,
        )
        # Check that both my_buff and my_param are found and the same.
        try:
            new_buff = mod_traced_new.get_buffer("my_buff")
        except Exception:
            self.fail("Did not find my_buff")
        self.assertEqual(orig_buff, new_buff)
        try:
            new_param = mod_traced_new.get_parameter("my_param")
        except Exception:
            self.fail("Did not find my_param")
        self.assertEqual(orig_param, new_param)
        # Same input through both modules must give the same output.
        x = torch.rand(3, 4)
        orig_out = mod_traced(x)
        submodules_out = mod_traced_new(x)
        self.assertEqual(orig_out, submodules_out)
    def test_graph_module_init_buffer_param_copied_dict_init(self):
        # Dict-rooted GraphModule must copy buffers/params (see helper).
        self._test_graph_module_init_buffer_param_copied(use_dict_init=True)
    def test_graph_module_init_buffer_param_copied_mod_init(self):
        # Module-rooted GraphModule must copy buffers/params (see helper).
        self._test_graph_module_init_buffer_param_copied(use_dict_init=False)
def test_annotations_with_no_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, a: A) -> torch.Tensor:
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: 'torch.Tensor', a: 'A') -> 'torch.Tensor':
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_no_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List[torch.Tensor], a: A) -> torch.Tensor:
return a(x[0])
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List['torch.Tensor'], a: A) -> 'torch.Tensor':
return a(x)[0]
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
    @unittest.skipIf(sys.version_info < (3, 7), "`__future__` feature "
                     "`annotations` is not defined in Python <3.7")
    def test_annotation_with_future(self):
        """Tracing a module defined under `from __future__ import annotations`
        (done inside fx.test_future) must work; the import is only needed
        for its side effect."""
        try:
            import fx.test_future    # noqa: F401
        finally:
            # Clean up so later tests see a pristine module cache.
            del sys.modules["__future__"]
    def test_annotations_empty_tuple(self):
        """Empty-tuple annotations (Tuple[()], nested variants) must survive
        both FX tracing/codegen and subsequent TorchScript compilation."""
        class Foo(torch.nn.Module):
            def forward(self, x: Tuple[()], y: Tuple[str, Tuple[()]]):
                return "foo"
        traced = torch.fx.symbolic_trace(Foo())
        x = ()
        y = ("bar", ())
        traced(x, y)
        # FX codegen mangles typing names (e.g. `typing_Tuple`); check the
        # mangled spellings appear in the generated code.
        FileCheck().check("_Tuple[()]") \
            .check("typing_Tuple[str,typing_Tuple[()]]") \
            .run(traced.code)
        scripted = torch.jit.script(traced)
        scripted(x, y)
        # TorchScript keeps the canonical spellings.
        FileCheck().check("Tuple[()]") \
            .check("Tuple[str, Tuple[()]]") \
            .run(scripted.code)
    @skipIfNoTorchVision
    def test_cpatcher(self):
        """C-level function patching: while a profile hook patches every
        C call via torch._C._fx.patch_function, running a resnet18 forward
        must route at least one call through the patch (cnt > 0)."""
        # Number of times the patched implementation actually ran.
        cnt = 0
        def patched_impl(to_patch, args, kwargs):
            nonlocal cnt
            cnt += 1
            return to_patch(*args, **kwargs)
        # Guard against re-entrant patching while the patch itself runs.
        c_patch_enabled = True
        def patched_in(to_patch, args, kwargs):
            nonlocal c_patch_enabled
            try:
                c_patch_enabled = False
                r = patched_impl(to_patch, args, kwargs)
            finally:
                c_patch_enabled = True
            return r
        def trace_func(frame, action, arg):
            # Only intercept C function calls reported by the profiler.
            if action == 'c_call':
                if c_patch_enabled:
                    torch._C._fx.patch_function(arg, patched_in)
        # NOTE(review): torch is already imported at module scope; this
        # local import looks redundant — confirm before removing.
        import torch
        rn = torchvision_models.resnet18()
        try:
            sys.setprofile(trace_func)
            rn(torch.rand(1, 3, 224, 224))
            print("testing print patch")
        finally:
            # Always uninstall the profiler, even if the forward pass fails.
            sys.setprofile(None)
        assert(cnt != 0)
def test_randn(self):
def f():
return torch.randn(3, 3)
fx_f = symbolic_trace(f, enable_cpatching=True)
assert(any(i.target == torch.randn for i in fx_f.graph.nodes))
fx_f = symbolic_trace(f, enable_cpatching=False)
assert(all(i.target != torch.randn for i in fx_f.graph.nodes))
fx_f = symbolic_trace(f, enable_cpatching=True)
assert(any(i.target == torch.randn for i in fx_f.graph.nodes))
    def test_pytree(self):
        """Tracing with pytree-structured concrete_args: lists, dicts,
        custom registered nodes (Foo), and namedtuples — each traced module
        must match eager output, flatten its input into the expected number
        of placeholders, survive retracing, and round-trip through pickle.

        NOTE: registering Foo mutates global pytree state for the process.
        """
        def f_sum(x):
            return sum(x)
        def f_sum_dict(x):
            out = 0
            for k, v in x.items():
                out += v
            return out
        def f_dict_list_map(x):
            new_dict = {}
            for k, v in x.items():
                new_dict[k] = [i + 1 for i in v]
            return new_dict
        def f_dict_add(x):
            return x['a'] + sum(x['z'])
        def f_namedtuple_add(x):
            return x.x + x.y
        # Make Foo a pytree node: flatten to [a, b], rebuild from the list.
        pytree._register_pytree_node(
            Foo,
            lambda x: ([x.a, x.b], None),
            lambda x, _: Foo(x[0], x[1]),
        )
        fx_pytree.register_pytree_flatten_spec(Foo, lambda x, _: [x.a, x.b])
        def f_custom(x):
            return x.a + x.b
        def f_custom_dict(x):
            return f_sum_dict(x.a) + x.b
        def f_return_custom(x):
            return Foo(x.b, x.a)
        # (function, pytree template with PH placeholder leaves) pairs.
        tests = [
            (f_sum, [PH, PH, PH]),
            (f_sum, []),
            (f_sum_dict, {'a': PH, 'b': PH, 'c': PH}),
            (f_dict_list_map, {'a': (PH, PH), 'b': [PH], 'c': []}),
            (f_dict_list_map, {5: (PH, PH, PH)}),
            (f_dict_add, {'a': PH, 'z': (PH, PH, PH)}),
            (f_dict_add, {'a': PH, 'z': []}),
            (f_custom, Foo(PH, PH)),
            (f_custom, Foo(PH, 3)),
            (f_custom_dict, Foo({'a': PH, 'b': PH}, PH)),
            # (f_return_custom, Foo(PH, PH)),  # Don't currently support output pytrees
            (f_namedtuple_add, Point(PH, PH)),
        ]
        def verify_pytree(f, inp):
            # Replace each PH leaf with a concrete tensor.
            val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
            # NOTE(review): len() over the boolean list counts *all* flat
            # leaves, not just PH ones — presumably every leaf becomes a
            # placeholder; confirm against fx pytree codegen.
            num_flat_args = len([i == PH for i in pytree.tree_flatten(inp)[0]])
            orig_out = f(val)
            # First trace: pytree-aware, one placeholder per flat leaf.
            nf = symbolic_trace(f, concrete_args={'x': inp})
            self.assertEqual(nf(val), orig_out)
            assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
            assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
            # Retrace without concrete_args: collapses back to one input.
            nf = symbolic_trace(nf)
            self.assertEqual(nf(val), orig_out)
            assert "tree_flatten_spec" not in nf.code
            assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == 1)
            # Retrace with concrete_args again: flattening reappears.
            nf = symbolic_trace(nf, concrete_args={'x': inp})
            self.assertEqual(nf(val), orig_out)
            assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
            assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
            # The traced module must survive a pickle round-trip.
            pickled = pickle.dumps(nf)
            nf = pickle.loads(pickled)
            self.assertEqual(nf(val), orig_out)
        for f, inp in tests:
            verify_pytree(f, inp)
def test_pytree_concrete(self):
def f(b, a):
if b:
return a['a']
else:
return a['z']
inp = {'a': {'a': PH, 'z': PH}, 'b': True}
nf = symbolic_trace(f, concrete_args=inp)
val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
self.assertEqual(nf(**val), f(**val))
nf = symbolic_trace(nf)
self.assertEqual(nf(**val), f(**val))
def run_getitem_target():
    """Run TestFX.getitem_inner with Tensor.__getitem__ temporarily added
    to the FX method-patch list, restoring the list even on failure."""
    from torch.fx._symbolic_trace import _wrapped_methods_to_patch
    entry = (torch.Tensor, "__getitem__")
    _wrapped_methods_to_patch.append(entry)
    try:
        TestFX().getitem_inner()
    finally:
        # Undo the global patch so other tests are unaffected.
        _wrapped_methods_to_patch.pop()
class TestOperatorSignatures(JitTestCase):
    """For every OpInfo op, FX must be able to retrieve a torch schema that
    binds the op's sample inputs (ops with no usable schema are allowed to
    fail and are listed in ``known_no_schema``)."""
    @onlyCPU
    @ops(op_db, allowed_dtypes=(torch.float,))
    def test_get_torch_func_signature_exhaustive(self, device, dtype, op):
        # Ops with no (usable) schema. Sorted and one entry on each line to
        # minimize merge conflicts.
        known_no_schema = {'cdist',
                           'contiguous',
                           'dstack',
                           'einsum',
                           'expand',
                           'expand_as',
                           'fill_',
                           'hstack',
                           'linalg.multi_dot',
                           'lu',
                           'norm',
                           'polygamma',
                           'special.polygamma',
                           'repeat',
                           'reshape_as',
                           'resize_',
                           'resize_as_',
                           'special.zeta',
                           'stack',
                           'to_sparse',
                           'view',
                           'view_as',
                           'nn.functional.hardshrink',
                           'vstack',
                           'where',
                           'zero_',
                           '__getitem__',
                           '__radd__',
                           '__rsub__',
                           '__rmul__',
                           '__rdiv__',
                           '__rmod__',
                           '__rpow__',
                           '__rand__',
                           '__ror__',
                           '__rxor__',
                           '__rmatmul__'}
        try:
            sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
            schemas = get_signature_for_torch_op(op.op)
            if not schemas:
                raise RuntimeError('No Schemas Returned')
            for sample_input in sample_inputs_itr:
                # Iterate through overloads until we hit a match. If we exit this
                # loop via `else`, we haven't found a match
                for schema in schemas:
                    try:
                        bound_args = schema.bind(sample_input.input, *sample_input.args, **sample_input.kwargs)
                        bound_args.apply_defaults()
                        op(*bound_args.args, **bound_args.kwargs)
                        break
                    except TypeError:
                        # This overload doesn't accept these args; try the next.
                        pass
                else:
                    raise RuntimeError(f'Did not match any schemas for op {op.name}!')
        except Exception:
            # Failures are tolerated only for ops known to lack a schema.
            assert op.name in known_no_schema or "nn.functional" in op.name
class TestFunctionalTracing(JitTestCase):
    """Auto-generated tests checking which torch.nn.functional functions
    can be symbolically traced, and that untraceable ones fail with the
    exact expected exception type and message.

    ``generate_tests`` (called at module scope) creates one
    ``test_nn_functional_<name>`` method per functional.
    """
    # Supporting functions that should never get a generated test.
    IGNORE_FUNCS = ("has_torch_function", "has_torch_function_unary",
                    "has_torch_function_variadic", "handle_torch_function",
                    "boolean_dispatch")
    # torch_function dispatch helpers, stubbed out in setUpClass so tracing
    # exercises the real functional bodies; originals restored in
    # tearDownClass.
    TO_PATCH = {"has_torch_function": None,
                "has_torch_function_unary": None,
                "has_torch_function_variadic": None}
    # (expected exception type, message regex) pairs describing the known
    # ways a functional can fail to trace.
    BUILT_IN_FUNC = (AssertionError, "")
    PROXY_ITERABLE = (TypeError, r"argument of type 'Proxy' is not iterable")
    PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
    LEN_ERROR = (RuntimeError, r"'len' is not supported in symbolic tracing by default")
    ARG_TYPE_MISMATCH = (TypeError, r", not Proxy$")
    CONTROL_FLOW = (TraceError, r"symbolically traced variables cannot be used as inputs to control flow")
    INTERPOLATE_ARGS_CONFLICT = (ValueError, r"only one of size or scale_factor should be defined")
    # Functionals expected to fail tracing, mapped to their failure mode.
    UNTRACEABLE_FUNCTIONALS = {
        "adaptive_avg_pool1d": BUILT_IN_FUNC,
        "avg_pool1d": BUILT_IN_FUNC,
        "avg_pool2d": BUILT_IN_FUNC,
        "avg_pool3d": BUILT_IN_FUNC,
        "celu_": BUILT_IN_FUNC,
        "channel_shuffle": BUILT_IN_FUNC,
        "conv1d": BUILT_IN_FUNC,
        "conv2d": BUILT_IN_FUNC,
        "conv3d": BUILT_IN_FUNC,
        "conv_tbc": BUILT_IN_FUNC,
        "conv_transpose1d": BUILT_IN_FUNC,
        "conv_transpose2d": BUILT_IN_FUNC,
        "conv_transpose3d": BUILT_IN_FUNC,
        "cosine_similarity": BUILT_IN_FUNC,
        "elu_": BUILT_IN_FUNC,
        "hardtanh_": BUILT_IN_FUNC,
        "leaky_relu_": BUILT_IN_FUNC,
        "logsigmoid": BUILT_IN_FUNC,
        "one_hot": BUILT_IN_FUNC,
        "pdist": BUILT_IN_FUNC,
        "pixel_shuffle": BUILT_IN_FUNC,
        "pixel_unshuffle": BUILT_IN_FUNC,
        "relu_": BUILT_IN_FUNC,
        "rrelu_": BUILT_IN_FUNC,
        "selu_": BUILT_IN_FUNC,
        "softplus": BUILT_IN_FUNC,
        "softshrink": BUILT_IN_FUNC,
        "threshold_": BUILT_IN_FUNC,
        "adaptive_avg_pool2d": LEN_ERROR,
        "adaptive_avg_pool3d": LEN_ERROR,
        "adaptive_max_pool2d_with_indices": LEN_ERROR,
        "adaptive_max_pool3d_with_indices": LEN_ERROR,
        "instance_norm": CONTROL_FLOW,
        "pad": LEN_ERROR,
        "adaptive_max_pool1d": PROXY_ITERABLE,
        "adaptive_max_pool2d": PROXY_ITERABLE,
        "adaptive_max_pool3d": PROXY_ITERABLE,
        "fractional_max_pool2d": PROXY_ITERABLE,
        "fractional_max_pool3d": PROXY_ITERABLE,
        "max_pool1d": PROXY_ITERABLE,
        "max_pool2d": PROXY_ITERABLE,
        "max_pool3d": PROXY_ITERABLE,
        "group_norm": PROXY_ITERATED,
        "lp_pool2d": PROXY_ITERATED,
        "max_unpool1d": PROXY_ITERATED,
        "max_unpool2d": PROXY_ITERATED,
        "max_unpool3d": PROXY_ITERATED,
        "adaptive_max_pool1d_with_indices": ARG_TYPE_MISMATCH,
        "fractional_max_pool2d_with_indices": ARG_TYPE_MISMATCH,
        "fractional_max_pool3d_with_indices": ARG_TYPE_MISMATCH,
        "hardshrink": ARG_TYPE_MISMATCH,
        "layer_norm": ARG_TYPE_MISMATCH,
        "lp_pool1d": ARG_TYPE_MISMATCH,
        "max_pool1d_with_indices": ARG_TYPE_MISMATCH,
        "max_pool2d_with_indices": ARG_TYPE_MISMATCH,
        "max_pool3d_with_indices": ARG_TYPE_MISMATCH,
        "pairwise_distance": ARG_TYPE_MISMATCH,
        "affine_grid": CONTROL_FLOW,
        "alpha_dropout": CONTROL_FLOW,
        "batch_norm": CONTROL_FLOW,
        "binary_cross_entropy": CONTROL_FLOW,
        "binary_cross_entropy_with_logits": CONTROL_FLOW,
        "celu": CONTROL_FLOW,
        "cosine_embedding_loss": CONTROL_FLOW,
        "cross_entropy": CONTROL_FLOW,
        "ctc_loss": CONTROL_FLOW,
        "dropout": CONTROL_FLOW,
        "dropout2d": CONTROL_FLOW,
        "dropout3d": CONTROL_FLOW,
        "elu": CONTROL_FLOW,
        "embedding": CONTROL_FLOW,
        "embedding_bag": CONTROL_FLOW,
        "feature_alpha_dropout": CONTROL_FLOW,
        "fold": CONTROL_FLOW,
        "gaussian_nll_loss": CONTROL_FLOW,
        "glu": CONTROL_FLOW,
        "grid_sample": CONTROL_FLOW,
        "gumbel_softmax": CONTROL_FLOW,
        "hardsigmoid": CONTROL_FLOW,
        "hardswish": CONTROL_FLOW,
        "hardtanh": CONTROL_FLOW,
        "hinge_embedding_loss": CONTROL_FLOW,
        "huber_loss": CONTROL_FLOW,
        "interpolate": CONTROL_FLOW,
        "kl_div": CONTROL_FLOW,
        "l1_loss": CONTROL_FLOW,
        "leaky_relu": CONTROL_FLOW,
        "local_response_norm": CONTROL_FLOW,
        "margin_ranking_loss": CONTROL_FLOW,
        "mse_loss": CONTROL_FLOW,
        "multi_head_attention_forward": CONTROL_FLOW,
        "multi_margin_loss": CONTROL_FLOW,
        "multilabel_margin_loss": CONTROL_FLOW,
        "multilabel_soft_margin_loss": CONTROL_FLOW,
        "nll_loss": CONTROL_FLOW,
        "poisson_nll_loss": CONTROL_FLOW,
        "relu": CONTROL_FLOW,
        "relu6": CONTROL_FLOW,
        "rrelu": CONTROL_FLOW,
        "selu": CONTROL_FLOW,
        "silu": CONTROL_FLOW,
        "mish": CONTROL_FLOW,
        "smooth_l1_loss": CONTROL_FLOW,
        "soft_margin_loss": CONTROL_FLOW,
        "threshold": CONTROL_FLOW,
        "triplet_margin_loss": CONTROL_FLOW,
        "triplet_margin_with_distance_loss": CONTROL_FLOW,
        "unfold": CONTROL_FLOW,
        "upsample": CONTROL_FLOW,
        "upsample_bilinear": INTERPOLATE_ARGS_CONFLICT,
        "upsample_nearest": INTERPOLATE_ARGS_CONFLICT,
    }
    # List of nn.functionals with Tensor inputs but not with type annotation
    FUNCTIONALS_WITHOUT_ANNOTATION = (
        "adaptive_max_pool1d",
        "adaptive_max_pool2d",
        "adaptive_max_pool3d",
        "fractional_max_pool2d",
        "fractional_max_pool3d",
        "max_pool1d",
        "max_pool2d",
        "max_pool3d",
        "gaussian_nll_loss",
        "upsample",
        "upsample_bilinear",
        "upsample_nearest",
    )
    # Inconsistent behavior between Python 3.8 and other Python versions:
    # - Python 3.8+: Re-raise internal exception like `PROXY_ITERATED`
    # - Other Python: Raise `argument of type 'Proxy' is not iterable` due to the same
    #   internal exception above
    # Use the following map to override the expected exception for Python 3.8
    UNTRACEABLE_FUNCTIONALS_PY38 = {
        "adaptive_max_pool1d": PROXY_ITERATED,
        "adaptive_max_pool2d": PROXY_ITERATED,
        "adaptive_max_pool3d": PROXY_ITERATED,
        "fractional_max_pool2d": PROXY_ITERATED,
        "fractional_max_pool3d": PROXY_ITERATED,
        "max_pool1d": PROXY_ITERATED,
        "max_pool2d": PROXY_ITERATED,
        "max_pool3d": PROXY_ITERATED,
        "group_norm": LEN_ERROR
    }
    @classmethod
    def _get_functional(cls):
        """Collect (name, fn) pairs from torch.nn.functional worth testing:
        public, lowercase, callable, not in IGNORE_FUNCS, and — unless
        whitelisted in FUNCTIONALS_WITHOUT_ANNOTATION — having at least one
        parameter annotated as a Tensor."""
        functional_list = []
        for f in dir(torch.nn.functional):
            if not f.islower():
                continue
            # Ignore internal functions
            if f.startswith('_'):
                continue
            # Ignore supporting functions
            if f in cls.IGNORE_FUNCS:
                continue
            fn = getattr(torch.nn.functional, f)
            # Ignore non-callable object like modules
            if not isinstance(fn, Callable):
                continue
            if f not in cls.FUNCTIONALS_WITHOUT_ANNOTATION:
                try:
                    sig = inspect.signature(fn)
                    has_tensor_arg = False
                    for arg, param in sig.parameters.items():
                        if isinstance(param.annotation, type) and issubclass(param.annotation, torch.Tensor):
                            has_tensor_arg = True
                    if not has_tensor_arg:
                        continue
                # No signature or Object is not supported
                except ValueError:
                    pass
            functional_list.append((f, fn))
        return functional_list
    @classmethod
    def generate_test_func(cls, func_name, fn):
        """Build a test method that traces ``fn``, asserting the expected
        failure for known-untraceable functionals (with the Python-3.8/3.9
        override applied first)."""
        def functional_test(self):
            if func_name in self.UNTRACEABLE_FUNCTIONALS_PY38 and \
                    sys.version_info >= (3, 8) and sys.version_info < (3, 10):
                exc, err = self.UNTRACEABLE_FUNCTIONALS_PY38[func_name]
                with self.assertRaisesRegex(exc, err):
                    symbolic_trace(fn)
            elif func_name in self.UNTRACEABLE_FUNCTIONALS:
                exc, err = self.UNTRACEABLE_FUNCTIONALS[func_name]
                with self.assertRaisesRegex(exc, err):
                    symbolic_trace(fn)
            else:
                symbolic_trace(fn)
        return functional_test
    @classmethod
    def generate_tests(cls):
        """Attach one generated test method per collected functional."""
        functional_list = cls._get_functional()
        for func_name, fn in functional_list:
            test_name = "test_nn_functional_" + func_name
            functional_test = cls.generate_test_func(func_name, fn)
            setattr(cls, test_name, functional_test)
    @classmethod
    def setUpClass(cls):
        """Stub out torch_function dispatch helpers for the whole class run
        (saving the originals in TO_PATCH for tearDownClass)."""
        def no(*args, **kwargs):
            return False
        for name in cls.TO_PATCH.keys():
            cls.TO_PATCH[name] = getattr(torch.nn.functional, name)
            setattr(torch.nn.functional, name, no)
    @classmethod
    def tearDownClass(cls):
        """Restore the helpers patched in setUpClass."""
        for name in cls.TO_PATCH.keys():
            setattr(torch.nn.functional, name, cls.TO_PATCH[name])
# Materialize one test method per torch.nn.functional under test.
TestFunctionalTracing.generate_tests()
# Create per-device variants of the operator-signature tests.
instantiate_device_type_tests(TestOperatorSignatures, globals())
@skipIfNoTorchVision
class TestVisionTracing(JitTestCase):
    """Auto-generated tests: every torchvision model must either trace
    (and the traced + scripted modules match eager output) or fail with
    the expected exception."""
    # (expected exception type, message regex) failure descriptors.
    PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
    INCONSISTENT_TYPE = (
        RuntimeError,
        r"Return value was annotated as having type __torch__.torchvision.models[.\w]+ but is actually of type Tensor"
    )
    # Models expected to fail symbolic tracing.
    UNTRACEABLE_MODELS = {
        "fasterrcnn_resnet50_fpn": PROXY_ITERATED,
        "fasterrcnn_mobilenet_v3_large_320_fpn": PROXY_ITERATED,
        "fasterrcnn_mobilenet_v3_large_fpn": PROXY_ITERATED,
        "maskrcnn_resnet50_fpn": PROXY_ITERATED,
        "keypointrcnn_resnet50_fpn": PROXY_ITERATED,
        "retinanet_resnet50_fpn": PROXY_ITERATED,
    }
    # Traceable models expected to fail torch.jit.script afterwards.
    UNSCRIPTABLE_MODELS = {
        "googlenet": INCONSISTENT_TYPE,
        "inception_v3": INCONSISTENT_TYPE,
    }
    # Per-model hooks extracting a comparable tensor from the raw output.
    output_transform = {
        "fcn_resnet50": lambda x: x["out"],
        "fcn_resnet101": lambda x: x["out"],
        "deeplabv3_resnet50": lambda x: x["out"],
        "deeplabv3_resnet101": lambda x: x["out"],
        "deeplabv3_mobilenet_v3_large": lambda x: x["out"],
        "lraspp_mobilenet_v3_large": lambda x: x["out"],
        "fasterrcnn_resnet50_fpn": lambda x: x[1],
        "fasterrcnn_mobilenet_v3_large_fpn": lambda x: x[1],
        "fasterrcnn_mobilenet_v3_large_320_fpn": lambda x: x[1],
        "maskrcnn_resnet50_fpn": lambda x: x[1],
        "keypointrcnn_resnet50_fpn": lambda x: x[1],
        "retinanet_resnet50_fpn": lambda x: x[1],
    }
    @classmethod
    def generate_test_fn(cls, name, model_fn, x, kwargs):
        """Build a test that traces ``model_fn(**kwargs)``, compares traced
        and scripted outputs to eager, honoring the known-failure maps."""
        def run_test(self):
            model = model_fn(**kwargs)
            model = model.eval()
            if name in self.UNTRACEABLE_MODELS:
                # NOTE(review): names look swapped — `err` receives the
                # exception type and `exc` the message regex — but the
                # positional use below is correct; confirm before renaming.
                err, exc = self.UNTRACEABLE_MODELS[name]
                with self.assertRaisesRegex(err, exc):
                    graph = symbolic_trace(model)
            else:
                out_transform = self.output_transform.get(name, lambda x: x)
                graph : torch.fx.GraphModule = symbolic_trace(model)
                a = out_transform(model(x))
                b = out_transform(graph(x))
                self.assertEqual(a, b)
                if name in self.UNSCRIPTABLE_MODELS:
                    err, exc = self.UNSCRIPTABLE_MODELS[name]
                    with self.assertRaisesRegex(err, exc):
                        script = torch.jit.script(graph)
                else:
                    script = torch.jit.script(graph)
                    c = out_transform(script(x))
                    self.assertEqual(a, c)
        return run_test
    @classmethod
    def generate_classification_tests(cls):
        """One test per public torchvision classification model factory."""
        for k, v in torchvision_models.__dict__.items():
            if callable(v) and k[0].lower() == k[0] and k[0] != "_":
                test_name = 'test_torchvision_models_' + k
                # inception_v3 requires a larger minimum input size.
                x = torch.rand(1, 3, 299, 299) if k in ['inception_v3'] else torch.rand(1, 3, 224, 224)
                kwargs = dict(num_classes=50)
                model_test = cls.generate_test_fn(k, v, x, kwargs)
                setattr(cls, test_name, model_test)
    @classmethod
    def generate_segmentation_tests(cls):
        """One test per public torchvision segmentation model factory."""
        for k, v in torchvision_models.segmentation.__dict__.items():
            if callable(v) and k[0].lower() == k[0] and k[0] != "_":
                test_name = 'test_torchvision_models_segmentation_' + k
                x = torch.rand(1, 3, 32, 32)
                kwargs = dict(num_classes=10, pretrained_backbone=False)
                model_test = cls.generate_test_fn(k, v, x, kwargs)
                setattr(cls, test_name, model_test)
    @classmethod
    def generate_detection_tests(cls):
        """One test per public torchvision detection model factory."""
        for k, v in torchvision_models.detection.__dict__.items():
            if callable(v) and k[0].lower() == k[0] and k[0] != "_":
                test_name = 'test_torchvision_models_detection_' + k
                # Detection models take a list of image tensors.
                x = [torch.rand(3, 300, 300)]
                kwargs = dict(num_classes=10, pretrained_backbone=False)
                model_test = cls.generate_test_fn(k, v, x, kwargs)
                setattr(cls, test_name, model_test)
    @classmethod
    def generate_video_tests(cls):
        """One test per public torchvision video model factory."""
        for k, v in torchvision_models.video.__dict__.items():
            if callable(v) and k[0].lower() == k[0] and k[0] != "_":
                test_name = 'test_torchvision_models_video_' + k
                x = torch.rand(1, 3, 4, 112, 112)
                kwargs = dict(num_classes=50)
                model_test = cls.generate_test_fn(k, v, x, kwargs)
                setattr(cls, test_name, model_test)
    @classmethod
    def generate_tests(cls):
        """Attach all generated model tests to the class."""
        cls.generate_classification_tests()
        cls.generate_detection_tests()
        cls.generate_segmentation_tests()
        cls.generate_video_tests()
# Vision model tests are only generated when torchvision is importable.
if HAS_TORCHVISION:
    TestVisionTracing.generate_tests()
# Standard PyTorch test-suite entry point.
if __name__ == '__main__':
    run_tests()
|
BuildReport.py | ## @file
# Routines for generating build report.
#
# This module contains the functionality to generate build report after
# build all target completes successfully.
#
# Copyright (c) 2010 - 2018, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
## Import Modules
#
import Common.LongFilePathOs as os
import re
import platform
import textwrap
import traceback
import sys
import time
import struct
import hashlib
import subprocess
import threading
from datetime import datetime
from io import BytesIO
from Common import EdkLogger
from Common.Misc import SaveFileOnChange
from Common.Misc import GuidStructureByteArrayToGuidString
from Common.Misc import GuidStructureStringToGuidString
from Common.BuildToolError import FILE_WRITE_FAILURE
from Common.BuildToolError import CODE_ERROR
from Common.BuildToolError import COMMAND_FAILURE
from Common.BuildToolError import FORMAT_INVALID
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.MultipleWorkspace import MultipleWorkspace as mws
import Common.GlobalData as GlobalData
from AutoGen.AutoGen import ModuleAutoGen
from Common.Misc import PathClass
from Common.StringUtils import NormPath
from Common.DataType import *
import collections
from Common.Expression import *
from GenFds.AprioriSection import DXE_APRIORI_GUID, PEI_APRIORI_GUID
## Pattern to extract the dependency expression contents in EDK DXS files
gDxsDependencyPattern = re.compile(r"DEPENDENCY_START(.+)DEPENDENCY_END", re.DOTALL)
## Patterns to find FV total size and occupied size in the flash report intermediate file
gFvTotalSizePattern = re.compile(r"EFI_FV_TOTAL_SIZE = (0x[0-9a-fA-F]+)")
gFvTakenSizePattern = re.compile(r"EFI_FV_TAKEN_SIZE = (0x[0-9a-fA-F]+)")
## Patterns to find module size and time stamp in the module summary report intermediate file
gModuleSizePattern = re.compile(r"MODULE_SIZE = (\d+)")
gTimeStampPattern = re.compile(r"TIME_STAMP = (\d+)")
## Pattern to find a PCD(TokenSpace.TokenName) reference in flash description files
gPcdGuidPattern = re.compile(r"PCD\((\w+)[.](\w+)\)")
## Pattern to collect offset, GUID value pairs in the flash report intermediate file
gOffsetGuidPattern = re.compile(r"(0x[0-9A-Fa-f]+) ([-A-Fa-f0-9]+)")
## Pattern to find module base address and entry point in the fixed flash map file
# %(Address)s is substituted below with a hex-number sub-pattern.
gModulePattern = r"\n[-\w]+\s*\(([^,]+),\s*BaseAddress=%(Address)s,\s*EntryPoint=%(Address)s\)\s*\(GUID=([-0-9A-Fa-f]+)[^)]*\)"
gMapFileItemPattern = re.compile(gModulePattern % {"Address" : "(-?0[xX][0-9A-Fa-f]+)"})
## Patterns to find all header files referenced by module sources:
# plain '#include "x.h"/<x.h>' and the EDK 'EFI_XXX(Name)' include macro form.
gIncludePattern = re.compile(r'#include\s*["<]([^">]+)[">]')
gIncludePattern2 = re.compile(r"#include\s+EFI_([A-Z_]+)\s*[(]\s*(\w+)\s*[)]")
## Pattern to find the entry point for an EDK module using the EDKII Glue library
gGlueLibEntryPoint = re.compile(r"__EDKII_GLUE_MODULE_ENTRY_POINT__\s*=\s*(\w+)")
## Maximum length of a line in the report
gLineMaxLength = 120
## End-of-line sequence used throughout the report (CRLF)
gEndOfLine = "\r\n"
## Decorative section start, end and separator lines, sized to gLineMaxLength
gSectionStart = ">" + "=" * (gLineMaxLength - 2) + "<"
gSectionEnd = "<" + "=" * (gLineMaxLength - 2) + ">" + "\n"
gSectionSep = "=" * gLineMaxLength
## Decorative subsection start, end and separator lines
gSubSectionStart = ">" + "-" * (gLineMaxLength - 2) + "<"
gSubSectionEnd = "<" + "-" * (gLineMaxLength - 2) + ">"
gSubSectionSep = "-" * gLineMaxLength
## Look-up table mapping a PCD type to its (report display type, DEC type) pair
gPcdTypeMap = {
  TAB_PCDS_FIXED_AT_BUILD     : ('FIXED',  TAB_PCDS_FIXED_AT_BUILD),
  TAB_PCDS_PATCHABLE_IN_MODULE: ('PATCH',  TAB_PCDS_PATCHABLE_IN_MODULE),
  TAB_PCDS_FEATURE_FLAG       : ('FLAG',   TAB_PCDS_FEATURE_FLAG),
  TAB_PCDS_DYNAMIC            : ('DYN',    TAB_PCDS_DYNAMIC),
  TAB_PCDS_DYNAMIC_HII        : ('DYNHII', TAB_PCDS_DYNAMIC),
  TAB_PCDS_DYNAMIC_VPD        : ('DYNVPD', TAB_PCDS_DYNAMIC),
  TAB_PCDS_DYNAMIC_EX         : ('DEX',    TAB_PCDS_DYNAMIC_EX),
  TAB_PCDS_DYNAMIC_EX_HII     : ('DEXHII', TAB_PCDS_DYNAMIC_EX),
  TAB_PCDS_DYNAMIC_EX_VPD     : ('DEXVPD', TAB_PCDS_DYNAMIC_EX),
  }
## Look-up table mapping a module type to its FFS driver type string
gDriverTypeMap = {
  SUP_MODULE_SEC               : '0x3 (SECURITY_CORE)',
  SUP_MODULE_PEI_CORE          : '0x4 (PEI_CORE)',
  SUP_MODULE_PEIM              : '0x6 (PEIM)',
  SUP_MODULE_DXE_CORE          : '0x5 (DXE_CORE)',
  SUP_MODULE_DXE_DRIVER        : '0x7 (DRIVER)',
  SUP_MODULE_DXE_SAL_DRIVER    : '0x7 (DRIVER)',
  SUP_MODULE_DXE_SMM_DRIVER    : '0x7 (DRIVER)',
  SUP_MODULE_DXE_RUNTIME_DRIVER: '0x7 (DRIVER)',
  SUP_MODULE_UEFI_DRIVER       : '0x7 (DRIVER)',
  SUP_MODULE_UEFI_APPLICATION  : '0x9 (APPLICATION)',
  SUP_MODULE_SMM_CORE          : '0xD (SMM_CORE)',
  'SMM_DRIVER'                 : '0xA (SMM)', # Extension of module type to support PI 1.1 SMM drivers
  SUP_MODULE_MM_STANDALONE     : '0xE (MM_STANDALONE)',
  SUP_MODULE_MM_CORE_STANDALONE : '0xF (MM_CORE_STANDALONE)'
  }
## Opcodes supported in dependency expression binaries; the list index is the
# opcode's binary value.
gOpCodeList = ["BEFORE", "AFTER", "PUSH", "AND", "OR", "NOT", "TRUE", "FALSE", "END", "SOR"]
## Accumulates VPD PCDs seen during report generation
VPDPcdList = []
##
# Writes a string to the file object.
#
# This function writes a string to the file object and a new line is appended
# afterwards. It may optionally wraps the string for better readability.
#
# @File The file object to write
# @String The string to be written to the file
# @Wrapper Indicates whether to wrap the string
#
def FileWrite(File, String, Wrapper=False):
    """Write String to File, appending the report end-of-line sequence.

    @param File     The file object to write
    @param String   The string to be written to the file
    @param Wrapper  When True, wrap long lines for readability
    """
    if Wrapper:
        # Use the report-wide maximum line length instead of a duplicated
        # magic number, so a change to gLineMaxLength applies here too.
        String = textwrap.fill(String, gLineMaxLength)
    File.write(String + gEndOfLine)
def ByteArrayForamt(Value):
    """Split a byte-array PCD value string into printable chunks.

    A value of the form "{0x01,0x02,...}" with at least 16 elements is
    treated as a byte array: each element is normalized to 0xXX form and
    the list is re-joined into lines of at most 16 bytes, with the last
    line carrying the closing '}'.

    @param Value   The PCD value string to format
    @retval (IsByteArray, ArrayList)  IsByteArray tells whether the value
            was recognized as a byte array; ArrayList holds the formatted
            lines (empty when IsByteArray is False)
    """
    IsByteArray = False
    SplitNum = 16
    ArrayList = []
    if Value.startswith('{') and Value.endswith('}'):
        Value = Value[1:-1]
        ValueList = Value.split(',')
        # Short arrays are left for the caller to print as-is.
        if len(ValueList) >= SplitNum:
            IsByteArray = True
    if IsByteArray:
        if ValueList:
            # Fix: floor division -- plain '/' yields a float under Python 3.
            Len = len(ValueList) // SplitNum
            for i, element in enumerate(ValueList):
                ValueList[i] = '0x%02X' % int(element.strip(), 16)
            if Len:
                Id = 0
                while (Id <= Len):
                    End = min(SplitNum * (Id + 1), len(ValueList))
                    Str = ','.join(ValueList[SplitNum * Id: End])
                    if End == len(ValueList):
                        # Last chunk closes the array.
                        Str += '}'
                        ArrayList.append(Str)
                        break
                    else:
                        # Keep the trailing comma so the chunks re-join cleanly.
                        Str += ','
                        ArrayList.append(Str)
                        Id += 1
            else:
                ArrayList = [Value + '}']
    return IsByteArray, ArrayList
##
# Find all the header file that the module source directly includes.
#
# This function scans source code to find all header files the module may
# include. This is not accurate but very effective to find all the header
# file the module might include with #include statement.
#
# @Source The source file name
# @IncludePathList The list of include path to find the source file.
# @IncludeFiles The dictionary of current found include files.
#
def FindIncludeFiles(Source, IncludePathList, IncludeFiles):
    """Find all header files the module source directly includes.

    This is not accurate but effective: it scans the source text for the
    plain '#include' form and the EDK 'EFI_XXX(Name)' include macro form,
    resolving each name against the source's directory and the include
    path list.

    @param Source           The source file name
    @param IncludePathList  The list of include paths to search
    @param IncludeFiles     Dict of found include files, updated in place
                            (keyed by lower-cased, '/'-normalized path)
    """
    # Fix: use a context manager so the file handle is always closed
    # (the original open(...).read() leaked it).
    with open(Source) as SourceFd:
        FileContents = SourceFd.read()
    #
    # Find header files with pattern #include "XXX.h" or #include <XXX.h>
    #
    for Match in gIncludePattern.finditer(FileContents):
        FileName = Match.group(1).strip()
        # The source's own directory is searched first, like a C compiler.
        for Dir in [os.path.dirname(Source)] + IncludePathList:
            FullFileName = os.path.normpath(os.path.join(Dir, FileName))
            if os.path.exists(FullFileName):
                IncludeFiles[FullFileName.lower().replace("\\", "/")] = FullFileName
                break
    #
    # Find header files with pattern like #include EFI_PPI_CONSUMER(XXX)
    #
    for Match in gIncludePattern2.finditer(FileContents):
        Key = Match.group(2)
        Type = Match.group(1)
        if "ARCH_PROTOCOL" in Type:
            FileName = "ArchProtocol/%(Key)s/%(Key)s.h" % {"Key" : Key}
        elif "PROTOCOL" in Type:
            FileName = "Protocol/%(Key)s/%(Key)s.h" % {"Key" : Key}
        elif "PPI" in Type:
            FileName = "Ppi/%(Key)s/%(Key)s.h" % {"Key" : Key}
        elif TAB_GUID in Type:
            FileName = "Guid/%(Key)s/%(Key)s.h" % {"Key" : Key}
        else:
            continue
        for Dir in IncludePathList:
            FullFileName = os.path.normpath(os.path.join(Dir, FileName))
            if os.path.exists(FullFileName):
                IncludeFiles[FullFileName.lower().replace("\\", "/")] = FullFileName
                break
## Split each lines in file
#
# This method is used to split the lines in file to make the length of each line
# less than MaxLength.
#
# @param Content The content of file
# @param MaxLength The Max Length of the line
#
def FileLinesSplit(Content=None, MaxLength=None):
    """Split lines in Content so no line is longer than MaxLength.

    Lines are preferably broken at the last space, '/' or '\\' before the
    limit; otherwise they are cut hard at MaxLength.  The result uses the
    report end-of-line sequence.

    @param Content    The content of the file
    @param MaxLength  The maximum length of a line
    @retval The re-wrapped content string
    """
    ContentList = Content.split(TAB_LINE_BREAK)
    NewContentList = []
    for Line in ContentList:
        while len(Line.rstrip()) > MaxLength:
            LineSpaceIndex = Line.rfind(TAB_SPACE_SPLIT, 0, MaxLength)
            LineSlashIndex = Line.rfind(TAB_SLASH, 0, MaxLength)
            LineBackSlashIndex = Line.rfind(TAB_BACK_SLASH, 0, MaxLength)
            LineBreakIndex = max(LineSpaceIndex, LineSlashIndex, LineBackSlashIndex)
            if LineBreakIndex <= 0:
                # No natural break point found: cut hard at the limit.
                LineBreakIndex = MaxLength
            NewContentList.append(Line[:LineBreakIndex])
            Line = Line[LineBreakIndex:]
        if Line:
            NewContentList.append(Line)
    # Fix: join once instead of repeated string '+=', which is quadratic
    # for large files.
    NewContent = ''.join(NewLine + TAB_LINE_BREAK for NewLine in NewContentList)
    NewContent = NewContent.replace(TAB_LINE_BREAK, gEndOfLine).replace('\r\r\n', gEndOfLine)
    return NewContent
##
# Parse binary dependency expression section
#
# This utility class parses the dependency expression section and translate the readable
# GUID name and value.
#
class DepexParser(object):
    """Parses binary dependency expression sections, translating GUID
    operands back to their readable names.
    """
    def __init__(self, Wa):
        """Collect GUID name/value pairs from every package and module in
        the workspace so opcode operands can later be shown by name.

        @param self  The object pointer
        @param Wa    Workspace context information
        """
        self._GuidDb = {}
        for Pa in Wa.AutoGenObjectList:
            for Package in Pa.PackageList:
                for Protocol in Package.Protocols:
                    GuidValue = GuidStructureStringToGuidString(Package.Protocols[Protocol])
                    self._GuidDb[GuidValue.upper()] = Protocol
                for Ppi in Package.Ppis:
                    GuidValue = GuidStructureStringToGuidString(Package.Ppis[Ppi])
                    self._GuidDb[GuidValue.upper()] = Ppi
                for Guid in Package.Guids:
                    GuidValue = GuidStructureStringToGuidString(Package.Guids[Guid])
                    self._GuidDb[GuidValue.upper()] = Guid
            for Ma in Pa.ModuleAutoGenList:
                for Pcd in Ma.FixedVoidTypePcds:
                    PcdValue = Ma.FixedVoidTypePcds[Pcd]
                    # A 16-byte VOID* PCD value is a GUID structure; record
                    # its name as well.
                    if len(PcdValue.split(',')) == 16:
                        GuidValue = GuidStructureByteArrayToGuidString(PcdValue)
                        self._GuidDb[GuidValue.upper()] = Pcd

    def ParseDepexFile(self, DepexFileName):
        """Translate a binary dependency expression file into a readable
        instruction list.

        @param self           The object pointer
        @param DepexFileName  The file name of the binary depex file
        @retval List of instruction strings
        """
        DepexStatement = []
        # Fix: use a context manager so the depex file handle is always
        # closed (the original never closed it).
        with open(DepexFileName, "rb") as DepexFile:
            OpCode = DepexFile.read(1)
            while OpCode:
                Statement = gOpCodeList[struct.unpack("B", OpCode)[0]]
                if Statement in ["BEFORE", "AFTER", "PUSH"]:
                    # These opcodes are followed by a 16-byte GUID operand.
                    GuidValue = "%08X-%04X-%04X-%02X%02X-%02X%02X%02X%02X%02X%02X" % \
                                struct.unpack(PACK_PATTERN_GUID, DepexFile.read(16))
                    GuidString = self._GuidDb.get(GuidValue, GuidValue)
                    Statement = "%s %s" % (Statement, GuidString)
                DepexStatement.append(Statement)
                OpCode = DepexFile.read(1)
        return DepexStatement
##
# Reports library information
#
# This class reports the module library subsection in the build report file.
#
class LibraryReport(object):
    """Reports the module library subsection in the build report file."""
    def __init__(self, M):
        """Collect library information for module M.

        @param self  The object pointer
        @param M     Module context information
        """
        self.LibraryList = []
        # An AutoGen version >= 0x00010005 identifies an EDKII style module.
        if int(str(M.AutoGenVersion), 0) >= 0x00010005:
            self._EdkIIModule = True
        else:
            self._EdkIIModule = False
        for Lib in M.DependentLibraryList:
            LibInfPath = str(Lib)
            LibClassList = Lib.LibraryClass[0].LibraryClass
            LibConstructorList = Lib.ConstructorList
            LibDestructorList = Lib.DestructorList
            LibDepexList = Lib.DepexExpression[M.Arch, M.ModuleType]
            # Fix: initialize per library.  Previously, a library with no
            # matching AutoGen entry raised NameError on the first
            # iteration or silently reused the previous library's time.
            LibTime = None
            for LibAutoGen in M.LibraryAutoGenList:
                if LibInfPath == LibAutoGen.MetaFile.Path:
                    LibTime = LibAutoGen.BuildTime
                    break
            self.LibraryList.append((LibInfPath, LibClassList, LibConstructorList, LibDestructorList, LibDepexList, LibTime))

    def GenerateReport(self, File):
        """Write the library subsection for this module to File.

        For an EDKII style module the library class, constructor,
        destructor, depex and build time are also reported.

        @param self  The object pointer
        @param File  The file object for report
        """
        if len(self.LibraryList) > 0:
            FileWrite(File, gSubSectionStart)
            FileWrite(File, TAB_BRG_LIBRARY)
            FileWrite(File, gSubSectionSep)
            for LibraryItem in self.LibraryList:
                LibInfPath = LibraryItem[0]
                FileWrite(File, LibInfPath)
                #
                # Report library class, library constructor and destructor for
                # EDKII style module.
                #
                if self._EdkIIModule:
                    LibClass = LibraryItem[1]
                    EdkIILibInfo = ""
                    LibConstructor = " ".join(LibraryItem[2])
                    if LibConstructor:
                        EdkIILibInfo += " C = " + LibConstructor
                    LibDestructor = " ".join(LibraryItem[3])
                    if LibDestructor:
                        EdkIILibInfo += " D = " + LibDestructor
                    LibDepex = " ".join(LibraryItem[4])
                    if LibDepex:
                        EdkIILibInfo += " Depex = " + LibDepex
                    if LibraryItem[5]:
                        EdkIILibInfo += " Time = " + LibraryItem[5]
                    if EdkIILibInfo:
                        FileWrite(File, "{%s: %s}" % (LibClass, EdkIILibInfo))
                    else:
                        FileWrite(File, "{%s}" % LibClass)
            FileWrite(File, gSubSectionEnd)
##
# Reports dependency expression information
#
# This class reports the module dependency expression subsection in the build report file.
#
class DepexReport(object):
    """Reports the module dependency expression subsection in the build
    report file.

    If the module source contains a DXS file (usually an EDK style
    module), the dependency comes from it; otherwise it comes from the
    module INF [Depex] section merged with its dependent libraries'.
    """
    def __init__(self, M):
        """Collect the dependency expression for module M.

        @param self  The object pointer
        @param M     Module context information
        """
        self.Depex = ""
        self._DepexFileName = os.path.join(M.BuildDir, "OUTPUT", M.Module.BaseName + ".depex")
        ModuleType = M.ModuleType
        if not ModuleType:
            ModuleType = COMPONENT_TO_MODULE_MAP_DICT.get(M.ComponentType, "")
        # Core/application module types carry no dependency expression.
        if ModuleType in [SUP_MODULE_SEC, SUP_MODULE_PEI_CORE, SUP_MODULE_DXE_CORE, SUP_MODULE_SMM_CORE, SUP_MODULE_MM_CORE_STANDALONE, SUP_MODULE_UEFI_APPLICATION]:
            return
        for Source in M.SourceFileList:
            if os.path.splitext(Source.Path)[1].lower() == ".dxs":
                # Fix: close the DXS file handle (the original leaked it).
                with open(Source.Path) as DxsFile:
                    Match = gDxsDependencyPattern.search(DxsFile.read())
                if Match:
                    self.Depex = Match.group(1).strip()
                    self.Source = "DXS"
                    break
        else:
            # No DXS file provided a depex: derive it from the INF data.
            self.Depex = M.DepexExpressionDict.get(M.ModuleType, "")
            self.ModuleDepex = " ".join(M.Module.DepexExpression[M.Arch, M.ModuleType])
            if not self.ModuleDepex:
                self.ModuleDepex = "(None)"
            LibDepexList = []
            for Lib in M.DependentLibraryList:
                LibDepex = " ".join(Lib.DepexExpression[M.Arch, M.ModuleType]).strip()
                if LibDepex != "":
                    LibDepexList.append("(" + LibDepex + ")")
            self.LibraryDepex = " AND ".join(LibDepexList)
            if not self.LibraryDepex:
                self.LibraryDepex = "(None)"
            self.Source = "INF"

    def GenerateReport(self, File, GlobalDepexParser):
        """Write the dependency expression subsection to File.

        @param self               The object pointer
        @param File               The file object for report
        @param GlobalDepexParser  The platform global depex parser object
        """
        if not self.Depex:
            return
        FileWrite(File, gSubSectionStart)
        if os.path.isfile(self._DepexFileName):
            try:
                DepexStatements = GlobalDepexParser.ParseDepexFile(self._DepexFileName)
                FileWrite(File, "Final Dependency Expression (DEPEX) Instructions")
                for DepexStatement in DepexStatements:
                    FileWrite(File, "  %s" % DepexStatement)
                FileWrite(File, gSubSectionSep)
            # Fix: narrow the bare 'except:' so SystemExit/KeyboardInterrupt
            # are not swallowed; a corrupt file is still only a warning.
            except Exception:
                EdkLogger.warn(None, "Dependency expression file is corrupted", self._DepexFileName)
        FileWrite(File, "Dependency Expression (DEPEX) from %s" % self.Source)
        if self.Source == "INF":
            FileWrite(File, self.Depex, True)
            FileWrite(File, gSubSectionSep)
            FileWrite(File, "From Module INF:  %s" % self.ModuleDepex, True)
            FileWrite(File, "From Library INF: %s" % self.LibraryDepex, True)
        else:
            FileWrite(File, self.Depex)
        FileWrite(File, gSubSectionEnd)
##
# Reports dependency expression information
#
# This class reports the module build flags subsection in the build report file.
#
class BuildFlagsReport(object):
    """Reports the module build flags subsection in the build report file:
    the tool chain tag plus the *_FLAGS values relevant to the module.
    """
    def __init__(self, M):
        """Derive the set of relevant build tools from the module's source
        file extensions, then record the matching flag values.

        @param self  The object pointer
        @param M     Module context information
        """
        # Which build tools each recognized source extension implies.
        # Irrelevant tools are thereby filtered out of the report.
        ToolsForExt = {
            ".c": ("CC",),
            ".cc": ("CC",),
            ".cpp": ("CC",),
            ".s": ("PP", "ASM"),
            ".asm": ("PP", "ASM"),
            ".vfr": ("VFRPP", "VFR"),
            ".dxs": ("APP", "CC"),
            ".asl": ("ASLPP", "ASL"),
            ".aslc": ("ASLCC", "ASLDLINK", "CC"),
            ".asm16": ("ASMLINK", "SLINK", "DLINK"),
        }
        RelevantTools = {}
        for SrcFile in M.SourceFileList:
            FileExt = os.path.splitext(SrcFile.File)[1].lower()
            for ToolName in ToolsForExt.get(FileExt, ()):
                RelevantTools[ToolName] = 1
        #
        # Save module build flags.
        #
        self.ToolChainTag = M.ToolChain
        self.BuildFlags = {}
        for ToolName in RelevantTools:
            self.BuildFlags[ToolName + "_FLAGS"] = M.BuildOption.get(ToolName, {}).get("FLAGS", "")

    def GenerateReport(self, File):
        """Write the build-flags subsection for this module to File.

        @param self  The object pointer
        @param File  The file object for report
        """
        FileWrite(File, gSubSectionStart)
        FileWrite(File, "Build Flags")
        FileWrite(File, "Tool Chain Tag: %s" % self.ToolChainTag)
        for FlagName in self.BuildFlags:
            FileWrite(File, gSubSectionSep)
            FileWrite(File, "%s = %s" % (FlagName, self.BuildFlags[FlagName]), True)
        FileWrite(File, gSubSectionEnd)
##
# Reports individual module information
#
# This class reports the module section in the build report file.
# It comprises of module summary, module PCD, library, dependency expression,
# build flags sections.
#
class ModuleReport(object):
    """Reports the per-module section of the build report file: a module
    summary plus optional PCD, library, dependency expression and build
    flags subsections.
    """
    def __init__(self, M, ReportType):
        """Collect report data for a single module in a platform build.

        @param self        The object pointer
        @param M           Module context information
        @param ReportType  The kinds of report items in the final report file
        """
        self.ModuleName = M.Module.BaseName
        self.ModuleInfPath = M.MetaFile.File
        self.FileGuid = M.Guid
        self.Size = 0
        self.BuildTimeStamp = None
        self.Hash = 0
        self.DriverType = ""
        if not M.IsLibrary:
            ModuleType = M.ModuleType
            if not ModuleType:
                ModuleType = COMPONENT_TO_MODULE_MAP_DICT.get(M.ComponentType, "")
            #
            # If a module complies to PI 1.1, promote Module type to "SMM_DRIVER"
            #
            if ModuleType == SUP_MODULE_DXE_SMM_DRIVER:
                PiSpec = M.Module.Specification.get("PI_SPECIFICATION_VERSION", "0x00010000")
                if int(PiSpec, 0) >= 0x0001000A:
                    ModuleType = "SMM_DRIVER"
            self.DriverType = gDriverTypeMap.get(ModuleType, "0x2 (FREE_FORM)")
        self.UefiSpecVersion = M.Module.Specification.get("UEFI_SPECIFICATION_VERSION", "")
        self.PiSpecVersion = M.Module.Specification.get("PI_SPECIFICATION_VERSION", "")
        self.PciDeviceId = M.Module.Defines.get("PCI_DEVICE_ID", "")
        self.PciVendorId = M.Module.Defines.get("PCI_VENDOR_ID", "")
        self.PciClassCode = M.Module.Defines.get("PCI_CLASS_CODE", "")
        self.BuildTime = M.BuildTime
        self._BuildDir = M.BuildDir
        self.ModulePcdSet = {}
        if "PCD" in ReportType:
            #
            # Collect all module used PCD set: module INF referenced directly or indirectly.
            # It also saves module INF default values of them in case they exist.
            #
            for Pcd in M.ModulePcdList + M.LibraryPcdList:
                self.ModulePcdSet.setdefault((Pcd.TokenCName, Pcd.TokenSpaceGuidCName, Pcd.Type), (Pcd.InfDefaultValue, Pcd.DefaultValue))
        self.LibraryReport = None
        if "LIBRARY" in ReportType:
            self.LibraryReport = LibraryReport(M)
        self.DepexReport = None
        if "DEPEX" in ReportType:
            self.DepexReport = DepexReport(M)
        if "BUILD_FLAGS" in ReportType:
            self.BuildFlagsReport = BuildFlagsReport(M)

    def GenerateReport(self, File, GlobalPcdReport, GlobalPredictionReport, GlobalDepexParser, ReportType):
        """Write the module section to File, delegating each subsection to
        the collected sub-report objects.

        @param self                    The object pointer
        @param File                    The file object for report
        @param GlobalPcdReport         The platform global PCD report object
        @param GlobalPredictionReport  The platform global Prediction report object
        @param GlobalDepexParser       The platform global depex parser object
        @param ReportType              The kinds of report items in the final report file
        """
        FileWrite(File, gSectionStart)
        FwReportFileName = os.path.join(self._BuildDir, "DEBUG", self.ModuleName + ".txt")
        if os.path.isfile(FwReportFileName):
            try:
                # Fix: close the report file handle deterministically.
                with open(FwReportFileName) as FwReport:
                    FileContents = FwReport.read()
                Match = gModuleSizePattern.search(FileContents)
                if Match:
                    self.Size = int(Match.group(1))
                Match = gTimeStampPattern.search(FileContents)
                if Match:
                    self.BuildTimeStamp = datetime.fromtimestamp(int(Match.group(1)))
            except IOError:
                EdkLogger.warn(None, "Fail to read report file", FwReportFileName)
        if "HASH" in ReportType:
            OutputDir = os.path.join(self._BuildDir, "OUTPUT")
            DefaultEFIfile = os.path.join(OutputDir, self.ModuleName + ".efi")
            if os.path.isfile(DefaultEFIfile):
                Tempfile = os.path.join(OutputDir, self.ModuleName + "_hash.tmp")
                # rebase the efi image since its base address may not zero
                cmd = ["GenFw", "--rebase", str(0), "-o", Tempfile, DefaultEFIfile]
                try:
                    PopenObject = subprocess.Popen(' '.join(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
                except Exception as X:
                    EdkLogger.error("GenFw", COMMAND_FAILURE, ExtraData="%s: %s" % (str(X), cmd[0]))
                EndOfProcedure = threading.Event()
                EndOfProcedure.clear()
                if PopenObject.stderr:
                    # Fix: pass name/daemon as constructor kwargs instead of
                    # the deprecated setName()/setDaemon() methods.
                    StdErrThread = threading.Thread(target=ReadMessage,
                                                    args=(PopenObject.stderr, EdkLogger.quiet, EndOfProcedure),
                                                    name="STDERR-Redirector", daemon=False)
                    StdErrThread.start()
                # waiting for program exit
                PopenObject.wait()
                if PopenObject.stderr:
                    StdErrThread.join()
                if PopenObject.returncode != 0:
                    EdkLogger.error("GenFw", COMMAND_FAILURE, "Failed to generate firmware hash image for %s" % (DefaultEFIfile))
                if os.path.isfile(Tempfile):
                    self.Hash = hashlib.sha1()
                    # Fix: hash.update() returns None, so the original
                    # 'if self.Hash.update(buf): self.Hash = self.Hash.update(buf)'
                    # branch could never run (and would have destroyed the
                    # hash object if it had).  Update once, take the digest.
                    with open(Tempfile, 'rb') as TempFd:
                        self.Hash.update(TempFd.read())
                    self.Hash = self.Hash.hexdigest()
                    os.remove(Tempfile)
        FileWrite(File, "Module Summary")
        FileWrite(File, "Module Name: %s" % self.ModuleName)
        FileWrite(File, "Module INF Path: %s" % self.ModuleInfPath)
        FileWrite(File, "File GUID: %s" % self.FileGuid)
        if self.Size:
            FileWrite(File, "Size: 0x%X (%.2fK)" % (self.Size, self.Size / 1024.0))
        if self.Hash:
            FileWrite(File, "SHA1 HASH: %s *%s" % (self.Hash, self.ModuleName + ".efi"))
        if self.BuildTimeStamp:
            FileWrite(File, "Build Time Stamp: %s" % self.BuildTimeStamp)
        if self.BuildTime:
            FileWrite(File, "Module Build Time: %s" % self.BuildTime)
        if self.DriverType:
            FileWrite(File, "Driver Type: %s" % self.DriverType)
        if self.UefiSpecVersion:
            FileWrite(File, "UEFI Spec Version: %s" % self.UefiSpecVersion)
        if self.PiSpecVersion:
            FileWrite(File, "PI Spec Version: %s" % self.PiSpecVersion)
        if self.PciDeviceId:
            FileWrite(File, "PCI Device ID: %s" % self.PciDeviceId)
        if self.PciVendorId:
            FileWrite(File, "PCI Vendor ID: %s" % self.PciVendorId)
        if self.PciClassCode:
            FileWrite(File, "PCI Class Code: %s" % self.PciClassCode)
        FileWrite(File, gSectionSep)
        if "PCD" in ReportType:
            GlobalPcdReport.GenerateReport(File, self.ModulePcdSet)
        if "LIBRARY" in ReportType:
            self.LibraryReport.GenerateReport(File)
        if "DEPEX" in ReportType:
            self.DepexReport.GenerateReport(File, GlobalDepexParser)
        if "BUILD_FLAGS" in ReportType:
            self.BuildFlagsReport.GenerateReport(File)
        if "FIXED_ADDRESS" in ReportType and self.FileGuid:
            GlobalPredictionReport.GenerateReport(File, self.FileGuid)
        FileWrite(File, gSectionEnd)
def ReadMessage(From, To, ExitFlag):
    """Pump lines from stream From into sink To until EOF or ExitFlag.

    @param From      Readable stream (e.g. a subprocess pipe)
    @param To        Callable invoked with each right-stripped line
    @param ExitFlag  threading.Event; when set, stop after the current line
    """
    while True:
        # read one line at a time
        Line = From.readline()
        # readline() returns an empty string only at EOF; it never
        # returns None, so the original 'is not None' check was redundant.
        if not Line:
            break
        To(Line.rstrip())
        # Fix: Event.is_set() replaces the deprecated isSet() alias.
        if ExitFlag.is_set():
            break
##
# Reports platform and module PCD information
#
# This class reports the platform PCD section and module PCD subsection
# in the build report file.
#
class PcdReport(object):
##
# Constructor function for class PcdReport
#
# This constructor function generates PcdReport object a platform build.
# It collects the whole PCD database from platform DSC files, platform
# flash description file and package DEC files.
#
# @param self The object pointer
# @param Wa Workspace context information
#
def __init__(self, Wa):
    """Collect the whole PCD database for the platform build: PCDs used by
    modules, PCDs defined but unused, PCDs referenced by conditional
    directives, DEC/DSC default values and per-module overrides.

    @param self  The object pointer
    @param Wa    Workspace context information
    """
    # PCDs grouped as {TokenSpaceGuidCName: {Type: [Pcd, ...]}}
    self.AllPcds = {}
    self.UnusedPcds = {}
    self.ConditionalPcds = {}
    # Longest token C name seen; used for report column alignment.
    self.MaxLen = 0
    self.Arch = None
    if Wa.FdfProfile:
        self.FdfPcdSet = Wa.FdfProfile.PcdDict
    else:
        self.FdfPcdSet = {}
    # Single default-store / single SKU builds print a simpler report.
    self.DefaultStoreSingle = True
    self.SkuSingle = True
    if GlobalData.gDefaultStores and len(GlobalData.gDefaultStores) > 1:
        self.DefaultStoreSingle = False
    if GlobalData.gSkuids and len(GlobalData.gSkuids) > 1:
        self.SkuSingle = False
    self.ModulePcdOverride = {}
    for Pa in Wa.AutoGenObjectList:
        self.Arch = Pa.Arch
        #
        # Collect all platform referenced PCDs and grouped them by PCD token space
        # GUID C Names
        #
        for Pcd in Pa.AllPcdList:
            PcdList = self.AllPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, [])
            if Pcd not in PcdList:
                PcdList.append(Pcd)
            if len(Pcd.TokenCName) > self.MaxLen:
                self.MaxLen = len(Pcd.TokenCName)
        #
        # Collect the PCD defined in DSC/FDF file, but not used in module
        #
        UnusedPcdFullList = []
        for item in Pa.Platform.Pcds:
            Pcd = Pa.Platform.Pcds[item]
            if not Pcd.Type:
                # check the Pcd in FDF file, whether it is used in module first
                for T in PCD_TYPE_LIST:
                    PcdList = self.AllPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(T, [])
                    if Pcd in PcdList:
                        Pcd.Type = T
                        break
            if not Pcd.Type:
                # Fall back to the type declared by any package DEC file.
                PcdTypeFlag = False
                for package in Pa.PackageList:
                    for T in PCD_TYPE_LIST:
                        if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName, T) in package.Pcds:
                            Pcd.Type = T
                            PcdTypeFlag = True
                            if not Pcd.DatumType:
                                Pcd.DatumType = package.Pcds[(Pcd.TokenCName, Pcd.TokenSpaceGuidCName, T)].DatumType
                            break
                    if PcdTypeFlag:
                        break
            if not Pcd.DatumType:
                PcdType = Pcd.Type
                # Try to remove Hii and Vpd suffix
                if PcdType.startswith(TAB_PCDS_DYNAMIC_EX):
                    PcdType = TAB_PCDS_DYNAMIC_EX
                elif PcdType.startswith(TAB_PCDS_DYNAMIC):
                    PcdType = TAB_PCDS_DYNAMIC
                for package in Pa.PackageList:
                    if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName, PcdType) in package.Pcds:
                        Pcd.DatumType = package.Pcds[(Pcd.TokenCName, Pcd.TokenSpaceGuidCName, PcdType)].DatumType
                        break
            # A PCD is "unused" only if it appears in no module's PCD list.
            PcdList = self.AllPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, [])
            UnusedPcdList = self.UnusedPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, [])
            if Pcd in UnusedPcdList:
                UnusedPcdList.remove(Pcd)
            if Pcd not in PcdList and Pcd not in UnusedPcdFullList:
                UnusedPcdFullList.append(Pcd)
            if len(Pcd.TokenCName) > self.MaxLen:
                self.MaxLen = len(Pcd.TokenCName)
        # PCDs referenced by conditional directives get their own section.
        if GlobalData.gConditionalPcds:
            for PcdItem in GlobalData.gConditionalPcds:
                if '.' in PcdItem:
                    (TokenSpaceGuidCName, TokenCName) = PcdItem.split('.')
                    if (TokenCName, TokenSpaceGuidCName) in Pa.Platform.Pcds:
                        Pcd = Pa.Platform.Pcds[(TokenCName, TokenSpaceGuidCName)]
                        PcdList = self.ConditionalPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, [])
                        if Pcd not in PcdList:
                            PcdList.append(Pcd)
        # Conditional-directive PCDs are excluded from the unused section.
        UnusedPcdList = []
        if UnusedPcdFullList:
            for Pcd in UnusedPcdFullList:
                if Pcd.TokenSpaceGuidCName + '.' + Pcd.TokenCName in GlobalData.gConditionalPcds:
                    continue
                UnusedPcdList.append(Pcd)
        for Pcd in UnusedPcdList:
            PcdList = self.UnusedPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, [])
            if Pcd not in PcdList:
                PcdList.append(Pcd)
        for Module in Pa.Platform.Modules.values():
            #
            # Collect module override PCDs
            #
            for ModulePcd in Module.M.ModulePcdList + Module.M.LibraryPcdList:
                TokenCName = ModulePcd.TokenCName
                TokenSpaceGuid = ModulePcd.TokenSpaceGuidCName
                ModuleDefault = ModulePcd.DefaultValue
                ModulePath = os.path.basename(Module.M.MetaFile.File)
                self.ModulePcdOverride.setdefault((TokenCName, TokenSpaceGuid), {})[ModulePath] = ModuleDefault
    #
    # Collect PCD DEC default value.
    #
    self.DecPcdDefault = {}
    self._GuidDict = {}
    for Pa in Wa.AutoGenObjectList:
        for Package in Pa.PackageList:
            Guids = Package.Guids
            self._GuidDict.update(Guids)
            for (TokenCName, TokenSpaceGuidCName, DecType) in Package.Pcds:
                DecDefaultValue = Package.Pcds[TokenCName, TokenSpaceGuidCName, DecType].DefaultValue
                self.DecPcdDefault.setdefault((TokenCName, TokenSpaceGuidCName, DecType), DecDefaultValue)
    #
    # Collect PCDs defined in DSC common section
    #
    self.DscPcdDefault = {}
    for Pa in Wa.AutoGenObjectList:
        for (TokenCName, TokenSpaceGuidCName) in Pa.Platform.Pcds:
            DscDefaultValue = Pa.Platform.Pcds[(TokenCName, TokenSpaceGuidCName)].DscDefaultValue
            if DscDefaultValue:
                self.DscPcdDefault[(TokenCName, TokenSpaceGuidCName)] = DscDefaultValue
def GenerateReport(self, File, ModulePcdSet):
    """Emit the PCD report sections.

    For the platform-level report (ModulePcdSet is falsy) the
    conditional-directive and unused-PCD sections are emitted first; the
    main PCD section is always emitted last.

    @param self          The object pointer
    @param File          The file object for report
    @param ModulePcdSet  Module PCD set, or None/empty for the platform report
    """
    if not ModulePcdSet:
        if self.ConditionalPcds:
            self.GenerateReportDetail(File, ModulePcdSet, 1)
        # Emit the unused-PCD section only when at least one type bucket
        # actually contains a PCD.
        HasUnused = self.UnusedPcds and any(
            PcdItems
            for TypeDict in self.UnusedPcds.values()
            for PcdItems in TypeDict.values())
        if HasUnused:
            self.GenerateReportDetail(File, ModulePcdSet, 2)
    self.GenerateReportDetail(File, ModulePcdSet)
##
# Generate report for PCD information
#
# This function generates report for separate module expression
# in a platform build.
#
# @param self The object pointer
# @param File The file object for report
# @param ModulePcdSet Set of all PCDs referenced by module or None for
# platform PCD report
# @param ReportySubType 0 means platform/module PCD report, 1 means Conditional
# directives section report, 2 means Unused Pcds section report
# @param DscOverridePcds Module DSC override PCDs set
#
def GenerateReportDetail(self, File, ModulePcdSet, ReportSubType = 0):
PcdDict = self.AllPcds
if ReportSubType == 1:
PcdDict = self.ConditionalPcds
elif ReportSubType == 2:
PcdDict = self.UnusedPcds
if not ModulePcdSet:
FileWrite(File, gSectionStart)
if ReportSubType == 1:
FileWrite(File, "Conditional Directives used by the build system")
elif ReportSubType == 2:
FileWrite(File, "PCDs not used by modules or in conditional directives")
else:
FileWrite(File, "Platform Configuration Database Report")
FileWrite(File, " *B - PCD override in the build option")
FileWrite(File, " *P - Platform scoped PCD override in DSC file")
FileWrite(File, " *F - Platform scoped PCD override in FDF file")
if not ReportSubType:
FileWrite(File, " *M - Module scoped PCD override")
FileWrite(File, gSectionSep)
else:
if not ReportSubType and ModulePcdSet:
#
# For module PCD sub-section
#
FileWrite(File, gSubSectionStart)
FileWrite(File, TAB_BRG_PCD)
FileWrite(File, gSubSectionSep)
AllPcdDict = {}
for Key in PcdDict:
AllPcdDict[Key] = {}
for Type in PcdDict[Key]:
for Pcd in PcdDict[Key][Type]:
AllPcdDict[Key][(Pcd.TokenCName, Type)] = Pcd
for Key in sorted(AllPcdDict):
#
# Group PCD by their token space GUID C Name
#
First = True
for PcdTokenCName, Type in sorted(AllPcdDict[Key]):
#
# Group PCD by their usage type
#
Pcd = AllPcdDict[Key][(PcdTokenCName, Type)]
TypeName, DecType = gPcdTypeMap.get(Type, ("", Type))
MixedPcdFlag = False
if GlobalData.MixedPcd:
for PcdKey in GlobalData.MixedPcd:
if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName) in GlobalData.MixedPcd[PcdKey]:
PcdTokenCName = PcdKey[0]
MixedPcdFlag = True
if MixedPcdFlag and not ModulePcdSet:
continue
#
# Get PCD default value and their override relationship
#
DecDefaultValue = self.DecPcdDefault.get((Pcd.TokenCName, Pcd.TokenSpaceGuidCName, DecType))
DscDefaultValue = self.DscPcdDefault.get((Pcd.TokenCName, Pcd.TokenSpaceGuidCName))
DscDefaultValBak = DscDefaultValue
Field = ''
for (CName, Guid, Field) in self.FdfPcdSet:
if CName == PcdTokenCName and Guid == Key:
DscDefaultValue = self.FdfPcdSet[(CName, Guid, Field)]
break
if DscDefaultValue != DscDefaultValBak:
try:
DscDefaultValue = ValueExpressionEx(DscDefaultValue, Pcd.DatumType, self._GuidDict)(True)
except BadExpression as DscDefaultValue:
EdkLogger.error('BuildReport', FORMAT_INVALID, "PCD Value: %s, Type: %s" %(DscDefaultValue, Pcd.DatumType))
InfDefaultValue = None
PcdValue = DecDefaultValue
if DscDefaultValue:
PcdValue = DscDefaultValue
#The DefaultValue of StructurePcd already be the latest, no need to update.
if not self.IsStructurePcd(Pcd.TokenCName, Pcd.TokenSpaceGuidCName):
Pcd.DefaultValue = PcdValue
if ModulePcdSet is not None:
if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName, Type) not in ModulePcdSet:
continue
InfDefaultValue, PcdValue = ModulePcdSet[Pcd.TokenCName, Pcd.TokenSpaceGuidCName, Type]
#The DefaultValue of StructurePcd already be the latest, no need to update.
if not self.IsStructurePcd(Pcd.TokenCName, Pcd.TokenSpaceGuidCName):
Pcd.DefaultValue = PcdValue
if InfDefaultValue:
try:
InfDefaultValue = ValueExpressionEx(InfDefaultValue, Pcd.DatumType, self._GuidDict)(True)
except BadExpression as InfDefaultValue:
EdkLogger.error('BuildReport', FORMAT_INVALID, "PCD Value: %s, Type: %s" % (InfDefaultValue, Pcd.DatumType))
if InfDefaultValue == "":
InfDefaultValue = None
BuildOptionMatch = False
if GlobalData.BuildOptionPcd:
for pcd in GlobalData.BuildOptionPcd:
if (Pcd.TokenSpaceGuidCName, Pcd.TokenCName) == (pcd[0], pcd[1]):
if pcd[2]:
continue
PcdValue = pcd[3]
#The DefaultValue of StructurePcd already be the latest, no need to update.
if not self.IsStructurePcd(Pcd.TokenCName, Pcd.TokenSpaceGuidCName):
Pcd.DefaultValue = PcdValue
BuildOptionMatch = True
break
if First:
if ModulePcdSet is None:
FileWrite(File, "")
FileWrite(File, Key)
First = False
if Pcd.DatumType in TAB_PCD_NUMERIC_TYPES:
PcdValueNumber = int(PcdValue.strip(), 0)
if DecDefaultValue is None:
DecMatch = True
else:
DecDefaultValueNumber = int(DecDefaultValue.strip(), 0)
DecMatch = (DecDefaultValueNumber == PcdValueNumber)
if InfDefaultValue is None:
InfMatch = True
else:
InfDefaultValueNumber = int(InfDefaultValue.strip(), 0)
InfMatch = (InfDefaultValueNumber == PcdValueNumber)
if DscDefaultValue is None:
DscMatch = True
else:
DscDefaultValueNumber = int(DscDefaultValue.strip(), 0)
DscMatch = (DscDefaultValueNumber == PcdValueNumber)
else:
if DecDefaultValue is None:
DecMatch = True
else:
DecMatch = (DecDefaultValue.strip() == PcdValue.strip())
if InfDefaultValue is None:
InfMatch = True
else:
InfMatch = (InfDefaultValue.strip() == PcdValue.strip())
if DscDefaultValue is None:
DscMatch = True
else:
DscMatch = (DscDefaultValue.strip() == PcdValue.strip())
IsStructure = False
if self.IsStructurePcd(Pcd.TokenCName, Pcd.TokenSpaceGuidCName):
IsStructure = True
if TypeName in ('DYNVPD', 'DEXVPD'):
SkuInfoList = Pcd.SkuInfoList
Pcd = GlobalData.gStructurePcd[self.Arch][(Pcd.TokenCName, Pcd.TokenSpaceGuidCName)]
Pcd.DatumType = Pcd.StructName
if TypeName in ('DYNVPD', 'DEXVPD'):
Pcd.SkuInfoList = SkuInfoList
if Pcd.PcdValueFromComm or Pcd.PcdFieldValueFromComm:
BuildOptionMatch = True
DecMatch = False
elif Pcd.PcdValueFromFdf or Pcd.PcdFieldValueFromFdf:
DscDefaultValue = True
DscMatch = True
DecMatch = False
elif Pcd.SkuOverrideValues:
DscOverride = False
if Pcd.DefaultFromDSC:
DscOverride = True
else:
DictLen = 0
for item in Pcd.SkuOverrideValues:
DictLen += len(Pcd.SkuOverrideValues[item])
if not DictLen:
DscOverride = False
else:
if not Pcd.SkuInfoList:
OverrideValues = Pcd.SkuOverrideValues
if OverrideValues:
for Data in OverrideValues.values():
Struct = list(Data.values())
if Struct:
DscOverride = self.ParseStruct(Struct[0])
break
else:
SkuList = sorted(Pcd.SkuInfoList.keys())
for Sku in SkuList:
SkuInfo = Pcd.SkuInfoList[Sku]
if TypeName in ('DYNHII', 'DEXHII'):
if SkuInfo.DefaultStoreDict:
DefaultStoreList = sorted(SkuInfo.DefaultStoreDict.keys())
for DefaultStore in DefaultStoreList:
OverrideValues = Pcd.SkuOverrideValues[Sku]
DscOverride = self.ParseStruct(OverrideValues[DefaultStore])
if DscOverride:
break
else:
OverrideValues = Pcd.SkuOverrideValues[Sku]
if OverrideValues:
Keys = list(OverrideValues.keys())
OverrideFieldStruct = self.OverrideFieldValue(Pcd, OverrideValues[Keys[0]])
DscOverride = self.ParseStruct(OverrideFieldStruct)
if DscOverride:
break
if DscOverride:
DscDefaultValue = True
DscMatch = True
DecMatch = False
else:
DecMatch = True
else:
DscDefaultValue = True
DscMatch = True
DecMatch = False
#
# Report PCD item according to their override relationship
#
if Pcd.DatumType == 'BOOLEAN':
if DscDefaultValue:
DscDefaultValue = str(int(DscDefaultValue, 0))
if DecDefaultValue:
DecDefaultValue = str(int(DecDefaultValue, 0))
if InfDefaultValue:
InfDefaultValue = str(int(InfDefaultValue, 0))
if Pcd.DefaultValue:
Pcd.DefaultValue = str(int(Pcd.DefaultValue, 0))
if DecMatch:
self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, ' ')
elif InfDefaultValue and InfMatch:
self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*M')
elif BuildOptionMatch:
self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*B')
else:
if DscDefaultValue and DscMatch:
if (Pcd.TokenCName, Key, Field) in self.FdfPcdSet:
self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*F')
else:
self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*P')
else:
self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*M')
if ModulePcdSet is None:
if IsStructure:
continue
if not TypeName in ('PATCH', 'FLAG', 'FIXED'):
continue
if not BuildOptionMatch:
ModuleOverride = self.ModulePcdOverride.get((Pcd.TokenCName, Pcd.TokenSpaceGuidCName), {})
for ModulePath in ModuleOverride:
ModuleDefault = ModuleOverride[ModulePath]
if Pcd.DatumType in TAB_PCD_NUMERIC_TYPES:
ModulePcdDefaultValueNumber = int(ModuleDefault.strip(), 0)
Match = (ModulePcdDefaultValueNumber == PcdValueNumber)
if Pcd.DatumType == 'BOOLEAN':
ModuleDefault = str(ModulePcdDefaultValueNumber)
else:
Match = (ModuleDefault.strip() == PcdValue.strip())
if Match:
continue
IsByteArray, ArrayList = ByteArrayForamt(ModuleDefault.strip())
if IsByteArray:
FileWrite(File, ' *M %-*s = %s' % (self.MaxLen + 15, ModulePath, '{'))
for Array in ArrayList:
FileWrite(File, Array)
else:
Value = ModuleDefault.strip()
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
FileWrite(File, ' *M %-*s = %s' % (self.MaxLen + 15, ModulePath, Value))
if ModulePcdSet is None:
FileWrite(File, gSectionEnd)
else:
if not ReportSubType and ModulePcdSet:
FileWrite(File, gSubSectionEnd)
def ParseStruct(self, struct):
    """Return True when any field of *struct* carries a value that was
    sourced from a .dsc file (i.e. the platform overrides the field).

    struct maps field names to entries whose second element is the
    origin file of the value (may be None/empty).
    """
    if not struct:
        return False
    # An entry counts as a DSC override when its origin string is
    # non-empty and names a .dsc file.
    return any(Entry[1] and Entry[1].endswith('.dsc') for Entry in struct.values())
def PrintPcdDefault(self, File, Pcd, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue):
    """Write the DSC/INF/DEC default values of a PCD to the report.

    Only the sources whose default differs from the final PCD value
    (the corresponding *Match flag is False) are listed.  For structure
    PCDs the per-field defaults are appended once at the end.

    @param File             Report file object
    @param Pcd              PCD object being reported
    @param IsStructure      True when the PCD is a structure PCD
    @param DscMatch/InfMatch/DecMatch
                            True when that source's default equals the final value
    @param DscDefaultValue/InfDefaultValue/DecDefaultValue
                            Default value string from that source, or None
    """
    if not DscMatch and DscDefaultValue is not None:
        self._PrintOneDefault(File, 'DSC DEFAULT', Pcd, DscDefaultValue)
    if not InfMatch and InfDefaultValue is not None:
        self._PrintOneDefault(File, 'INF DEFAULT', Pcd, InfDefaultValue)
    if not DecMatch and DecDefaultValue is not None:
        self._PrintOneDefault(File, 'DEC DEFAULT', Pcd, DecDefaultValue)
    # Print the structure-field defaults exactly once.  The previous code
    # printed them twice when both IsStructure and DecMatch were true
    # (an unconditional print followed by a DecMatch-guarded duplicate).
    if IsStructure:
        self.PrintStructureInfo(File, Pcd.DefaultValues)

def _PrintOneDefault(self, File, Label, Pcd, DefaultValue):
    # Helper: write one "<LABEL> = <value>" line, expanding byte arrays
    # and annotating clean numeric types with both hex and decimal forms.
    Value = DefaultValue.strip()
    IsByteArray, ArrayList = ByteArrayForamt(Value)
    if IsByteArray:
        FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, Label, "{"))
        for Array in ArrayList:
            FileWrite(File, Array)
    else:
        if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
            if Value.startswith(('0x', '0X')):
                Value = '{} ({:d})'.format(Value, int(Value, 0))
            else:
                Value = "0x{:X} ({})".format(int(Value, 0), Value)
        FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, Label, Value))
def PrintPcdValue(self, File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, Flag = ' '):
    """Write the resolved value of one PCD to the report.

    PCDs without SKU information print a single value line; SKU-enabled
    PCDs print one line per SKU (and per default store for HII PCDs).
    Byte-array values are expanded one array element per line; clean
    numeric values are shown in both hex and decimal.  Structure PCDs
    additionally list their overridden fields, and VPD PCDs are recorded
    in the module-level VPDPcdList for the VPD summary.

    @param File           Report file object
    @param Pcd            PCD object being reported
    @param PcdTokenCName  Token C name to print (may differ for mixed PCDs)
    @param TypeName       Short PCD type tag (e.g. 'FIXED', 'DYNHII', 'DEXVPD')
    @param IsStructure    True when the PCD is a structure PCD
    @param Flag           Override-origin marker column (' ', '*M', '*B', ...)
    """
    if not Pcd.SkuInfoList:
        Value = Pcd.DefaultValue
        IsByteArray, ArrayList = ByteArrayForamt(Value)
        if IsByteArray:
            FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '{'))
            for Array in ArrayList:
                FileWrite(File, Array)
        else:
            if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
                if Value.startswith(('0x', '0X')):
                    Value = '{} ({:d})'.format(Value, int(Value, 0))
                else:
                    Value = "0x{:X} ({})".format(int(Value, 0), Value)
            FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', Value))
        if IsStructure:
            FiledOverrideFlag = False
            OverrideValues = Pcd.SkuOverrideValues
            if OverrideValues:
                # Only the first non-empty override struct is printed.
                for Data in OverrideValues.values():
                    Struct = list(Data.values())
                    if Struct:
                        OverrideFieldStruct = self.OverrideFieldValue(Pcd, Struct[0])
                        self.PrintStructureInfo(File, OverrideFieldStruct)
                        FiledOverrideFlag = True
                        break
            if not FiledOverrideFlag and (Pcd.PcdFieldValueFromComm or Pcd.PcdFieldValueFromFdf):
                OverrideFieldStruct = self.OverrideFieldValue(Pcd, {})
                self.PrintStructureInfo(File, OverrideFieldStruct)
        self.PrintPcdDefault(File, Pcd, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue)
    else:
        # FirstPrint controls whether the token name + Flag column is shown
        # (first line) or left blank (continuation lines).
        FirstPrint = True
        SkuList = sorted(Pcd.SkuInfoList.keys())
        for Sku in SkuList:
            SkuInfo = Pcd.SkuInfoList[Sku]
            SkuIdName = SkuInfo.SkuIdName
            if TypeName in ('DYNHII', 'DEXHII'):
                if SkuInfo.DefaultStoreDict:
                    DefaultStoreList = sorted(SkuInfo.DefaultStoreDict.keys())
                    for DefaultStore in DefaultStoreList:
                        Value = SkuInfo.DefaultStoreDict[DefaultStore]
                        IsByteArray, ArrayList = ByteArrayForamt(Value)
                        if Pcd.DatumType == 'BOOLEAN':
                            Value = str(int(Value, 0))
                        if FirstPrint:
                            FirstPrint = False
                            if IsByteArray:
                                if self.DefaultStoreSingle and self.SkuSingle:
                                    FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '{'))
                                elif self.DefaultStoreSingle and not self.SkuSingle:
                                    FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '{'))
                                elif not self.DefaultStoreSingle and self.SkuSingle:
                                    FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + DefaultStore + ')', '{'))
                                else:
                                    FileWrite(File, ' %-*s : %6s %10s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '(' + DefaultStore + ')', '{'))
                                for Array in ArrayList:
                                    FileWrite(File, Array)
                            else:
                                if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
                                    if Value.startswith(('0x', '0X')):
                                        Value = '{} ({:d})'.format(Value, int(Value, 0))
                                    else:
                                        Value = "0x{:X} ({})".format(int(Value, 0), Value)
                                if self.DefaultStoreSingle and self.SkuSingle:
                                    FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', Value))
                                elif self.DefaultStoreSingle and not self.SkuSingle:
                                    FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', Value))
                                elif not self.DefaultStoreSingle and self.SkuSingle:
                                    FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + DefaultStore + ')', Value))
                                else:
                                    FileWrite(File, ' %-*s : %6s %10s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '(' + DefaultStore + ')', Value))
                        else:
                            if IsByteArray:
                                if self.DefaultStoreSingle and self.SkuSingle:
                                    FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '{'))
                                elif self.DefaultStoreSingle and not self.SkuSingle:
                                    FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '{'))
                                elif not self.DefaultStoreSingle and self.SkuSingle:
                                    FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + DefaultStore + ')', '{'))
                                else:
                                    FileWrite(File, ' %-*s : %6s %10s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '(' + DefaultStore + ')', '{'))
                                for Array in ArrayList:
                                    FileWrite(File, Array)
                            else:
                                if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
                                    if Value.startswith(('0x', '0X')):
                                        Value = '{} ({:d})'.format(Value, int(Value, 0))
                                    else:
                                        Value = "0x{:X} ({})".format(int(Value, 0), Value)
                                if self.DefaultStoreSingle and self.SkuSingle:
                                    FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', Value))
                                elif self.DefaultStoreSingle and not self.SkuSingle:
                                    FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', Value))
                                elif not self.DefaultStoreSingle and self.SkuSingle:
                                    FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + DefaultStore + ')', Value))
                                else:
                                    FileWrite(File, ' %-*s : %6s %10s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '(' + DefaultStore + ')', Value))
                        # HII PCDs also report the backing UEFI variable.
                        FileWrite(File, '%*s: %s: %s' % (self.MaxLen + 4, SkuInfo.VariableGuid, SkuInfo.VariableName, SkuInfo.VariableOffset))
                        if IsStructure:
                            OverrideValues = Pcd.SkuOverrideValues[Sku]
                            OverrideFieldStruct = self.OverrideFieldValue(Pcd, OverrideValues[DefaultStore])
                            self.PrintStructureInfo(File, OverrideFieldStruct)
                        self.PrintPcdDefault(File, Pcd, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue)
            else:
                Value = SkuInfo.DefaultValue
                IsByteArray, ArrayList = ByteArrayForamt(Value)
                if Pcd.DatumType == 'BOOLEAN':
                    Value = str(int(Value, 0))
                if FirstPrint:
                    FirstPrint = False
                    if IsByteArray:
                        if self.SkuSingle:
                            FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', "{"))
                        else:
                            FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', "{"))
                        for Array in ArrayList:
                            FileWrite(File, Array)
                    else:
                        if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
                            if Value.startswith(('0x', '0X')):
                                Value = '{} ({:d})'.format(Value, int(Value, 0))
                            else:
                                Value = "0x{:X} ({})".format(int(Value, 0), Value)
                        if self.SkuSingle:
                            FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', Value))
                        else:
                            FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', Value))
                else:
                    if IsByteArray:
                        if self.SkuSingle:
                            FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', "{"))
                        else:
                            FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', "{"))
                        for Array in ArrayList:
                            FileWrite(File, Array)
                    else:
                        if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
                            if Value.startswith(('0x', '0X')):
                                Value = '{} ({:d})'.format(Value, int(Value, 0))
                            else:
                                Value = "0x{:X} ({})".format(int(Value, 0), Value)
                        if self.SkuSingle:
                            FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', Value))
                        else:
                            FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', Value))
                if TypeName in ('DYNVPD', 'DEXVPD'):
                    FileWrite(File, '%*s' % (self.MaxLen + 4, SkuInfo.VpdOffset))
                    # Collect VPD PCDs for the module-level VPD summary report.
                    VPDPcdItem = (Pcd.TokenSpaceGuidCName + '.' + PcdTokenCName, SkuIdName, SkuInfo.VpdOffset, Pcd.MaxDatumSize, SkuInfo.DefaultValue)
                    if VPDPcdItem not in VPDPcdList:
                        VPDPcdList.append(VPDPcdItem)
                if IsStructure:
                    FiledOverrideFlag = False
                    OverrideValues = Pcd.SkuOverrideValues[Sku]
                    if OverrideValues:
                        # Fix: dict views are not subscriptable in Python 3;
                        # materialize the keys before indexing (matches the
                        # list(OverrideValues.keys()) usage elsewhere in this
                        # report generator).
                        Keys = list(OverrideValues.keys())
                        OverrideFieldStruct = self.OverrideFieldValue(Pcd, OverrideValues[Keys[0]])
                        self.PrintStructureInfo(File, OverrideFieldStruct)
                        FiledOverrideFlag = True
                    if not FiledOverrideFlag and (Pcd.PcdFieldValueFromComm or Pcd.PcdFieldValueFromFdf):
                        OverrideFieldStruct = self.OverrideFieldValue(Pcd, {})
                        self.PrintStructureInfo(File, OverrideFieldStruct)
                self.PrintPcdDefault(File, Pcd, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue)
def OverrideFieldValue(self, Pcd, OverrideStruct):
    """Build the effective per-field override map for a structure PCD.

    Precedence (lowest to highest): DSC-sourced entries from
    *OverrideStruct*, then FDF overrides, then command-line overrides.
    A higher layer is skipped for a field when it would set the same
    value the field already has.

    Returns an OrderedDict mapping field name -> override entry.
    """
    Merged = collections.OrderedDict()
    # Seed with the DSC-sourced entries only.
    if OverrideStruct:
        for FieldName, Entry in OverrideStruct.items():
            Origin = Entry[1]
            if Origin and Origin.endswith('.dsc'):
                Merged[FieldName] = Entry
    # Layer FDF overrides, then command-line overrides, on top.
    for Layer in (Pcd.PcdFieldValueFromFdf, Pcd.PcdFieldValueFromComm):
        if not Layer:
            continue
        for FieldName, Entry in Layer.items():
            if FieldName in Merged and Entry[0] == Merged[FieldName][0]:
                continue
            Merged[FieldName] = Entry
    return Merged
def PrintStructureInfo(self, File, Struct):
    """Write one report line per structure-PCD field, sorted by field name.

    Each line is tagged with the origin of the field's value:
    '*B' for build-command-line overrides, '*F' for .fdf overrides,
    no tag otherwise.
    """
    Width = self.MaxLen + 4
    for FieldName in sorted(Struct):
        FieldValue = Struct[FieldName][0]
        Source = Struct[FieldName][1]
        if Source and 'build command options' in Source:
            Prefix = ' *B '
        elif Source and Source.endswith('.fdf'):
            Prefix = ' *F '
        else:
            Prefix = ' '
        FileWrite(File, Prefix + '%-*s = %s' % (Width, '.' + FieldName, FieldValue))
def StrtoHex(self, value):
    """Convert a PCD value string into hexadecimal form.

    Accepted forms:
      decimal integer      -> hex string (e.g. "16" -> "0x10")
      L"unicode string"    -> list of per-character hex codes, each
                              followed by '0x00' (UCS-2 byte pairs)
      "c"                  -> hex code of the quoted payload
      {byte, byte, ...}    -> list of hex byte strings (single-element
                              arrays return the bare inner text)
    Anything else (e.g. already-hex "0x10", GUID names) is returned
    unchanged.
    """
    try:
        # Plain decimal integers convert directly.
        return hex(int(value))
    except (TypeError, ValueError):
        # Not a plain integer; fall through to the string forms below.
        # (Narrowed from a bare 'except:' so KeyboardInterrupt etc. are
        # not swallowed.)
        pass
    if value.startswith("L\"") and value.endswith("\""):
        valuelist = []
        for ch in value[2:-1]:
            valuelist.append(hex(ord(ch)))
            valuelist.append('0x00')
        return valuelist
    elif value.startswith("\"") and value.endswith("\""):
        return hex(ord(value[1:-1]))
    elif value.startswith("{") and value.endswith("}"):
        valuelist = []
        if ',' not in value:
            return value[1:-1]
        for ch in value[1:-1].split(','):
            ch = ch.strip()
            if ch.startswith('0x') or ch.startswith('0X'):
                valuelist.append(ch)
                continue
            try:
                valuelist.append(hex(int(ch)))
            except ValueError:
                # Tokens that are neither hex nor decimal are skipped,
                # matching the original best-effort behavior.
                pass
        return valuelist
    else:
        return value
def IsStructurePcd(self, PcdToken, PcdTokenSpaceGuid):
    """Return True when (PcdToken, PcdTokenSpaceGuid) is registered as a
    structure PCD for this report's architecture in the global database."""
    StructDb = GlobalData.gStructurePcd
    return bool(StructDb
                and self.Arch in StructDb
                and (PcdToken, PcdTokenSpaceGuid) in StructDb[self.Arch])
##
# Reports platform and module Prediction information
#
# This class reports the platform execution order prediction section and
# module load fixed address prediction subsection in the build report file.
#
class PredictionReport(object):
    ##
    # Constructor function for class PredictionReport
    #
    # This constructor function generates PredictionReport object for the platform.
    #
    # @param self:           The object pointer
    # @param Wa              Workspace context information
    #
    def __init__(self, Wa):
        self._MapFileName = os.path.join(Wa.BuildDir, Wa.Name + ".map")
        self._MapFileParsed = False
        self._EotToolInvoked = False
        self._FvDir = Wa.FvDir
        self._EotDir = Wa.BuildDir
        self._FfsEntryPoint = {}
        self._GuidMap = {}
        self._SourceList = []
        self.FixedMapDict = {}
        self.ItemList = []
        self.MaxLen = 0

        #
        # Collect all platform reference source files and GUID C Name
        #
        for Pa in Wa.AutoGenObjectList:
            for Module in Pa.LibraryAutoGenList + Pa.ModuleAutoGenList:
                #
                # BASE typed modules are EFI agnostic, so we need not scan
                # their source code to find PPI/Protocol produce or consume
                # information.
                #
                if Module.ModuleType == SUP_MODULE_BASE:
                    continue
                #
                # Add module referenced source files
                #
                self._SourceList.append(str(Module))
                IncludeList = {}
                for Source in Module.SourceFileList:
                    if os.path.splitext(str(Source))[1].lower() == ".c":
                        self._SourceList.append(" " + str(Source))
                        FindIncludeFiles(Source.Path, Module.IncludePathList, IncludeList)
                for IncludeFile in IncludeList.values():
                    self._SourceList.append(" " + IncludeFile)

                for Guid in Module.PpiList:
                    self._GuidMap[Guid] = GuidStructureStringToGuidString(Module.PpiList[Guid])
                for Guid in Module.ProtocolList:
                    self._GuidMap[Guid] = GuidStructureStringToGuidString(Module.ProtocolList[Guid])
                for Guid in Module.GuidList:
                    self._GuidMap[Guid] = GuidStructureStringToGuidString(Module.GuidList[Guid])

                if Module.Guid and not Module.IsLibrary:
                    EntryPoint = " ".join(Module.Module.ModuleEntryPointList)
                    if int(str(Module.AutoGenVersion), 0) >= 0x00010005:
                        RealEntryPoint = "_ModuleEntryPoint"
                    else:
                        RealEntryPoint = EntryPoint
                        if EntryPoint == "_ModuleEntryPoint":
                            CCFlags = Module.BuildOption.get("CC", {}).get("FLAGS", "")
                            Match = gGlueLibEntryPoint.search(CCFlags)
                            if Match:
                                EntryPoint = Match.group(1)

                    self._FfsEntryPoint[Module.Guid.upper()] = (EntryPoint, RealEntryPoint)

        #
        # Collect platform firmware volume list as the input of EOT.
        #
        self._FvList = []
        if Wa.FdfProfile:
            for Fd in Wa.FdfProfile.FdDict:
                for FdRegion in Wa.FdfProfile.FdDict[Fd].RegionList:
                    if FdRegion.RegionType != BINARY_FILE_TYPE_FV:
                        continue
                    for FvName in FdRegion.RegionDataList:
                        if FvName in self._FvList:
                            continue
                        self._FvList.append(FvName)
                        for Ffs in Wa.FdfProfile.FvDict[FvName.upper()].FfsList:
                            for Section in Ffs.SectionList:
                                try:
                                    for FvSection in Section.SectionList:
                                        if FvSection.FvName in self._FvList:
                                            continue
                                        self._FvList.append(FvSection.FvName)
                                except AttributeError:
                                    # Leaf sections carry no nested SectionList.
                                    pass

    ##
    # Parse platform fixed address map files
    #
    # This function parses the platform final fixed address map file to get
    # the database of predicted fixed address for module image base, entry point
    # etc.
    #
    # @param self:           The object pointer
    #
    def _ParseMapFile(self):
        if self._MapFileParsed:
            return
        self._MapFileParsed = True
        if os.path.isfile(self._MapFileName):
            try:
                # Use a context manager so the map file handle is closed
                # promptly (the original leaked the open file object).
                with open(self._MapFileName) as MapFile:
                    FileContents = MapFile.read()
                for Match in gMapFileItemPattern.finditer(FileContents):
                    AddressType = Match.group(1)
                    BaseAddress = Match.group(2)
                    EntryPoint = Match.group(3)
                    Guid = Match.group(4).upper()
                    List = self.FixedMapDict.setdefault(Guid, [])
                    List.append((AddressType, BaseAddress, "*I"))
                    List.append((AddressType, EntryPoint, "*E"))
            except Exception:
                # Narrowed from a bare 'except:' so KeyboardInterrupt and
                # SystemExit are not swallowed.
                EdkLogger.warn(None, "Cannot open file to read", self._MapFileName)

    ##
    # Invokes EOT tool to get the predicted the execution order.
    #
    # This function invokes EOT tool to calculate the predicted dispatch order
    #
    # @param self:           The object pointer
    #
    def _InvokeEotTool(self):
        if self._EotToolInvoked:
            return
        self._EotToolInvoked = True
        FvFileList = []
        for FvName in self._FvList:
            FvFile = os.path.join(self._FvDir, FvName + ".Fv")
            if os.path.isfile(FvFile):
                FvFileList.append(FvFile)
        if len(FvFileList) == 0:
            return
        #
        # Write source file list and GUID file list to an intermediate file
        # as the input for EOT tool and dispatch List as the output file
        # from EOT tool.
        #
        SourceList = os.path.join(self._EotDir, "SourceFile.txt")
        GuidList = os.path.join(self._EotDir, "GuidList.txt")
        DispatchList = os.path.join(self._EotDir, "Dispatch.txt")

        # Context managers ensure the intermediate files are flushed and
        # closed before EOT reads them.
        with open(SourceList, "w+") as TempFile:
            for Item in self._SourceList:
                FileWrite(TempFile, Item)
        with open(GuidList, "w+") as TempFile:
            for Key in self._GuidMap:
                FileWrite(TempFile, "%s %s" % (Key, self._GuidMap[Key]))

        try:
            from Eot.EotMain import Eot
            #
            # Invoke EOT tool and echo its runtime performance
            #
            EotStartTime = time.time()
            Eot(CommandLineOption=False, SourceFileList=SourceList, GuidList=GuidList,
                FvFileList=' '.join(FvFileList), Dispatch=DispatchList, IsInit=True)
            EotEndTime = time.time()
            EotDuration = time.strftime("%H:%M:%S", time.gmtime(int(round(EotEndTime - EotStartTime))))
            EdkLogger.quiet("EOT run time: %s\n" % EotDuration)
            #
            # Parse the output of EOT tool
            #
            with open(DispatchList) as DispatchFile:
                for Line in DispatchFile:
                    if len(Line.split()) < 4:
                        continue
                    (Guid, Phase, FfsName, FilePath) = Line.split()
                    Symbol = self._FfsEntryPoint.get(Guid, [FfsName, ""])[0]
                    if len(Symbol) > self.MaxLen:
                        self.MaxLen = len(Symbol)
                    self.ItemList.append((Phase, Symbol, FilePath))
        except Exception:
            # EOT is best-effort: log the traceback and keep the build going.
            # (Narrowed from a bare 'except:'.)
            EdkLogger.quiet("(Python %s on %s\n%s)" % (platform.python_version(), sys.platform, traceback.format_exc()))
            EdkLogger.warn(None, "Failed to generate execution order prediction report, for some error occurred in executing EOT.")

    ##
    # Generate platform execution order report
    #
    # This function generates the predicted module execution order.
    #
    # @param self            The object pointer
    # @param File            The file object for report
    #
    def _GenerateExecutionOrderReport(self, File):
        self._InvokeEotTool()
        if len(self.ItemList) == 0:
            return
        FileWrite(File, gSectionStart)
        FileWrite(File, "Execution Order Prediction")
        FileWrite(File, "*P PEI phase")
        FileWrite(File, "*D DXE phase")
        FileWrite(File, "*E Module INF entry point name")
        FileWrite(File, "*N Module notification function name")
        FileWrite(File, "Type %-*s %s" % (self.MaxLen, "Symbol", "Module INF Path"))
        FileWrite(File, gSectionSep)
        for Item in self.ItemList:
            FileWrite(File, "*%sE %-*s %s" % (Item[0], self.MaxLen, Item[1], Item[2]))
        # Close the section with the end marker; the original wrote
        # gSectionStart here, leaving the section unterminated.
        FileWrite(File, gSectionEnd)

    ##
    # Generate Fixed Address report.
    #
    # This function generate the predicted fixed address report for a module
    # specified by Guid.
    #
    # @param self            The object pointer
    # @param File            The file object for report
    # @param Guid            The module Guid value.
    # @param NotifyList      The list of all notify function in a module
    #
    def _GenerateFixedAddressReport(self, File, Guid, NotifyList):
        self._ParseMapFile()
        FixedAddressList = self.FixedMapDict.get(Guid)
        if not FixedAddressList:
            return

        FileWrite(File, gSubSectionStart)
        FileWrite(File, "Fixed Address Prediction")
        FileWrite(File, "*I Image Loading Address")
        FileWrite(File, "*E Entry Point Address")
        FileWrite(File, "*N Notification Function Address")
        FileWrite(File, "*F Flash Address")
        FileWrite(File, "*M Memory Address")
        FileWrite(File, "*S SMM RAM Offset")
        FileWrite(File, "TOM Top of Memory")
        FileWrite(File, "Type Address Name")
        FileWrite(File, gSubSectionSep)
        for Item in FixedAddressList:
            Type = Item[0]
            Value = Item[1]
            Symbol = Item[2]
            if Symbol == "*I":
                Name = "(Image Base)"
            elif Symbol == "*E":
                Name = self._FfsEntryPoint.get(Guid, ["", "_ModuleEntryPoint"])[1]
            elif Symbol in NotifyList:
                Name = Symbol
                Symbol = "*N"
            else:
                continue

            if "Flash" in Type:
                Symbol += "F"
            elif "Memory" in Type:
                Symbol += "M"
            else:
                Symbol += "S"

            # Negative values are offsets back from the top of memory.
            if Value[0] == "-":
                Value = "TOM" + Value

            FileWrite(File, "%s %-16s %s" % (Symbol, Value, Name))

    ##
    # Generate report for the prediction part
    #
    # This function generate the predicted fixed address report for a module or
    # predicted module execution order for a platform.
    # If the input Guid is None, then, it generates the predicted module execution order;
    # otherwise it generated the module fixed loading address for the module specified by
    # Guid.
    #
    # @param self            The object pointer
    # @param File            The file object for report
    # @param Guid            The module Guid value.
    #
    def GenerateReport(self, File, Guid):
        if Guid:
            self._GenerateFixedAddressReport(File, Guid.upper(), [])
        else:
            self._GenerateExecutionOrderReport(File)
##
# Reports FD region information
#
# This class reports the FD subsection in the build report file.
# It collects region information of platform flash device.
# If the region is a firmware volume, it lists the set of modules
# and its space information; otherwise, it only lists its region name,
# base address and size in its sub-section header.
# If there are nesting FVs, the nested FVs will list immediate after
# this FD region subsection
#
class FdRegionReport(object):
##
# Discover all the nested FV name list.
#
# This is an internal worker function to discover the all the nested FV information
# in the parent firmware volume. It uses deep first search algorithm recursively to
# find all the FV list name and append them to the list.
#
# @param self The object pointer
# @param FvName The name of current firmware file system
# @param Wa Workspace context information
#
def _DiscoverNestedFvList(self, FvName, Wa):
    """Depth-first discovery of firmware volumes nested inside *FvName*.

    Each newly found FV is recorded in self.FvList / self.FvInfo, its
    enclosing FFS file GUID is mapped to the FV name in self._GuidsDb,
    and the search recurses into the nested FV.
    """
    FvKey = FvName.upper()
    FvDict = Wa.FdfProfile.FvDict
    if FvKey not in FvDict:
        return
    for FfsFile in FvDict[FvKey].FfsList:
        for Sect in FfsFile.SectionList:
            try:
                for Nested in Sect.SectionList:
                    NestedName = Nested.FvName
                    if NestedName in self.FvList:
                        continue
                    self._GuidsDb[FfsFile.NameGuid.upper()] = NestedName
                    self.FvList.append(NestedName)
                    self.FvInfo[NestedName] = ("Nested FV", 0, 0)
                    self._DiscoverNestedFvList(NestedName, Wa)
            except AttributeError:
                # Leaf sections carry no nested SectionList.
                pass
##
# Constructor function for class FdRegionReport
#
# This constructor function generates FdRegionReport object for a specified FdRegion.
# If the FdRegion is a firmware volume, it will recursively find all its nested Firmware
# volume list. This function also collects GUID map in order to dump module identification
# in the final report.
#
# @param self: The object pointer
# @param FdRegion The current FdRegion object
# @param Wa Workspace context information
#
def __init__(self, FdRegion, Wa):
    """Collect FV and GUID information for one FD region.

    Records the region's type/base/size, discovers nested FVs for FV
    regions, and builds self._GuidsDb mapping file GUIDs to readable
    identifications (module INF paths, a-priori files, ACPI storage,
    section file names) for use when the report is generated.

    @param FdRegion   The current FdRegion object from the FDF profile
    @param Wa         Workspace context information
    """
    self.Type = FdRegion.RegionType
    self.BaseAddress = FdRegion.Offset
    self.Size = FdRegion.Size
    self.FvList = []          # FV names contained in this region (incl. nested)
    self.FvInfo = {}          # FV name -> (origin label, base, size)
    self._GuidsDb = {}        # GUID string -> human-readable identification
    self._FvDir = Wa.FvDir
    self._WorkspaceDir = Wa.WorkspaceDir

    #
    # If the input FdRegion is not a firmware volume,
    # we are done.
    #
    if self.Type != BINARY_FILE_TYPE_FV:
        return

    #
    # Find all nested FVs in the FdRegion
    #
    for FvName in FdRegion.RegionDataList:
        if FvName in self.FvList:
            continue
        self.FvList.append(FvName)
        self.FvInfo[FvName] = ("Fd Region", self.BaseAddress, self.Size)
        self._DiscoverNestedFvList(FvName, Wa)

    PlatformPcds = {}
    #
    # Collect PCDs declared in DEC files.
    #
    for Pa in Wa.AutoGenObjectList:
        for Package in Pa.PackageList:
            for (TokenCName, TokenSpaceGuidCName, DecType) in Package.Pcds:
                DecDefaultValue = Package.Pcds[TokenCName, TokenSpaceGuidCName, DecType].DefaultValue
                PlatformPcds[(TokenCName, TokenSpaceGuidCName)] = DecDefaultValue
    #
    # Collect PCDs defined in DSC file
    # (DSC values intentionally overwrite DEC defaults collected above.)
    #
    for Pa in Wa.AutoGenObjectList:
        for (TokenCName, TokenSpaceGuidCName) in Pa.Platform.Pcds:
            DscDefaultValue = Pa.Platform.Pcds[(TokenCName, TokenSpaceGuidCName)].DefaultValue
            PlatformPcds[(TokenCName, TokenSpaceGuidCName)] = DscDefaultValue

    #
    # Add PEI and DXE a priori files GUIDs defined in PI specification.
    #
    self._GuidsDb[PEI_APRIORI_GUID] = "PEI Apriori"
    self._GuidsDb[DXE_APRIORI_GUID] = "DXE Apriori"
    #
    # Add ACPI table storage file
    #
    self._GuidsDb["7E374E25-8E01-4FEE-87F2-390C23C606CD"] = "ACPI table storage"

    # Map each platform module's GUID to "<BaseName> (<INF path>)".
    for Pa in Wa.AutoGenObjectList:
        for ModuleKey in Pa.Platform.Modules:
            M = Pa.Platform.Modules[ModuleKey].M
            InfPath = mws.join(Wa.WorkspaceDir, M.MetaFile.File)
            self._GuidsDb[M.Guid.upper()] = "%s (%s)" % (M.Module.BaseName, InfPath)

    #
    # Collect the GUID map in the FV firmware volume
    #
    for FvName in self.FvList:
        FvDictKey=FvName.upper()
        if FvDictKey in Wa.FdfProfile.FvDict:
            for Ffs in Wa.FdfProfile.FvDict[FvName.upper()].FfsList:
                try:
                    #
                    # collect GUID map for binary EFI file in FDF file.
                    #
                    Guid = Ffs.NameGuid.upper()
                    # A PCD-style name (TokenSpace.Token) is resolved to the
                    # GUID value the platform assigned to that PCD.
                    Match = gPcdGuidPattern.match(Ffs.NameGuid)
                    if Match:
                        PcdTokenspace = Match.group(1)
                        PcdToken = Match.group(2)
                        if (PcdToken, PcdTokenspace) in PlatformPcds:
                            GuidValue = PlatformPcds[(PcdToken, PcdTokenspace)]
                            Guid = GuidStructureByteArrayToGuidString(GuidValue).upper()
                    for Section in Ffs.SectionList:
                        try:
                            # NOTE(review): only sections exposing SectFileName
                            # contribute; others raise AttributeError and are skipped.
                            ModuleSectFile = mws.join(Wa.WorkspaceDir, Section.SectFileName)
                            self._GuidsDb[Guid] = ModuleSectFile
                        except AttributeError:
                            pass
                except AttributeError:
                    pass
##
# Internal worker function to generate report for the FD region
#
# This internal worker function to generate report for the FD region.
# It the type is firmware volume, it lists offset and module identification.
#
# @param self The object pointer
# @param File The file object for report
# @param Title The title for the FD subsection
# @param BaseAddress The base address for the FD region
# @param Size The size of the FD region
# @param FvName The FV name if the FD region is a firmware volume
#
def _GenerateReport(self, File, Title, Type, BaseAddress, Size=0, FvName=None):
    """Write one FD-region subsection to the report file.

    For firmware-volume regions, parse the generated <FvName>.Fv.txt report
    to list total/taken/free sizes and the per-module offsets; otherwise
    only the region size is written.
    """
    FileWrite(File, gSubSectionStart)
    FileWrite(File, Title)
    FileWrite(File, "Type: %s" % Type)
    FileWrite(File, "Base Address: 0x%X" % BaseAddress)
    if self.Type == BINARY_FILE_TYPE_FV:
        FvTotalSize = 0
        FvTakenSize = 0
        FvFreeSize = 0
        # Derive the FV report file name; avoid doubling the ".Fv" part when
        # the FV name already ends with it.
        if FvName.upper().endswith('.FV'):
            FileExt = FvName + ".txt"
        else:
            FileExt = FvName + ".Fv.txt"
        if not os.path.isfile(FileExt):
            # Fall back from workspace-relative to the build FV directory.
            FvReportFileName = mws.join(self._WorkspaceDir, FileExt)
            if not os.path.isfile(FvReportFileName):
                FvReportFileName = os.path.join(self._FvDir, FileExt)
        # NOTE(review): if FileExt itself names an existing file, FvReportFileName
        # is never assigned and open() below raises NameError — confirm intent.
        try:
            #
            # Collect size info in the firmware volume.
            #
            # NOTE(review): the file handle from open() is never closed explicitly;
            # it is reclaimed by the GC on CPython only.
            FvReport = open(FvReportFileName).read()
            Match = gFvTotalSizePattern.search(FvReport)
            if Match:
                FvTotalSize = int(Match.group(1), 16)
            Match = gFvTakenSizePattern.search(FvReport)
            if Match:
                FvTakenSize = int(Match.group(1), 16)
            # Free space is simply the difference of the two parsed values.
            FvFreeSize = FvTotalSize - FvTakenSize
            #
            # Write size information to the report file.
            #
            FileWrite(File, "Size: 0x%X (%.0fK)" % (FvTotalSize, FvTotalSize / 1024.0))
            FileWrite(File, "Fv Name: %s (%.1f%% Full)" % (FvName, FvTakenSize * 100.0 / FvTotalSize))
            FileWrite(File, "Occupied Size: 0x%X (%.0fK)" % (FvTakenSize, FvTakenSize / 1024.0))
            FileWrite(File, "Free Size: 0x%X (%.0fK)" % (FvFreeSize, FvFreeSize / 1024.0))
            FileWrite(File, "Offset Module")
            FileWrite(File, gSubSectionSep)
            #
            # Write module offset and module identification to the report file.
            #
            OffsetInfo = {}
            for Match in gOffsetGuidPattern.finditer(FvReport):
                Guid = Match.group(2).upper()
                # Map the GUID to a friendly name when known; fall back to the GUID.
                OffsetInfo[Match.group(1)] = self._GuidsDb.get(Guid, Guid)
            OffsetList = sorted(OffsetInfo.keys())
            for Offset in OffsetList:
                FileWrite (File, "%s %s" % (Offset, OffsetInfo[Offset]))
        except IOError:
            EdkLogger.warn(None, "Fail to read report file", FvReportFileName)
    else:
        # Non-FV region: only the raw region size is reported.
        FileWrite(File, "Size: 0x%X (%.0fK)" % (Size, Size / 1024.0))
    FileWrite(File, gSubSectionEnd)
##
# Generate report for the FD region
#
# This function generates report for the FD region.
#
# @param self The object pointer
# @param File The file object for report
#
def GenerateReport(self, File):
    """Write the report for this FD region.

    Firmware-volume regions get one subsection per FV; any other region is
    reported once under the generic "FD Region" title.
    """
    if not self.FvList:
        # Not a firmware volume: a single generic subsection suffices.
        self._GenerateReport(File, "FD Region", self.Type, self.BaseAddress, self.Size)
        return
    for FvItem in self.FvList:
        # FvInfo holds (title, base address, size) per FV name.
        FvData = self.FvInfo[FvItem]
        self._GenerateReport(File, FvData[0], TAB_FV_DIRECTORY, FvData[1], FvData[2], FvItem)
##
# Reports FD information
#
# This class reports the FD section in the build report file.
# It collects flash device information for a platform.
#
class FdReport(object):
    """Reports the Firmware Device (FD) section in the build report file.

    Collects flash-device information (name, base, size, regions) for one FD
    of a platform and writes it, including the VPD region contents, to the
    report.
    """
    ##
    # Constructor function for class FdReport
    #
    # This constructor function generates FdReport object for a specified
    # firmware device.
    #
    # @param self            The object pointer
    # @param Fd              The current Firmware device object
    # @param Wa              Workspace context information
    #
    def __init__(self, Fd, Wa):
        self.FdName = Fd.FdUiName
        self.BaseAddress = Fd.BaseAddress
        self.Size = Fd.Size
        self.FdRegionList = [FdRegionReport(FdRegion, Wa) for FdRegion in Fd.RegionList]
        self.FvPath = os.path.join(Wa.BuildDir, TAB_FV_DIRECTORY)
        self.VPDBaseAddress = 0
        self.VPDSize = 0
        # Locate the VPD region (a FILE region referencing the VPD tool GUID)
        # to anchor the VPD PCD offsets printed in GenerateReport.
        for index, FdRegion in enumerate(Fd.RegionList):
            # Bug fix: the original used "is 'FILE'", an identity comparison
            # against a string literal, which is implementation-dependent and
            # a SyntaxWarning on Python 3.8+; equality is what is intended.
            if str(FdRegion.RegionType) == 'FILE' and Wa.Platform.VpdToolGuid in str(FdRegion.RegionDataList):
                self.VPDBaseAddress = self.FdRegionList[index].BaseAddress
                self.VPDSize = self.FdRegionList[index].Size
                break
    ##
    # Generate report for the firmware device.
    #
    # This function generates report for the firmware device.
    #
    # @param self            The object pointer
    # @param File            The file object for report
    #
    def GenerateReport(self, File):
        FileWrite(File, gSectionStart)
        FileWrite(File, "Firmware Device (FD)")
        FileWrite(File, "FD Name:            %s" % self.FdName)
        FileWrite(File, "Base Address:       %s" % self.BaseAddress)
        FileWrite(File, "Size:               0x%X (%.0fK)" % (self.Size, self.Size / 1024.0))
        if len(self.FdRegionList) > 0:
            FileWrite(File, gSectionSep)
            for FdRegionItem in self.FdRegionList:
                FdRegionItem.GenerateReport(File)
        # NOTE(review): VPDPcdList is a module-level global populated elsewhere
        # (by the PCD report collection) — confirm it is filled before this runs.
        if VPDPcdList:
            # Sort VPD PCDs by their numeric offset within the VPD region.
            VPDPcdList.sort(key=lambda x: int(x[2], 0))
            FileWrite(File, gSubSectionStart)
            FileWrite(File, "FD VPD Region")
            FileWrite(File, "Base Address:       0x%X" % self.VPDBaseAddress)
            FileWrite(File, "Size:               0x%X (%.0fK)" % (self.VPDSize, self.VPDSize / 1024.0))
            FileWrite(File, gSubSectionSep)
            for item in VPDPcdList:
                # Add BaseAddress for offset
                Offset = '0x%08X' % (int(item[2], 16) + self.VPDBaseAddress)
                IsByteArray, ArrayList = ByteArrayForamt(item[-1])
                Skuinfo = item[1]
                # When the platform defines a single SKU, report that SKU id.
                if len(GlobalData.gSkuids) == 1 :
                    Skuinfo = GlobalData.gSkuids[0]
                if IsByteArray:
                    FileWrite(File, "%s | %s | %s | %s | %s" % (item[0], Skuinfo, Offset, item[3], '{'))
                    for Array in ArrayList:
                        FileWrite(File, Array)
                else:
                    FileWrite(File, "%s | %s | %s | %s | %s" % (item[0], Skuinfo, Offset, item[3], item[-1]))
            FileWrite(File, gSubSectionEnd)
        FileWrite(File, gSectionEnd)
##
# Reports platform information
#
# This class reports the whole platform information
#
class PlatformReport(object):
    """Reports the whole platform: summary, PCDs, flash, and per-module details."""
    ##
    # Constructor function for class PlatformReport
    #
    # This constructor function generates PlatformReport object a platform build.
    # It generates report for platform summary, flash, global PCDs and detailed
    # module information for modules involved in platform build.
    #
    # @param self            The object pointer
    # @param Wa              Workspace context information
    # @param MaList          The list of modules in the platform build
    #
    def __init__(self, Wa, MaList, ReportType):
        self._WorkspaceDir = Wa.WorkspaceDir
        self.PlatformName = Wa.Name
        self.PlatformDscPath = Wa.Platform
        self.Architectures = " ".join(Wa.ArchList)
        self.ToolChain = Wa.ToolChain
        self.Target = Wa.BuildTarget
        self.OutputPath = os.path.join(Wa.WorkspaceDir, Wa.OutputDir)
        self.BuildEnvironment = platform.platform()
        # Sub-reports are created lazily based on the requested report types.
        self.PcdReport = None
        if "PCD" in ReportType:
            self.PcdReport = PcdReport(Wa)
        self.FdReportList = []
        # Flash reports only make sense for a full platform build (MaList is None)
        # that actually has an FDF profile.
        if "FLASH" in ReportType and Wa.FdfProfile and MaList is None:
            for Fd in Wa.FdfProfile.FdDict:
                self.FdReportList.append(FdReport(Wa.FdfProfile.FdDict[Fd], Wa))
        self.PredictionReport = None
        if "FIXED_ADDRESS" in ReportType or "EXECUTION_ORDER" in ReportType:
            self.PredictionReport = PredictionReport(Wa)
        self.DepexParser = None
        if "DEPEX" in ReportType:
            self.DepexParser = DepexParser(Wa)
        self.ModuleReportList = []
        if MaList is not None:
            # Module build: report exactly the modules that were built.
            self._IsModuleBuild = True
            for Ma in MaList:
                self.ModuleReportList.append(ModuleReport(Ma, ReportType))
        else:
            # Platform build: collect modules from each arch's platform autogen,
            # plus any INF-only modules referenced by the FDF file.
            self._IsModuleBuild = False
            for Pa in Wa.AutoGenObjectList:
                ModuleAutoGenList = []
                for ModuleKey in Pa.Platform.Modules:
                    ModuleAutoGenList.append(Pa.Platform.Modules[ModuleKey].M)
                if GlobalData.gFdfParser is not None:
                    if Pa.Arch in GlobalData.gFdfParser.Profile.InfDict:
                        INFList = GlobalData.gFdfParser.Profile.InfDict[Pa.Arch]
                        for InfName in INFList:
                            InfClass = PathClass(NormPath(InfName), Wa.WorkspaceDir, Pa.Arch)
                            Ma = ModuleAutoGen(Wa, InfClass, Pa.BuildTarget, Pa.ToolChain, Pa.Arch, Wa.MetaFile)
                            if Ma is None:
                                continue
                            # Avoid duplicating modules already known from the platform.
                            if Ma not in ModuleAutoGenList:
                                ModuleAutoGenList.append(Ma)
                for MGen in ModuleAutoGenList:
                    self.ModuleReportList.append(ModuleReport(MGen, ReportType))
    ##
    # Generate report for the whole platform.
    #
    # This function generates report for platform information.
    # It comprises of platform summary, global PCD, flash and
    # module list sections.
    #
    # @param self            The object pointer
    # @param File            The file object for report
    # @param BuildDuration   The total time to build the modules
    # @param AutoGenTime     The total time of AutoGen Phase
    # @param MakeTime        The total time of Make Phase
    # @param GenFdsTime      The total time of GenFds Phase
    # @param ReportType      The kind of report items in the final report file
    #
    def GenerateReport(self, File, BuildDuration, AutoGenTime, MakeTime, GenFdsTime, ReportType):
        FileWrite(File, "Platform Summary")
        FileWrite(File, "Platform Name:        %s" % self.PlatformName)
        FileWrite(File, "Platform DSC Path:    %s" % self.PlatformDscPath)
        FileWrite(File, "Architectures:        %s" % self.Architectures)
        FileWrite(File, "Tool Chain:           %s" % self.ToolChain)
        FileWrite(File, "Target:               %s" % self.Target)
        if GlobalData.gSkuids:
            FileWrite(File, "SKUID:                %s" % " ".join(GlobalData.gSkuids))
        if GlobalData.gDefaultStores:
            FileWrite(File, "DefaultStore:         %s" % " ".join(GlobalData.gDefaultStores))
        FileWrite(File, "Output Path:          %s" % self.OutputPath)
        FileWrite(File, "Build Environment:    %s" % self.BuildEnvironment)
        FileWrite(File, "Build Duration:       %s" % BuildDuration)
        # Phase durations are optional; only print the ones that were measured.
        if AutoGenTime:
            FileWrite(File, "AutoGen Duration:     %s" % AutoGenTime)
        if MakeTime:
            FileWrite(File, "Make Duration:        %s" % MakeTime)
        if GenFdsTime:
            FileWrite(File, "GenFds Duration:      %s" % GenFdsTime)
        FileWrite(File, "Report Content:       %s" % ", ".join(ReportType))
        if GlobalData.MixedPcd:
            FileWrite(File, gSectionStart)
            FileWrite(File, "The following PCDs use different access methods:")
            FileWrite(File, gSectionSep)
            for PcdItem in GlobalData.MixedPcd:
                FileWrite(File, "%s.%s" % (str(PcdItem[1]), str(PcdItem[0])))
            FileWrite(File, gSectionEnd)
        # Platform-level sections (PCD / flash) are skipped for module builds.
        if not self._IsModuleBuild:
            if "PCD" in ReportType:
                self.PcdReport.GenerateReport(File, None)
            if "FLASH" in ReportType:
                for FdReportListItem in self.FdReportList:
                    FdReportListItem.GenerateReport(File)
        for ModuleReportItem in self.ModuleReportList:
            ModuleReportItem.GenerateReport(File, self.PcdReport, self.PredictionReport, self.DepexParser, ReportType)
        if not self._IsModuleBuild:
            if "EXECUTION_ORDER" in ReportType:
                self.PredictionReport.GenerateReport(File, None)
## BuildReport class
#
# This base class contain the routines to collect data and then
# applies certain format to the output report
#
class BuildReport(object):
    """Collects platform reports and writes the final build report file.

    A BuildReport accumulates (Workspace, ModuleList) pairs via
    AddPlatformReport() and renders them all in GenerateReport().
    """
    ##
    # Constructor function for class BuildReport
    #
    # @param self            The object pointer
    # @param ReportFile      The file name to save report file; a falsy value
    #                        disables reporting entirely.
    # @param ReportType      The kind of report items in the final report file;
    #                        None/empty selects the default set.
    #
    def __init__(self, ReportFile, ReportType):
        self.ReportFile = ReportFile
        if ReportFile:
            self.ReportList = []
            self.ReportType = []
            if ReportType:
                # De-duplicate while preserving the caller's ordering.
                for ReportTypeItem in ReportType:
                    if ReportTypeItem not in self.ReportType:
                        self.ReportType.append(ReportTypeItem)
            else:
                self.ReportType = ["PCD", "LIBRARY", "BUILD_FLAGS", "DEPEX", "HASH", "FLASH", "FIXED_ADDRESS"]
    ##
    # Adds platform report to the list
    #
    # @param self            The object pointer
    # @param Wa              Workspace context information
    # @param MaList          The list of modules in the platform build
    #
    def AddPlatformReport(self, Wa, MaList=None):
        if self.ReportFile:
            self.ReportList.append((Wa, MaList))
    ##
    # Generates the final report.
    #
    # Invokes GenerateReport() for every platform report in the list and
    # saves the combined output to self.ReportFile.
    #
    # @param self            The object pointer
    # @param BuildDuration   The total time to build the modules
    # @param AutoGenTime     The total time of AutoGen phase
    # @param MakeTime        The total time of Make phase
    # @param GenFdsTime      The total time of GenFds phase
    #
    def GenerateReport(self, BuildDuration, AutoGenTime, MakeTime, GenFdsTime):
        if self.ReportFile:
            # Bug fix: the original built the buffer as BytesIO(''), which
            # raises TypeError on Python 3 (str passed to a bytes buffer);
            # an argument-less BytesIO() is equivalent on Python 2 as well.
            # NOTE(review): if FileWrite emits str on Python 3, this should
            # become StringIO — confirm against FileWrite's implementation.
            File = BytesIO()
            try:
                for (Wa, MaList) in self.ReportList:
                    PlatformReport(Wa, MaList, self.ReportType).GenerateReport(File, BuildDuration, AutoGenTime, MakeTime, GenFdsTime, self.ReportType)
                Content = FileLinesSplit(File.getvalue(), gLineMaxLength)
                SaveFileOnChange(self.ReportFile, Content, True)
                EdkLogger.quiet("Build report can be found at %s" % os.path.abspath(self.ReportFile))
            except IOError:
                EdkLogger.error(None, FILE_WRITE_FAILURE, ExtraData=self.ReportFile)
            except:
                EdkLogger.error("BuildReport", CODE_ERROR, "Unknown fatal error when generating build report", ExtraData=self.ReportFile, RaiseError=False)
                EdkLogger.quiet("(Python %s on %s\n%s)" % (platform.python_version(), sys.platform, traceback.format_exc()))
            finally:
                # Bug fix: the close was previously unreachable when the
                # IOError branch re-raised via EdkLogger.error; always release
                # the buffer.
                File.close()
# This acts like the main() function for the script, unless it is 'import'ed into another script.
if __name__ == '__main__':
    # This module is only useful as a library; there is no standalone behavior.
    pass
|
cluster.py | # Copyright 2019 รcole Polytechnique Fรฉdรฉrale de Lausanne. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from . import runtime, dist_common, recording
from common import parse
import Pyro4
import Pyro4.socketutil
import multiprocessing
import time
import enum
import tensorflow as tf
from .worker import SessionState
import tensorflow.contrib.gate as gate
import threading
from contextlib import ExitStack, contextmanager
import concurrent.futures as futures
import random
import re
from collections import namedtuple
import logging
logging.getLogger("Pyro4.core").setLevel(level=logging.DEBUG)
class MasterState(enum.Enum):
    """Lifecycle states reported by the cluster master."""
    starting = "starting"
    running = "running"
    stopping = "stopping"
    # NOTE: "exitted" (sic) — the misspelled value is kept because it is the
    # string exchanged with peers; renaming it would break the protocol.
    exitted = "exitted"
    error = "error"
    not_yet_started = "not_yet_started"

# One assigned worker slot: the Pyro4 worker proxy, a context-manager factory
# that pins ops to its tf.device, and its "host:port" cluster endpoint.
DeviceAssignment = namedtuple("DeviceAssignment", ("worker", "tf_device", "cluster_endpoint"))
class ClusterRuntime(runtime.Runtime):
    """Runtime that runs an application across a cluster of Pyro4-registered
    TensorFlow workers, with this process acting as the "master" job.

    The master builds the graph, starts a tf.train.Server, pushes the
    serialized MetaGraphDef to every assigned worker, and then serves client
    requests over Pyro4 until the coordinator stops.
    """
    master_job_name = "master"
    worker_job_name = "worker"
    # Parses registered worker names of the form "<worker_type>.<index>".
    _name_parser = re.compile("(?P<worker_type>[a-zA-Z0-9\-_]+)\.\d+$")

    def __init__(self):
        super().__init__()
        # Thread pool used to serve client requests; created in _run_application.
        self.executor = None
        self.app_sess_coord = None  # tuple so that these can be assigned in one swoop
        self._app_name = None
        # Remove any stale recording artifacts from a previous run.
        recording.recording_cleanup()

    @staticmethod
    def name():
        return "cluster"

    @staticmethod
    def help_message():
        return "run an application on a cluster"

    @classmethod
    def add_arguments(cls, parser):
        """Register all CLI arguments this runtime understands."""
        cls.add_record_args(parser=parser)
        parser.add_argument("--summary", default=False, action="store_true", help="record a Tensorflow graph summary")
        parser.add_argument("--summary-interval", default=1, type=parse.numeric_min_checker(numeric_type=float,
                                                                                            minimum=0.1,
                                                                                            message="Can't have too small of an interval"),
                            help="interval in seconds for recording summary intervals")
        # related to the timing for the master worker
        parser.add_argument("--master-startup-poll-interval", default=1, type=parse.numeric_min_checker(minimum=0.1, numeric_type=float, message="must have a sensible (>100ms) wait time for startup check"), help="the amount of time to wait when checking for worker status on startup")
        parser.add_argument("--master-shutdown-interval", default=1, type=parse.numeric_min_checker(minimum=0.1, numeric_type=float, message="must have a sensible (>100ms) wait time for startup check"), help="the amount of time to wait when checking for worker status on startup")
        # related to the pyro server which to connect
        parser.add_argument("-n", "--pyro-number", default=random.randint(0, 2**30), type=int, help="number to assign to this server in the naming system")
        parser.add_argument("--pyro-ns-port", type=int, help="override default Pyro4 nameserver port")
        parser.add_argument("--pyro-ns-host", help="override default Pyro4 nameserver port")

    def _get_workers(self, ns_host, ns_port):
        """Yield (name, proxy) for every worker registered in the nameserver."""
        with Pyro4.locateNS(host=ns_host, port=ns_port) as ns:
            prefix = dist_common.pyro_worker_prefix
            prefix_slice = len(prefix) + 1  # the +1 to consume the dot after the prefix name
            for k, worker_uri in ns.list(prefix=dist_common.pyro_worker_prefix).items():
                self.log.info("Found worker at {}".format(worker_uri))
                yield k[prefix_slice:], Pyro4.Proxy(worker_uri)

    def _run_filewriter_thread(self, sess, coord, outdir, event, interval):
        """Thread body: periodically evaluate merged summaries and write them
        to a TF FileWriter until the coordinator stops or `event` is set."""
        with sess.graph.as_default():
            summaries = tf.summary.merge_all()
        writer = tf.summary.FileWriter(graph=sess.graph, logdir=str(outdir))
        interval = float(interval)
        global_step = 0
        try:
            if summaries is None:
                self.log.error("SummaryWriter has no summaries!")
                return
            while not (coord.should_stop() or event.is_set()):
                event_results = sess.run(summaries)
                writer.add_summary(summary=event_results,
                                   global_step=global_step)
                time.sleep(interval)
                global_step += 1
            # Log which condition ended the loop, for debugging shutdowns.
            if coord.should_stop():
                self.log.debug("SummaryWriter thread detected stopped coordinator")
            if event.is_set():
                self.log.debug("SummaryWriter thread detected set event")
        except Exception as e:
            self.log.error("SummaryWriter thread got exception '{e}'".format(e=e))
        else:
            self.log.debug("SummaryWriter thread exited normally")
        finally:
            writer.close()

    def _start_workers(self, assignments, cluster_dict, startup_poll_interval):
        """Ship the serialized graph to every assigned worker, then poll
        until all workers report SessionState.running (or fail)."""
        meta_graph_def = tf.train.export_meta_graph()
        meta_graph_def_as_string = meta_graph_def.SerializeToString()
        assert isinstance(meta_graph_def_as_string, bytes)
        # Fan the (blocking) remote run() calls out over a thread pool.
        with futures.ThreadPoolExecutor(max_workers=multiprocessing.cpu_count()*2) as tpe:
            for job_name, devices_by_index in assignments.items():
                for idx, dev_assignment in devices_by_index.items():
                    tpe.submit(dev_assignment.worker.run,
                               job_name=job_name,
                               task_index=idx,
                               cluster_dict=cluster_dict,
                               graph_def=meta_graph_def_as_string)
        self.log.info("Started all workers. Now will wait for them to fully start...")
        # Busy-poll worker state; any state other than starting/running aborts.
        waiting = True
        while waiting:
            waiting = False
            for job_name, devices_by_index in assignments.items():
                for idx, dev_assignment in devices_by_index.items():
                    device = dev_assignment.worker
                    state = SessionState(device.state)
                    if state == SessionState.running:
                        continue
                    elif state == SessionState.starting:
                        self.log.info("Waiting for worker {name}:{idx} to start".format(name=job_name, idx=idx))
                        waiting = True
                    else:
                        raise Exception("Worker {name}:{idx} has bad status: '{status}' on startup".format(idx=idx,
                                                                                                           name=job_name,
                                                                                                           status=state))
            if waiting:
                time.sleep(startup_poll_interval)
        self.log.debug("All workers started. Now serving...")

    def _setup_workers(self, app_name, device_counts, ns_host, ns_port):
        """Choose workers from the nameserver to satisfy `device_counts`.

        Returns (assignments, port_mappings_by_host) where assignments maps
        job name -> {task index -> DeviceAssignment} and the port map tracks
        ports already reserved per host so later reservations don't collide.
        """
        def repeated_device_context(device_name):
            # Factory producing a *reusable* context manager pinned to one
            # tf.device (a plain tf.device() context is single-use).
            @contextmanager
            def _my_func():
                with tf.device(device_name):
                    yield
            return _my_func
        all_workers = dict(self._get_workers(ns_host=ns_host,
                                             ns_port=ns_port))
        # Bucket registered workers by their type (the name before ".<index>").
        workers_by_type = {}
        for worker_name, worker in all_workers.items():
            match = self._name_parser.search(worker_name)
            if match is None:
                continue
            name = match.group("worker_type")
            if name not in workers_by_type:
                workers_by_type[name] = [worker]
            else:
                workers_by_type[name].append(worker)
        assignments = {}
        port_mappings_by_host = {}
        for device_key, num_requested in device_counts.items():
            if num_requested == 0:
                continue
            if device_key not in workers_by_type:
                raise Exception("Application '{app_name}' requested workers of type '{t}', but none are registered".format(t=device_key,
                                                                                                                           app_name=app_name))
            available_workers = workers_by_type[device_key]
            if len(available_workers) < num_requested:
                raise Exception("Application '{app_name}' requested {req} workers of type '{t}', but only found {found}".format(
                    app_name=app_name, req=num_requested, t=device_key, found=len(available_workers)
                ))
            chosen_workers = available_workers[:num_requested]
            devices_by_idx = {}
            for idx, worker in enumerate(chosen_workers):
                # Ask the worker for a host/port, passing current reservations
                # so it can avoid ports already handed out on its host.
                device_info = worker.get_tf_dist_info(current_reservations=port_mappings_by_host)
                host = device_info["host"]
                port = device_info["port"]
                if host not in port_mappings_by_host:
                    port_mappings_by_host[host] = { port }
                else:
                    assert port not in port_mappings_by_host[host], "Worker return port that was already in the mapping!"
                    port_mappings_by_host[host].add(port)
                cluster_info_string = "{h}:{p}".format(h=host, p=port)
                devices_by_idx[idx] = DeviceAssignment(worker=worker,
                                                       cluster_endpoint=cluster_info_string,
                                                       tf_device=repeated_device_context(dist_common.make_tf_device_name(
                                                           job_name=device_key,
                                                           task_index=idx
                                                       )))
            assignments[device_key] = devices_by_idx
        return assignments, port_mappings_by_host

    def _start_pyro(self, ns_host, ns_port, pyro_number):
        """Register this master in the Pyro4 nameserver; return the daemon."""
        daemon = Pyro4.Daemon(host=Pyro4.socketutil.getIpAddress(None, workaround127=True))
        master_uri = daemon.register(self)
        self.log.debug("master uri: {}".format(master_uri))
        with Pyro4.locateNS(host=ns_host,
                            port=ns_port) as ns:
            ns.register(":".join((dist_common.pyro_master_name, str(pyro_number))), master_uri)
        self.log.info("Registered Pyro4 daemon: {}".format(master_uri))
        return daemon

    def _construct_application(self, assignments, port_mappings_by_host,
                               ApplicationClass, args):
        """Build the application graph on the master device and return the
        full cluster spec dict, the application, and the master device name."""
        master_device_name = dist_common.make_tf_device_name(
            job_name=self.master_job_name, task_index=0
        )
        master_info = dist_common.get_tf_dist_info(current_reservations=port_mappings_by_host)
        # Master is always job "master", task 0; workers fill in the rest.
        cluster_dict = { self.master_job_name: {0: ":".join((master_info["host"], str(master_info["port"])))}}
        cluster_dict.update((job_key, { idx: dev_assignment.cluster_endpoint for idx, dev_assignment in v.items() })
                            for job_key, v in assignments.items())
        with tf.device(master_device_name):
            devices = {
                job_key: tuple(da.tf_device for da in v.values())
                for job_key, v in assignments.items()
            }
            application = ApplicationClass(args=args, devices=devices)
        return cluster_dict, application, master_device_name

    def _run_master(self, sess, coord, application, master_device_name):
        """Initialize variables, start queue/gate/credit runner threads on the
        master device, publish app/sess/coord, and block until stop."""
        init_ops = (tf.local_variables_initializer(), tf.global_variables_initializer())
        tf.report_uninitialized_variables()
        sess.run(init_ops)
        run_first = application.run_first
        if len(run_first) > 0:
            self.log.warning("App has {} run_first tensors, but can't run them across sessions".format(len(run_first)))
        threads = []
        queue_runner_threads = tf.train.start_queue_runners(sess=sess, coord=coord, device=master_device_name)
        self.log.info("Queue runners ({device}): {ths}".format(device=master_device_name, ths=", ".join(t.name for t in queue_runner_threads)))
        gate_runner_threads = gate.start_gate_runners(sess=sess, coord=coord, device=master_device_name)
        self.log.info("Gate runners ({device}): {ths}".format(device=master_device_name, ths=", ".join(t.name for t in gate_runner_threads)))
        credit_runner_threads = gate.start_credit_suppliers(sess=sess, coord=coord, device=master_device_name)
        self.log.info("Credit runners ({device}): {ths}".format(device=master_device_name, ths=", ".join(t.name for t in credit_runner_threads)))
        threads.extend(queue_runner_threads)
        threads.extend(gate_runner_threads)
        threads.extend(credit_runner_threads)
        # Give the runner threads a moment to surface immediate failures.
        time.sleep(1)
        if coord.should_stop():
            raise Exception("Coordinator stopped on initialization. Check for other errors!")
        else:
            self.log.debug("Starting successful")
        # Publishing this tuple makes run_client_request() operational.
        self.app_sess_coord = (application, sess, coord)
        coord.wait_for_stop()
        # TODO not sure if I need this
        coord.raise_requested_exception()

    def _run_application(self, ApplicationClass, args):
        """Full run lifecycle: assign workers, build the app, start the Pyro
        daemon and TF server, run the master, then tear everything down."""
        assert self.executor is None, "Cluster is restarting somehow!"
        self.executor = futures.ThreadPoolExecutor(max_workers=args.max_parallel_clients)
        app_name = ApplicationClass.name()
        self._app_name = app_name
        device_counts = ApplicationClass.device_counts(args=args)
        assignments, port_mappings_by_host = self._setup_workers(app_name=app_name,
                                                                 device_counts=device_counts,
                                                                 ns_host=args.pyro_ns_host,
                                                                 ns_port=args.pyro_ns_port)
        cluster_dict, application, master_device_name = self._construct_application(assignments=assignments,
                                                                                    port_mappings_by_host=port_mappings_by_host,
                                                                                    ApplicationClass=ApplicationClass, args=args)
        # must do this AFTER app construction because app can modify args (e.g. queue lengths)
        if args.record_args:
            self.write_out_args(args=args)
        daemon = self._start_pyro(
            ns_host=args.pyro_ns_host,
            ns_port=args.pyro_ns_port,
            pyro_number=args.pyro_number
        )
        daemon_thread = threading.Thread(target=daemon.requestLoop, name="pyro4_master_daemon")
        cluster_spec = tf.train.ClusterSpec(cluster=cluster_dict)
        server = tf.train.Server(server_or_cluster_def=cluster_spec,
                                 job_name=self.master_job_name, task_index=0)
        coord = tf.train.Coordinator()
        try:
            daemon_thread.start()
            time.sleep(1.25)  # make sure that the master is up and ready to accept requests
            with ExitStack() as context_stack:
                sess = context_stack.enter_context(tf.Session(target=server.target))
                post_session_sleep_time = 5
                self.log.info("Waiting {s} seconds for master Session to fully start...".format(s=post_session_sleep_time))
                time.sleep(post_session_sleep_time)
                self.log.info("Done waiting for master Session start")
                self._start_workers(
                    assignments=assignments,
                    cluster_dict=cluster_dict,
                    startup_poll_interval=args.master_startup_poll_interval
                )
                if args.record_stats:
                    context_stack.enter_context(cm=recording.record_self(outdir=args.output_directory))
                if args.summary:
                    # NOTE(review): the summary thread is signaled via its event
                    # below but never join()ed — presumably it exits on its own.
                    summary_event = threading.Event()
                    summary_thread = threading.Thread(target=self._run_filewriter_thread, kwargs={
                        "sess": sess,
                        "coord": coord,
                        "outdir": args.output_directory,
                        "event": summary_event,
                        "interval": args.summary_interval
                    })
                    summary_thread.start()
                try:
                    self._run_master(
                        sess=sess,
                        coord=coord,
                        master_device_name=master_device_name,
                        application=application
                    )
                finally:
                    if args.summary:
                        summary_event.set()
                    self.log.debug("Attempting application.stop()...")
                    # Best effort: a failing stop() must not mask the original error.
                    try:
                        application.stop(sess=sess)
                        sess.close()
                    except Exception as e:
                        self.log.warning("Ignoring exception '{e}' thrown by Application.stop".format(e=e))
                    else:
                        self.log.debug("Successfully ran application.stop()")
        except Exception as e:
            self.log.error("Master shutting down due to exception: {e}".format(e=e))
            raise e
        else:
            self.log.debug("Master shutting down normally.")
        finally:
            # Teardown order: stop coordinator, drain executor, stop workers,
            # then shut the Pyro daemon last (workers may call back into it).
            coord.request_stop()
            self.app_sess_coord = None
            wait_timeout = 60
            if coord.wait_for_stop(timeout=wait_timeout):
                self.log.debug("coord stop successful")
            else:
                self.log.error("Couldn't stop master thread after {} seconds of timeout".format(wait_timeout))
            # will definitely be available, based on construction. No need to check.
            self.executor.shutdown()
            self.executor = None
            # Tell the workers to shut down
            timeout=10
            with futures.ThreadPoolExecutor(max_workers=8) as pool:
                for job_name, devices_by_index in assignments.items():
                    for idx, dev_assignment in devices_by_index.items():
                        worker = dev_assignment.worker
                        pool.submit(worker.stop_and_reset, timeout=timeout)
            # Finally, shut down the daemon. Do this last if workers call back into this
            daemon.shutdown()
            daemon_join_timeout = 10
            daemon_thread.join(timeout=daemon_join_timeout)
            if daemon_thread.is_alive():
                self.log.error("Daemon thread still alive after timeout of {} seconds".format(daemon_join_timeout))

    @staticmethod
    def _populate_app_args(parser, app):
        # only graph args, no client args
        app.make_graph_args(parser=parser)

    @Pyro4.expose
    def run_client_request(self, ingress_args):
        """Pyro-exposed entry point: run one client request through the
        application on the master session and return result + timing info."""
        def run_request():
            # Snapshot the tuple once so a concurrent shutdown can't tear it
            # out from under us mid-request.
            a = self.app_sess_coord
            if a is None:
                raise Exception("Can't run client request. Application is not running. App/Sess/Coord is None.")
            app, sess, coord = a
            assert app is not None and sess is not None
            with sess.as_default():
                results = app.run_client_request(client_args=ingress_args,
                                                 sess=sess)
            return {
                dist_common.results_key: results.results,
                # these return the actual UNIX time bounds for when this got a slot and was submitted
                "start_time": results.start_time,
                "end_time": results.end_time,
                # run_time is just start_time - end_time
                # wait_time is the time BEFORE start_time that this request waited for a slot
                "run_time": results.run_time,
                "wait_time": results.wait_time,
            }
        if self.executor is None or self.app_sess_coord is None:
            raise Exception("Application is not running.{e}{s}".format(
                e="" if self.executor is None else " Executor is None.",
                s="" if self.app_sess_coord is None else " Session is None."
            ))
        assert isinstance(self.executor, futures.Executor)
        # Run on the bounded executor; block this Pyro call until completion.
        result = self.executor.submit(run_request)
        return result.result()

    @Pyro4.expose
    def kill(self):
        """Pyro-exposed: request the coordinator stop, triggering shutdown."""
        if self.app_sess_coord is not None:
            app, sess, coord = self.app_sess_coord
            coord.request_stop()

    @Pyro4.expose
    @property
    def app_name(self):
        # Name of the application currently (or last) run; None before any run.
        return self._app_name
|
ytdownloader_gui.py | # ============================================================================================
from tkinter import *
import tkinter as tk
from tkinter import messagebox
import tkinter.ttk as ttk
from tkinter.filedialog import askdirectory
import subprocess
import threading
import os
import sys
import eyed3
from pytube.cli import on_progress
from pytube import YouTube, Playlist
import concurrent.futures
from time import sleep
# ============================================================================================
class Application:
def __init__(self, root, videos_dir=None):
self.videos_dir = videos_dir
self.songs = 0
self.max_songs = 0
root.minsize(500, 700)
root.title('YTDownloader')
self.frame = tk.Frame(root)
self.frame.grid(row=0, pady=30)
# url stuff------------------------------------------------
self.label_url = tk.Label(self.frame, font=40, text="URL:")
self.label_url.grid(row=3, sticky=W, padx=30)
self.url_entry = tk.Entry(self.frame, font=40, width="50")
self.url_entry.grid(row=3, column=1, columnspan=20)
self.btnDownload = tk.Button(self.frame, text="Download", font=40, height=1, width=8,
command=lambda: self.btnDownload_callback(self.url_entry.get()))
self.btnDownload.grid(row=3, column=21, sticky=E, padx=30)
# radio buttons
self.radioOption = StringVar()
self.radioSong = Radiobutton(
self.frame, text="song", variable=self.radioOption, value='song')
self.radioSong.deselect()
self.radioSong.grid(row=4, column=1, columnspan=1)
self.radioPlaylist = Radiobutton(
self.frame, text="playlist", variable=self.radioOption, value='playlist')
self.radioPlaylist.grid(row=4, column=2, columnspan=1)
# # save stuff-----------------------------------------------
self.label_dir = tk.Label(self.frame, font=40, text="Path:")
self.label_dir.grid(row=5, sticky=W, pady=1, padx=30)
self.path_entry = tk.Entry(self.frame, font=40, width="50")
self.path_entry.grid(row=5, column=1, columnspan=20, pady=1)
self.btnChooseDir = tk.Button(self.frame, text="Folder", font=40, height=1, width=8,
command=lambda: self.btnChooseDir_callback())
self.btnChooseDir.grid(row=5, column=21, sticky=E, padx=30, pady=1)
self.text = tk.Text(self.frame, height=30, width=70)
self.text.config(state='normal')
self.text.grid(row=7, column=0, columnspan=100,
pady=20, padx=30, sticky=W)
# progress bar stuff
self.progress = ttk.Progressbar(
self.frame, orient="horizontal", length=520, mode="determinate")
self.progress.grid(row=8, column=0, columnspan=100,
pady=20, padx=30, sticky=W)
self.progress["maximum"] = 1 # default value
# cancel button
self.btnCancel = tk.Button(self.frame, text="Exit", font=40, height=1, width=8,
command=lambda: sys.exit(1))
self.btnCancel.grid(row=10, column=21, sticky=E, pady=10, padx=30)
root.mainloop()
# --------------------------------------------------------------------------------------------
# Buttons callbacks functions
def btnDownload_callback(self, url_entry):
"""Callback function called when download button is pressed"""
if(self.videos_dir != None):
self.text.insert(tk.INSERT, "Please wait....\n")
if(self.radioOption.get() == "playlist"):
subThread = threading.Thread(
target=self.download_playlist, args=(url_entry,))
subThread.start()
else:
subThread = threading.Thread(
target=self.download_song, args=(url_entry,))
subThread.start()
# self.setSongsInfo()
else:
dialog_title = 'Error'
dialog_text = 'Please select folder!'
tk.messagebox.showwarning(dialog_title, dialog_text)
def btnChooseDir_callback(self):
"""Opens file chooser dialog in order to choose path for saving downloaded songs"""
Tk().withdraw()
path_to_save = askdirectory()
# print(path_to_save)
self.path_entry.insert(tk.INSERT, path_to_save)
self.videos_dir = path_to_save
# --------------------------------------------------------------------------------------------
# YOUTUBE STUFF
def download_song(self, url):
"""function for downloading a single song"""
# need to add delay between requests so youtube
# doesn't complain
sleep(10)
try:
# Need to add proxy otherwise youtube sends 429 error.
yt = YouTube(url)
except Exception as e:
self.text.insert(tk.INSERT, f'[-]Connection Error! {str(e)}\n')
# print(f'[*]Downloading : {yt.title}')
self.text.insert(tk.INSERT, f'[*]Downloading : {yt.title}\n')
stream = yt.streams.filter(only_audio=True).first()
# download and save to the selected path.
try:
stream.download(self.videos_dir)
except:
self.text.insert(
tk.INSERT, f'[-]Downloading : {yt.title} failed\n')
self.songs += 1
self.progress["value"] = self.songs
# show proper messages for a single song
if(self.radioOption.get() == 'song'):
self.text.insert(tk.INSERT, '[+]Download completed!\n')
tk.messagebox.showinfo("YTDownloader", "Download completed!")
# --------------------------------------------------------------------------------------------
    def download_playlist(self, playlist_url=None):
        """Downloads all songs from a playlist.

        Args:
            playlist_url: URL of the YouTube playlist to download.
        """
        pl = Playlist(playlist_url)
        # NOTE(review): `pl.title()` is called as a method; recent pytube
        # exposes `title` as a property — confirm against the pinned version.
        # NOTE(review): this permanently appends the playlist name to
        # self.videos_dir, so a second playlist download nests folders —
        # verify this is intended.
        self.videos_dir += "/"+pl.title()
        # set values for progress bar
        self.progress["value"] = 0
        self.max_songs = len(pl)
        self.progress["maximum"] = len(pl)
        # print(f'[+]Saving playlist at: {self.videos_dir}')
        # print(f'[+]Downloading playlist, total: {len(pl)} songs')
        self.text.insert(
            tk.INSERT, f'[+]Saving playlist at: {self.videos_dir}\n')
        self.text.insert(
            tk.INSERT, f'[+]Downloading playlist;"{pl.title()}" , total: {len(pl)} songs\n')
        # Fan the per-song downloads out over a thread pool; download_song
        # throttles itself, so chunked mapping keeps request spacing sane.
        with concurrent.futures.ThreadPoolExecutor() as executor:
            executor.map(self.download_song, pl, chunksize=5)
        # print('[+]Download completed!')
        self.text.insert(tk.INSERT, '[+]Download completed!\n')
        tk.messagebox.showinfo("YTDownloader", "Download completed!")
        # self.clean_up()
# self.clean_up()
# --------------------------------------------------------------------------------------------
# def setSongsInfo(self):
# """Set info for the downloaded songs like artist , title etc"""
# folder = os.listdir(self.videos_dir)
# for songTitle in folder:
# if songTitle.endswith(".mp4"):
# info = songTitle.split("-")
# audiofile = eyed3.load(
# os.path.join(self.videos_dir, songTitle))
# audiofile.tag.artist = info[0].split('.')[0]
# audiofile.tag.title = info[1]
# audiofile.tag.save()
# ============================================================================================
if __name__ == "__main__":
    # Create the Tk root window and hand it to the application class.
    root = tk.Tk()
    # NOTE(review): no explicit root.mainloop() here — presumably
    # Application starts the event loop itself; confirm.
    Application(root)
# ============================================================================================
|
test_operator.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import print_function
from __future__ import division
import numpy as np
import mxnet as mx
import copy
import math
import random
import itertools
from distutils.version import LooseVersion
from numpy.testing import assert_allclose, assert_array_equal
from mxnet.test_utils import *
from mxnet.operator import *
from mxnet.base import py_str, MXNetError, _as_list
from common import assert_raises_cudnn_not_satisfied, assert_raises_cuda_not_satisfied, assertRaises
from common import xfail_when_nonstandard_decimal_separator, with_environment
import pytest
import os
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
@pytest.mark.serial
def test_rnn_with_new_param():
    """After swapping in fresh parameters via copy_params_from, train-mode and
    inference-mode forward outputs must still agree, for every RNN mode and
    both uni/bidirectional layouts (no dropout, so the modes should match)."""
    rnn_modes = ['rnn_relu', 'rnn_tanh', 'gru', 'lstm']
    ngates_ = [1, 1, 3, 4]  # gates per cell type, aligned with rnn_modes
    num_layers, input_size, seq_len, batch_size, state_size = 3, 128, 5, 64, 8
    for bidirectional in [False, True]:
        directions = 2 if bidirectional else 1
        for mode, ngates in zip(rnn_modes, ngates_):
            # Flat parameter-vector length: the first layer consumes
            # input_size, deeper layers consume state_size * directions;
            # each gate has an input weight, a recurrent weight and two
            # bias vectors (hence state_size * 2).
            first_layer_size = (input_size * state_size + state_size * state_size + state_size * 2) * ngates
            rest_layer_size = (state_size * directions * state_size + state_size * state_size + state_size * 2) \
                              * ngates * (num_layers - 1)
            param_size = (first_layer_size + rest_layer_size) * directions
            sym = mx.sym.RNN(mode=mode, num_layers=num_layers, bidirectional=bidirectional,
                             state_outputs=False, state_size=state_size, name='rnn')
            bind_dict = {
                'rnn_data': mx.ndarray.random.uniform(low=-1, high=1, shape=(seq_len, batch_size, input_size)),
                'rnn_parameters': mx.ndarray.random.uniform(low=-1, high=1, shape=(param_size)),
                'rnn_state': mx.ndarray.zeros(shape=(num_layers * directions, batch_size, state_size))
            }
            if mode == 'lstm':
                # LSTM additionally needs the cell state.
                bind_dict['rnn_state_cell'] = mx.ndarray.zeros(
                    shape=(num_layers * directions, batch_size, state_size))
            ex = sym._bind(default_context(), bind_dict)
            # Baseline: train vs inference forward with the initial params.
            ex.forward(is_train=True)
            ex01 = ex.output_dict['rnn_output'].asnumpy()
            ex.forward(is_train=False)
            ex02 = ex.output_dict['rnn_output'].asnumpy()
            assert_allclose(ex01, ex02, rtol=1e-2, atol=1e-4)
            # Replace parameters with new random values and re-check.
            bind_dict['rnn_parameters'] = mx.ndarray.random.uniform(low=-1, high=1, shape=(param_size))
            ex.copy_params_from(bind_dict)
            ex.forward(is_train=True)
            ex03 = ex.output_dict['rnn_output'].asnumpy()
            ex.forward(is_train=False)
            ex04 = ex.output_dict['rnn_output'].asnumpy()
            assert_allclose(ex03, ex04, rtol=1e-2, atol=1e-4)
@pytest.mark.serial
def test_lstm_dropout():
    """Smoke test: a 5-layer LSTM with dropout p=0.5 completes a
    training-mode forward pass on CPU."""
    seq_len, batch, in_dim, hidden = 300, 20, 800, 800
    data = mx.sym.Variable('x')
    weights = mx.sym.Variable('params')
    init_h = mx.sym.Variable('state')
    init_c = mx.sym.Variable('state_cell')
    rnn = mx.sym.RNN(data=data, parameters=weights, state=init_h,
                     state_cell=init_c, state_size=hidden, num_layers=5,
                     mode='lstm', p=0.5, state_outputs=True, name='LSTM')
    executor = rnn._simple_bind(ctx=mx.cpu(), x=(seq_len, batch, in_dim))
    result = executor.forward(is_train=True)
    result[0].wait_to_read()
@pytest.mark.serial
def test_gru_dropout():
    """Smoke test: a 5-layer GRU with dropout p=0.5 completes a
    training-mode forward pass on CPU."""
    seq_len, batch, in_dim, hidden = 300, 20, 800, 800
    data = mx.sym.Variable('x')
    weights = mx.sym.Variable('params')
    init_state = mx.sym.Variable('state')
    rnn = mx.sym.RNN(data=data, parameters=weights, state=init_state,
                     state_size=hidden, num_layers=5, mode='gru', p=0.5,
                     state_outputs=True, name='GRU')
    executor = rnn._simple_bind(ctx=mx.cpu(), x=(seq_len, batch, in_dim))
    result = executor.forward(is_train=True)
    result[0].wait_to_read()
@pytest.mark.serial
def test_rnntanh_dropout():
    """Smoke test: a 5-layer tanh-RNN with dropout p=0.5 completes a
    training-mode forward pass on CPU."""
    seq_len, batch, in_dim, hidden = 300, 20, 800, 800
    data = mx.sym.Variable('x')
    weights = mx.sym.Variable('params')
    init_state = mx.sym.Variable('state')
    rnn = mx.sym.RNN(data=data, parameters=weights, state=init_state,
                     state_size=hidden, num_layers=5, mode='rnn_tanh', p=0.5,
                     state_outputs=True, name='RNN_TANH')
    executor = rnn._simple_bind(ctx=mx.cpu(), x=(seq_len, batch, in_dim))
    result = executor.forward(is_train=True)
    result[0].wait_to_read()
@pytest.mark.serial
def test_rnnrelu_dropout():
    """Smoke test: a 5-layer ReLU-RNN with dropout p=0.5 completes a
    training-mode forward pass on CPU."""
    seq_len, batch, in_dim, hidden = 300, 20, 800, 800
    data = mx.sym.Variable('x')
    weights = mx.sym.Variable('params')
    init_state = mx.sym.Variable('state')
    rnn = mx.sym.RNN(data=data, parameters=weights, state=init_state,
                     state_size=hidden, num_layers=5, mode='rnn_relu', p=0.5,
                     state_outputs=True, name='RNN_RELU')
    executor = rnn._simple_bind(ctx=mx.cpu(), x=(seq_len, batch, in_dim))
    result = executor.forward(is_train=True)
    result[0].wait_to_read()
def test_RNN_float64():
    """Check that RNN (rnn_tanh) runs end-to-end with float64 inputs on CPU.

    Skipped on GPU contexts (early return): only the CPU path is exercised
    here. The original duplicated the argument-dict literal verbatim for
    `args` and `args_grad`; build it from one helper instead.
    """
    if default_context().device_type == 'gpu':
        return
    sym = mx.sym.RNN(
        mx.sym.Variable('in'),
        mx.sym.Variable('par'),
        mx.sym.Variable('s'),
        state_size = (2),
        num_layers = 1,
        mode = 'rnn_tanh'
    )
    dtype = 'float64'

    def make_args():
        # Fresh NDArrays each call so args and args_grad do not alias.
        return {
            'in': mx.nd.ones([2, 1, 2], dtype=dtype),
            'par': mx.nd.ones([12], dtype=dtype),
            's': mx.nd.ones([1, 1, 2], dtype=dtype)
        }

    ex = sym._bind(default_context(),
                   make_args(),
                   args_grad = make_args(),
                   grad_req = 'write'
                   )
    ex.forward()
    ex.outputs[0].wait_to_read()
def np_softmax(x, axis=-1, temperature=1.0):
    """NumPy reference softmax along `axis`, with optional temperature.

    The maximum is subtracted before exponentiation for numerical stability;
    this does not change the result.
    """
    shifted = x - np.max(x, axis=axis, keepdims=True)
    scores = np.exp(shifted / temperature)
    return scores / np.sum(scores, axis=axis, keepdims=True)
def check_elementwise_sum_with_shape(shape, n):
    """Check ElementWiseSum over n random inputs of `shape`: forward equals
    the NumPy sum, and backward copies the output gradient to every input
    (since d(sum)/d(input_i) == 1)."""
    # forward
    inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
    out = mx.symbol.ElementWiseSum(*inputs, name='esum')
    arr = [mx.nd.empty(shape) for i in range(n)]
    arr_grad = [mx.nd.empty(shape) for i in range(n)]
    for i in range(n):
        arr[i][:] = np.random.uniform(-10, 10, shape)
    exec1 = out._bind(default_context(),
                      args=arr,
                      args_grad=arr_grad)
    exec1.forward(is_train=True)
    out1 = exec1.outputs[0]
    # NumPy reference: elementwise sum of all inputs.
    out = sum(a.asnumpy() for a in arr)
    assert_almost_equal(out, out1, rtol=1e-5, atol=1e-5)
    out_grad = mx.nd.empty(shape)
    out_grad[:] = np.random.uniform(-10, 10, shape)
    # backward
    exec1.backward([out_grad])
    for a in arr_grad:
        assert_almost_equal(a, out_grad, rtol=1e-5, atol=1e-5)
@pytest.mark.serial
def test_elementwise_sum():
    """Run the elementwise-sum check twice on random 1-D to 3-D shapes
    with a random number of inputs."""
    for _ in range(2):
        for ndim in range(1, 4):
            # Cap each axis so the total element count stays around 1000.
            axis_cap = int(1000 ** (1.0 / ndim))
            shape = tuple(np.random.randint(1, axis_cap, size=ndim))
            num_inputs = np.random.randint(1, 8)
            check_elementwise_sum_with_shape(shape, num_inputs)
def check_concat_with_shape(shapes, dimension, skip_second):
    """Check Concat of the given input shapes along `dimension`.

    if skip_second is True, second argument will not have gradient.
    it is to test #1130
    """
    n = len(shapes)
    # forward
    target_dim = 0
    for shape in shapes:
        target_dim += shape[dimension]
    inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
    out = mx.symbol.Concat(*inputs, name='conc',dim=dimension)
    arr = [mx.nd.empty(shape) for shape in shapes]
    for i in range(n):
        # Fill input i with its own concat-axis length so slices are distinguishable.
        arr[i][:] = shapes[i][dimension]
    arr_np = [np.copy(narray.asnumpy()) for narray in arr]
    arr_grad = [mx.nd.empty(shape) for shape in shapes]
    dict_grad = {}
    arg_names = out.list_arguments()
    for name, g in zip(arg_names, arr_grad):
        # Omit arg1's gradient buffer when skip_second is set.
        if not skip_second or name != 'arg1':
            dict_grad[name] = g
    args = out.list_arguments()
    arg_shapes, out_shapes, aux_shapes = out.infer_shape(**dict(zip(args, shapes)))
    out_grad = mx.nd.empty(out_shapes[0])
    exec1 = out._bind(default_context(),
                      args=arr,
                      args_grad=dict_grad)
    exec1.forward(is_train=True)
    out1 = exec1.outputs[0]
    ret = np.concatenate([narray.asnumpy() for narray in arr], axis=dimension)
    assert_almost_equal(out1, ret)
    # backward
    out1.copyto(out_grad)
    out_grad[:] += 1
    exec1.backward([out_grad])
    # Concat backward slices the out-grad back to each input; since the
    # out-grad is forward-output + 1, each grad equals its input + 1.
    for i, name in enumerate(arg_names):
        if not skip_second or name != 'arg1':
            grad = dict_grad[name]
            np_grad = arr_np[i]
            assert_almost_equal(grad, np_grad + 1)
def test_concat():
    """Exercise Concat over 2-D/3-D/4-D inputs, 2..5 inputs per call, every
    valid positive axis and its negative-axis equivalent, with and without
    a skipped gradient (see check_concat_with_shape)."""
    for dimension in range(4):
        # NOTE(review): `n` is assigned but never used below — confirm dead.
        n = 2
        # Per-input length along the concat axis; other axes use a/b/c.
        merge = [2, 3, 4, 5, 6]
        a = 2
        b = 3
        c = 4
        # test 2D
        if dimension<2:
            for dim in range(2, 6):
                shapes = []
                for i in range(dim):
                    if dimension == 0:
                        shapes.append((merge[i], a))
                    elif dimension == 1:
                        shapes.append((a, merge[i]))
                check_concat_with_shape(shapes,dimension,True)
                check_concat_with_shape(shapes,dimension,False)
                # Test negative dim
                check_concat_with_shape(shapes, dimension - 2, True)
                check_concat_with_shape(shapes, dimension - 2, False)
        #test 3D
        if dimension<3:
            for dim in range(2, 6):
                shapes = []
                for i in range(dim):
                    if dimension == 0:
                        shapes.append((merge[i], a,b))
                    elif dimension ==1:
                        shapes.append((a,merge[i],b))
                    elif dimension ==2:
                        shapes.append((a,b,merge[i]))
                check_concat_with_shape(shapes,dimension,True)
                check_concat_with_shape(shapes,dimension,False)
                # Test negative dim
                check_concat_with_shape(shapes, dimension - 3, True)
                check_concat_with_shape(shapes, dimension - 3, False)
        # test 4D
        for dim in range(2, 6):
            shapes = []
            for i in range(dim):
                if dimension == 0:
                    shapes.append((merge[i],a,b,c))
                elif dimension == 1:
                    shapes.append((a,merge[i],b,c))
                elif dimension ==2:
                    shapes.append((a,b,merge[i],c))
                elif dimension ==3:
                    shapes.append((a,b,c,merge[i]))
            check_concat_with_shape(shapes,dimension,True)
            check_concat_with_shape(shapes,dimension,False)
            # Test negative dim
            check_concat_with_shape(shapes, dimension - 4, True)
            check_concat_with_shape(shapes, dimension - 4, False)
def test_slice_channel():
    """SliceChannel splits its input into num_outputs parts along `axis`.

    Forward parts are checked against numpy.take slices; backward must
    re-assemble the per-part out-grads into one gradient for the input.
    """
    def check_slice_channel(data_ndim, axis, num_outputs, squeeze_axis):
        # Build a random shape that is divisible along the split axis.
        if squeeze_axis:
            shape = np.random.randint(2, 5, data_ndim).tolist()
            shape[axis] = num_outputs
            out_ele_shape = [ele for ele in shape]
            del out_ele_shape[axis]
        else:
            shape = np.random.randint(1, 5, data_ndim).tolist()
            shape[axis] *= num_outputs
            out_ele_shape = [ele for ele in shape]
            out_ele_shape[axis] //= num_outputs
        data_npy = np.random.normal(size=shape)
        out_grads_npy = [np.random.normal(size=out_ele_shape) for i in range(num_outputs)]
        data = mx.sym.Variable('data')
        sym = mx.sym.SliceChannel(data=data, num_outputs=num_outputs, axis=axis, squeeze_axis=squeeze_axis)
        exe = sym._simple_bind(ctx=default_context(), data=data_npy.shape)
        outputs = exe.forward(is_train=True, data=data_npy)
        assert len(exe.outputs) == num_outputs
        for i in range(num_outputs):
            # Builtin `int` instead of np.int: the deprecated np.int alias
            # was removed in NumPy 1.24 and crashed this test.
            gt = data_npy.take(np.arange(i * shape[axis]/num_outputs,
                                         (i+1) * shape[axis]/num_outputs).astype(int), axis=axis)
            if squeeze_axis:
                assert_almost_equal(outputs[i], gt.reshape(outputs[i].shape))
            else:
                assert_almost_equal(outputs[i], gt)
        # test backward
        ograd = [mx.nd.array(ele, dtype=outputs[i].dtype) for i, ele in enumerate(out_grads_npy)]
        exe.backward(out_grads=ograd)
        if squeeze_axis:
            assert_almost_equal(exe.grad_arrays[0],
                                np.concatenate([np.expand_dims(ele, axis=axis) for ele in out_grads_npy],
                                               axis=axis))
        else:
            assert_almost_equal(exe.grad_arrays[0],
                                np.concatenate(out_grads_npy, axis=axis))
    check_slice_channel(data_ndim=2, axis=1, num_outputs=3, squeeze_axis=True)
    check_slice_channel(data_ndim=4, axis=2, num_outputs=3, squeeze_axis=False)
    check_slice_channel(data_ndim=3, axis=-1, num_outputs=2, squeeze_axis=False)
    check_slice_channel(data_ndim=5, axis=-2, num_outputs=3, squeeze_axis=True)
def test_python_op():
    """The default NumpyOp acts as identity: forward copies the input through
    and backward copies the output gradient to the input gradient."""
    var_x = mx.symbol.Variable('X')
    identity = mx.operator.NumpyOp().get_symbol(var_x, name='numpy_op')
    inp = mx.ndarray.ones((10)) * 10
    grad_in = mx.ndarray.zeros((10))
    grad_out = mx.ndarray.ones((10))
    executor = identity._bind(default_context(), args=[inp],
                              args_grad={'X': grad_in})
    executor.forward(is_train=True)
    assert_almost_equal(inp, executor.outputs[0])
    executor.backward(grad_out)
    assert_almost_equal(grad_out, grad_in)
def test_swapaxes():
    """SwapAxis matches np.swapaxes: once for a chained double swap, then for
    a table of (shape, axis1, axis2) configs including negative axes and a
    no-op (axis1 == axis2) case."""
    data = mx.symbol.Variable('data')
    shape = (2, 3, 4)
    data_tmp = np.ones(shape)
    data_tmp[0] = 1
    data_tmp[1] = 2
    arr_data = mx.nd.array(data_tmp)
    # Chain two swaps: (0,2) then (1,2).
    swap0 = mx.symbol.SwapAxis(data=data, dim1=0, dim2=2)
    swap = mx.symbol.SwapAxis(data=swap0, dim1=1, dim2=2)
    exe_c = swap._bind(default_context(), args=[arr_data])
    exe_c.forward(is_train=True)
    out = exe_c.outputs[0]
    swap0_ = np.swapaxes(data_tmp, 0, 2)
    swap_ = np.swapaxes(swap0_, 1, 2)
    assert_almost_equal(out, swap_)
    config = [((1, 1, 2), 0, 1),
              ((1, 1, 2), -1, -2),
              ((4, 5, 6, 7), 1, 1),
              ((4, 5, 6, 7), 2, 3),
              ((4, 5, 6, 7), -2, 2),
              ((4, 5, 6, 7), -2, -3)]
    for shape, axis1, axis2 in config:
        data_np = np.random.uniform(size=shape)
        data_mx = mx.nd.array(data_np, dtype=data_np.dtype)
        ret_np = np.swapaxes(data_np, axis1=axis1, axis2=axis2)
        ret_mx = mx.symbol.SwapAxis(data, dim1=axis1, dim2=axis2)
        exe_c = ret_mx._bind(default_context(), args=[data_mx])
        exe_c.forward(is_train=True)
        out = exe_c.outputs[0]
        assert_almost_equal(out, ret_np)
@xfail_when_nonstandard_decimal_separator
def test_scalarop():
    """Composite scalar arithmetic on a symbol: forward and backward must
    match the hand-derived NumPy expressions below."""
    data = mx.symbol.Variable('data')
    shape = (3, 4)
    data_tmp = np.ones(shape)*5
    arr_data = mx.nd.array(data_tmp)
    # NOTE(review): arr_data/arr_grad are prepared but never bound below —
    # the checks build their own executors; confirm these are dead.
    arr_grad = mx.nd.empty(shape)
    arr_grad[:]=3
    test = 2 / (4-((1+data+1)*2/5)-0.8-(data!=0))
    npout_1 = (4-((1+data_tmp+1)*2/5)-0.8-(data_tmp!=0))
    npout = 2/npout_1
    check_symbolic_forward(test, [data_tmp], [npout])
    # Chain rule for 2/u with u' = -2/5, times the out-grad of 2.
    npout_grad = 2.*2/5
    npout_grad = 2*npout_grad /(npout_1 *npout_1 )
    check_symbolic_backward(test, [data_tmp], [np.ones(shape)*2], [npout_grad])
def test_scalar_pow():
    """data ** 2: numeric gradient, forward value, and the 2*x backward rule."""
    shape = (1, 1)
    var = mx.symbol.Variable('data')
    base = np.ones(shape)
    squared = var ** 2
    check_numeric_gradient(squared, [base])
    check_symbolic_forward(squared, [base], [base ** 2])
    check_symbolic_backward(squared, [base], [np.ones(shape)], [2 * base])
def test_symbol_pow():
    """x**y with both base and exponent symbolic; grads follow the power rule."""
    shape = (1, 1)
    base_sym = mx.symbol.Variable('data')
    exp_sym = mx.symbol.Variable('exp')
    base_val = np.ones(shape) * 2
    exp_val = np.ones(shape) * 3
    powered = base_sym ** exp_sym
    check_numeric_gradient(powered, [base_val, exp_val])
    check_symbolic_forward(powered, [base_val, exp_val], [base_val ** exp_val])
    # d/dx x^y = y * x^(y-1);  d/dy x^y = x^y * ln(x)
    grad_base = base_val ** (exp_val - 1) * exp_val
    grad_exp = base_val ** (exp_val) * np.log(base_val)
    check_symbolic_backward(powered, [base_val, exp_val], [np.ones(shape)],
                            [grad_base, grad_exp])
def test_fully_connected():
    """FullyConnected forward equals data.reshape(N, -1) @ W.T + b, plus a
    numeric-gradient check. The 4-D input (5,5,5,13) flattens to (5, 325),
    matching num_hidden=10 weights of shape (10, 325)."""
    # Create data of given shape as a uniform distribution centered on 0.0
    def random_data(shape, dtype=np.float32):
        return mx.nd.random.uniform(low=-0.5,
                                    high=0.5, shape=shape, dtype=dtype)
    data = mx.sym.var("data")
    fc_weight = mx.sym.var("weight")
    fc_bias = mx.sym.var("bias")
    fc = mx.sym.FullyConnected(data=data, weight=fc_weight, bias=fc_bias, num_hidden=10, no_bias=False, name='fc')
    # Re-bind the symbol names to concrete NDArrays for the checks below.
    data = random_data(shape=(5, 5, 5, 13))
    fc_weight = random_data(shape=(10, 325))
    fc_bias = random_data(shape=(10))
    fc_bias2 = random_data(shape=(10, 1))
    data_np = data.asnumpy().reshape(5, 325)
    fc_weight_np = np.transpose(fc_weight.asnumpy())
    fc_bias_np = fc_bias.asnumpy()
    res = np.dot(data_np, fc_weight_np) + fc_bias.asnumpy()
    check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np}, {'fc_output': res})
    check_numeric_gradient(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np})
    # TODO: Fix Bug #15032 when bias has ndim > 1
    #check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias2.asnumpy()}, {'fc_output': res})
def test_pow_fn():
    """mx.sym.pow and mx.sym.power both compute 2**x with grad log(2)*2**x."""
    shape = (3, 4)
    exp_var = mx.symbol.Variable("exp")
    exponent = np.ones(shape) * 3
    for sym in (mx.sym.pow(2, exp_var), mx.sym.power(2, exp_var)):
        check_numeric_gradient(sym, [exponent], numeric_eps=1E-3)
        check_symbolic_forward(sym, [exponent], [2 ** exponent])
        check_symbolic_backward(sym, [exponent], [np.ones(shape)],
                                [np.log(2) * 2 ** exponent])
def test_relu():
    """relu forward/backward vs NumPy max(x, 0) and its step-function grad."""
    def np_relu(v):
        return np.maximum(v, 0.0)

    def np_relu_grad(v):
        return np.float32(1.0) * (v > np.float32(0.0))

    shape = (3, 4)
    sym = mx.sym.relu(mx.symbol.Variable("x"))
    xa = np.random.uniform(low=-1.0, high=1.0, size=shape).astype('float32')
    eps = 1e-4
    # The gradient is discontinuous at 0, which breaks finite differences;
    # push near-zero samples to 1.0 (repro issue with seed 97264195).
    xa[abs(xa) < eps] = 1.0
    check_numeric_gradient(sym, [xa], numeric_eps=eps)
    check_symbolic_forward(sym, [xa], [np_relu(xa)])
    check_symbolic_backward(sym, [xa], [np.ones(shape)], [np_relu_grad(xa)])
# NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues,
# the analytical checks are still performed on each and every data type to verify the correctness.
def test_leaky_relu():
    """LeakyReLU in 'elu' and 'leaky' modes vs NumPy references, across
    1-3 dims and fp16/fp32/fp64 (numeric check skipped for fp16)."""
    def fleaky_relu(x, act_type, slope=0.25):
        neg_indices = x < 0
        out = x.copy()
        if act_type == 'elu':
            out[neg_indices] = slope * np.expm1(out[neg_indices])
        elif act_type == 'leaky':
            out[neg_indices] = slope * out[neg_indices]
        return out
    def fleaky_relu_grad(grad, x, y, act_type, slope=0.25):
        neg_indices = x < 0
        out = np.ones(x.shape)
        if act_type == 'elu':
            # ELU grad on the negative side is y + slope.
            out[neg_indices] = y[neg_indices] + slope
        elif act_type == 'leaky':
            out[neg_indices] = slope
        return out * grad
    for ndim in range(1, 4):
        shape = rand_shape_nd(ndim)
        x = mx.symbol.Variable("x")
        slp = 0.25
        for dtype in [np.float16, np.float32, np.float64]:
            xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
            eps = 1e-4
            rtol = 1e-2
            atol = 1e-3
            # Avoid the gradient discontinuity at 0 for finite differences.
            xa[abs(xa) < eps] = 1.0
            for act_type in ['elu', 'leaky']:
                y = mx.symbol.LeakyReLU(data=x, slope=slp, act_type=act_type)
                ya = fleaky_relu(xa, slope=slp, act_type=act_type)
                ga = fleaky_relu_grad(np.ones(shape), xa, ya, slope=slp, act_type=act_type)
                # Skip numeric check for float16 type to get rid of flaky behavior
                if dtype is not np.float16:
                    check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
                check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
                check_symbolic_backward(y, [xa], [np.ones(shape, dtype=dtype)], [ga], rtol=rtol, atol=atol, dtype=dtype)
# NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues,
# the analytical checks are still performed on each and every data type to verify the correctness.
def test_prelu():
    """LeakyReLU 'prelu' mode: forward and both gradients (w.r.t. data and
    the learned gamma) vs NumPy references, for a per-channel gamma and a
    broadcast/full gamma, over 2-D and 4-D inputs and three dtypes."""
    def fprelu(x, gamma):
        pos_indices = x > 0
        out = x.copy()
        if len(x.shape) == 4:
            # For NCHW input, gamma multiplies the channel axis: rotate
            # (N,C,H,W) -> (H,W,N,C), scale, and rotate back.
            out = out.transpose(2,3,0,1)
            out = np.multiply(out, gamma)
            out = out.transpose(2,3,0,1)
        else:
            out = np.multiply(out, gamma)
        out[pos_indices] = x[pos_indices]
        return out
    def fprelu_grad(x, y, gamma):
        pos_indices = x > 0
        if len(x.shape) == 4:
            grad_x = np.multiply(np.ones(x.shape).transpose(2,3,0,1), gamma)
            grad_x = grad_x.transpose(2,3,0,1)
        else:
            grad_x = np.multiply(np.ones(x.shape), gamma)
        grad_gam = np.zeros(gamma.shape)
        copy_x = x.copy()
        copy_x[pos_indices] = 0.0
        grad_x[pos_indices] = 1.0
        # d(out)/d(gamma) is x on the negative side, reduced over the axes
        # gamma broadcasts across; the branch depends on gamma's shape.
        if len(gamma.shape) > 1 and len(x.shape) != 4:
            grad_gam = copy_x
        elif len(gamma.shape) > 1 and len(x.shape) == 4:
            grad_gam = np.sum(copy_x, axis=(2,3))
        elif gamma.shape[0] == 1:
            grad_gam = np.sum(np.sum(copy_x))
        elif gamma.shape[0] > 1 and len(x.shape) != 4:
            grad_gam = np.sum(copy_x, axis=0)
        elif gamma.shape[0] > 1 and len(x.shape) == 4:
            grad_gam = np.sum(copy_x, axis=(0,2,3))
        return (grad_x, grad_gam)
    x = mx.symbol.Variable("x")
    gamma = mx.symbol.Variable("gamma")
    for shape in [(3,4), (3,4,4,5)]:
        for dtype in [np.float16, np.float32, np.float64]:
            for gam in [np.array([0.1, 0.2, 0.3, 0.4], dtype=dtype)]:
                gam_full = np.array([gam, gam, gam])
                xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
                rtol = 1e-2
                atol = 1e-3
                eps = 1e-4
                # Avoid the gradient discontinuity at 0 for finite differences.
                xa[abs(xa) < eps] = 1.0
                y = mx.symbol.LeakyReLU(data=x, gamma=gamma, act_type='prelu')
                ya = fprelu(xa, gam)
                ya_full = fprelu(xa, gam_full)
                g_xa, g_gam = fprelu_grad(xa, ya, gamma=gam)
                g_xa_full, g_gam_full = fprelu_grad(xa, ya_full, gamma=gam_full)
                # Skip numeric check for float16 type to get rid of flaky behavior
                if dtype is not np.float16:
                    check_numeric_gradient(y, [xa, gam], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
                    check_numeric_gradient(y, [xa, gam_full], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
                check_symbolic_forward(y, [xa, gam], [ya], rtol=rtol, atol=atol, dtype=dtype)
                check_symbolic_backward(y, [xa, gam], [np.ones(ya.shape, dtype=dtype)],
                                        [g_xa, g_gam], rtol=rtol, atol=atol, dtype=dtype)
                check_symbolic_forward(y, [xa, gam_full], [ya_full], rtol=rtol, atol=atol, dtype=dtype)
                check_symbolic_backward(y, [xa, gam_full], [np.ones(ya_full.shape, dtype=dtype)],
                                        [g_xa_full, g_gam_full], rtol=rtol, atol=atol, dtype=dtype)
def test_selu():
    """SELU activation vs a NumPy reference for fp16/fp32/fp64."""
    alpha = 1.6732632423543772848170429916717
    lamb = 1.0507009873554804934193349852946

    def np_selu(v):
        res = v.copy()
        neg = v < 0
        res[neg] = alpha * np.expm1(res[neg])
        return res * lamb

    def np_selu_grad(grad, v, out):
        res = np.ones(v.shape).astype(v.dtype)
        neg = v < 0
        res[neg] = out[neg] + alpha
        return res * lamb

    shape = (3, 4)
    sym = mx.sym.LeakyReLU(data=mx.sym.Variable("x"), act_type="selu")
    for dtype in [np.float16, np.float32, np.float64]:
        xa = np.random.uniform(low=-0.1, high=0.1, size=shape).astype(dtype)
        # fp16 needs looser tolerances and smaller input magnitudes.
        eps, rtol, atol = (7.5e-4, 1e-1, 1e-2) if dtype is np.float16 else (1e-4, 1e-2, 1e-4)
        if dtype is np.float16:
            xa /= 10.0
        xa[abs(xa) < eps] = 0.01
        ya = np_selu(xa)
        ga = np_selu_grad(np.ones(shape).astype(dtype), xa, ya)
        check_numeric_gradient(sym, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
        check_symbolic_forward(sym, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
        check_symbolic_backward(sym, [xa], [np.ones(shape, dtype=dtype)], [ga], rtol=rtol, atol=atol, dtype=dtype)
def test_gelu():
    """GELU (tanh approximation) forward/backward vs the NumPy closed form
    for fp16/fp32/fp64."""
    CUBE_CONSTANT = 0.044715
    ROOT_TWO_OVER_PI = 0.7978845608028654
    # gelu(x) = 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 x^3)))
    def g(x):
        return ROOT_TWO_OVER_PI * (x + CUBE_CONSTANT * np.power(x, 3))
    def g_grad(x):
        return ROOT_TWO_OVER_PI * (1.0 + 3.0 * CUBE_CONSTANT * np.power(x, 2))
    def f(x):
        return 1.0 + np.tanh(g(x))
    def f_grad(x):
        return (1.0 - np.tanh(g(x)) * np.tanh(g(x))) * g_grad(x)
    def fgelu(x):
        return 0.5 * x * f(x)
    def fgelu_grad(grad, x, y):
        return grad * (y / x + y * (1 - np.tanh(g(x))) * g_grad(x))
    shape = (3, 4)
    x = mx.sym.Variable("x")
    y = mx.sym.LeakyReLU(data=x, act_type="gelu")
    for dtype in [np.float16, np.float32, np.float64]:
        xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
        eps, rtol, atol = (7.5e-4, 2e-2, 1e-3) if dtype is np.float16 else (1e-4, 1e-3, 1e-5)
        if dtype is np.float16:
            xa /= 10.0
        xa[abs(xa) < eps] = 0.01
        ya = fgelu(xa)
        ga = fgelu_grad(np.ones(shape).astype(dtype), xa, ya)
        check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
        check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
        # Pass the out-grad in the dtype under test (was float64-only
        # np.ones(shape), inconsistent with test_selu / test_leaky_relu).
        check_symbolic_backward(y, [xa], [np.ones(shape, dtype=dtype)], [ga], rtol=rtol, atol=atol, dtype=dtype)
def test_sigmoid():
    """sigmoid forward equals 1/(1+exp(-x)); backward equals y*(1-y)."""
    shape = (3, 4)
    sym = mx.sym.sigmoid(mx.symbol.Variable("x"))
    inp = np.random.uniform(low=-1.0, high=1.0, size=shape)
    expected = np.divide(1.0, (1.0 + np.exp(-inp)))
    check_numeric_gradient(sym, [inp], numeric_eps=1E-3)
    check_symbolic_forward(sym, [inp], [expected])
    check_symbolic_backward(sym, [inp], [np.ones(shape)],
                            [expected * (1 - expected)])
def test_log_sigmoid():
    """log_sigmoid forward = log(1/(1+exp(-x))); gradient = 1/(1+exp(x))."""
    shape = (3, 4)
    sym = mx.sym.log_sigmoid(mx.symbol.Variable("x"))
    inp = np.random.uniform(low=-1.0, high=1.0, size=shape)
    fwd = np.log(np.divide(1.0, np.add(1.0, np.exp(-inp))))
    bwd = np.divide(1.0, np.add(1.0, np.exp(inp)))
    check_numeric_gradient(sym, [inp], numeric_eps=1E-3)
    check_symbolic_forward(sym, [inp], [fwd])
    check_symbolic_backward(sym, [inp], [np.ones(shape)], [bwd])
def test_mish():
    """mish(x) = x * tanh(softplus(x)); gradient checked vs the closed form."""
    shape = (3, 4)
    sym = mx.sym.mish(mx.symbol.Variable("x"))
    inp = np.random.uniform(low=-1.0, high=1.0, size=shape)
    softplus = np.log1p(np.exp(inp))
    tanh_sp = np.tanh(softplus)
    fwd = inp * tanh_sp
    sig = np.divide(1.0, (1.0 + np.exp(-inp)))
    bwd = tanh_sp + inp * sig * (1.0 - tanh_sp * tanh_sp)
    check_numeric_gradient(sym, [inp], numeric_eps=1E-3)
    check_symbolic_forward(sym, [inp], [fwd])
    check_symbolic_backward(sym, [inp], [np.ones(shape)], [bwd])
def test_shape_array():
    """shape_array returns the input's shape; its gradient to the data is zero."""
    for ndim in range(1, 6):
        shp = rand_shape_nd(ndim)
        sym = mx.sym.shape_array(mx.sym.var('x'))
        data = mx.nd.array(np.random.ranf(shp))
        grad_buf = mx.nd.empty(data.shape)
        expected = np.shape(data)
        exe = sym._bind(ctx=default_context(), args={'x': data},
                        args_grad={'x': grad_buf})
        exe.forward(is_train=True)
        exe.backward([mx.nd.ones(expected)])
        same(exe.outputs[0].asnumpy(), expected)
        # Shape is not differentiable w.r.t. the data.
        assert_almost_equal(grad_buf, np.zeros_like(grad_buf.asnumpy()))
def test_size_array():
    """size_array returns the element count; its gradient to the data is zero."""
    for ndim in range(1, 6):
        shp = rand_shape_nd(ndim)
        sym = mx.sym.size_array(mx.sym.var('x'))
        data = mx.nd.array(np.random.ranf(shp))
        grad_buf = mx.nd.empty(data.shape)
        expected = np.size(data)
        exe = sym._bind(ctx=default_context(), args={'x': data},
                        args_grad={'x': grad_buf})
        exe.forward(is_train=True)
        exe.backward([mx.nd.ones(expected)])
        same(exe.outputs[0].asnumpy(), expected)
        # Size is not differentiable w.r.t. the data.
        assert_almost_equal(grad_buf, np.zeros_like(grad_buf.asnumpy()))
def test_hard_sigmoid():
    """hard_sigmoid: clip(alpha*x + beta, 0, 1) forward; gradient is alpha
    inside the linear region and zero where the output saturates."""
    def fhardsigmoid(a, alpha=0.2, beta=0.5):
        return np.maximum(np.zeros(a.shape, dtype=a.dtype),
                          np.minimum(np.ones(a.shape, dtype=a.dtype), alpha*a+beta))
    def fhardsigmoid_grad(a, out_grad, alpha=0.2, beta=0.5):
        orig_out = fhardsigmoid(a, alpha, beta)
        res = out_grad * alpha
        # Zero gradient in both saturated regions.
        res[orig_out <= 0.0] = 0.0
        res[orig_out >= 1.0] = 0.0
        return res
    shape = (3, 4)
    x = mx.symbol.Variable("x")
    y = mx.sym.hard_sigmoid(x)
    for dtype in [np.float16, np.float32, np.float64]:
        if dtype is np.float16:
            rtol = 1e-2
        else:
            rtol = 1e-3
        atol = 1e-3
        eps = 1e-3
        xa = np.random.uniform(low=-3.0,high=3.0,size=shape).astype(dtype)
        # function not differentiable at x=2.5 and -2.5
        xa[abs(xa-2.5) < eps] -= 2 * eps
        xa[abs(xa+2.5) < eps] += 2 * eps
        ya = fhardsigmoid(xa)
        grad_xa = fhardsigmoid_grad(xa, np.ones(shape))
        # Numeric check skipped for fp16 (precision too low).
        if dtype is not np.float16:
            check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
        check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
        check_symbolic_backward(y, [xa], [np.ones(shape)], [grad_xa], rtol=rtol, atol=atol, dtype=dtype)
def test_softsign():
    """softsign forward = x / (1+|x|); gradient = 1 / (1+|x|)^2."""
    shape = (3, 4)
    sym = mx.sym.softsign(mx.symbol.Variable("x"))
    inp = np.random.uniform(low=-1.0, high=1.0, size=shape)
    denom = 1.0 + np.abs(inp)
    fwd = np.divide(inp, denom)
    bwd = np.divide(1.0, np.square(denom))
    check_numeric_gradient(sym, [inp], numeric_eps=1E-3)
    check_symbolic_forward(sym, [inp], [fwd])
    check_symbolic_backward(sym, [inp], [np.ones(shape)], [bwd])
def test_binary_logic():
    """All six comparison operators: operator-overload form on equal shapes
    (including symbol-vs-scalar), and the explicit broadcast_* symbols on
    broadcasting shapes."""
    def _inner_test(forward_gt, logic_sym, x_shape, y_shape, test_scalar=True):
        # Compare logic_sym's forward output with the NumPy ground truth;
        # backward is run only to confirm it does not raise (grads are zero).
        x = mx.symbol.Variable("x")
        y = mx.symbol.Variable("y")
        z = logic_sym(x, y)
        x_npy = np.random.randint(0, 4, size=x_shape).astype(np.float32)
        y_npy = np.random.randint(0, 4, size=y_shape).astype(np.float32)
        exe = z._simple_bind(ctx=default_context(), x=x_shape, y=y_shape)
        mx_out = exe.forward(is_train=True, x=x_npy, y=y_npy)[0]
        assert_almost_equal(mx_out, forward_gt(x_npy, y_npy))
        exe.backward()
        if test_scalar:
            # Scalar on either side of the operator.
            z_lscalar = logic_sym(1, y)
            z_rscalar = logic_sym(x, 1)
            exe_lscalar = z_lscalar._simple_bind(ctx=default_context(), y=y_shape)
            exe_rscalar = z_rscalar._simple_bind(ctx=default_context(), x=x_shape)
            mx_lscalar_out = exe_lscalar.forward(is_train=True, y=y_npy)[0]
            mx_rscalar_out = exe_rscalar.forward(is_train=True, x=x_npy)[0]
            assert_almost_equal(mx_lscalar_out, forward_gt(1, y_npy))
            assert_almost_equal(mx_rscalar_out, forward_gt(x_npy, 1))
            exe_lscalar.backward()
            exe_rscalar.backward()
    # Test the no-broadcasting binary logic ops + scalar logic ops
    _inner_test(forward_gt=lambda x, y: x == y,
                logic_sym=lambda x, y: x == y, x_shape=(10, 10), y_shape=(10, 10))
    _inner_test(forward_gt=lambda x, y: x > y,
                logic_sym=lambda x, y: x > y, x_shape=(10, 10), y_shape=(10, 10))
    _inner_test(forward_gt=lambda x, y: x >= y,
                logic_sym=lambda x, y: x >= y, x_shape=(10, 10), y_shape=(10, 10))
    _inner_test(forward_gt=lambda x, y: x < y,
                logic_sym=lambda x, y: x < y, x_shape=(10, 10), y_shape=(10, 10))
    _inner_test(forward_gt=lambda x, y: x <= y,
                logic_sym=lambda x, y: x <= y, x_shape=(10, 10), y_shape=(10, 10))
    _inner_test(forward_gt=lambda x, y: x != y,
                logic_sym=lambda x, y: x != y, x_shape=(10, 10), y_shape=(10, 10))
    # Test the broadcasting binary logic ops
    _inner_test(forward_gt=lambda x, y: x == y,
                logic_sym=lambda x, y: mx.sym.broadcast_equal(x, y),
                x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
    _inner_test(forward_gt=lambda x, y: x > y,
                logic_sym=lambda x, y: mx.sym.broadcast_greater(x, y),
                x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
    _inner_test(forward_gt=lambda x, y: x >= y,
                logic_sym=lambda x, y: mx.sym.broadcast_greater_equal(x, y),
                x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
    _inner_test(forward_gt=lambda x, y: x < y,
                logic_sym=lambda x, y: mx.sym.broadcast_lesser(x, y),
                x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
    _inner_test(forward_gt=lambda x, y: x <= y,
                logic_sym=lambda x, y: mx.sym.broadcast_lesser_equal(x, y),
                x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
    _inner_test(forward_gt=lambda x, y: x != y,
                logic_sym=lambda x, y: mx.sym.broadcast_not_equal(x, y),
                x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
def test_unary_logic():
    """logical_not matches np.logical_not via both the ndarray and symbol APIs."""
    shape = (3, 4)
    data_np = np.random.randint(-2, 2, size=shape).astype(np.float32)
    expected = np.logical_not(data_np).astype(data_np.dtype)
    data_nd = mx.nd.array(data_np)
    # Imperative path.
    assert_almost_equal(mx.nd.logical_not(data_nd), expected)
    # Symbolic path.
    sym = mx.sym.logical_not(data=mx.sym.Variable('x'))
    exe = sym._simple_bind(ctx=default_context(), x=shape)
    sym_out = exe.forward(is_train=True, x=data_nd)[0]
    assert_almost_equal(sym_out, expected)
def test_embedding():
    """Embedding forward equals onehot(data) @ W; weight gradient equals
    onehot(data).T @ out_grad. The data input gets no gradient ('null')."""
    in_dim = 10
    out_dim = 4
    batch = 24
    data = mx.sym.Variable("data")
    embed = mx.sym.Embedding(data=data, input_dim=in_dim, output_dim=out_dim, name="embed")
    exe_test = embed._simple_bind(default_context(), grad_req={'data': 'null', 'embed_weight': 'write'}, data=(batch,))
    arg_map = dict(zip(embed.list_arguments(), exe_test.arg_arrays))
    grad_map = dict(zip(embed.list_arguments(), exe_test.grad_arrays))
    np_data = np.random.randint(low=0, high=in_dim, size=batch)
    np_weight = np.random.uniform(-0.01, 0.01, arg_map["embed_weight"].shape)
    # One-hot encoding of the indices, for the matrix-product reference.
    np_onehot = np.zeros((batch, in_dim))
    np_onehot[np.arange(batch), np_data] = 1.0
    # forward
    arg_map["data"][:] = np_data
    arg_map["embed_weight"][:] = np_weight
    exe_test.forward(is_train=True)
    # Non-zero atol required, as exposed by seed 781663739
    rtol = 1e-5
    atol = 1e-5
    assert_almost_equal(exe_test.outputs[0], np.dot(np_onehot, np_weight), rtol=rtol, atol=atol)
    # backward
    np_grad = np.random.uniform(-1, 1, exe_test.outputs[0].shape)
    grad = mx.nd.zeros(np_grad.shape)
    grad[:] = np_grad
    exe_test.backward([grad])
    assert_almost_equal(grad_map["embed_weight"], np.dot(np_onehot.T, np_grad), rtol=rtol, atol=atol)
# Binary ops must handle the same symbol appearing as both operands.
def test_binary_op_duplicate_input():
    """Square a variable via `data * data`; check forward and backward."""
    var = mx.symbol.Variable('data')
    shape = (3, 4)
    np_in = np.full(shape, 5.0)
    nd_in = mx.nd.array(np_in)
    nd_grad = mx.nd.empty(shape)
    nd_grad[:] = 3
    head_grad = mx.nd.empty(shape)
    head_grad[:] = 1
    squared = var * var
    executor = squared._bind(default_context(), args=[nd_in], args_grad=[nd_grad])
    executor.forward(is_train=True)
    # forward: x * x
    assert_almost_equal(executor.outputs[0], np_in * np_in)
    executor.backward(head_grad)
    # backward: d(x*x)/dx = 2x (the head gradient is all ones)
    assert_almost_equal(nd_grad, 2.0 * np_in)
def test_sign():
    """Check sign(): forward matches np.sign, backward gradient is zero.

    sign() is piecewise constant, so its input gradient is identically
    zero regardless of the incoming head gradient.
    """
    data = mx.symbol.Variable('data')
    shape = (3, 4)
    data_tmp = np.ones(shape)
    data_tmp[:] = 5
    arr_data = mx.nd.array(data_tmp)
    arr_grad = mx.nd.empty(shape)
    arr_grad[:] = 3
    test = mx.sym.sign(data)
    exe_test = test._bind(default_context(), args=[arr_data], args_grad=[arr_grad])
    exe_test.forward(is_train=True)
    assert_almost_equal(exe_test.outputs[0], np.sign(data_tmp))
    out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
    exe_test.backward(out_grad)
    # Previously this block assigned out_grad.asnumpy() to npout_grad and
    # immediately overwrote it with 0 — the dead read is removed and the
    # expected zero gradient is made explicit.
    assert_almost_equal(arr_grad, np.zeros(shape))
def test_round_ceil_floor():
    """Check that round + ceil + floor (forward only) matches numpy.

    No backward pass is exercised here, so no gradient buffer is bound.
    (The previous version allocated and filled an `arr_grad` array that
    was never used — removed.)
    """
    data = mx.symbol.Variable('data')
    shape = (3, 4)
    data_tmp = np.full(shape, 5.543)
    arr_data = mx.nd.array(data_tmp)
    test = mx.sym.round(data) + mx.sym.ceil(data) + mx.sym.floor(data)
    exe_test = test._bind(default_context(), args=[arr_data])
    exe_test.forward(is_train=True)
    npout = np.round(data_tmp) + np.ceil(data_tmp) + np.floor(data_tmp)
    assert_almost_equal(exe_test.outputs[0], npout)
def test_trunc():
    """Check that trunc() matches the numpy reference on float32 data."""
    np_in = np.random.rand(3, 4) * 10 - 5
    nd_in = mx.nd.array(np_in)
    var = mx.symbol.Variable('data')
    truncated = mx.sym.trunc(var)
    executor = truncated._bind(default_context(), args=[nd_in])
    executor.forward(is_train=True)
    result = executor.outputs[0]
    # 'trunc' is sensitive to the precision of the calculation. Force numpy
    # to match mxnet's float32. Repro issue with seed 1660190454.
    expected = np.trunc(np.float32(np_in))
    assert_almost_equal(result, expected)
def test_rsqrt_cos_sin():
    """Check forward/backward of rsqrt + cos + sin against a numpy reference."""
    data = mx.symbol.Variable('data')
    shape = (3, 4)
    data_tmp = np.ones(shape)
    data_tmp[:]=5
    arr_data = mx.nd.array(data_tmp)
    arr_grad = mx.nd.empty(shape)
    arr_grad[:]=3
    test = mx.sym.rsqrt(data) + mx.sym.cos(data) + mx.sym.sin(data)
    exe_test = test._bind(default_context(), args=[arr_data], args_grad=[arr_grad])
    exe_test.forward(is_train=True)
    out = exe_test.outputs[0]
    # reference forward: rsqrt(x) == 1 / sqrt(x)
    npout = 1/ np.sqrt(data_tmp) + np.cos(data_tmp) + np.sin(data_tmp)
    assert_almost_equal(out, npout)
    out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
    npout_grad = out_grad.asnumpy()
    # reference backward: d(rsqrt)/dx = -1/(2*x^(3/2)); d(cos)/dx = -sin; d(sin)/dx = cos
    npout_grad = npout_grad * -(1.0 / (2.0 * data_tmp * np.sqrt(data_tmp))) + npout_grad * -1 * np.sin(data_tmp) + npout_grad * np.cos(data_tmp)
    exe_test.backward(out_grad)
    assert_almost_equal(arr_grad, npout_grad)
def test_maximum_minimum():
    """Check forward/backward of elementwise maximum + minimum of two inputs."""
    data1 = mx.symbol.Variable('data1')
    data2 = mx.symbol.Variable('data2')
    shape = (3, 4)
    data_tmp1 = np.random.rand(3,4)
    data_tmp2 = np.random.rand(3,4)
    data_tmp1[:] = 2
    data_tmp2[:] = 3
    arr_data1 = mx.nd.array(data_tmp1)
    arr_data2 = mx.nd.array(data_tmp2)
    arr_grad1 = mx.nd.empty(shape)
    arr_grad2 = mx.nd.empty(shape)
    test = mx.sym.maximum(data1,data2) + mx.sym.minimum(data1,data2)
    exe_test = test._bind(default_context(), args=[arr_data1,arr_data2], args_grad=[arr_grad1,arr_grad2])
    exe_test.forward(is_train=True)
    out = exe_test.outputs[0]
    npout = np.maximum(data_tmp1,data_tmp2) + np.minimum(data_tmp1,data_tmp2)
    assert_almost_equal(out, npout)
    out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
    exe_test.backward(out_grad)
    npout_grad = np.ones(shape)
    npout_grad[:] = 2
    # max routes the gradient to the larger input, min to the smaller one;
    # the masks select where each input receives gradient from each term
    mask1 = (data_tmp1 > data_tmp2).astype('float')
    mask2 = (data_tmp1 < data_tmp2).astype('float')
    npout_grad1 = npout_grad * mask1 + npout_grad * mask2
    npout_grad2 = (npout_grad - npout_grad * mask1) + (npout_grad - npout_grad * mask2)
    assert_almost_equal(arr_grad1, npout_grad1)
    assert_almost_equal(arr_grad2, npout_grad2)
def test_maximum_minimum_scalar():
    """Check forward/backward of maximum/minimum with scalar operands
    (scalar on both left and right sides)."""
    data1 = mx.symbol.Variable('data')
    shape = (3, 4)
    data_tmp1 = np.random.rand(3,4)
    data_tmp1[:] = 2
    arr_data1 = mx.nd.array(data_tmp1)
    arr_grad1 = mx.nd.empty(shape)
    test = mx.sym.maximum(data1,3) + mx.sym.maximum(9,data1) + mx.sym.minimum(5,data1) + mx.sym.minimum(data1,4)
    exe_test = test._bind(default_context(), args=[arr_data1], args_grad=[arr_grad1])
    exe_test.forward(is_train=True)
    out = exe_test.outputs[0]
    npout = np.maximum(data_tmp1,3) + np.maximum(9,data_tmp1) + np.minimum(5,data_tmp1) + np.minimum(data_tmp1,4)
    assert_almost_equal(out, npout)
    out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
    exe_test.backward(out_grad)
    npout_grad = np.ones(shape)
    npout_grad[:] = 2
    # per-term masks: where each max/min term passes gradient to the data input
    mask1 = (data_tmp1 > 3).astype('float')
    mask2 = (9 > data_tmp1).astype('float')
    mask3 = (5 < data_tmp1).astype('float')
    mask4 = (data_tmp1 < 4).astype('float')
    npout_grad1 = npout_grad * mask1 + (npout_grad - npout_grad * mask2) + (npout_grad - npout_grad * mask3) + npout_grad * mask4
    assert_almost_equal(arr_grad1, npout_grad1)
def test_abs():
    """Check abs(): |x| forward, sign(x) * head-gradient backward."""
    var = mx.symbol.Variable('data')
    shape = (3, 4)
    np_in = np.full(shape, 5.0)
    nd_in = mx.nd.array(np_in)
    nd_grad = mx.nd.empty(shape)
    nd_grad[:] = 3
    sym_abs = mx.sym.abs(var)
    executor = sym_abs._bind(default_context(), args=[nd_in], args_grad=[nd_grad])
    executor.forward(is_train=True)
    assert_almost_equal(executor.outputs[0], abs(np_in))
    head_grad = mx.nd.empty(shape)
    head_grad[:] = 2
    executor.backward(head_grad)
    # d|x|/dx = sign(x), scaled by the incoming head gradient
    expected_grad = head_grad.asnumpy() * np.sign(np_in)
    assert_almost_equal(nd_grad, expected_grad)
def check_deconvolution_forward_backward(input_shape, num_filter, kernel, stride, pad):
    """configure A: input --> conv --> deconv --> output.
    The convolution and deconvolution have mirrored parameters, which ensures
    the output shape is the same as the input shape, and they share the same
    weights between conv and deconv;
    if the input value of forward() and backward() is the same, then their
    output values should also be the same.
    """
    assert input_shape[1] == num_filter
    data = mx.sym.Variable(name="data")
    conv = mx.sym.Convolution(
        data=data, kernel=kernel, stride=stride, pad=pad,
        num_filter=num_filter, no_bias = "true", name = "conv")
    deconv = mx.sym.Deconvolution(
        data=conv, kernel=kernel, stride=stride, pad=pad,
        num_filter=num_filter, no_bias = "true", name = "deconv")
    arg_names = deconv.list_arguments()
    arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
    input_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
    # feed the input back as the head gradient (see docstring)
    out_grad = input_data
    args = {}
    args["data"] = input_data
    # conv and deconv deliberately share the *same* weight tensor
    args['conv_weight'] = args['deconv_weight'] = mx.random.normal(0, 1,
        (num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
    args_grad = [mx.nd.empty(s) for s in arg_shapes]
    exe = deconv._bind(default_context(), args=args, args_grad=args_grad)
    exe.forward(is_train=True)
    out = exe.outputs[0]
    exe.backward(out_grad)
    # forward output must equal the data gradient when out_grad == input
    assert_almost_equal(out, args_grad[0], rtol=1E-3, atol=1e-3)
    # repeat with grad_req="add": gradients accumulate onto pre-filled buffers
    args_grad_addto_npy = [np.random.normal(size=s) for s in arg_shapes]
    args_grad_addto = [mx.nd.array(ele) for ele in args_grad_addto_npy]
    exe = deconv._bind(default_context(), args=args, args_grad=args_grad_addto, grad_req="add")
    exe.forward(is_train=True)
    out = exe.outputs[0].asnumpy()
    exe.backward(out_grad)
    assert_almost_equal(out + args_grad_addto_npy[0], args_grad_addto[0].asnumpy(), rtol=1e-3, atol=1e-3)
def check_deconvolution_gradient(input_shape, num_filter, pad):
    """configure A: input --> conv --> output.
    configure B: input --> deconv --> output
    The convolution and deconvolution have mirrored parameters, which ensures
    the input shape is the same as the output shape;
    During backward(), if the input of A equals the output of B, and the
    output of A equals the input of B, then the gradients of weight should be
    the same;
    """
    ndim = len(pad)
    stride = (1,) * ndim
    # kernel chosen so that this padding preserves the spatial shape
    kernel = tuple(2 * np.array(pad) + 1)
    data_conv = mx.sym.Variable(name="data_conv")
    conv = mx.sym.Convolution(
        data=data_conv, kernel=kernel, stride=stride, pad=pad,
        num_filter=num_filter, no_bias = "true", name = "conv")
    data_deconv = mx.sym.Variable(name="data_deconv")
    deconv = mx.sym.Deconvolution(
        data=data_deconv, kernel=kernel, stride=stride, pad=pad,
        num_filter=num_filter, no_bias = "true", name = "deconv")
    conv_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
    conv_args = {}
    conv_args["data_conv"] = conv_data
    conv_args['conv_weight'] = \
        mx.random.normal(0, 1,(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
    conv_args_grad = [mx.nd.zeros(conv_data.shape),
        mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
    exe_conv = conv._bind(default_context(), args=conv_args, args_grad=conv_args_grad)
    exe_conv.forward(is_train=True)
    conv_out_grad = mx.random.normal(0, 2, exe_conv.outputs[0].shape, ctx=mx.cpu()).copyto(default_context())
    exe_conv.backward(conv_out_grad)
    # B's input is A's head gradient; B's head gradient is A's input
    deconv_data = conv_out_grad
    deconv_args = {}
    deconv_args['data_deconv'] = deconv_data
    deconv_args['deconv_weight'] = conv_args['conv_weight']
    deconv_args_grad = [mx.nd.zeros(deconv_data.shape),
                        mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
    deconv_addto_args_grad_npy = [np.random.normal(size=deconv_data.shape),
                                  np.random.normal(size=(num_filter, input_shape[1]) + kernel)]
    deconv_addto_args_grad = [mx.nd.array(deconv_addto_args_grad_npy[0]),
                              mx.nd.array(deconv_addto_args_grad_npy[1])]
    exe_deconv = deconv._bind(default_context(), args=deconv_args, args_grad=deconv_args_grad)
    exe_deconv.forward(is_train=True)
    deconv_out_grad = conv_data[:]
    exe_deconv.backward(deconv_out_grad)
    # weight gradients of A and B must agree (index 1 is the weight slot)
    assert_almost_equal(conv_args_grad[1], deconv_args_grad[1], rtol=1e-3, atol=1e-2)
    # Test AddTo
    exe_deconv_addto = deconv._bind(default_context(), args=deconv_args,
                                    args_grad=deconv_addto_args_grad,
                                    grad_req="add")
    exe_deconv_addto.forward(is_train=True)
    deconv_out_grad = conv_data[:]
    exe_deconv_addto.backward(deconv_out_grad)
    assert_almost_equal(conv_args_grad[1].asnumpy() + deconv_addto_args_grad_npy[1],
                        deconv_addto_args_grad[1].asnumpy(), rtol=1e-3, atol=1e-2)
def check_deconvolution_target_shape(input_shape, kernel, stride, pad, adj, target_shape=None):
    """Infer the Deconvolution output shape and check it.

    When `target_shape` is given, the operator must honour it (pad/adj are
    then ignored by the caller's convention); otherwise the expected output
    spatial size defaults to 8 per kernel dimension.

    Removed dead locals: `arg_names` (list_arguments result never used) and
    the unused `arg_shapes` unpack slot.
    """
    data = mx.sym.Variable(name="data")
    if target_shape:
        deconv = mx.sym.Deconvolution(
            data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5,
            target_shape = target_shape)
    else:
        deconv = mx.sym.Deconvolution(
            data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5)
    _, out_shapes, _ = deconv.infer_shape(data=input_shape)
    default_target_size = 8
    if target_shape is None:
        target_shape = (default_target_size,) * len(kernel)
    assert out_shapes[0] == (input_shape[0], 5) + target_shape
@pytest.mark.serial
def test_deconvolution():
    """Exercise Deconvolution shape inference, forward/backward round-trip,
    and conv/deconv weight-gradient equivalence, in 2-D and 1-D."""
    # ---- 2D ----
    # explicit target_shape: pad and adj must be ignored by the operator
    check_deconvolution_target_shape(input_shape=(2, 3, 4, 4), kernel=(3, 3),
                                     stride=(2, 2), target_shape=(8, 8),
                                     pad=(99, 99), adj=(101, 101))
    # default target shape
    check_deconvolution_target_shape(input_shape=(2, 3, 4, 4), kernel=(3, 3),
                                     stride=(2, 2), pad=(1, 1), adj=(1, 1))
    check_deconvolution_forward_backward(input_shape=(1, 1, 5, 5), num_filter=1,
                                         kernel=(3, 3), stride=(1, 1), pad=(1, 1))
    check_deconvolution_forward_backward(input_shape=(32, 3, 28, 28), num_filter=3,
                                         kernel=(3, 3), stride=(1, 1), pad=(1, 1))
    check_deconvolution_forward_backward(input_shape=(10, 3, 403, 403), num_filter=3,
                                         kernel=(7, 7), stride=(5, 5), pad=(2, 2))
    check_deconvolution_gradient(input_shape=(1, 3, 5, 5), num_filter=3, pad=(1, 1))
    check_deconvolution_gradient(input_shape=(5, 3, 100, 100), num_filter=3, pad=(3, 3))
    # ---- 1D ----
    # explicit target_shape: pad and adj must be ignored by the operator
    check_deconvolution_target_shape(input_shape=(2, 3, 4), kernel=(3,),
                                     stride=(2,), target_shape=(8,),
                                     pad=(99,), adj=(101,))
    # default target shape
    check_deconvolution_target_shape(input_shape=(2, 3, 4), kernel=(3,),
                                     stride=(2,), pad=(1,), adj=(1,))
    check_deconvolution_forward_backward(input_shape=(1, 1, 5), num_filter=1,
                                         kernel=(3,), stride=(1,), pad=(1,))
    check_deconvolution_forward_backward(input_shape=(32, 3, 28), num_filter=3,
                                         kernel=(3,), stride=(1,), pad=(1,))
    check_deconvolution_forward_backward(input_shape=(10, 3, 403), num_filter=3,
                                         kernel=(7,), stride=(5,), pad=(2,))
    check_deconvolution_gradient(input_shape=(1, 3, 5), num_filter=3, pad=(1,))
    check_deconvolution_gradient(input_shape=(5, 3, 100), num_filter=3, pad=(3,))
def test_deconvolution_forward_with_bias():
    """Check that Deconvolution forward works with bias (no_bias=False).

    This is a smoke test: it only verifies that forward executes without
    error for 1-D and 2-D inputs; output values are not checked.

    Removed dead locals: `input_data` (a random tensor that was generated
    but never fed to the executor) and the unused `t` binding.
    """
    def check_deconvolution_forward_with_bias(shape=(1, 16, 5, 5), num_filter=32, num_group=1, kernel=(3, 3), pad=(1, 1)):
        x = mx.sym.Variable('x')
        w = mx.sym.Variable('w')
        y = mx.sym.Deconvolution(data=x, weight=w, num_filter=num_filter, num_group=num_group, kernel=kernel, no_bias=False, pad=pad)
        exe = y._simple_bind(ctx=mx.cpu(), x=shape, grad_req='null')
        exe.arg_arrays[0][:] = np.random.normal(size=exe.arg_arrays[0].shape)
        exe.arg_arrays[1][:] = np.random.normal(size=exe.arg_arrays[1].shape)
        exe.forward(is_train=False)
        # force a synchronous copy so any asynchronous failure surfaces here
        exe.outputs[0].asnumpy()
    check_deconvolution_forward_with_bias((1, 16, 5), 32, 1, (3,), (1,))
    check_deconvolution_forward_with_bias((32, 16, 5), 32, 1, (3,), (1,))
    check_deconvolution_forward_with_bias((1, 16, 5, 5), 32, 1, (3, 3), (1, 1))
    check_deconvolution_forward_with_bias((32, 16, 5, 5), 32, 1, (3, 3), (1, 1))
def check_nearest_upsampling_with_shape(shapes, scale, root_scale):
    """Bind a multi-input nearest UpSampling and verify backward gradients.

    The forward outputs are fed back as head gradients; each input's
    gradient is then asserted to be the input scaled by
    root_scale**2 * scale**(2*k).
    """
    # one random input per shape, plus a zero gradient buffer for each
    arr = {'arg_%d'%i: mx.random.uniform(-10.0, 10.0, shape, ctx=mx.cpu()).copyto(default_context()) for i, shape in zip(range(len(shapes)), shapes)}
    arr_grad = {'arg_%d'%i: mx.nd.zeros(shape) for i, shape in zip(range(len(shapes)), shapes)}
    up = mx.sym.UpSampling(*[mx.sym.Variable('arg_%d'%i) for i in range(len(shapes))], sample_type='nearest', scale=root_scale)
    exe = up._bind(default_context(), args=arr, args_grad=arr_grad)
    exe.forward(is_train=True)
    exe.backward(exe.outputs)
    for k in range(len(shapes)):
        name = 'arg_%d'%k
        assert_allclose(arr[name].asnumpy()*root_scale**2*scale**(2*k), arr_grad[name].asnumpy(), rtol=1e-4)
def check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter):
    """Bind bilinear UpSampling with a bilinear-interpolation kernel and
    check the output spatial shape.

    NOTE(review): `weight_shape` and `scale` are accepted but unused here —
    the weight shape is taken from infer_shape instead; kept for caller
    symmetry.
    """
    def _init_bilinear(arr, f):
        # fill `arr` with a bilinear interpolation kernel for factor f
        weight = np.zeros(np.prod(arr.shape), dtype='float32')
        shape = arr.shape
        c = (2 * f - 1 - f % 2) / (2. * f)
        for i in range(np.prod(shape)):
            x = i % shape[3]
            y = (i // shape[3]) % shape[2]
            weight[i] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
        arr[:] = weight.reshape(shape)
        return arr
    up = mx.sym.UpSampling(mx.sym.Variable("data"),
        mx.sym.Variable('weight'), sample_type='bilinear', scale=root_scale,
        num_filter=num_filter, num_args=2)
    arg_shapes, out_shapes, _ = up.infer_shape(data=data_shape)
    arr = {'data': mx.random.uniform(-5, 5, data_shape, ctx=mx.cpu()).copyto(default_context()),
           'weight':  mx.nd.array(_init_bilinear(mx.ndarray.empty(arg_shapes[1]).asnumpy(), root_scale))}
    arr_grad = [mx.nd.empty(s) for s in arg_shapes]
    exe = up._bind(default_context(), args=arr, args_grad=arr_grad)
    exe.forward(is_train=True)
    out = exe.outputs[0].asnumpy()
    exe.backward(exe.outputs)
    # spatial dims must be multiplied by root_scale; batch/channels unchanged
    target_shape = (data_shape[2] * root_scale, data_shape[3] * root_scale)
    assert out.shape == data_shape[:2] + target_shape
def test_nearest_upsampling():
    """Run nearest-neighbour UpSampling over a grid of scales and sizes."""
    # itertools.product iterates in the same order as the original
    # nested loops (last factor varies fastest).
    for root_scale, scale, num_shape, base in itertools.product([1, 2, 3], repeat=4):
        shapes = [(1, 3,
                   base * root_scale * scale ** (num_shape - 1 - i),
                   base * root_scale * scale ** (num_shape - 1 - i))
                  for i in range(num_shape)]
        check_nearest_upsampling_with_shape(shapes, scale, root_scale)
def test_bilinear_upsampling():
    """Run bilinear UpSampling over root scales, scales, filters and sizes."""
    root_scales = [2, 3]
    scale_opts = [1, 2, 3]
    filter_opts = [1, 2, 3]
    base_opts = [1, 2, 3]
    # bilinear upsampling takes only 1 data and 1 weight;
    # the multi-input mode is not applicable
    for root_scale, scale, num_filter, base in itertools.product(
            root_scales, scale_opts, filter_opts, base_opts):
        dimension = base * root_scale * scale
        kernel = 2 * root_scale - root_scale % 2
        data_shape = (1, num_filter, dimension, dimension)
        weight_shape = (1, num_filter, kernel, kernel)
        check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter)
def test_batchnorm_training():
    """Numeric-gradient check of BatchNorm in training mode.

    Covers fix_gamma / use_global_stats combinations and, additionally,
    every possible channel axis (positive and negative) for each shape.
    """
    def check_batchnorm_training(stype):
        for shape in [(2, 3), (2, 3, 2, 2), (2, 8, 2, 2)]:
            data_tmp = np.random.normal(-0.1, 0.1, size=shape)
            # per-channel parameter shape (channel axis = 1 by default)
            s = shape[1],
            gamma = np.ones(s)
            beta = np.ones(s)
            gamma[1] = 3
            beta[0] = 3
            rolling_mean = np.random.uniform(size=s)
            rolling_std = np.random.uniform(size=s)
            data = mx.symbol.Variable('data', stype=stype)
            in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
                           mx.nd.array(beta).tostype(stype)]
            # auxiliary states (running mean/std), not differentiated
            mean_std = [mx.nd.array(rolling_mean).tostype(stype), mx.nd.array(rolling_std).tostype(stype)]
            test = mx.symbol.BatchNorm(data, fix_gamma=True)
            check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
            test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True)
            check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
            test = mx.symbol.BatchNorm(data, fix_gamma=False)
            check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
            test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True)
            check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
            # Test varying channel axis
            dim = len(shape)
            for chaxis in range(-dim, dim):
                chaxis_true = chaxis
                if chaxis < 0:
                    chaxis_true = dim + chaxis
                shapex = shape
                channel_count = shapex[chaxis_true]
                data_tmp = np.random.normal(-0.1, 0.1, size=shapex)
                gamma = np.ones(channel_count)
                beta = np.ones(channel_count)
                if channel_count > 1:
                    gamma[1] = 3
                beta[0] = 3
                in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
                               mx.nd.array(beta).tostype(stype)]
                xrolling_mean = np.random.uniform(size=channel_count)
                xrolling_std = np.random.uniform(size=channel_count)
                xmean_std = [mx.nd.array(xrolling_mean).tostype(stype),
                             mx.nd.array(xrolling_std).tostype(stype)]
                test = mx.symbol.BatchNorm(data, fix_gamma=True, axis=chaxis)
                check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
                test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True, axis=chaxis)
                check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
                test = mx.symbol.BatchNorm(data, fix_gamma=False, axis=chaxis)
                check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
                test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True, axis=chaxis)
                check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
    check_batchnorm_training('default')
@xfail_when_nonstandard_decimal_separator
@pytest.mark.parametrize('op_name', ['BatchNorm', 'SyncBatchNorm'])
@pytest.mark.parametrize('shape', [(4, 2), (4, 3, 4),
                                   (4, 6, 4, 5), (4, 5, 6, 4, 5)])
@pytest.mark.parametrize('fix_gamma', [False, True])
@pytest.mark.parametrize('cudnn_off', [False, True])
@pytest.mark.parametrize('output_mean_var', [False, True])
def test_batchnorm(op_name, shape, fix_gamma, cudnn_off, output_mean_var):
    """Compare (Sync)BatchNorm forward/backward and running statistics
    against a hand-computed reference, over all grad_req combinations
    ('null'/'write'/'add') and every channel axis."""
    if op_name == 'BatchNorm':
        op = mx.nd.BatchNorm
    elif op_name == 'SyncBatchNorm':
        op = mx.nd.contrib.SyncBatchNorm
    else:
        raise ValueError(f'Not supported {op_name}')
    momentum = 0.9
    epsilon = 1e-5
    def _test_batchnorm_impl(axis,
                             data_grad_req, gamma_grad_req, beta_grad_req):
        kwargs = dict(output_mean_var=output_mean_var)
        if op_name == 'SyncBatchNorm':
            # SyncBatchNorm only supports axis=1 and has no cudnn_off switch
            if axis != 1:
                return
            key = str(op) + str(shape) + str(axis)
            kwargs.update(dict(key=key))
            if cudnn_off:
                return
        else:
            kwargs.update(dict(axis=axis, cudnn_off=cudnn_off))
        nch = shape[axis]
        if not fix_gamma:
            bn_gamma = mx.nd.random.uniform(shape=(nch,))
            bn_gamma.attach_grad(grad_req=gamma_grad_req)
        else:
            bn_gamma = mx.nd.ones(shape=(nch,))
        bn_beta = mx.nd.random.uniform(shape=(nch,))
        bn_beta.attach_grad(grad_req=beta_grad_req)
        # aux states updated by the operator vs. reference copies we track
        bn_running_mean = mx.nd.zeros(nch)
        bn_running_var = mx.nd.ones(nch)
        running_mean = mx.nd.zeros(nch)
        running_var = mx.nd.ones(nch)
        num_iters = 10
        expand_shape = [1] * len(shape)
        expand_shape[axis] = shape[axis]
        data = mx.nd.random.uniform(shape=shape)
        data.attach_grad(grad_req=data_grad_req)
        # accumulators for the expected gradients under grad_req='add'
        adX, adW, adb = 0, 0, 0
        is_train = data_grad_req != 'null' or \
            (not fix_gamma and gamma_grad_req != 'null') or \
            beta_grad_req != 'null'
        for _ in range(num_iters):
            if data_grad_req != 'add':
                data = mx.nd.random.uniform(shape=shape)
                data.attach_grad(grad_req=data_grad_req)
            ograd = mx.nd.random.uniform(shape=shape)
            with mx.autograd.record():
                output = op(data, bn_gamma, bn_beta,
                            bn_running_mean, bn_running_var,
                            momentum=momentum, eps=epsilon,
                            fix_gamma=fix_gamma, **kwargs)
                if output_mean_var:
                    output, output_mean, output_std = output
                if is_train:
                    output.backward(ograd)
            mx.nd.waitall()
            # ---- reference forward ----
            data_mean = data.mean(
                axis=axis, exclude=True, keepdims=True)
            data_var = (data - data_mean).square().mean(axis=axis,
                                                        exclude=True,
                                                        keepdims=True)
            target_output = (data - data_mean) / \
                (data_var + epsilon).sqrt() * \
                bn_gamma.reshape(expand_shape) + \
                bn_beta.reshape(expand_shape)
            # squeeze data_mean and data_var
            data_mean_flat = data_mean.squeeze()
            data_var_flat = data_var.squeeze()
            running_mean = running_mean * momentum + \
                data_mean_flat * (1 - momentum)
            m = np.prod(shape) / shape[axis]
            # cudnn uses m-1 in the denominator of its sample variance calculation, not m
            sample_var_adjust = 1.0 if cudnn_off or fix_gamma else m / (m-1)
            running_var = running_var * momentum + \
                data_var_flat * sample_var_adjust * (1 - momentum)
            # ---- reference backward (hand-derived batchnorm gradients) ----
            W = bn_gamma.reshape(expand_shape)
            dnx = ograd * W
            xsm = data - data_mean
            nd = 1.0 / mx.nd.sqrt(data_var + epsilon)
            nx = xsm * nd
            dvar = (dnx * xsm).sum(axis=axis, keepdims=True,
                                   exclude=True) * (-0.5) * mx.nd.power(nd, 3)
            dmean = -nd * dnx.sum(axis=axis, keepdims=True, exclude=True) - \
                dvar * xsm.mean(axis=axis, keepdims=True,
                                exclude=True) * 2.0
            dX = dnx * nd + dvar * xsm * (2.0 / m) + dmean * (1.0 / m)
            dW = (ograd * nx).sum(axis=axis, exclude=True)
            db = ograd.sum(axis=axis, exclude=True)
            # accumulate only for the 'add' grad_req, otherwise overwrite
            adX = dX if data_grad_req != 'add' else adX + dX
            adW = dW if gamma_grad_req != 'add' else adW + dW
            adb = db if beta_grad_req != 'add' else adb + db
            atol, rtol = 5e-2, 5e-2
            if output_mean_var:
                assert_almost_equal(output_mean.asnumpy(),
                                    data_mean_flat.asnumpy(),
                                    atol=atol, rtol=rtol)
                # BatchNorm reports inverse std; SyncBatchNorm reports variance
                if op != mx.nd.contrib.SyncBatchNorm:
                    assert_almost_equal(output_std.asnumpy(),
                                        (1.0 / (data_var_flat +
                                                epsilon).sqrt()).asnumpy(),
                                        atol=atol, rtol=rtol)
                else:
                    assert_almost_equal(output_std.asnumpy(),
                                        data_var_flat.asnumpy(),
                                        atol=atol, rtol=rtol)
            assert_almost_equal(output.asnumpy(), target_output.asnumpy(),
                                atol=atol, rtol=rtol)
            if is_train:
                assert_almost_equal(bn_running_mean.asnumpy(
                ), running_mean.asnumpy(), atol=atol, rtol=rtol)
                assert_almost_equal(bn_running_var.asnumpy(
                ), running_var.asnumpy(), atol=atol, rtol=rtol)
            if data_grad_req != 'null':
                assert_almost_equal(data.grad.asnumpy(),
                                    adX.asnumpy(), atol=atol, rtol=rtol)
            if not fix_gamma:
                if gamma_grad_req != 'null':
                    assert_almost_equal(
                        bn_gamma.grad.asnumpy(), adW.asnumpy(),
                        atol=atol, rtol=rtol)
            else:
                # with fix_gamma the scale must stay pinned at 1
                assert((bn_gamma.asnumpy() == 1).all())
            if beta_grad_req != 'null':
                assert_almost_equal(
                    bn_beta.grad.asnumpy(), adb.asnumpy(), atol=atol, rtol=rtol)
    # restrict the grad_req sweep to 4-D shapes to bound the test runtime
    grad_reqs = ['write'] if len(shape) != 4 else ['null', 'write', 'add']
    for data_grad_req in grad_reqs:
        for gamma_grad_req in grad_reqs:
            if fix_gamma and gamma_grad_req != 'null':
                continue
            for beta_grad_req in grad_reqs:
                for axis in range(len(shape)):
                    _test_batchnorm_impl(axis,
                                         data_grad_req, gamma_grad_req, beta_grad_req)
def test_groupnorm():
    """Compare GroupNorm forward and backward against a numpy reference for
    float16/32/64, using randomized shapes and group counts."""
    # accumulation dtype used by the numpy reference for each input dtype
    acc_types = {'float16': 'float32', 'float32': 'float64', 'float64': 'float64'}
    def x_hat_helper(x, num_groups, eps):
        # reshape (N, C, H, W) -> (N, G, C//G, H, W) and normalize per group
        dtype = x.dtype
        dshape = x.shape
        assert len(dshape) == 4
        acc_type = acc_types[str(dtype)]
        new_shape = (dshape[0], num_groups, int(dshape[1] / num_groups), dshape[2], dshape[3])
        new_moments_shape = (dshape[0], num_groups, 1, 1, 1)
        data = x.reshape(new_shape)
        mean = np.mean(data, axis=(2, 3, 4), keepdims=False, dtype=acc_type).astype(dtype)
        std = np.sqrt(np.var(data, axis=(2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype) + eps)
        x_hat = (data - mean.reshape(new_moments_shape)) / std.reshape(new_moments_shape)
        return x_hat, mean, std
    def np_groupnorm(data, gamma, beta, num_groups, eps):
        # NOTE(review): reads `dshape` from the enclosing scope — it is
        # assigned below, before the first call to this helper
        new_param_shape = (1, dshape[1], 1, 1)
        x_hat, mean, std = x_hat_helper(data, num_groups, eps)
        out = x_hat.reshape(dshape) * gamma.reshape(new_param_shape) + beta.reshape(new_param_shape)
        return out, mean, std
    def np_groupnorm_grad(ograd, data, gamma, beta, mean, std, num_groups, eps):
        # reference gradients w.r.t. data, gamma and beta
        x_hat, mean, std = x_hat_helper(data, num_groups, eps)
        new_shape = x_hat.shape
        dshape = data.shape
        dtype = data.dtype
        new_moments_shape = (new_shape[0], num_groups, 1, 1, 1)
        new_param_shape = (1, dshape[1], 1, 1)
        acc_type = acc_types[str(dtype)]
        ograd = ograd.reshape(new_shape)
        data = data.reshape(new_shape)
        gamma = gamma.reshape(new_param_shape)
        beta = beta.reshape(new_param_shape)
        mean = mean.reshape(new_moments_shape)
        std = std.reshape(new_moments_shape)
        beta_grad = np.sum(ograd, axis=(0, 3, 4), dtype=acc_type, keepdims=False).astype(dtype).flatten()
        gamma_grad = np.sum(x_hat * ograd, axis=(0, 3, 4), dtype=acc_type, keepdims=False).astype(dtype).flatten()
        x_hat_grad = ograd * gamma.reshape(1, num_groups, dshape[1] // num_groups, 1, 1)
        ograd_mult = x_hat_grad / std
        red_out = np.mean(ograd_mult, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
        data_grad = ograd_mult - red_out
        red_out = np.mean(ograd_mult * x_hat, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
        data_grad = data_grad - x_hat * red_out
        return data_grad.reshape(dshape), gamma_grad, beta_grad
    # randomized problem size; channels are a multiple of the group count
    batch_size = random.randint(1, 8)
    num_groups = random.randint(2, 3)
    num_channels = random.randint(2, 3) * num_groups
    height = random.randint(1, 5)
    width = random.randint(1, 5)
    dshape = (batch_size, num_channels, height, width)
    param_shape = (num_channels,)
    temp_shape = (batch_size, num_groups, int(num_channels / num_groups), height, width)
    np_data = np.random.uniform(0.2, 1.0, dshape)
    np_gamma = np.random.uniform(-1.0, 1.0, param_shape)
    np_beta = np.random.uniform(-1.0, 1.0, param_shape)
    data_sym = mx.sym.Variable("data")
    gamma_sym = mx.sym.Variable("gamma")
    beta_sym = mx.sym.Variable("beta")
    for dtype in [np.float16, np.float32, np.float64]:
        # looser epsilon for half precision
        eps = 1e-2 if dtype == np.float16 else 1e-5
        mx_data = mx.nd.array(np_data, dtype=dtype)
        mx_gamma = mx.nd.array(np_gamma, dtype=dtype)
        mx_beta = mx.nd.array(np_beta, dtype=dtype)
        np_out, np_mean, np_std = np_groupnorm(np_data.astype(dtype),
                                               np_gamma.astype(dtype),
                                               np_beta.astype(dtype),
                                               num_groups=num_groups,
                                               eps=eps)
        mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
                                  num_groups=num_groups, eps=eps, output_mean_var=True)
        check_symbolic_forward(mx_sym, [mx_data, mx_gamma, mx_beta], [np_out, np_mean, np_std],
                               rtol=1e-2 if dtype == np.float16 else 1e-3,
                               atol=5e-3 if dtype == np.float16 else 1e-4, dtype=dtype)
        mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
                                  num_groups=num_groups, eps=eps, output_mean_var=False)
        np_ograd = np.random.uniform(-1.0, 1.0, dshape).astype(dtype)
        np_data_grad, np_gamma_grad, np_beta_grad = np_groupnorm_grad(np_ograd,
                                                                      np_data.astype(dtype),
                                                                      np_gamma.astype(dtype),
                                                                      np_beta.astype(dtype),
                                                                      np_mean, np_std,
                                                                      num_groups, eps)
        check_symbolic_backward(mx_sym, [mx_data, mx_gamma, mx_beta], [mx.nd.array(np_ograd, dtype=np_ograd.dtype)],
                                [np_data_grad, np_gamma_grad, np_beta_grad],
                                rtol=1e-2 if dtype == np.float16 else 1e-3,
                                atol=5e-2 if dtype == np.float16 else 1e-4, dtype=dtype)
def test_convolution_grouping():
    """Grouped convolution must equal the concatenation of per-group
    convolutions over sliced inputs/weights/biases (1-D, 2-D and 3-D)."""
    for dim in [1, 2, 3]:
        num_filter = 4
        for num_group in [1, 2]:
            kernel = (3,) * dim
            shape = (1, 4) + (9,) * dim
            x = mx.sym.Variable('x')
            w = mx.sym.Variable('w')
            b = mx.sym.Variable('b')
            # y1: single grouped convolution
            y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group, kernel=kernel)
            # y2: reference — slice channels and convolve each group separately
            xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
            wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
            bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
            y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
                                                    num_filter=num_filter//num_group, kernel=kernel)
                                 for i in range(num_group)])
            exe1 = y1._simple_bind(default_context(), x=shape)
            exe2 = y2._simple_bind(default_context(), x=shape, w=(num_filter, shape[1]//num_group) + kernel, b=(num_filter,))
            # both executors get identical random parameters
            for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
                arr1[:] = np.float32(np.random.normal(size=arr1.shape))
                arr2[:] = arr1
            exe1.forward(is_train=True)
            exe1.backward(exe1.outputs[0])
            exe2.forward(is_train=True)
            exe2.backward(exe2.outputs[0])
            # outputs and all gradients must match between the two graphs
            for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
                np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-3, atol=1e-3)
@pytest.mark.skip(reason="Flaky test https://github.com/apache/incubator-mxnet/issues/14052")
def test_depthwise_convolution():
    """Depthwise convolution (num_group == channels) must match the
    concatenation of independent per-channel convolutions."""
    # itertools.product preserves the original nested-loop order
    for dim, num_base, kernel_x, stride_x, pad_x, in_size in itertools.product(
            [1, 2], [1, 4, 16, 32, 64], [3, 5], [1, 2], [0, 1], [7, 32]):
        kernel = (kernel_x,) * dim
        stride = (stride_x,) * dim
        pad = (pad_x,) * dim
        num_filter = num_base
        num_group = num_base
        shape = (2, num_base) + (in_size,) * dim
        x = mx.sym.Variable('x')
        w = mx.sym.Variable('w')
        b = mx.sym.Variable('b')
        # y1: one depthwise convolution
        y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group,
                                kernel=kernel, stride=stride, pad=pad)
        # y2: reference — slice into groups and convolve each independently
        xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
        wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
        bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
        y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
                                                num_filter=num_filter//num_group, kernel=kernel,
                                                stride=stride, pad=pad)
                             for i in range(num_group)])
        dev = default_context()
        exe1 = y1._simple_bind(dev, x=shape)
        exe2 = y2._simple_bind(dev, x=shape, w=(num_filter, shape[1]//num_group)+kernel,
                               b=(num_filter,))
        # both executors get identical random parameters
        for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
            arr1[:] = np.random.normal(size=arr1.shape)
            arr2[:] = arr1
        exe1.forward(is_train=True)
        exe1.backward(exe1.outputs[0])
        exe2.forward(is_train=True)
        exe2.backward(exe2.outputs[0])
        for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
            assert_allclose(arr1, arr2, rtol=1e-3, atol=1e-3)
def test_convolution_independent_gradients():
    """Check that each of a Convolution's gradients (x, w, b) is computed
    correctly when its grad_req is set independently of the others, by
    comparing against an executor where all three share one grad_req."""
    # NOTE(zixuanweeei): Flaky test tracked by https://github.com/apache/incubator-mxnet/issues/15603.
    # GPU context will be enabled after figuring out the possible issue tracked at
    # https://github.com/apache/incubator-mxnet/issues/15638.
    ctx = mx.cpu()
    atol = 1.0e-3
    rtol = 1.0e-3
    reqs = ["null", "write", "add"]
    var_names = ["x", "w", "b"]
    dims = [1, 2]
    num_bases = [1, 8]
    kernel_xs = [3, 5]
    stride_xs = [1, 2]
    pad_xs = [0, 1]
    in_sizes = [7, 32]
    no_biases = [True, False]
    for dim, num_base, kernel_x, stride_x, pad_x , in_size, no_bias in \
            itertools.product(dims, num_bases, kernel_xs, stride_xs, pad_xs, in_sizes, no_biases):
        # Prepare params shape
        kernel = (kernel_x,) * dim
        stride = (stride_x,) * dim
        pad = (pad_x,) * dim
        num_filter = num_base
        x_shape = (2, num_base) + (in_size,) * dim
        w_shape = (num_filter, num_base) + kernel
        # Symbols definition
        x = mx.sym.Variable('x')
        w = mx.sym.Variable('w')
        b = mx.sym.Variable('b') if not no_bias else None
        conv = mx.sym.Convolution(x, w, b, num_filter=num_filter,
            kernel=kernel, stride=stride, pad=pad, no_bias=no_bias)
        for req_kind in reqs:
            # Binding args for conv with possible dependent gradients
            base_args = {
                'x': mx.nd.random.normal(shape=x_shape, ctx=ctx),
                'w': mx.nd.random.normal(shape=w_shape, ctx=ctx),
                'b': mx.nd.random.normal(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
            args1 = copy.deepcopy(base_args)
            grad1 = {
                'x': mx.nd.zeros(shape=x_shape, ctx=ctx),
                'w': mx.nd.zeros(shape=w_shape, ctx=ctx),
                'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
            # reference executor: all three gradients use the same grad_req
            grad_req1 = [req_kind] * 3
            grad_req1 = dict(zip(var_names, grad_req1))
            exe1 = conv._bind(ctx, args1, args_grad=grad1, grad_req=grad_req1)
            exe1.forward(is_train=True)
            exe1.backward(exe1.outputs[0])
            for x_req, w_req, b_req in itertools.product(reqs, repeat=3):
                # Binding args for conv with independent gradients
                args2 = copy.deepcopy(base_args)  # Deepcopy the same params of `exe1`
                grad2 = {
                    'x': mx.nd.zeros(shape=x_shape, ctx=ctx),
                    'w': mx.nd.zeros(shape=w_shape, ctx=ctx),
                    'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
                grad_req2 = {"x": x_req, "w": w_req, "b": b_req}
                exe2 = conv._bind(ctx, args2, args_grad=grad2, grad_req=grad_req2)
                exe2.forward(is_train=True)
                np.testing.assert_allclose(exe1.outputs[0].asnumpy(),
                    exe2.outputs[0].asnumpy(), rtol=rtol, atol=atol)
                exe2.backward(exe2.outputs[0])
                for var_name in var_names:
                    if var_name == "b" and no_bias:
                        continue
                    if grad_req2[var_name] == "null":
                        # 'null' must leave the gradient buffer untouched (zeros)
                        exe2_var_grad = grad2[var_name].asnumpy()
                        np.testing.assert_allclose(exe2_var_grad,
                            np.zeros_like(exe2_var_grad), rtol=rtol, atol=atol)
                    # only compare where both executors used the same grad_req
                    if grad_req2[var_name] != grad_req1[var_name]:
                        continue
                    np.testing.assert_allclose(args1[var_name].asnumpy(),
                        args2[var_name].asnumpy(), rtol=rtol, atol=atol)
                    np.testing.assert_allclose(grad1[var_name].asnumpy(),
                        grad2[var_name].asnumpy(), rtol=rtol, atol=atol)
def gen_broadcast_data(idx):
    """Return a pair [lhs, rhs] of random float ndarrays with broadcast-compatible shapes.

    For idx within the fixed table below, shapes are taken from the table;
    otherwise shapes are drawn randomly with ndim in [1, 5] and each dim in
    [1, 5], with each side independently zeroing out (setting to 1) a random
    subset of axes.
    """
    fixed_shape_pairs = np.array(
        [[[2, 5, 1, 30, 7], [1, 5, 448, 30, 1]],
         [[10, 49, 1, 77, 17], [10, 1, 2, 1, 17]],
         [[13, 2, 65, 2, 1], [13, 1, 65, 1, 225]],
         [[9, 434, 4, 2, 37], [9, 1, 4, 1, 37]],
         [[2, 52, 1, 4, 1], [1, 52, 60, 1, 37]],
         [[1, 23, 7, 122, 50], [2, 1, 7, 1, 50]],
         [[1, 17, 1, 5, 1], [22, 1, 2, 1, 28]],
         [[29, 1, 2, 1, 8], [29, 22, 1, 130, 1]],
         [[2, 36, 1, 427, 3], [1, 36, 11, 427, 1]],
         [[1, 2, 1, 100, 7], [1, 2, 448, 100, 1]],
         [[1, 2, 495, 77, 7], [1, 2, 1, 1, 7]],
         [[1, 43, 65, 2, 1], [1, 43, 65, 1, 225]],
         [[1, 92, 434, 2, 2], [1, 92, 1, 2, 2]],
         [[1, 92, 1, 4, 1], [1, 92, 134, 1, 17]],
         [[1, 53, 2, 122, 143], [1, 1, 2, 1, 143]],
         [[1, 179, 1, 87, 17], [1, 179, 1, 1, 17]],
         [[1, 1, 17, 5, 1], [1, 22, 1, 1, 28]],
         [[1, 2, 1, 1, 8], [1, 2, 52, 430, 1]],
         [[1, 163, 1, 22, 3], [1, 163, 116, 22, 1]],
         [[1, 1, 44, 30, 7], [1, 1, 44, 30, 1]],
         [[1, 1, 1, 1, 28], [1, 127, 1, 5, 28]],
         [[1, 2, 394, 38, 1], [1, 2, 394, 38, 16]],
         [[1, 10, 49, 77, 17], [1, 1, 1, 1, 17]],
         [[1, 431, 6, 2, 225], [1, 1, 6, 2, 225]],
         [[1, 15, 1, 28, 1], [1, 15, 1, 28, 463]],
         [[1, 129, 2, 48, 96], [1, 129, 2, 1, 1]],
         [[1, 1, 403, 17, 2], [1, 44, 403, 17, 2]],
         [[1, 1, 65, 2, 22], [1, 1, 65, 1, 1]],
         [[1, 24, 103, 17, 18], [1, 24, 1, 1, 1]],
         [[1, 1, 1, 1, 2], [1, 24, 194, 50, 1]],
         [[1, 1, 107, 84, 9], [1, 1, 1, 1, 1]]])
    if idx < fixed_shape_pairs.shape[0]:
        l_shape, r_shape = fixed_shape_pairs[idx]
    else:
        ndim = np.random.randint(1, 6)
        base_shape = np.random.randint(1, 6, size=(ndim,))
        # 1-in-5 chance per side of keeping every axis of base_shape.
        l_keep_roll = np.random.randint(0, 5)
        r_keep_roll = np.random.randint(0, 5)
        l_flags = np.random.randint(0, 2, size=ndim)
        r_flags = np.random.randint(0, 2, size=ndim)
        if l_keep_roll == 4:
            l_flags = np.ones(ndim)
        if r_keep_roll == 4:
            r_flags = np.ones(ndim)
        l_shape = base_shape.copy()
        r_shape = base_shape.copy()
        # Axes with a zero flag become broadcast axes (size 1).
        l_shape[l_flags == 0] = 1
        r_shape[r_flags == 0] = 1
    return [np.random.random(l_shape), np.random.random(r_shape)]
def gen_broadcast_data_int(idx):
    """Integer variant of gen_broadcast_data: scale to roughly [0, 100] and truncate."""
    # Fix: dropped the stray C-style trailing semicolons of the original.
    d = gen_broadcast_data(idx)
    return [np.round(d[0] * 100).astype(int), np.round(d[1] * 100).astype(int)]
def gen_binary_data(dummy):
    """Return two random float ndarrays sharing one random shape.

    The shape has ndim in [1, 5] with every dim in [1, 5]; `dummy` is ignored
    (kept for signature compatibility with gen_broadcast_data).
    """
    rank = np.random.randint(1, 6)
    common_shape = np.random.randint(1, 6, size=(rank,))
    return [np.random.random(common_shape), np.random.random(common_shape)]
def gen_binary_data_int(dummy):
    """Integer variant of gen_binary_data: scale to roughly [0, 100] and truncate."""
    # Fix: dropped the stray C-style trailing semicolons of the original.
    d = gen_binary_data(dummy)
    return [np.round(d[0] * 100).astype(int), np.round(d[1] * 100).astype(int)]
def check_binary_op_forward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5, mx_nd_func=None):
    """Check a binary mx symbol's forward output against a numpy baseline.

    Runs 200 trials; `gen_data(i)` supplies the [lhs, rhs] operands, bound to
    the symbol's variables 'a' and 'b'.  If `mx_nd_func` is given, the
    imperative ndarray result is checked as well.  On a precision mismatch the
    offending elements are logged — including their raw IEEE-754 byte
    encodings, to expose last-ulp differences — before the assertion fires.
    """
    sample_num = 200
    for i in range(sample_num):
        d = gen_data(i)
        y = symbol._bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])})
        y.forward(is_train=True)
        y = y.outputs[0].asnumpy()
        # Cast the baseline to the executor's output dtype before comparing.
        x = baseline(d[0], d[1]).astype(y.dtype)
        if mx_nd_func is not None:
            d0 = mx.nd.array(d[0], dtype=d[0].dtype)
            d1 = mx.nd.array(d[1], dtype=d[1].dtype)
            assert_almost_equal(y, mx_nd_func(d0, d1).asnumpy(), rtol=rtol, atol=atol)
        idx = np.abs(x - y) > atol + rtol * np.abs(x)
        if idx.any():
            import binascii
            np.set_printoptions(precision=20)
            logging.error('found precision problem:')
            # Broadcast the inputs so idx can index them elementwise.
            d[0] = np.broadcast_to(d[0], x.shape)
            d[1] = np.broadcast_to(d[1], x.shape)
            logging.error('input a: {}'.format(d[0][idx]))
            logging.error('input b: {}'.format(d[1][idx]))
            logging.error("output x: {} {}".format(x.dtype, x))
            logging.error("output y: {} {}".format(y.dtype, y))
            def ftohex(xs):
                # Hex dump of the raw float64 bytes of each element.
                import struct
                return list(map(lambda x: binascii.hexlify(struct.pack('d', x)), xs.flatten()))
            logging.error('output x in baseline(a, b): {}'.format(x[idx]))
            logging.error('output y in symbol(a, b): {}'.format(y[idx]))
            logging.error('output x in baseline(a,b) hex: {}'.format(ftohex(x[idx])))
            logging.error('output y in symbol(a,b) hex: {}'.format(ftohex(y[idx])))
            logging.error('input a hex: {}'.format(ftohex(d[0][idx])))
            # Fix: this line logged d[1] under the label 'input a hex'.
            logging.error('input b hex: {}'.format(ftohex(d[1][idx])))
            logging.error('diff: {}'.format(np.abs(x - y)[idx] - atol - rtol * np.abs(x)[idx]))
        assert_allclose(y, x, rtol=rtol, atol=atol)
def check_binary_op_backward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5):
    """Check a binary mx symbol's input gradients against a numpy baseline.

    `baseline(out_grad, lhs, rhs)` must return the (lhs_grad, rhs_grad) pair
    at the broadcast output shape; each grad is then sum-reduced back to its
    input's shape (mirroring broadcast backward semantics) before comparison.
    """
    sample_num = 200
    for i in range(sample_num):
        d = gen_data(i)
        # Head gradient with the full (broadcast) output shape.
        out = np.random.random((d[0] + d[1]).shape)
        def reduce_op(shape, x):
            # Sum-reduce x along every axis where it was broadcast relative to
            # `shape`, keeping each reduced axis as size 1 so later axis
            # indices stay valid.
            if shape == x.shape:
                return x
            keepdims_shape = list(x.shape)
            for i in range(len(shape)):
                if x.shape[i] != shape[i]:
                    keepdims_shape[i] = 1
                    x = np.sum(x, axis=i).reshape(keepdims_shape)
            return x
        baseline_grad1, baseline_grad2 = baseline(out, d[0], d[1])
        x_1 = reduce_op(d[0].shape, baseline_grad1)
        x_2 = reduce_op(d[1].shape, baseline_grad2)
        y_1 = mx.nd.empty(d[0].shape)
        y_2 = mx.nd.empty(d[1].shape)
        y = symbol._bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])},
                         args_grad=[y_1, y_2])
        o = y.forward(is_train=True)
        # Cast the head gradient to the output dtype so backward dtypes agree.
        y.backward([mx.nd.array(out, dtype=o[0].dtype)])
        assert_allclose(y_1.asnumpy(), x_1, rtol=rtol, atol=atol)
        assert_allclose(y_2.asnumpy(), x_2, rtol=rtol, atol=atol)
def test_binary_op():
    """Forward/backward checks for elementwise binary operators on same-shape operands.

    Each nested helper builds one operator graph from the shared symbolic
    variables `a` and `b` and validates it against a numpy baseline via
    check_binary_op_forward / check_binary_op_backward using gen_binary_data.
    """
    a = mx.sym.Variable('a')
    b = mx.sym.Variable('b')
    def test_bplus(a, b):
        c = a + b
        check_binary_op_forward(c, lambda a, b: a + b, gen_binary_data)
        check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_binary_data)
    def test_bminus(a, b):
        c = a - b
        check_binary_op_forward(c, lambda a, b: a - b, gen_binary_data)
        check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_binary_data)
    def test_bmul(a, b):
        c = a * b
        check_binary_op_forward(c, lambda a, b: a * b, gen_binary_data)
        check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_binary_data)
    def test_bdiv(a, b):
        c = a / b
        check_binary_op_forward(c, lambda a, b: a / b, gen_binary_data)
        check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_binary_data)
    def test_bmod(a, b):
        # Python and numpy operate only in double so to avoid numerical errors we have to use
        # doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044
        #c = a % b
        c = mx.sym.cast(a, dtype='float64') % mx.sym.cast(b, dtype='float64')
        # '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
        check_binary_op_forward(c, lambda a, b: np.float32(a) % np.float32(b), gen_binary_data, rtol=0, atol=0)
        check_binary_op_backward(c,
            lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
    def test_bmod_int(a, b):
        # Integer modulo: gradient is defined as zero w.r.t. both operands.
        c = mx.sym.cast(a, dtype='int32') % mx.sym.cast(b, dtype='int32')
        check_binary_op_forward(c, lambda a, b: a % b, gen_binary_data_int)
        check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data_int)
    def test_bpow(a, b):
        c = a ** b
        check_binary_op_forward(c, lambda a, b: a ** b, gen_binary_data)
        check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
                                        g_out * a ** b * np.log(a)), gen_binary_data)
    def test_bneq(a, b):
        c = a != b
        # '!=' is sensitive to the precision of the comparison. Force numpy to match mxnet's float32.
        # Issue exposed with seed 1644387363
        check_binary_op_forward(c, lambda a, b: (np.float32(a) != np.float32(b)).astype(a.dtype), gen_binary_data)
        check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data)
    test_bplus(a, b)
    test_bminus(a, b)
    test_bmul(a, b)
    test_bdiv(a, b)
    test_bmod(a, b)
    test_bmod_int(a, b)
    test_bpow(a, b)
    test_bneq(a, b)
def test_broadcast_binary_op():
    """Forward/backward checks for broadcast_* binary operators.

    Same structure as test_binary_op, but the operands come from
    gen_broadcast_data (broadcast-compatible, generally unequal shapes) and
    the symbolic ops are the explicit broadcast_* variants.  Where available,
    the imperative mx.nd counterpart is checked too via mx_nd_func.
    """
    def check_bmaxmin_gradient(test_sym, x, y, delta, rtol, atol):
        """This function ensures that checking the numerical gradient of
        broadcast_max/min is not crossing the boundary y=x where there
        is no gradient definition at those sigularities."""
        # Shift y strictly above max(x), then strictly below min(x), so the
        # finite-difference probe (step `delta`) never straddles y == x.
        x_max = np.max(x)
        y = x_max + 2 * delta + np.random.random(y.shape)
        check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
        x_min = np.min(x)
        y = x_min - 2 * delta - np.random.random(y.shape)
        check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
    a = mx.sym.Variable('a')
    b = mx.sym.Variable('b')
    def test_bplus(a, b):
        c = mx.sym.broadcast_plus(a, b)
        check_binary_op_forward(c, lambda a, b: a + b, gen_broadcast_data, mx_nd_func=mx.nd.add)
        check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_broadcast_data)
    def test_bminus(a, b):
        c = mx.sym.broadcast_minus(a, b)
        check_binary_op_forward(c, lambda a, b: a - b, gen_broadcast_data, mx_nd_func=mx.nd.subtract)
        check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_broadcast_data)
    def test_bmul(a, b):
        c = mx.sym.broadcast_mul(a, b)
        check_binary_op_forward(c, lambda a, b: a * b, gen_broadcast_data, mx_nd_func=mx.nd.multiply)
        check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_broadcast_data)
    def test_bdiv(a, b):
        c = mx.sym.broadcast_div(a, b)
        check_binary_op_forward(c, lambda a, b: a / b, gen_broadcast_data, mx_nd_func=mx.nd.divide)
        check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_broadcast_data)
    def test_bmod(a_, b_):
        # Python and numpy operate only in double so to avoid numerical errors we have to use
        # doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044
        a = mx.sym.cast(a_, dtype='float64')
        b = mx.sym.cast(b_, dtype='float64')
        # '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
        c = mx.sym.broadcast_mod(a, b)
        check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data, atol=1, mx_nd_func=mx.nd.modulo)
        # NOTE(review): the backward check uses gen_binary_data (same-shape
        # operands) rather than gen_broadcast_data — possibly deliberate to
        # sidestep broadcast-mod gradient issues; confirm before changing.
        check_binary_op_backward(c,
            lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
    def test_bmod_int(a, b):
        c = mx.sym.broadcast_mod(mx.sym.cast(a, dtype='int32'), mx.sym.cast(b, dtype='int32'))
        check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data_int, mx_nd_func=mx.nd.modulo)
        check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
    def test_bpow(a, b):
        c = mx.sym.broadcast_power(a, b)
        check_binary_op_forward(c, lambda a, b: a ** b, gen_broadcast_data, mx_nd_func=mx.nd.power)
        check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
                                        g_out * a ** b * np.log(a)), gen_broadcast_data)
    def test_bequal(a, b):
        c = mx.sym.broadcast_equal(a, b)
        # Integer data so exact equality actually occurs with useful frequency.
        check_binary_op_forward(c, lambda a, b: (a == b).astype(a.dtype), gen_broadcast_data_int,
                                mx_nd_func=mx.nd.equal)
        check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
    def test_bmax(a, b):
        c = mx.sym.broadcast_maximum(a, b)
        check_binary_op_forward(c, lambda x, y: np.maximum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.maximum)
        # pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
        data = gen_broadcast_data(idx=200)
        check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
    def test_bmin(a, b):
        c = mx.sym.broadcast_minimum(a, b)
        check_binary_op_forward(c, lambda x, y: np.minimum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.minimum)
        # pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
        data = gen_broadcast_data(idx=200)
        check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
    def test_band(a, b):
        c = mx.sym.broadcast_logical_and(a, b)
        check_binary_op_forward(c, lambda x, y: np.logical_and(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_and)
        # pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
        data = gen_broadcast_data(idx=200)
        check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
    def test_bor(a, b):
        c = mx.sym.broadcast_logical_or(a, b)
        check_binary_op_forward(c, lambda x, y: np.logical_or(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_or)
        # pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
        data = gen_broadcast_data(idx=200)
        check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
    def test_bxor(a, b):
        c = mx.sym.broadcast_logical_xor(a, b)
        check_binary_op_forward(c, lambda x, y: np.logical_xor(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_xor)
        # pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
        data = gen_broadcast_data(idx=200)
        check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
    test_bplus(a, b)
    test_bminus(a, b)
    test_bmul(a, b)
    test_bdiv(a, b)
    test_bmod(a, b)
    test_bmod_int(a, b)
    test_bpow(a, b)
    test_bequal(a, b)
    test_bmax(a, b)
    test_bmin(a, b)
    test_band(a, b)
    test_bor(a, b)
    test_bxor(a, b)
def test_run_convolution_dilated_impulse_response(dil=(1,1), kernel_shape=(3,3), verbose=False):
    """Check an N-D dilated convolution via its impulse response.

    Feeds a unit spike through a no-bias convolution with all-ones weights and
    verifies output/gradient mass, then perturbs a random kernel by its own
    gradient and checks the first-order change in the center output.
    `verbose` is currently unused.
    """
    dim = len(dil)
    assert(len(kernel_shape) == dim)
    # Input for spike response
    data_size = 33
    data_shape = (1, 1) + (data_size,) * dim
    center = (0,0) + (data_size // 2,) * dim
    spike_imgs = np.zeros(shape=data_shape, dtype=np.float32)
    spike_imgs[center] = 1.0
    spike_img = mx.nd.array(spike_imgs)
    spike_img2 = mx.nd.array(spike_imgs)
    kernel_weights = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
    kernel_weights2 = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
    kernel = mx.symbol.Variable('kernel')
    in_img = mx.symbol.Variable('input')
    net = mx.symbol.Convolution(in_img, num_filter=1,kernel=kernel_shape, dilate=dil, no_bias="true", name='test_convolution')
    net.list_arguments()
    be = net._bind(default_context(), args={ 'input' : spike_img, 'test_convolution_weight' : kernel_weights},
                   args_grad={'input' : spike_img2, 'test_convolution_weight' : kernel_weights2 } )
    be.forward(True)
    out_o = be.outputs[0].asnumpy()
    ndo = be.outputs[0]
    # Backprop a spike at the center of the output.
    out_grads = np.zeros(shape=be.outputs[0].shape, dtype=np.float32)
    out_grads[center] = 1.0
    out_grad = mx.nd.array(out_grads)
    be.backward([out_grad])
    vgrad = be.grad_arrays[0].asnumpy()
    out = out_o.reshape(out_o.shape[2:])
    nz_loc = np.nonzero(out)
    # Spike input + all-ones kernel: total output/gradient mass equals the
    # number of kernel elements.
    assert_allclose(np.sum(out),np.prod(kernel_shape),atol=1e-5)
    assert_allclose(np.sum(vgrad),np.prod(kernel_shape),atol=1e-5)
    # Now check whether the input gradient was computed correctly
    input_grad = mx.nd.array(vgrad)
    be = net._bind(default_context(), args={ 'input' : input_grad, 'test_convolution_weight' : kernel_weights})
    be.forward(True)
    out_o = be.outputs[0].asnumpy()
    assert_allclose(out_o[center],np.prod(kernel_shape),atol=1e-5)
    rnd_kernel_s = np.random.uniform(low=0.0, high=1.0, size=tuple([1,1]+list(kernel_shape))).astype(np.float32)
    impulse_error = mx.nd.array(out_o/np.sum(out_o)) # This should be 1.0 at [0,0,16,16]
    rnd_kernel = mx.nd.array(rnd_kernel_s)
    rnd_kernel2 = mx.nd.array(rnd_kernel_s)
    white_in = mx.nd.ones(shape=data_shape)
    white_in2 = mx.nd.ones(shape=data_shape)
    be = net._bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : rnd_kernel},
                   args_grad={'input' : white_in2, 'test_convolution_weight' : rnd_kernel2 } )
    be.forward(True)
    be.backward([impulse_error])
    out_orig = be.outputs[0].asnumpy()
    kernel_gradient = be.grad_arrays[1].asnumpy()
    # Nudge the kernel by its own gradient and re-run the forward pass.
    dkernel = mx.nd.array(rnd_kernel_s + kernel_gradient)
    be = net._bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : dkernel})
    be.forward(True)
    out = be.outputs[0].asnumpy()
    # Now do a simple check of the kernel gradient
    # NOTE(review): the left-hand side can be negative, so this only bounds
    # the difference from above — presumably abs(...) < 0.001 was intended;
    # confirm before tightening.
    assert(out[center] - np.sum(kernel_gradient) - out_orig[center] < 0.001)
def test_convolution_dilated_impulse_response():
    """Exercise dilated-convolution impulse responses over 1D, 2D and 3D configs."""
    cases = [
        # 1D
        ([(1,), (2,), (3,)],
         [(1,), (2,), (3,), (4,)]),
        # 2D
        ([(1, 1), (2, 2), (3, 3)],
         [(3, 3), (4, 4), (2, 3), (3, 2), (1, 1)]),
        # 3D
        ([(1, 1, 1), (2, 2, 2), (3, 3, 3)],
         [(3, 3, 3), (4, 4, 4), (2, 3, 4), (3, 2, 4), (1, 1, 1)]),
    ]
    for dilations, kernels in cases:
        for dil in dilations:
            for ks in kernels:
                test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
@pytest.mark.serial
@pytest.mark.parametrize('src_shape,shape_args,reverse,dst_shape', [
    ((2, 3, 5, 5), (0, -1), False, (2, 75)),
    ((2, 3, 5, 5), (0, 0, -1), False, (2, 3, 25)),
    ((5, 3, 4, 5), (0, -1, 0), False, (5, 15, 4)),
    ((2, 3, 5, 4), (-1, 0, 0), False, (8, 3, 5)),
    ((2, 3, 5, 5), (0, 0, 0, 0), False, (2, 3, 5, 5)),
    ((2, 4, 5, 3), (-1, 2, 2, 1), False, (30, 2, 2, 1)),
    ((2, 3, 5, 6), (-2,), False, (2, 3, 5, 6)),
    ((2, 3, 5, 6), (6, 1, -2), False, (6, 1, 5, 6)),
    ((2, 3, 5, 6), (-3, -3), False, (6, 30)),
    ((2, 3, 5, 6), (-3, -1), False, (6, 30)),
    ((64,), (-4, 16, 4), False, (16, 4)),
    ((64,), (-4, 16, -1), False, (16, 4)),
    ((64, 1, 2, 3), (-4, 16, -1, -2), False, (16, 4, 1, 2, 3)),
    ((2, 3, 5, 5), (0, -1), True, (5, 30)),
    ((2, 3, 5, 5), (0, 0, -1), True, (3, 5, 10)),
    ((5, 3, 4, 5), (0, -1, 0), True, (3, 20, 5)),
    ((2, 3, 5, 4), (-1, 0, 0), True, (6, 5, 4)),
    ((2, 3, 4, 5), (3, -1, 0), True, (3, 8, 5)),
    ((2, 3, 5, 5), (5, 3, 0, -1), True, (5, 3, 5, 2)),
    ((2, 3, 5, 5), (0, 0, 0, 0), True, (2, 3, 5, 5)),
    ((2, 3, 5, 6), (-2,), True, (2, 3, 5, 6)),
    ((2, 3, 5, 6), (-2, 1, 30), True, (2, 3, 1, 30)),
    ((2, 3, 5, 6), (-3, -3), True, (6, 30)),
    ((64,), (16, 4, -4), True, (16, 4)),
    ((64,), (16, -1, -4), True, (16, 4)),
    ((1, 2, 3, 64), (-2, -1, 16, -4), True, (1, 2, 3, 4, 16))
])
def test_reshape_new(src_shape, shape_args, reverse, dst_shape):
    """Reshape with the new shape-code syntax (0/-1/-2/-3/-4, `reverse`).

    Checks inferred shape (after a JSON round-trip), forward value, backward
    gradient, and finally shape inference with one source dim held out as 0.
    """
    net = mx.sym.Variable("data")
    net = mx.sym.Reshape(net, shape=shape_args, reverse=reverse)
    # Round-trip through JSON to make sure the attributes serialize.
    js = net.tojson()
    net = mx.sym.fromjson(js)
    _, output_shape, __ = net.infer_shape(data=src_shape)
    assert output_shape[0] == dst_shape, \
        'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
        'Output Shape = %s' %(str(src_shape), str(shape_args), str(reverse),
                              str(dst_shape), str(output_shape[0]))
    dat_npy = np.random.rand(*src_shape)
    grad_npy = np.random.rand(*dst_shape)
    exe = net._simple_bind(default_context(), data=src_shape)
    exe.arg_dict['data'][:] = dat_npy
    exe.forward(is_train=True)
    # Forward is a pure reshape: values must match numpy's reshape exactly.
    assert np.square(exe.outputs[0].asnumpy() - dat_npy.reshape(dst_shape)).mean() < 1E-7, \
        'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
        %(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
    exe.backward(out_grads=mx.nd.array(grad_npy))
    # Backward is the inverse reshape of the head gradient.
    assert np.square(exe.grad_dict['data'].asnumpy() - grad_npy.reshape(src_shape)).mean() < 1E-7, \
        'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
        %(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
    for i in range(len(src_shape)):
        # Zero out one source dim at a time and make sure both the output
        # shape and the original input shape can still be inferred.
        holdout_src_shape = list(src_shape)
        holdout_src_shape[i] = 0
        holdout_src_shape = tuple(holdout_src_shape)
        net = mx.sym.Variable('data')
        net = mx.sym.elemwise_add(net.reshape(shape_args, reverse=reverse), mx.sym.ones(shape=dst_shape))
        input_shape, output_shape, __ = net.infer_shape(data=holdout_src_shape)
        assert output_shape[0] == dst_shape, \
            'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
            'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
                                  str(dst_shape), str(output_shape[0]))
        assert input_shape[0] == src_shape, \
            'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
            'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
                                  str(dst_shape), str(output_shape[0]))
def test_reshape_old():
    """Legacy Reshape (target_shape) shape inference, plus Flatten round-trip."""
    sym = mx.sym.Variable("data")
    sym = mx.sym.Reshape(sym, target_shape=(2, 0))
    # Round-trip through JSON to make sure target_shape serializes.
    sym = mx.sym.fromjson(sym.tojson())
    _, out_shapes, _ = sym.infer_shape(data=(2, 3, 5, 5))
    assert(out_shapes[0] == (2, 75))
    # Test for Flatten: forward collapses trailing dims, backward restores them.
    data = mx.sym.Variable("data")
    flat = mx.sym.Flatten(data)
    exe = flat._simple_bind(ctx=default_context(), data=(5, 4, 3, 7))
    x_np = np.random.normal(size=(5, 4, 3, 7))
    head_grad_np = np.random.normal(size=(5, 4 * 3 * 7))
    fwd_out = exe.forward(is_train=True, data=x_np)[0].asnumpy()
    assert_allclose(fwd_out, x_np.reshape((5, 4 * 3 * 7)))
    exe.backward(out_grads=[mx.nd.array(head_grad_np, ctx=default_context())])
    assert_allclose(exe.grad_arrays[0].asnumpy(), head_grad_np.reshape((5, 4, 3, 7)))
def test_reshape_like():
    """reshape_like with axis ranges (lhs_begin/lhs_end/rhs_begin/rhs_end) and legacy form."""
    def test_reshape_like_new(lhs_shape, rhs_shape, lbeg, lend, rbeg, rend, dst_shape):
        # One case: reshape lhs's [lbeg, lend) axes to match rhs's [rbeg, rend)
        # axes, then check inferred shape, forward value and backward gradient.
        lhs = mx.sym.Variable("lhs")
        rhs = mx.sym.Variable("rhs")
        net = mx.sym.reshape_like(lhs, rhs, lhs_begin=lbeg, lhs_end=lend, rhs_begin=rbeg, rhs_end=rend)
        # Round-trip through JSON to make sure the attributes serialize.
        js = net.tojson()
        net = mx.sym.fromjson(js)
        _, output_shape, __ = net.infer_shape(lhs=lhs_shape, rhs=rhs_shape)
        assert output_shape[0] == dst_shape, \
            'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
            %(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
        lhs_npy = np.random.rand(*lhs_shape)
        rhs_npy = np.random.rand(*rhs_shape)
        grad_npy = np.random.rand(*dst_shape)
        exe = net._simple_bind(default_context(), lhs=lhs_shape, rhs=rhs_shape)
        exe.arg_dict['lhs'][:] = lhs_npy
        exe.arg_dict['rhs'][:] = rhs_npy
        exe.forward(is_train=True)
        # Forward is a pure reshape of lhs; rhs only supplies the shape.
        assert np.square(exe.outputs[0].asnumpy() - lhs_npy.reshape(dst_shape)).mean() < 1E-7, \
            'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
            %(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
        exe.backward(out_grads=mx.nd.array(grad_npy))
        assert np.square(exe.grad_dict['lhs'].asnumpy() - grad_npy.reshape(lhs_shape)).mean() < 1E-7, \
            'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
            %(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
    # Test new api (Using shape)
    # Each row: lhs_shape, rhs_shape, lhs_begin, lhs_end, rhs_begin, rhs_end, expected dst_shape.
    test_cases = [
        [(30,), (15,2,4), 0, None, 0, 2, (15,2)],
        [(30,), (15,2,4), None, 1, None, 2, (15,2)],
        [(30,7), (15,2,4), 0, 1, 0, 2, (15,2,7)],
        [(3,5), (1,15,4), 0, 2, 1, 2, (15,)],
        [(3,5), (1,15,4), 0, None, 1, -1, (15,)],
        [(30,12), (4,2,2,3), -1, None, 1, None, (30,2,2,3)],
        [(1,1,7,3,1,1), (81,1,1,21), 1, -1, 1, None, (1,1,1,21,1)]
    ]
    # for test_case in test_cases:
    for test_case in test_cases:
        test_reshape_like_new(*test_case)
    # Test old api
    lhs = mx.sym.Variable("lhs")
    rhs = mx.sym.Variable("rhs")
    net = mx.sym.reshape_like(lhs, rhs)
    js = net.tojson()
    net = mx.sym.fromjson(js)
    _, output_shape, __ = net.infer_shape(lhs=(40, 30), rhs=(30,20,2))
    assert(output_shape[0] == (30,20,2))
def test_reduce():
    """Forward/backward checks for reduction ops (sum, mean, prod, nansum,
    nanprod, max, min, norm) against numpy baselines over random shapes,
    axes, keepdims, and (where supported) exclude / axis=None variants.
    """
    sample_num = 500
    def test_reduce_inner(numpy_reduce_func, numpy_reduce_grad_func, mx_reduce_sym, nan_prob=0,
                          test_exclude=True, test_none_axis=False):
        for i in range(sample_num):
            # Generate random data that has ndim between 1-7 and all the shape dims between 1-5
            # Insert a NaN with probability equal to nan_prob
            ndim = np.random.randint(1, 6)
            shape = np.random.randint(1, 6, size=(ndim,))
            # NOTE(review): axis_num is never used below — dead code?
            axis_num = np.random.randint(0, ndim, size=1)
            axis_flags = np.random.randint(0, 2, size=ndim)
            if test_exclude:
                exclude = np.random.randint(0, 2)
            else:
                exclude = False
            # Collect the axes whose flag is set; normalize to None / int / tuple.
            axes = []
            for (axis, flag) in enumerate(axis_flags):
                if flag:
                    axes.append(axis)
            if 0 == len(axes):
                axes = None
            elif 1 == len(axes):
                axes = axes[0]
            else:
                axes = tuple(axes)
            keepdims = np.random.randint(0, 2)
            a = mx.symbol.Variable('a')
            if axes is None:
                if test_none_axis:
                    b = mx_reduce_sym(a, keepdims=keepdims, axis=axes)
                else:
                    b = mx_reduce_sym(a, keepdims=keepdims)
            elif exclude and isinstance(axes, tuple) and len(axes) < ndim:
                # exclude=True reduces over the complement of the given axes.
                naxes = [i for i in range(ndim) if i not in axes]
                b = mx_reduce_sym(a, axis=naxes, keepdims=keepdims, exclude=True)
            else:
                b = mx_reduce_sym(a, axis=axes, keepdims=keepdims)
            dat_npy = np.random.rand(*shape)
            # Test with both negative and positive values (randomly). Avoid having both in the same
            # test, which can be problematic for error checking due to near-zero values.
            if np.random.rand() > 0.5:
                dat_npy = -dat_npy
            if nan_prob > 0:
                dat_npy[np.random.rand(*shape) < nan_prob] = np.nan
            sum_groundtruth = np.array(numpy_reduce_func(dat_npy, axis=axes, keepdims=keepdims))
            if sum_groundtruth.shape == ():
                # Scalar result: promote to a 1-element array for comparison.
                sum_groundtruth = np.array([sum_groundtruth])
            grad_nd = mx.nd.empty(shape)
            outgrad_npy = np.array(np.random.rand(*sum_groundtruth.shape))
            # Shape of the output with keepdims=True, used to re-expand grads.
            keepdim_shape = np_reduce(dat_npy, axes, 1, np.sum).shape
            grad_groundtruth = numpy_reduce_grad_func(outgrad=outgrad_npy, data=dat_npy,
                                                      outdata=sum_groundtruth,
                                                      axis=axes, keepdims=keepdims,
                                                      keepdim_shape=keepdim_shape)
            net = b._bind(default_context(), args={'a': mx.nd.array(dat_npy)},
                          args_grad={'a': grad_nd})
            net.forward(is_train=True)
            # check forward
            assert_almost_equal_ignore_nan(net.outputs[0].asnumpy(), sum_groundtruth, rtol=1e-4, atol=1e-4)
            net.backward(out_grads=mx.nd.array(outgrad_npy))
            bc_grad_groundtruth = np.broadcast_to(grad_groundtruth, grad_nd.shape)
            # check backward
            assert_almost_equal_ignore_nan(grad_nd.asnumpy(), bc_grad_groundtruth, rtol=1e-4, atol=1e-4)
    test_none_axis = [True, False]
    for test_none in test_none_axis:
        test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.sum),
                          lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
                            outgrad.reshape(keepdim_shape),
                          mx.symbol.sum, test_none_axis=test_none)
        test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.mean),
                          lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
                            outgrad.reshape(keepdim_shape)/(data.size/outdata.size),
                          mx.symbol.mean, test_none_axis=test_none)
        test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.prod),
                          lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
                            outgrad.reshape(keepdim_shape) * (outdata.reshape(keepdim_shape) / data),
                          mx.symbol.prod, test_none_axis=test_none)
        test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nansum),
                          lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
                            np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape)),
                          mx.symbol.nansum, 0.3, test_none_axis=test_none)
        test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nanprod),
                          lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
                            np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape) *
                                     (outdata.reshape(keepdim_shape) / data)),
                          mx.symbol.nanprod, 0.3, test_none_axis=test_none)
        # grad of max and min are sensitive to the precision of the calculation.
        # Force numpy to match mxnet's float32.
        test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.max),
                          lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
                            outgrad.reshape(keepdim_shape) *
                            (np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
                          mx.symbol.max)
        test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.min),
                          lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
                            outgrad.reshape(keepdim_shape) *
                            (np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
                          mx.symbol.min)
        test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.linalg.norm),
                          lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
                            outgrad.reshape(keepdim_shape) * (data / outdata.reshape(keepdim_shape)),
                          mx.symbol.norm, test_exclude=False, test_none_axis=test_none)
def test_broadcast():
    """Forward/backward checks for broadcast_axis / broadcast_to / broadcast_like.

    For each trial, a random subset of axes is collapsed to size 1 in the
    input; broadcasting back to the target shape must reproduce the input
    values, and the backward pass must sum-reduce the head gradient over the
    broadcast axes.
    """
    sample_num = 200
    for _ in range(sample_num):
        # Generate random data that has ndim between 1-7 and all the shape dims between 1-5
        ndim = np.random.randint(1, 6)
        target_shape = np.random.randint(1, 6, size=(ndim,))
        axis = tuple(set(np.random.randint(0, ndim, np.random.randint(1, ndim + 1))))
        shape = target_shape.copy()
        # Sizes the collapsed axes must be broadcast back to.
        size = tuple([shape[ele] for ele in axis])
        for ele in axis:
            shape[ele] = 1
        # For broadcast_to with 0-entries: zero out the first non-broadcast
        # axis, which means "keep the input's size on this axis".
        target_shape_with_zero = list(target_shape)
        for idx in range(len(target_shape_with_zero)):
            if idx not in axis:
                target_shape_with_zero[idx] = 0
                break
        a = mx.symbol.Variable('a')
        sym_bcast_axis = mx.symbol.broadcast_axis(a, axis=axis, size=size)
        sym_bcast_to = mx.symbol.broadcast_to(a, shape=tuple(target_shape))
        sym_bcast_to_with_zero = mx.symbol.broadcast_to(a, shape=tuple(target_shape_with_zero))
        sym_bcast_like = mx.symbol.broadcast_like(a, sym_bcast_to)
        def test_broadcasting_ele(sym_bcast):
            # Broadcasting should alter the shape but not the values; backward
            # sums the head gradient over the broadcast axes.
            dat_npy = np.random.rand(*shape)
            groundtruth = dat_npy
            grad_nd = mx.nd.empty(shape)
            outgrad_npy = np.random.rand(*target_shape)
            grad_groundtruth = np_reduce(outgrad_npy, axis=axis, keepdims=True,
                                         numpy_reduce_func=np.sum)
            net = sym_bcast._bind(default_context(), args={'a': mx.nd.array(dat_npy)},
                                  args_grad={'a': grad_nd})
            net.forward(is_train=True)
            assert (net.outputs[0].shape == target_shape).all()
            assert_almost_equal(net.outputs[0], groundtruth, rtol=1e-4)
            net.backward(out_grads=mx.nd.array(outgrad_npy))
            assert_almost_equal(grad_nd, grad_groundtruth, rtol=1e-4)
        test_broadcasting_ele(sym_bcast_axis)
        test_broadcasting_ele(sym_bcast_to)
        test_broadcasting_ele(sym_bcast_to_with_zero)
        test_broadcasting_ele(sym_bcast_like)
def test_transpose():
    """transpose with explicit axes and with the default (full reversal) matches numpy."""
    for rank in range(1, 10):
        for _ in range(5):
            shape = list(np.random.randint(1, 5, size=rank))
            perm = list(range(rank))
            random.shuffle(perm)
            perm = tuple(perm)
            arr = mx.nd.array(np.random.normal(size=shape))
            permuted = mx.nd.transpose(arr, axes=perm)
            assert_allclose(np.transpose(arr.asnumpy(), axes=perm), permuted.asnumpy())
            # No axes argument: reverse all dimensions.
            reversed_all = mx.nd.transpose(arr)
            assert_allclose(np.transpose(arr.asnumpy()), reversed_all.asnumpy())
@pytest.mark.serial
def test_pseudo2dtranspose():
    """Transpose patterns reducible to a 2D transpose, across several dtypes."""
    def _two_distinct_ints(lo, hi):
        # Two distinct ints in [lo, hi), returned in ascending order.
        first = np.random.randint(lo, hi)
        second = np.random.randint(lo, hi - 1)
        if second >= first:
            second += 1
        return tuple(np.sort([first, second]))
    def _pseudo2d_axes(ndim):
        # Rotate a contiguous middle segment of the axes to the end, which is
        # equivalent to a 2D transpose of two flattened groups.
        axes = list(range(ndim))
        cut_lo, cut_hi = _two_distinct_ints(0, ndim)
        return tuple(axes[:cut_lo] + axes[cut_hi:] + axes[cut_lo:cut_hi])
    for ndim in range(2, 7):
        for dtype in ['int8', 'half', 'int32', 'int64']:
            for _ in range(5):
                shape = list(np.random.randint(5, 20, size=ndim))
                axes = _pseudo2d_axes(ndim)
                src = mx.nd.array(np.random.normal(size=shape), dtype=dtype)
                out = mx.nd.transpose(src, axes=axes)
                assert_allclose(np.transpose(src.asnumpy(), axes=axes), out.asnumpy())
@pytest.mark.serial
def test_big_transpose():
    """Round-trip a large 5D uint8 tensor through NDHWC <-> NCDHW transposes."""
    dims = ([1]
            + list(np.random.randint(132, 160, size=1))
            + list(np.random.randint(256, 320, size=2))
            + [10])
    x_np = np.random.normal(size=dims).astype('uint8')
    x = mx.nd.array(x_np, dtype='uint8')
    fwd_axes = (0, 4, 1, 2, 3)
    y = mx.nd.transpose(x, axes=fwd_axes)
    assert_allclose(np.transpose(x_np, axes=fwd_axes), y.asnumpy().astype('uint8'))
    # The inverse permutation must restore the original tensor exactly.
    inv_axes = (0, 2, 3, 4, 1)
    z = mx.nd.transpose(y, axes=inv_axes)
    assert_allclose(x_np, z.asnumpy().astype('uint8'))
@pytest.mark.serial
def test_larger_transpose():
    """2D transpose on a non-square 50x51 matrix matches numpy."""
    src = mx.nd.random.normal(shape=(50, 51))
    transposed = mx.nd.transpose(src)
    assert_allclose(np.transpose(src.asnumpy()), transposed.asnumpy())
def test_expand_dims():
    """expand_dims agrees with numpy for every valid (ndim, axis) pair, including negatives."""
    for rank in range(1, 6):
        for axis in range(-rank + 1, rank):
            np_arr = np.random.normal(size=list(np.random.randint(1, 10, size=rank)))
            nd_arr = mx.nd.array(np_arr)
            np_expanded = np.expand_dims(np_arr, axis=axis)
            nd_expanded = mx.nd.expand_dims(nd_arr, axis=axis)
            assert_allclose(np_expanded, nd_expanded.asnumpy())
            assert_allclose(np_expanded.shape, nd_expanded.shape)
def test_crop():
    """crop (slice) with random begin/end per axis, including None and
    negative indices; forward matches numpy slicing and the gradient is
    checked numerically.
    """
    for ndim in range(1, 6):
        for _ in range(5):
            dims = []
            begin = []
            end = []
            idx = []
            for _ in range(ndim):
                d = random.randint(1, 5)
                b = random.randint(0, d-1)
                e = random.randint(b+1, d)
                # Randomly rewrite b/e into the equivalent None or negative
                # forms to exercise all index spellings.
                if b == 0 and random.randint(0, 1):
                    b = None
                elif b != 0 and random.randint(0, 1):
                    b -= d
                if e == d and random.randint(0, 1):
                    e = None
                elif e != d and random.randint(0, 1):
                    e -= d
                dims.append(d)
                begin.append(b)
                end.append(e)
                idx.append(slice(b, e))
            x = mx.nd.array(np.random.normal(size=dims))
            y = mx.nd.crop(x, begin=tuple(begin), end=tuple(end))
            assert_allclose(x.asnumpy()[idx], y.asnumpy())
            vx = mx.sym.Variable('x')
            vy = mx.sym.crop(vx, begin=tuple(begin), end=tuple(end))
            check_numeric_gradient(vy, [x.asnumpy()])
def test_slice_axis():
    """slice_axis forward/backward for one random (begin, end) per axis,
    including None / negative spellings, plus grad_req='add' accumulation.
    """
    for ndim in range(1, 6):
        shape = np.random.randint(1, 11, size=(ndim,))
        for t in range(ndim):
            d = shape[t]
            b = random.randint(0, d-1)
            e = random.randint(b+1, d)
            # Randomly rewrite b/e into equivalent None / negative forms.
            if np.random.rand() > 0.6:
                e = None
            else:
                if e < d and np.random.rand() > 0.5:
                    e = e - d
                if np.random.rand() > 0.5:
                    b = b - d
            # Full-range slices everywhere except the sliced axis t.
            idx = []
            for i in range(ndim):
                idx.append(slice(0, shape[i]))
            idx[t] = slice(b, e)
            X = mx.symbol.Variable('X')
            x = mx.nd.array(np.random.normal(size=shape))
            Y = mx.symbol.slice_axis(data=X, axis=t, begin=b, end=e)
            xgrad = mx.nd.empty(x.shape)
            exec1 = Y._bind(default_context(), args = [x], args_grad = {'X': xgrad})
            exec1.forward(is_train=True)
            y = exec1.outputs[0]
            assert_allclose(x.asnumpy()[idx], y.asnumpy())
            exec1.backward([y])
            # Backward scatters the head grad into the sliced region; the
            # rest of the input gradient is zero.
            xx = x.asnumpy()
            xx[:] = 0.0
            xx[idx] = x.asnumpy()[idx]
            assert_allclose(xx, xgrad.asnumpy())
            # grad_req='add': the gradient must accumulate onto existing values.
            x_grad_npy = np.random.normal(size=x.shape)
            xgrad = mx.nd.array(x_grad_npy)
            exec2 = Y._bind(default_context(), args=[x], args_grad={'X': xgrad}, grad_req="add")
            exec2.forward(is_train=True)
            exec2.backward([exec2.outputs[0]])
            xx = np.zeros(shape=x.shape, dtype=np.float32)
            xx[idx] = x.asnumpy()[idx]
            assert_allclose(xx + x_grad_npy, xgrad.asnumpy(), atol=1E-5)
def test_slice_like():
    """Test mx.symbol.slice_like forward/backward: the output must equal the
    numpy slice of `data` cropped to `shape_like`'s extents on the chosen
    axes, and the shape_like input must receive zero gradient."""
    for ndim in range(1, 6):
        from_shape = np.random.randint(1, 11, size=(ndim,))
        shape = [s + np.random.randint(0, 3) for s in from_shape]
        for t in range(ndim):
            if t > 0:
                axes = np.random.randint(0, ndim, size=t).tolist()
            else:
                axes = []
            idx = []
            for i in range(ndim):
                idx.append(slice(0, shape[i]))
                # an empty axes list means "crop every axis"
                if i in axes or not axes:
                    idx[i] = slice(0, from_shape[i])
            # Tuple indexing: indexing an ndarray with a *list* of slices
            # was deprecated and removed in modern numpy.
            idx = tuple(idx)
            if axes:
                pos = np.random.randint(0, t)
                if axes[pos] > 0:
                    axes[pos] -= ndim # negative index
            X = mx.symbol.Variable('X')
            X_1 = mx.symbol.Variable('X1')
            x = mx.nd.array(np.random.normal(size=shape))
            x1 = mx.nd.array(np.random.normal(size=from_shape))
            Y = mx.symbol.slice_like(data=X, shape_like=X_1, axes=axes)
            xgrad = mx.nd.empty(x.shape)
            xgrad1 = mx.nd.empty(x1.shape)
            exec1 = Y._bind(default_context(), args = [x, x1],
                            args_grad = {'X': xgrad, 'X1': xgrad1})
            exec1.forward(is_train=True)
            y = exec1.outputs[0]
            assert_allclose(x.asnumpy()[idx], y.asnumpy())
            exec1.backward([y])
            xx = x.asnumpy()
            xx[:] = 0.0
            xx[idx] = x.asnumpy()[idx]
            assert_allclose(xx, xgrad.asnumpy())
            # shape_like only provides a shape; it contributes no gradient.
            assert_allclose(xgrad1.asnumpy(), mx.nd.zeros_like(xgrad1).asnumpy())
def test_slice_like_different_types():
    """slice_like must work when data and shape_like have different dtypes."""
    src = mx.nd.array([[ 1.,  2.,  3.,  4.],
                       [ 5.,  6.,  7.,  8.],
                       [ 9., 10., 11., 12.]])
    ref = mx.nd.array([[ 0.,  0.,  0.],
                       [ 0.,  0.,  0.]]).astype('int32')
    out = mx.nd.slice_like(src, ref)
    assert_allclose(out.asnumpy(), [[1, 2, 3], [5, 6, 7]])
def test_reshape_like_different_types():
    """reshape_like must work when data and shape_like have different dtypes."""
    data = mx.nd.zeros((2, 3))
    ref = mx.nd.array([[1, 2], [3, 4], [5, 6]]).astype('int32')
    out = mx.nd.reshape_like(data, ref)
    assert_allclose(out.asnumpy(), [[0, 0], [0, 0], [0, 0]])
def test_broadcast_like_different_types():
    """broadcast_like must broadcast across dtypes and keep data's dtype."""
    data = mx.nd.zeros((2, 1))
    ref = mx.nd.ones((2, 2)).astype('int32')
    out = mx.nd.broadcast_like(data, ref)
    assert_allclose(out.asnumpy(), [[0, 0], [0, 0]])
    # the output must follow data's dtype, not shape_like's
    assert data.dtype == out.dtype
def test_flip():
    """Test mx.nd.flip against numpy reverse-slice indexing on a random axis."""
    for ndim in range(1, 6):
        for _ in range(5):
            dims = [random.randint(1, 10) for i in range(ndim)]
            axis = random.randint(0, ndim-1)
            # Build a tuple index reversing only the chosen axis; indexing
            # with a *list* of slices was deprecated and removed in numpy.
            idx = tuple(slice(None, None, -1) if i == axis else slice(None, None)
                        for i in range(ndim))
            x = mx.nd.array(np.random.normal(size=dims))
            y = mx.nd.flip(x, axis=axis)
            assert_allclose(x.asnumpy()[idx], y.asnumpy())
def test_stn():
    # End-to-end check of SpatialTransformer. The loc-net conv/fc weights are
    # zeroed and the fc bias set to the affine params [0.5,0,0,0,0.5,0], so the
    # transform scales by 0.5 around the center; the forward assert below then
    # compares the STN output against a plain numpy center-crop of the input.
    import sys
    np.set_printoptions(threshold=sys.maxsize)
    num_filter = 2  # conv of loc net
    kernel = (3, 3)  # conv of loc net
    num_hidden = 6  # fc of loc net
    for n in [1, 2, 3, 4]:
        for c in [1, 2, 3, 4]:
            for h in [5, 9, 13, 17]:  # for convenience test, this third and forth input dim should be 4x + 1
                for w in [5, 9, 13, 17]:
                    data_shape = (n, c, h, w)
                    # target is half the spatial size (rounded up)
                    target_shape = (int((data_shape[2]+1)/2), int((data_shape[3]+1)/2))
                    data = mx.sym.Variable(name="data")
                    loc = mx.sym.Convolution(data=data, kernel=kernel, pad=(1, 1), num_filter=num_filter, name="loc_conv")
                    loc = mx.sym.Flatten(data=loc)
                    loc = mx.sym.FullyConnected(data=loc, num_hidden=num_hidden, name="loc_fc")
                    stn = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=target_shape,
                                                    transform_type="affine", sampler_type="bilinear")
                    arg_names = stn.list_arguments()
                    arg_shapes, out_shapes, _ = stn.infer_shape(data=data_shape)
                    # check shape
                    assert out_shapes[0] == (data_shape[0], data_shape[1], target_shape[0], target_shape[1])
                    dev = default_context()
                    #dev = mx.gpu(0)
                    args = {}
                    args['data'] = mx.random.normal(0, 1, data_shape, ctx=mx.cpu()).copyto(dev)
                    # zero weights + the bias below make the loc-net output a
                    # fixed 0.5-scale affine transform for every input
                    args['loc_conv_weight'] = mx.nd.zeros((num_filter, data_shape[1], kernel[0], kernel[1]), ctx=dev)
                    args['loc_conv_bias'] = mx.nd.zeros((num_filter,), ctx=dev)
                    args['loc_fc_weight'] = mx.nd.zeros((6, num_filter*data_shape[2]*data_shape[3]), ctx=dev)
                    args['loc_fc_bias'] = mx.nd.array([0.5, 0, 0, 0, 0.5, 0], ctx=dev)
                    grad_grad = [mx.nd.zeros(shape, ctx=dev) for shape in arg_shapes]
                    exe = stn._bind(dev, args=args, args_grad=grad_grad)
                    exe.forward(is_train=True)
                    out = exe.outputs[0]
                    # check forward: output equals the central crop of the input
                    assert_almost_equal(out, args['data'].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
                    out_grad = mx.nd.ones(out.shape, ctx=dev)
                    exe.backward([out_grad])
                    # check backward: the data gradient's central crop equals out_grad
                    assert_almost_equal(out_grad, grad_grad[0].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
def test_stn_valid_sampling():
    """Regression test: a near-identity affine SpatialTransformer must run
    forward and backward cleanly (no invalid source sampling)."""
    target_shape = (28, 28)
    src_shape = (42, 42)
    data = mx.sym.Variable(name="data")
    loc = mx.sym.Variable(name="loc")
    data_array = np.zeros((1, 1) + src_shape)
    # Have an ever so slight rotation.
    loc_array = np.array(
        [[9.03887e-05, 1.00015, 0.00174931, 1.0003, 0.000311901,
          -0.000919065]])
    stn = mx.sym.SpatialTransformer(
        data=data,
        loc=loc,
        target_shape=target_shape,
        transform_type="affine",
        sampler_type="bilinear")
    grad_req = {name: 'write' for name in stn.list_arguments()}
    grads = {
        'data': mx.nd.array(np.zeros_like(data_array)),
        'loc': mx.nd.array(np.zeros_like(loc_array)),
    }
    executor = stn._bind(
        ctx=default_context(),
        args={'data': mx.nd.array(data_array),
              'loc': mx.nd.array(loc_array)},
        grad_req=grad_req,
        args_grad=grads)
    executor.forward(is_train=True)
    executor.backward(mx.nd.ones((1, 1) + target_shape))
def test_dot():
    """Check mx.sym.dot forward/backward against numpy.dot, then check the
    transpose_a/transpose_b variants with numeric gradients."""
    ctx = default_context()
    dtypes = ['float32', 'float64']
    ndims = [2]
    if ctx.device_type == 'gpu':
        dtypes += ['float16']
        ndims += [1]
    # Test normal dot.
    for ndim in ndims:
        for data_type in dtypes:
            tol = 1e-2 if data_type == 'float16' else 1e-3
            for m in range(1, 5):
                for k in range(1, 5):
                    if ndim == 1 and k != 1:
                        # Skip redundant combinations in the 1-D case: this
                        # guard was a no-op `pass`; `continue` implements the
                        # evident intent of skipping k != 1.
                        continue
                    for n in range(1, 5):
                        a_npy = np.random.normal(0, 1, (m, k)).astype(data_type)
                        b_npy = np.random.normal(0, 1, (k, n)).astype(data_type)
                        c_npy = np.empty((m, n), dtype=data_type)
                        ograd_npy = np.random.normal(0, 1, (m, n)).astype(data_type)
                        agrad_npy = np.empty((m, k), dtype=data_type)
                        bgrad_npy = np.empty((k, n), dtype=data_type)
                        # numpy reference result and gradients
                        c_npy[:, :] = np.dot(a_npy[:, :], b_npy[:, :])
                        bgrad_npy[:, :] = np.dot(a_npy[:, :].T, ograd_npy[:, :])
                        agrad_npy[:, :] = np.dot(ograd_npy[:, :], b_npy[:, :].T)
                        a = mx.sym.Variable('a', dtype=data_type)
                        b = mx.sym.Variable('b', dtype=data_type)
                        c = mx.sym.dot(a, b)
                        exe = c._simple_bind(ctx=ctx, a=a_npy.shape, b=b_npy.shape)
                        outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
                        assert_almost_equal(outputs[0], c_npy, rtol=tol, atol=tol)
                        exe.backward(out_grads=[mx.nd.array(ograd_npy, mx.cpu()).astype(data_type)])
                        assert_almost_equal(exe.grad_dict['a'], agrad_npy, rtol=tol, atol=tol)
                        assert_almost_equal(exe.grad_dict['b'], bgrad_npy, rtol=tol, atol=tol)
    # Test dot with transpose flag using gradient checker.
    def dot_sym(data_type):
        x = mx.sym.Variable('x', dtype=data_type)
        y = mx.sym.Variable('y', dtype=data_type)
        return mx.sym.dot(x, y)
    def dot_sym_xT(data_type):
        x = mx.sym.Variable('x', dtype=data_type)
        y = mx.sym.Variable('y', dtype=data_type)
        return mx.sym.dot(x, y, transpose_a=True)
    def dot_sym_yT(data_type):
        x = mx.sym.Variable('x', dtype=data_type)
        y = mx.sym.Variable('y', dtype=data_type)
        return mx.sym.dot(x, y, transpose_b=True)
    def dot_sym_xT_yT(data_type):
        x = mx.sym.Variable('x', dtype=data_type)
        y = mx.sym.Variable('y', dtype=data_type)
        return mx.sym.dot(x, y, transpose_a=True, transpose_b=True)
    for data_type in dtypes:
        for ashape, bshape in [((3, 4), (4, 5)), ((2, 3, 4), (4, 5, 6))]:
            m1_npy = np.random.uniform(-1, 1, ashape).astype(data_type)
            m2_npy = np.random.uniform(-1, 1, bshape).astype(data_type)
            check_numeric_gradient(dot_sym(data_type), [m1_npy, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
            check_numeric_gradient(dot_sym_xT(data_type), [m1_npy.T, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
            check_numeric_gradient(dot_sym_yT(data_type), [m1_npy, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
            check_numeric_gradient(dot_sym_xT_yT(data_type), [m1_npy.T, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
def test_batch_dot():
    # Checks mx.sym.batch_dot against per-batch numpy.dot: forward output,
    # backward gradients with grad_req='write', and gradient accumulation with
    # grad_req='add', for random transpose_a/transpose_b flags and all dtypes.
    ctx = default_context()
    dtypes = ['float32', 'float64']
    if ctx.device_type == 'gpu':
        dtypes += ['float16']
    for data_type in dtypes:
        for batch_size in range(1, 5):
            for m in range(1, 5):
                for k in range(1, 5):
                    for n in range(1, 5):
                        transpose_a = (np.random.rand() > 0.5)
                        transpose_b = (np.random.rand() > 0.5)
                        a_npy = np.random.normal(0, 1, (batch_size, m, k))
                        a_npy = a_npy.astype(data_type)
                        b_npy = np.random.normal(0, 1, (batch_size, k, n))
                        b_npy = b_npy.astype(data_type)
                        c_npy = np.empty((batch_size, m, n), dtype=data_type)
                        ograd_npy = np.random.normal(0, 1, (batch_size, m, n))
                        ograd_npy = ograd_npy.astype(data_type)
                        agrad_npy = np.empty((batch_size, m, k), dtype=data_type)
                        bgrad_npy = np.empty((batch_size, k, n), dtype=data_type)
                        a_init_grad_npy = np.random.normal(size=(batch_size, m, k))
                        a_init_grad_npy = a_init_grad_npy.astype(data_type)
                        b_init_grad_npy = np.random.normal(size=(batch_size, k, n))
                        b_init_grad_npy = b_init_grad_npy.astype(data_type)
                        # numpy reference: result and gradients, one batch slice at a time
                        for i in range(batch_size):
                            c_npy[i, :, :] = np.dot(a_npy[i, :, :], b_npy[i, :, :])
                            bgrad_npy[i, :, :] = np.dot(a_npy[i, :, :].T, ograd_npy[i, :, :])
                            agrad_npy[i, :, :] = np.dot(ograd_npy[i, :, :], b_npy[i, :, :].T)
                        a = mx.sym.Variable('a', dtype=data_type)
                        b = mx.sym.Variable('b', dtype=data_type)
                        c = mx.sym.batch_dot(a, b, transpose_a=transpose_a, transpose_b=transpose_b)
                        # transpose the reference inputs/gradients to match the flags
                        if transpose_a:
                            a_npy = np.transpose(a_npy, axes=(0, 2, 1))
                            agrad_npy = np.transpose(agrad_npy, axes=(0, 2, 1))
                            a_init_grad_npy = np.transpose(a_init_grad_npy, axes=(0, 2, 1))
                        if transpose_b:
                            b_npy = np.transpose(b_npy, axes=(0, 2, 1))
                            bgrad_npy = np.transpose(bgrad_npy, axes=(0, 2, 1))
                            b_init_grad_npy = np.transpose(b_init_grad_npy, axes=(0, 2, 1))
                        exe = c._simple_bind(ctx=ctx,
                            a=a_npy.shape, b=b_npy.shape, grad_req='write')
                        exe_add = c._simple_bind(ctx=ctx,
                            a=a_npy.shape, b=b_npy.shape, grad_req='add')
                        exe_add.grad_dict['a'][:] = a_init_grad_npy
                        exe_add.grad_dict['b'][:] = b_init_grad_npy
                        outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
                        assert_almost_equal(outputs[0], c_npy,
                                            rtol=1e-2 if data_type == 'float16' else 1e-3,
                                            atol=1e-2 if data_type == 'float16' else 1e-4)
                        exe.backward(out_grads=[mx.nd.array(ograd_npy, dtype=outputs[0].dtype, ctx=exe._ctx)])
                        assert_almost_equal(exe.grad_dict['a'], agrad_npy,
                                            rtol=1e-2 if data_type == 'float16' else 1e-3,
                                            atol=1e-2 if data_type == 'float16' else 1e-4)
                        assert_almost_equal(exe.grad_dict['b'], bgrad_npy,
                                            rtol=1e-2 if data_type == 'float16' else 1e-3,
                                            atol=1e-2 if data_type == 'float16' else 1e-4)
                        # grad_req='add' must accumulate onto the preset init gradients
                        exe_add.forward(is_train=True, a=a_npy, b=b_npy)
                        exe_add.backward(out_grads=[mx.nd.array(ograd_npy, dtype=exe_add.outputs[0].dtype, ctx=exe._ctx)])
                        assert_almost_equal(exe_add.grad_dict['a'],
                                            agrad_npy + a_init_grad_npy,
                                            rtol=1e-2 if data_type == 'float16' else 1e-3,
                                            atol=1e-2 if data_type == 'float16' else 1e-4)
                        assert_almost_equal(exe_add.grad_dict['b'],
                                            bgrad_npy + b_init_grad_npy,
                                            rtol=1e-2 if data_type == 'float16' else 1e-3,
                                            atol=1e-2 if data_type == 'float16' else 1e-4)
def get_correlation(data1, data2, kernel_size, max_displacement, stride1, stride2, pad_size, is_multiply):
    """Build a Correlation symbol over fresh 'img1'/'img2' variables.

    Note: data1/data2 are accepted for call-site symmetry but are not read;
    inputs are supplied later through the bound executor's arg_dict.
    """
    left = mx.sym.Variable('img1')
    right = mx.sym.Variable('img2')
    return mx.sym.Correlation(data1=left, data2=right,
                              kernel_size=kernel_size,
                              max_displacement=max_displacement,
                              stride1=stride1, stride2=stride2,
                              pad_size=pad_size, is_multiply=is_multiply)
def correlation_forward(data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
    # Pure-python reference forward pass of the Correlation op.
    # Returns (out, tmp1, tmp2); tmp1/tmp2 are the zero-padded copies of the
    # inputs, kept so the backward reference can reuse them.
    # compute output's dimension
    paddedbottomheight = data1.shape[2] + 2 * pad_size
    paddedbottomwidth = data1.shape[3] + 2 * pad_size
    kernel_radius = (kernel_size - 1) // 2
    border_size = max_displacement + kernel_radius
    top_width = (paddedbottomwidth - border_size * 2) // stride1
    top_height = (paddedbottomheight - border_size * 2) // stride1
    neighborhood_grid_radius = max_displacement // stride2
    neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
    # one output channel per displacement in the neighborhood grid
    top_channels = neighborhood_grid_width * neighborhood_grid_width
    out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
    # zero-pad both inputs by pad_size on each spatial border
    tmp1 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
    tmp2 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
    tmp1[:, :, pad_size:pad_size + data1.shape[2], pad_size:pad_size + data1.shape[3]] = data1[:,:,:,:]
    tmp2[:, :, pad_size:pad_size + data2.shape[2], pad_size:pad_size + data2.shape[3]] = data2[:,:,:,:]
    for i in range(top_height):
        for j in range(top_width):
            for nbatch in range(data1.shape[0]):
                # x1,y1 is the location in data1 , i,j is the location in output
                x1 = j * stride1 + max_displacement
                y1 = i * stride1 + max_displacement
                for top_channel in range(top_channels):
                    # (s2o, s2p) = x/y displacement encoded by this channel
                    s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
                    s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
                    # location in data2
                    x2 = x1 + s2o
                    y2 = y1 + s2p
                    for h in range(kernel_size):
                        for w in range(kernel_size):
                            for channel in range(data1.shape[1]):
                                # multiply -> cross-correlation; else sum of absolute differences
                                if is_multiply:
                                    out[nbatch, top_channel, i, j] += tmp1[nbatch, channel,y1 + h, x1 + w] * tmp2[nbatch, channel, y2 + h,x2 + w]
                                else:
                                    out[nbatch, top_channel, i, j] += abs(tmp1[nbatch, channel, y1 + h, x1 + w] - tmp2[nbatch, channel, y2 + h, x2 + w])
    # normalize by patch size times channel count
    out /= float(kernel_size**2*data1.shape[1])
    return out,tmp1,tmp2
def correlation_backward(out_grad,tmp1,tmp2,data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
    # Pure-python reference backward pass of the Correlation op.
    # tmp1/tmp2 are the padded inputs produced by correlation_forward; the
    # returned gradients are cropped back to the unpadded input shapes.
    # compute output's dimension
    paddedbottomheight = data1.shape[2] + 2 * pad_size
    paddedbottomwidth = data1.shape[3] + 2 * pad_size
    kernel_radius = (kernel_size - 1) // 2
    border_size = max_displacement + kernel_radius
    top_width = (paddedbottomwidth - border_size * 2) // stride1
    top_height = (paddedbottomheight - border_size * 2) // stride1
    neighborhood_grid_radius = max_displacement // stride2
    neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
    top_channels = neighborhood_grid_width * neighborhood_grid_width
    # NOTE(review): `out` is allocated but never used in the backward pass
    out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
    tmp1_grad = np.zeros(tmp1.shape)
    tmp2_grad = np.zeros(tmp2.shape)
    for i in range(top_height):
        for j in range(top_width):
            for nbatch in range(data1.shape[0]):
                # x1,y1 is the location in data1 , i,j is the location in output
                x1 = j * stride1 + max_displacement
                y1 = i * stride1 + max_displacement
                for top_channel in range(top_channels):
                    s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
                    s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
                    # location in data2
                    x2 = x1 + s2o
                    y2 = y1 + s2p
                    for h in range(kernel_size):
                        for w in range(kernel_size):
                            for channel in range(data1.shape[1]):
                                if is_multiply:
                                    # d(a*b)/da = b and d(a*b)/db = a
                                    tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*tmp2[nbatch, channel, y2 + h,x2 + w]
                                    tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*tmp1[nbatch, channel, y1 + h,x1 + w]
                                else:
                                    # subgradient of |a - b|
                                    sgn = 1 if (tmp1[nbatch, channel, y1 + h,x1 + w]>=tmp2[nbatch, channel, y2 + h,x2 + w]) else -1
                                    tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*sgn
                                    tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*(-sgn)
    # same normalization as the forward pass
    tmp1_grad = tmp1_grad / float(kernel_size**2*data1.shape[1])
    tmp2_grad = tmp2_grad / float(kernel_size**2*data1.shape[1])
    # strip the zero padding before returning
    return tmp1_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],tmp2_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],
def unittest_correlation(data_shape, kernel_size, max_displacement, stride1, stride2, pad_size, is_multiply, dtype):
    """Compare MXNet's Correlation op against the pure-python reference
    (correlation_forward / correlation_backward) for one configuration.

    Fix: removed `net2`, a dead duplicate of `net1` that was built but
    never bound or executed.
    """
    img1 = np.random.random(data_shape).astype(dtype)
    img2 = np.random.random(data_shape).astype(dtype)
    net1 = get_correlation(img1, img2, kernel_size, max_displacement, stride1, stride2, pad_size, is_multiply)
    exe1 = net1._simple_bind(default_context(), img1=img1.shape, img2=img1.shape)
    exe1.arg_dict['img1'][:] = img1
    exe1.arg_dict['img2'][:] = img2
    # cpu forward
    exe1.forward(is_train=True)
    # python reference forward
    forward_result, tmp1, tmp2 = correlation_forward(img1, img2, pad_size, kernel_size, stride1, stride2, max_displacement, is_multiply)
    # forward error
    assert_almost_equal(exe1.outputs[0], forward_result, rtol=1e-4, atol=1e-4)
    # out_grad
    a = np.ones(forward_result.shape)
    out_grad1 = mx.nd.array(a, default_context())
    # cpu backward
    exe1.backward(out_grads=out_grad1)
    # python reference backward
    grad1, grad2 = correlation_backward(a, tmp1, tmp2, img1, img2, pad_size, kernel_size, stride1, stride2, max_displacement, is_multiply)
    # backward error
    assert_almost_equal(exe1.grad_dict['img1'], grad1, rtol=1e-3, atol=1e-4)
    assert_almost_equal(exe1.grad_dict['img2'], grad2, rtol=1e-3, atol=1e-4)
def test_correlation():
    """Correlation operator tests: dtype inference, numerical checks against
    the python reference, and MXNetError on zero strides."""
    def test_infer_type(dtype):
        a = mx.sym.Variable('a')
        b = mx.sym.Variable('b')
        corr = mx.sym.Correlation(data1=a, data2=b)
        arg_type1, out_type1, _ = corr.infer_type(a=dtype)
        # Fail if ANY inferred type deviates (was `and`, which only fired when
        # all three were wrong), and call npt.build_err_msg (was `npt.npt.…`,
        # an AttributeError on the failure path).
        if arg_type1[0] != np.dtype(dtype) or arg_type1[1] != np.dtype(dtype) or out_type1[0] != np.dtype(dtype):
            msg = npt.build_err_msg([a, b],
                                    err_msg="Inferred type from a is not as expected, "
                                            "Expected :%s %s %s, Got: %s %s %s"
                                            % (dtype, dtype, dtype, arg_type1[0], arg_type1[1], out_type1[0]),
                                    names=['a', 'b'])
            raise AssertionError(msg)
        arg_type2, out_type2, _ = corr.infer_type(b=dtype)
        if arg_type2[0] != np.dtype(dtype) or arg_type2[1] != np.dtype(dtype) or out_type2[0] != np.dtype(dtype):
            # Report the types inferred from b (the original formatted the
            # arg_type1/out_type1 values here by copy-paste mistake).
            msg = npt.build_err_msg([a, b],
                                    err_msg="Inferred type from b is not as expected, "
                                            "Expected :%s %s %s, Got: %s %s %s"
                                            % (dtype, dtype, dtype, arg_type2[0], arg_type2[1], out_type2[0]),
                                    names=['a', 'b'])
            raise AssertionError(msg)
    for dtype in ['float16', 'float32']:
        test_infer_type(dtype)
        unittest_correlation((1,3,10,10), kernel_size=1, max_displacement=4, stride1=1, stride2=1, pad_size=4, is_multiply=False, dtype=dtype)
        unittest_correlation((5,1,15,15), kernel_size=1, max_displacement=5, stride1=1, stride2=1, pad_size=5, is_multiply=False, dtype=dtype)
        unittest_correlation((5,1,15,15), kernel_size=1, max_displacement=5, stride1=1, stride2=1, pad_size=5, is_multiply=True, dtype=dtype)
        unittest_correlation((5,1,15,15), kernel_size=1, max_displacement=10, stride1=1, stride2=2, pad_size=10, is_multiply=True, dtype=dtype)
        unittest_correlation((5,1,4,4), kernel_size=3, max_displacement=1, stride1=1, stride2=1, pad_size=2, is_multiply=True, dtype=dtype)
        unittest_correlation((5,1,4,4), kernel_size=3, max_displacement=1, stride1=2, stride2=1, pad_size=2, is_multiply=True, dtype=dtype)
        unittest_correlation((5,1,4,4), kernel_size=3, max_displacement=1, stride1=2, stride2=1, pad_size=2, is_multiply=False, dtype=dtype)
        unittest_correlation((5,1,6,4), kernel_size=3, max_displacement=1, stride1=2, stride2=1, pad_size=2, is_multiply=False, dtype=dtype)
        unittest_correlation((5,1,11,11), kernel_size=5, max_displacement=1, stride1=1, stride2=1, pad_size=2, is_multiply=False, dtype=dtype)
        # zero strides must be rejected by the operator
        with pytest.raises(MXNetError):
            unittest_correlation((1,3,10,10), kernel_size=1, max_displacement=4, stride1=0, stride2=1, pad_size=4, is_multiply=False, dtype=dtype)
        with pytest.raises(MXNetError):
            unittest_correlation((5,1,15,15), kernel_size=1, max_displacement=5, stride1=1, stride2=0, pad_size=5, is_multiply=False, dtype=dtype)
        with pytest.raises(MXNetError):
            unittest_correlation((5,1,15,15), kernel_size=1, max_displacement=5, stride1=1, stride2=0, pad_size=5, is_multiply=True, dtype=dtype)
        with pytest.raises(MXNetError):
            unittest_correlation((1,3,10,10), kernel_size=1, max_displacement=4, stride1=0, stride2=1, pad_size=4, is_multiply=True, dtype=dtype)
# Seed set because the test is not robust enough to operate on random data
@pytest.mark.seed(1234)
def test_roipooling():
    """Numeric-gradient check for ROIPooling with both 'write' and 'add'
    gradient requests on the data input (rois receive no gradient)."""
    data = mx.symbol.Variable(name='data')
    rois = mx.symbol.Variable(name='rois')
    pooled = mx.symbol.ROIPooling(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1)
    feat = np.random.rand(4, 3, 12, 8).astype('float32')
    boxes = np.array([[0, 1.1, 1.1, 6.2, 6.2],
                      [2, 6.1, 2.1, 8.2, 11.2],
                      [1, 3.1, 1.1, 5.2, 10.2],
                      [0, 3, 3, 3, 3]], dtype='float32')
    for req in ('write', 'add'):
        check_numeric_gradient(sym=pooled, location=[feat, boxes],
                               grad_nodes={'data': req, 'rois': 'null'},
                               numeric_eps=1e-4, rtol=1e-1, atol=1e-4)
def check_pad_with_shape(shape, xpu, pad_width, mode, dtype="float64"):
    """Compare mx.symbol.Pad with numpy.pad and numerically check gradients."""
    in_sym = mx.symbol.Variable('X', dtype=dtype)
    pad_sym = mx.symbol.Pad(data=in_sym, mode=mode, pad_width=pad_width)
    in_nd = mx.random.uniform(-1, 1, shape, ctx=mx.cpu(), dtype=dtype).copyto(xpu)
    # numpy reference: regroup the flat pad spec into (before, after) pairs
    grouped = list(zip(*[iter(list(pad_width))] * 2))
    expected = np.pad(in_nd.asnumpy(), grouped, mode)
    # mxnet result
    grad_nd = mx.nd.empty(shape, ctx=xpu, dtype=dtype)
    executor = pad_sym._bind(xpu, args=[in_nd], args_grad={'X': grad_nd})
    executor.forward(is_train=True)
    # forward must match the numpy reference
    assert_almost_equal(executor.outputs[0], expected)
    # gradient check
    check_numeric_gradient(pad_sym, [in_nd.asnumpy()], numeric_eps=1e-2, rtol=1e-2)
def test_pad():
    """Run check_pad_with_shape over 4-D/5-D inputs, all pad modes and
    floating dtypes."""
    ctx = default_context()
    cases = [((2, 3, 3, 5), (0, 0, 0, 0, 1, 2, 3, 4)),
             ((2, 3, 3, 5, 4), (0, 0, 0, 0, 1, 2, 3, 4, 3, 1))]
    # note: this op doesn't support ints yet. Add tests when supported
    for dtype in ("float16", "float32", "float64"):
        for shape, pad in cases:
            check_pad_with_shape(shape, ctx, pad, 'constant', dtype)
            check_pad_with_shape(shape, ctx, pad, 'edge', dtype)
        for shape, pad in cases:
            check_pad_with_shape(shape, ctx, pad, 'reflect', dtype)
def np_instance_norm(data, weight, bias, eps):
    """Numpy reference for InstanceNorm.

    Normalizes each (sample, channel) slice over its spatial dims, then
    scales by `weight` and shifts by `bias` (both per-channel, length C).
    """
    spatial = data.shape[2::]
    n_spatial = np.prod(np.array(spatial))
    axes = tuple(range(2, data.ndim))
    inv = 1 / float(n_spatial)

    def per_pixel(stat):
        # broadcast an (N, C) statistic back to data's full shape
        return np.reshape(np.repeat(stat, n_spatial), data.shape)

    mean = per_pixel(inv * np.sum(data, axis=axes))
    var = per_pixel(inv * np.sum((data - mean) ** 2, axis=axes))
    gamma = per_pixel(np.tile(weight, (data.shape[0], 1)))
    beta = per_pixel(np.tile(bias, (data.shape[0], 1)))
    return gamma * (data - mean) / np.sqrt(var + eps) + beta
def check_instance_norm_with_shape(shape, xpu):
    """Compare mx.symbol.InstanceNorm against np_instance_norm and
    numerically check gradients for one input shape."""
    eps = 0.001
    data_sym = mx.symbol.Variable('X')
    gamma_sym = mx.symbol.Variable('G')
    beta_sym = mx.symbol.Variable('B')
    norm_sym = mx.symbol.InstanceNorm(data=data_sym, beta=beta_sym, gamma=gamma_sym, eps=eps)
    data_nd = mx.random.normal(0, 1, shape, ctx=mx.cpu()).copyto(xpu)
    gamma_nd = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
    beta_nd = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
    expected = np_instance_norm(data_nd.asnumpy(), gamma_nd.asnumpy(), beta_nd.asnumpy(), eps)
    executor = norm_sym._bind(xpu, args={'X': data_nd, 'G': gamma_nd, 'B': beta_nd})
    executor.forward(is_train=False)
    assert_almost_equal(executor.outputs[0], expected, rtol=1e-4, atol=1e-4)
    check_numeric_gradient(norm_sym,
                           {'X': data_nd.asnumpy(), 'G': gamma_nd.asnumpy(), 'B': beta_nd.asnumpy()},
                           numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
def test_instance_normalization():
    """Exercise InstanceNorm on shapes from 3-D up to 7-D, including
    degenerate single-element spatial dims."""
    for shape in [(1, 1, 1), (2, 1, 2), (2, 4, 5, 6), (3, 3, 2, 3, 2, 1, 1)]:
        check_instance_norm_with_shape(shape, default_context())
def check_l2_normalization(in_shape, mode, dtype, norm_eps=1e-10):
    """Compare mx.symbol.L2Normalization against a numpy reference for one
    mode ('channel' | 'spatial' | 'instance') and check gradients.

    Fixes: integer `//` repeat counts for np.repeat (float counts are
    rejected by modern numpy) and `==` instead of `is` for the dtype
    string comparison.
    """
    ctx = default_context()
    data = mx.symbol.Variable('data')
    out = mx.symbol.L2Normalization(data=data, mode=mode, eps=norm_eps)
    in_data = np.random.uniform(-1, 1, in_shape).astype(dtype)
    # calculate numpy results
    if mode == 'channel':
        assert in_data.ndim > 2
        np_norm = np.linalg.norm(in_data, axis=1) + norm_eps
        np_norm = np.repeat(1. / np.expand_dims(np_norm, axis=1), in_data.shape[1], axis=1)
        np_out = np.multiply(in_data, np_norm)
    elif mode == 'spatial':
        assert in_data.ndim > 2
        s = in_data.shape
        np_norm = np.linalg.norm(in_data.reshape((s[0], s[1], -1)), axis=2) + norm_eps
        np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // (s[0] * s[1]), axis=2)
        np_out = np.multiply(in_data, np_norm.reshape(s))
    elif mode == 'instance':
        assert in_data.ndim > 1
        s = in_data.shape
        np_norm = np.linalg.norm(in_data.reshape((s[0], -1)), axis=1) + norm_eps
        np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // s[0], axis=1)
        np_out = np.multiply(in_data, np_norm.reshape(s))
    else:
        raise RuntimeError('Unknown l2 normalization mode')
    exe = out._simple_bind(ctx=ctx, data=in_data.shape)
    output = exe.forward(is_train=True, data=in_data)
    # compare numpy + mxnet (looser tolerance for half precision)
    assert_almost_equal(exe.outputs[0], np_out, rtol=1e-2 if dtype == 'float16' else 1e-5, atol=1e-5)
    # check gradient
    check_numeric_gradient(out, [in_data], numeric_eps=1e-3, rtol=1e-2, atol=5e-3)
def test_l2_normalization():
    """Run check_l2_normalization over random 3-D and 4-D shapes for every
    supported mode and floating dtype."""
    for dtype in ('float16', 'float32', 'float64'):
        for mode in ('channel', 'spatial', 'instance'):
            n = random.randint(1, 4)
            c = random.randint(3, 5)
            h = random.randint(4, 6)
            check_l2_normalization((n, c, h), mode, dtype)
            w = random.randint(5, 7)
            check_l2_normalization((n, c, h, w), mode, dtype)
def check_layer_normalization(in_shape, axis, eps, dtype=np.float32,
                              forward_check_eps=1E-3, backward_check_eps=1E-3,
                              npy_grad_check=True, finite_grad_check=True):
    """Check mx.symbol.LayerNorm against numpy references.

    Verifies the forward output, optionally finite-difference gradients
    (finite_grad_check) and analytic numpy gradients (npy_grad_check) for
    both grad_req='write' and grad_req='add'.
    """
    def npy_layer_norm(data, gamma, beta, axis=1, eps=1E-5):
        # numpy forward reference: normalize over `axis`, then scale/shift
        if axis < 0:
            axis += data.ndim
        broadcast_shape = [1 for _ in range(data.ndim)]
        broadcast_shape[axis] = data.shape[axis]
        mean = data.mean(axis=axis, keepdims=True).astype(dtype)
        var = data.var(axis=axis, keepdims=True).astype(dtype)
        std = np.sqrt(var + dtype(eps)).astype(dtype)
        out = np.reshape(gamma, broadcast_shape) * (data - mean) / std + \
              np.reshape(beta, broadcast_shape)
        return out
    def npy_layer_norm_grad(data, gamma, out_grad, axis, eps):
        # numpy backward reference: returns (data_grad, gamma_grad, beta_grad)
        if axis < 0:
            axis += data.ndim
        exclude_axis = tuple([ele for ele in range(data.ndim) if ele != axis])
        data_mean = data.mean(axis=axis, keepdims=True)
        data_var = data.var(axis=axis, keepdims=True)
        data_std = np.sqrt(data_var + eps)
        centered_data = (data - data_mean) / data_std
        gamma_grad = (centered_data * out_grad).sum(axis=exclude_axis, keepdims=True)
        beta_grad = out_grad.sum(axis=exclude_axis, keepdims=True)
        w = out_grad * gamma.reshape([1 if i != axis else data.shape[axis] for i in range(data.ndim)])\
            / data_std
        # data gradient: project out the mean and variance components
        data_grad = w - w.mean(axis=axis, keepdims=True)\
                    - centered_data * (w * centered_data).mean(axis=axis, keepdims=True)
        gamma_grad = gamma_grad.reshape((-1,))
        beta_grad = beta_grad.reshape((-1,))
        return data_grad, gamma_grad, beta_grad
    ctx = default_context()
    data = np.random.normal(0, 1, in_shape).astype(dtype)
    gamma = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
    beta = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
    data_s = mx.symbol.Variable('data')
    gamma_s = mx.symbol.Variable('gamma')
    beta_s = mx.symbol.Variable('beta')
    out_s = mx.symbol.LayerNorm(data=data_s, gamma=gamma_s, beta=beta_s, axis=axis, eps=eps)
    exe = out_s._simple_bind(ctx, data=in_shape)
    exe.arg_dict['data'][:] = data
    exe.arg_dict['gamma'][:] = gamma
    exe.arg_dict['beta'][:] = beta
    out_nd = exe.forward()[0]
    # forward must match the numpy reference
    out = npy_layer_norm(data, gamma, beta, axis, eps)
    assert_almost_equal(out, out_nd, forward_check_eps, forward_check_eps)
    if finite_grad_check:
        # finite-difference gradient check for both write and add requests
        for req in ['write', 'add']:
            check_numeric_gradient(out_s, {'data': data, 'gamma': gamma, 'beta': beta},
                                   grad_nodes={'data': req, 'gamma': req, 'beta': req},
                                   numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
    if npy_grad_check:
        # Test for grad_req = write
        out_grad = np.random.normal(0, 1, in_shape).astype(dtype)
        exe = out_s._simple_bind(ctx, data=in_shape, grad_req='write')
        exe.arg_dict['data'][:] = data
        exe.arg_dict['gamma'][:] = gamma
        exe.arg_dict['beta'][:] = beta
        exe.forward()
        exe.backward([mx.nd.array(out_grad, ctx=ctx)])
        gt_data_grad, gt_gamma_grad, gt_beta_grad =\
            npy_layer_norm_grad(data, gamma, out_grad, axis, eps)
        assert_almost_equal(exe.grad_dict['data'].asnumpy(), gt_data_grad, backward_check_eps, backward_check_eps)
        assert_almost_equal(exe.grad_dict['gamma'].asnumpy(), gt_gamma_grad, backward_check_eps, backward_check_eps)
        assert_almost_equal(exe.grad_dict['beta'].asnumpy(), gt_beta_grad, backward_check_eps, backward_check_eps)
        # Test for grad_req = add: gradients must accumulate onto the preset values
        out_grad = np.random.normal(0, 1, in_shape).astype(dtype)
        init_data_grad = np.random.normal(0, 1, in_shape).astype(dtype)
        init_gamma_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
        init_beta_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
        exe = out_s._simple_bind(ctx, data=in_shape, grad_req='add')
        exe.arg_dict['data'][:] = data
        exe.arg_dict['gamma'][:] = gamma
        exe.arg_dict['beta'][:] = beta
        exe.grad_dict['data'][:] = init_data_grad
        exe.grad_dict['gamma'][:] = init_gamma_grad
        exe.grad_dict['beta'][:] = init_beta_grad
        exe.forward()
        exe.backward([mx.nd.array(out_grad, ctx=ctx)])
        gt_data_grad, gt_gamma_grad, gt_beta_grad = \
            npy_layer_norm_grad(data, gamma, out_grad, axis, eps)
        assert_almost_equal(exe.grad_dict['data'].asnumpy(),
                            gt_data_grad + init_data_grad, backward_check_eps, backward_check_eps)
        assert_almost_equal(exe.grad_dict['gamma'].asnumpy(),
                            gt_gamma_grad + init_gamma_grad, backward_check_eps, backward_check_eps)
        assert_almost_equal(exe.grad_dict['beta'].asnumpy(),
                            gt_beta_grad + init_beta_grad, backward_check_eps, backward_check_eps)
def test_norm():
    """Check mx.symbol.norm (L1/L2) against scipy/numpy references over single
    axes and axis pairs, for several dtypes, with and without
    MXNET_SAFE_ACCUMULATION.

    Fix: `order == 1` / `dtype == np.float16` instead of `is` comparisons —
    identity comparison with literals is implementation-defined and raises
    SyntaxWarning on modern Python.
    """
    try:
        import scipy
        assert LooseVersion(scipy.__version__) >= LooseVersion('0.1')
        from scipy.linalg import norm as sp_norm
    except (AssertionError, ImportError):
        print("Could not import scipy.linalg.norm or scipy is too old. "
              "Falling back to numpy.linalg.norm which is not numerically stable.")
        from numpy.linalg import norm as sp_norm
    def l1norm(input_data, axis=0, keepdims=True):
        return np.sum(abs(input_data), axis=axis, keepdims=keepdims)
    def l2norm(input_data, axis=0, keepdims=True):
        return sp_norm(input_data, axis=axis, keepdims=keepdims)
    ctx = default_context()
    data = mx.symbol.Variable('data')
    in_data_dim = random_sample([2,3,4], 1)[0]
    in_shape = rand_shape_nd(in_data_dim, dim=5)
    epsilon = 1e-3
    acc_type = {np.float16: np.float32, np.float32: np.float32, np.float64: np.float64,
                np.int32: np.int32, np.int64: np.int64}
    dtype_to_str = {np.float16: 'float16', np.float32: 'float32', np.float64: 'float64',
                    np.int32: 'int32', np.int64: 'int64'}
    for enforce_safe_acc in ['1', '0']:
        with environment('MXNET_SAFE_ACCUMULATION', enforce_safe_acc):
            for order in [1, 2]:
                for dtype in [np.float16, np.float32, np.float64]:
                    for i in range(in_data_dim):
                        for out_dtype in ['float32', 'float64']:
                            backward_dtype = np.float32 if out_dtype == 'float32' else np.float64
                            accumulation_type = acc_type[dtype]
                            if enforce_safe_acc == "0":
                                # without safe accumulation, stay in the input dtype
                                backward_dtype = dtype
                                out_dtype = dtype_to_str[dtype]
                                accumulation_type = dtype
                            skip_backward = 'int' in out_dtype
                            in_data = np.random.uniform(-1, 1, in_shape).astype(accumulation_type)
                            # keep values away from 0 so the L1 subgradient is stable
                            in_data[abs(in_data) < epsilon] = 2 * epsilon
                            norm_sym = mx.symbol.norm(data=data, ord=order, axis=i, out_dtype=out_dtype, keepdims=True)
                            npy_out = l1norm(in_data, i) if order == 1 else l2norm(in_data, i)
                            npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
                            check_symbolic_forward(norm_sym, [in_data.astype(dtype)], [npy_out.astype(out_dtype)],
                                                   rtol=1e-2 if dtype == np.float16 else 1e-3,
                                                   atol=1e-4 if dtype == np.float16 else 1e-5, ctx=ctx, dtype=dtype)
                            if dtype != np.float16 and not skip_backward:
                                check_symbolic_backward(norm_sym, [in_data.astype(dtype)],
                                                        [np.ones(npy_out.shape).astype(out_dtype)],
                                                        [npy_out_backward], rtol=1e-3, atol=1e-5, ctx=ctx,
                                                        dtype=backward_dtype)
                            # Disable numeric gradient https://github.com/apache/incubator-mxnet/issues/11509
                            # check gradient
                            if dtype != np.float16 and not skip_backward:
                                check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
                                                       rtol=1e-1, atol=1e-3, dtype=backward_dtype)
                            if i < in_data_dim-1:
                                # same checks over an axis pair
                                norm_sym = mx.symbol.norm(data=data, ord=order, axis=(i, i+1), keepdims=True)
                                npy_out = l1norm(in_data, (i, i+1)) if order == 1 else l2norm(in_data, (i, i+1))
                                npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
                                check_symbolic_forward(norm_sym, [in_data], [npy_out.astype(dtype)],
                                                       rtol=1e-2 if dtype == np.float16 else 1e-3,
                                                       atol=1e-4 if dtype == np.float16 else 1e-5, ctx=ctx)
                                if dtype != np.float16 and not skip_backward:
                                    check_symbolic_backward(norm_sym, [in_data],
                                                            [np.ones(npy_out.shape).astype(out_dtype)],
                                                            [npy_out_backward.astype(out_dtype)],
                                                            rtol=1e-3, atol=1e-5, ctx=ctx, dtype=backward_dtype)
                                # check gradient
                                if dtype != np.float16 and not skip_backward:
                                    check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
                                                           rtol=1e-1, atol=1e-3, dtype=backward_dtype)
@pytest.mark.parametrize('enforce_safe_acc', ['1', '0'])
@pytest.mark.parametrize('dtype,forward_check_eps,backward_check_eps,in_shape_l,finite_grad_check_l', [
    (np.float16, 1E-2, 1E-2, [(10, 6, 5), (10, 10)], [True, True]),
    (np.float32, 1E-3, 1E-3, [(10, 6, 5), (10, 10), (128 * 32, 512)], [True, True, False]),
    (np.float64, 1E-4, 1E-4, [(10, 6, 5), (10, 10), (128 * 32, 512)], [True, True, False])
])
def test_layer_norm(enforce_safe_acc, dtype, forward_check_eps, backward_check_eps,
                    in_shape_l, finite_grad_check_l):
    """Exercise LayerNorm over every axis and two eps values per shape,
    with safe accumulation both enabled and disabled."""
    with environment('MXNET_SAFE_ACCUMULATION', enforce_safe_acc):
        for in_shape, finite_grad_check in zip(in_shape_l, finite_grad_check_l):
            ndim = len(in_shape)
            for axis in range(-ndim, ndim):
                for eps in (1E-2, 1E-3):
                    # the numpy gradient comparison is too inaccurate for fp16
                    npy_grad_check = dtype != np.float16
                    check_layer_normalization(in_shape, axis, eps, dtype=dtype,
                                              forward_check_eps=forward_check_eps,
                                              backward_check_eps=backward_check_eps,
                                              npy_grad_check=npy_grad_check,
                                              finite_grad_check=finite_grad_check)
# Numpy Implementation of Sequence Ops
def sequence_last_numpy(array, lengths, axis):
    """Reference for SequenceLast: pick the last valid step of each sequence.

    `axis` is the sequence axis; `lengths` gives the valid length per batch
    element, or None to use the full sequence length.
    """
    # bring the sequence dimension to position 1: [batch, seqlen, ...]
    seq_major = np.moveaxis(array, axis, 1)
    if lengths is None:
        return seq_major[:, -1]
    lens = list(lengths)
    picked = [seq_major[b, int(lens[b]) - 1] for b in range(seq_major.shape[0])]
    return np.array(picked)
def sequence_mask_numpy(array, lengths, axis, value):
    """Reference for SequenceMask: fill steps beyond each sequence length
    with `value`.  With lengths=None the input is returned unchanged."""
    if lengths is None:
        return array
    # work on a copy laid out as [batch, seqlen, ...]
    masked = np.moveaxis(array.copy(), axis, 1)
    lens = list(lengths)
    for batch in range(masked.shape[0]):
        masked[batch, int(lens[batch]):] = value
    return np.moveaxis(masked, 1, axis)
def sequence_reverse_numpy(array, lengths, axis):
    """Reference for SequenceReverse: reverse the first `lengths[b]` steps of
    each sequence; lengths=None reverses the whole sequence axis."""
    # work on a copy laid out as [batch, seqlen, ...]
    rev = np.moveaxis(array.copy(), axis, 1)
    batch_size, seq_len = rev.shape[0], rev.shape[1]
    if lengths is None:
        lengths = [seq_len] * batch_size
    lens = list(lengths)
    for batch in range(batch_size):
        n = int(lens[batch])
        rev[batch, :n] = rev[batch, :n][::-1]
    return np.moveaxis(rev, 1, axis)
def check_sequence_func(ftype, mask_value=0, axis=0):
    """Drive one sequence op ('last' | 'mask' | 'reverse') over several shapes
    and index dtypes and compare against the numpy references above.

    Checks the symbolic forward output plus numeric gradients under the
    'write', 'add' and 'null' gradient requirements.
    """
    # bind with label
    xpu = default_context()
    X = mx.symbol.Variable('X')
    L = mx.symbol.Variable('L') # lengths
    shapes = [(3, 4), (1, 1), (3, 4, 3, 1, 1)]
    for seqlenQ in [True, False]:  # with and without an explicit sequence_length input
        for ary_dtype in [np.float32]:
            for idx_dtype in [np.int32, np.float32]:
                for s in shapes:
                    x = mx.random.uniform(-1, 1, s, ctx=mx.cpu()).astype(ary_dtype).copyto(xpu)
                    # of the first two axes, the non-sequence one is the batch axis
                    batch = s[1] if (axis == 0) else s[0]
                    seqlen = s[axis]
                    l_np = np.random.randint(1, seqlen + 1, batch)
                    l = mx.nd.array(l_np, ctx=mx.cpu(), dtype=idx_dtype).copyto(xpu)
                    if not seqlenQ:
                        l_np = None  # numpy references treat None as "full length"
                    args = {'data':X, 'use_sequence_length':seqlenQ, "axis":axis}
                    if seqlenQ:
                        args['sequence_length'] = L
                    if ftype == "last":
                        Y = mx.symbol.SequenceLast(**args)
                        np_out = sequence_last_numpy(x.asnumpy(), l_np, axis)
                    elif ftype == "mask":
                        args['value'] = mask_value
                        Y = mx.symbol.SequenceMask(**args)
                        np_out = sequence_mask_numpy(x.asnumpy(), l_np, axis, mask_value)
                    elif ftype == "reverse":
                        Y = mx.symbol.SequenceReverse(**args)
                        np_out = sequence_reverse_numpy(x.asnumpy(), l_np, axis)
                    fargs = [x, l] if seqlenQ else [x]
                    gargs = [x.asnumpy(), l_np] if seqlenQ else [x.asnumpy()]
                    check_symbolic_forward(Y, fargs, [np_out], dtype="asnumpy")
                    check_numeric_gradient(Y, gargs, grad_nodes={'X':'write'},
                                           numeric_eps=1e-2, rtol=1e-2)
                    check_numeric_gradient(Y, gargs, grad_nodes={'X':'add'},
                                           numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
                    check_numeric_gradient(Y, gargs, grad_nodes={'X':'null'},
                                           numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
@pytest.mark.skip(reason="Flaky test: https://github.com/apache/incubator-mxnet/issues/11395")
def test_sequence_last():
    """SequenceLast with the sequence axis in either of the first two positions."""
    for ax in (0, 1):
        check_sequence_func("last", axis=ax)
def test_sequence_mask():
    """SequenceMask with non-trivial fill values on both sequence-axis layouts."""
    for ax, fill in ((0, -2.3), (1, 0.3)):
        check_sequence_func("mask", axis=ax, mask_value=fill)
def check_sequence_reverse(xpu):
    """Hand-crafted SequenceReverse cases on a fixed (3, 2, 3) tensor and a
    (2, 3) matrix, covering full reversal, uniform lengths, mixed lengths,
    and the use_sequence_length=False path."""
    # sample data: 3 time steps x 2 batch elements x 3 features
    arr = np.array(
        [[[  1.,   2.,   3.],
        [  4.,   5.,   6.]],
        [[  7.,   8.,   9.],
        [ 10.,  11.,  12.]],
        [[ 13.,  14.,  15.],
        [ 16.,  17.,  18.]]])
    # arr fully reversed along axis 0
    arr1 = np.array(
        [[[ 13.,  14.,  15.],
        [ 16.,  17.,  18.]],
        [[  7.,   8.,   9.],
        [ 10.,  11.,  12.]],
        [[  1.,   2.,   3.],
        [  4.,   5.,   6.]]])
    # arr with only the first 2 steps of each sequence reversed
    arr2 = np.array(
        [[[  7.,   8.,   9.],
        [ 10.,  11.,  12.]],
        [[  1.,   2.,   3.],
        [  4.,   5.,   6.]],
        [[ 13.,  14.,  15.],
        [  16.,  17.,  18.]]])
    # arr with per-batch lengths [2, 3]
    arr3 = np.array(
        [[[  7.,   8.,   9.],
        [ 16.,  17.,  18.]],
        [[  1.,   2.,   3.],
        [ 10.,  11.,  12.]],
        [[ 13.,  14.,  15.],
        [  4.,   5.,   6.]]])
    # test for matrix case
    seq_len_1 = [1, 2, 2]
    arr_4 = np.array([[7., 8., 9.], [16., 17., 5.4]], dtype=np.float32)
    arr_5 = np.array([[7., 17., 5.4], [16., 8., 9.]], dtype=np.float32)
    def test_wrapper(arr, xpu, sequence_length=None, use_sequence_length=False):
        """Bind and run SequenceReverse on `arr`, returning the numpy result."""
        # MxNet symbol creation
        seq = mx.sym.Variable('seq')
        if sequence_length and use_sequence_length:
            seq_len = mx.sym.Variable('seq_len')
        else:
            # ensure that both are disabled, not just one
            seq_len=None
            use_sequence_length=False
        rev = mx.sym.SequenceReverse(data=seq, sequence_length=seq_len, use_sequence_length=use_sequence_length)
        # MxNet symbol execution
        if sequence_length:
            bound = rev._bind(xpu, {'seq': mx.nd.array(arr), 'seq_len': mx.nd.array(sequence_length)})
        else:
            bound = rev._bind(xpu, {'seq': mx.nd.array(arr)})
        fwd = bound.forward()
        return fwd[0].asnumpy()
    # test cases
    assert_array_equal(test_wrapper(arr, xpu, use_sequence_length=False), arr1)
    assert_array_equal(test_wrapper(arr, xpu, sequence_length=[3, 3], use_sequence_length=True), arr1)
    assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 2], use_sequence_length=True), arr2)
    assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 3], use_sequence_length=True), arr3)
    assert_array_equal(test_wrapper(arr_4, xpu, sequence_length=seq_len_1, use_sequence_length=True), arr_5)
def test_sequence_reverse():
    """SequenceReverse: generic randomized checker plus handcrafted CPU cases."""
    check_sequence_func(ftype="reverse", axis=0)
    check_sequence_reverse(mx.cpu())
def mathematical_core_binary(name,
                             forward_mxnet_call,
                             forward_numpy_call,
                             backward_numpy_call1,
                             backward_numpy_call2,
                             data1_init=2.,
                             data2_init=3.,
                             grad_init=2.):
    """Check a binary mxnet operator against numpy references.

    Parameters
    ----------
    name : str
        Operator name (for readability at the call site only).
    forward_mxnet_call : callable
        (sym, sym) -> sym building the mxnet expression under test.
    forward_numpy_call : callable
        Numpy reference for the forward output.
    backward_numpy_call1, backward_numpy_call2 : callable
        Numpy references for the gradient w.r.t. the first / second input.
    data1_init, data2_init, grad_init : float
        Constants used to fill the two inputs and the output gradient.
    """
    data1 = mx.symbol.Variable('data1')
    data2 = mx.symbol.Variable('data2')
    shape = (3, 4)
    # Fix: the original filled these with np.random.rand and then immediately
    # overwrote every element with the constant; build the constants directly.
    data_tmp1 = np.full(shape, data1_init)
    data_tmp2 = np.full(shape, data2_init)
    arr_data1 = mx.nd.array(data_tmp1)
    arr_data2 = mx.nd.array(data_tmp2)
    arr_grad1 = mx.nd.empty(shape)
    arr_grad2 = mx.nd.empty(shape)
    test = forward_mxnet_call(data1, data2)
    exe_test = test._bind(default_context(), args=[arr_data1, arr_data2], args_grad=[arr_grad1, arr_grad2])
    exe_test.forward(is_train=True)
    # forward must match the numpy reference
    out = exe_test.outputs[0]
    npout = forward_numpy_call(data_tmp1, data_tmp2)
    assert_almost_equal(out, npout)
    # backward: gradient w.r.t. each input scaled by the constant out-grad
    out_grad = mx.nd.empty(shape)
    out_grad[:] = grad_init
    exe_test.backward(out_grad)
    npout_grad = np.full(shape, grad_init)
    npout_grad1 = npout_grad * backward_numpy_call1(data_tmp1, data_tmp2)
    npout_grad2 = npout_grad * backward_numpy_call2(data_tmp1, data_tmp2)
    assert_almost_equal(arr_grad1, npout_grad1)
    assert_almost_equal(arr_grad2, npout_grad2)
def mathematical_core(name, forward_mxnet_call, forward_numpy_call, backward_numpy_call, data_init=5., grad_init=2.):
    """Check a unary mxnet operator against numpy forward/backward references
    on a constant-filled (3, 4) input."""
    shape = (3, 4)
    sym_in = mx.symbol.Variable('data')
    np_in = np.full(shape, data_init)
    nd_in = mx.nd.array(np_in)
    nd_grad = mx.nd.empty(shape)
    nd_grad[:] = 3
    executor = forward_mxnet_call(sym_in)._bind(default_context(), args=[nd_in], args_grad=[nd_grad])
    # forward must match the numpy reference
    executor.forward(is_train=True)
    assert_almost_equal(executor.outputs[0], forward_numpy_call(np_in))
    # backward: local derivative times the constant output gradient
    nd_out_grad = mx.nd.empty(shape)
    nd_out_grad[:] = grad_init
    expected_grad = nd_out_grad.asnumpy() * backward_numpy_call(np_in)
    executor.backward(nd_out_grad)
    assert_almost_equal(nd_grad, expected_grad)
def test_special_functions_using_scipy():
    """Check gamma, gammaln, erf and erfinv against scipy references.

    NOTE(review): a second, reduced definition of this same function appears
    later in this file and shadows this one at import time; the duplicate
    should be removed so all four ops are actually tested.
    """
    try:
        from scipy import special as scipy_special
    except ImportError:
        # Fix: was a bare `except:`, which would also hide unrelated errors.
        print("Could not import scipy. Skipping unit tests for special functions")
        return
    # gamma: d/dx gamma(x) = gamma(x) * psi(x)
    mathematical_core("gamma", lambda x: mx.sym.gamma(x), lambda x: scipy_special.gamma(x),
                     lambda x: scipy_special.gamma(x) * scipy_special.psi(x), 0.5, 0.5)
    # gammaln: d/dx ln(gamma(x)) = psi(x)
    mathematical_core("gammaln", lambda x: mx.sym.gammaln(x), lambda x: scipy_special.gammaln(x),
                     lambda x: scipy_special.psi(x), 0.5, 0.5)
    # erf: d/dx erf(x) = 2/sqrt(pi) * exp(-x^2)
    mathematical_core("erf", lambda x: mx.sym.erf(x), lambda x: scipy_special.erf(x),
                     lambda x: 2.0 / math.sqrt(math.pi) * np.exp(-(x ** 2)), 0.5, 0.5)
    # erfinv: d/dx erfinv(x) = sqrt(pi)/2 * exp(erfinv(x)^2)
    mathematical_core("erfinv", lambda x: mx.sym.erfinv(x), lambda x: scipy_special.erfinv(x),
                     lambda x: 0.5 * math.sqrt(math.pi) * np.exp(scipy_special.erfinv(x) ** 2), 0.5, 0.5)
def rounding(name, forward_mxnet_call, forward_numpy_call, data_init=5., grad_init=2.):
    """Forward-only check for rounding operators (their gradient is zero
    almost everywhere, so no backward comparison is made)."""
    shape = (3, 4)
    sym_in = mx.symbol.Variable('data')
    np_in = np.full(shape, data_init)
    executor = forward_mxnet_call(sym_in)._bind(default_context(), args=[mx.nd.array(np_in)])
    executor.forward(is_train=True)
    assert_almost_equal(executor.outputs[0], forward_numpy_call(np_in))
def test_mathematical():
    """Smoke-test elementwise math ops against numpy references: each call
    passes the mxnet builder, the numpy forward, the analytic derivative,
    and optionally the constant input / out-grad values."""
    # rsqrt
    mathematical_core("rsqrt",
                      lambda x: mx.sym.rsqrt(x),
                      lambda x: 1 / np.sqrt(x),
                      lambda x: -(1.0 / (2.0 * x * np.sqrt(x))))
    # tan
    mathematical_core("tan", lambda x: mx.sym.tan(x), lambda x: np.tan(x), lambda x: np.tan(x) ** 2 + 1)
    # arcsin
    mathematical_core("arcsin", lambda x: mx.sym.arcsin(x), lambda x: np.arcsin(x),
                      lambda x: 1. / (1. - x ** 2) ** (1. / 2.), 0.5, 0.5)
    # arccos
    mathematical_core("arccos", lambda x: mx.sym.arccos(x), lambda x: np.arccos(x),
                      lambda x: -1. / (1. - x ** 2.) ** (1. / 2.), 0.5, 0.5)
    # arctan
    mathematical_core("arctan", lambda x: mx.sym.arctan(x), lambda x: np.arctan(x),
                      lambda x: 1. / (x ** 2. + 1.), 0.5, 0.5)
    # hypot
    mathematical_core_binary("hypot",
                             lambda x, y: mx.sym.hypot(x, y),
                             lambda x, y: np.hypot(x, y),
                             lambda x, y: x / np.hypot(x, y),
                             lambda x, y: y / np.hypot(x, y),
                             0.5, 0.5, 0.5)
    # hypot scalar
    mathematical_core("hypot scalar",
                      lambda x: mx.sym.hypot(x, 3),
                      lambda x: np.hypot(x, 3),
                      lambda x: x / np.hypot(x, 3),
                      0.5, 0.5)
    # degrees — derivative is the constant 180/pi (broadcasts over the input)
    mathematical_core("degrees",
                       lambda x: mx.sym.degrees(x),
                       lambda x: np.degrees(x),
                       lambda x: 180./np.pi,
                       0.5, 0.5)
    # radians — derivative is the constant pi/180
    mathematical_core("radians",
                       lambda x: mx.sym.radians(x),
                       lambda x: np.radians(x),
                       lambda x: np.pi / 180.,
                       0.6, 1)
    # sinh
    mathematical_core("sinh", lambda x: mx.sym.sinh(x), lambda x: np.sinh(x), lambda x: np.cosh(x))
    # cosh
    mathematical_core("cosh", lambda x: mx.sym.cosh(x), lambda x: np.cosh(x), lambda x: np.sinh(x), 5, 5)
    # tanh
    mathematical_core("tanh", lambda x: mx.sym.tanh(x), lambda x: np.tanh(x), lambda x: 1. - np.tanh(x) ** 2, 0.5, 1)
    # arcsinh
    mathematical_core("arcsinh", lambda x: mx.sym.arcsinh(x), lambda x: np.arcsinh(x),
                      lambda x: 1./(x**2 + 1.)**(1./2.))
    # arccosh
    mathematical_core("arccosh", lambda x: mx.sym.arccosh(x), lambda x: np.arccosh(x),
                      lambda x: 1./(x**2 - 1.)**(1./2.))
    # arctanh
    mathematical_core("arctanh", lambda x: mx.sym.arctanh(x), lambda x: np.arctanh(x),
                      lambda x: -1./(x**2 - 1.), 0.5)
    # log1p
    mathematical_core("log1p", lambda x: mx.sym.log1p(x), lambda x: np.log1p(x),
                      lambda x: 1. / (1.0 + x), 0.5, 0.5)
    # expm1
    mathematical_core("expm1", lambda x: mx.sym.expm1(x), lambda x: np.expm1(x),
                      lambda x: np.exp(x), 0.5, 0.5)
    # log10
    mathematical_core("log10", lambda x: mx.sym.log10(x), lambda x: np.log10(x),
                      lambda x: 1. / (x * np.log(10.)))
    # log2
    mathematical_core("log2", lambda x: mx.sym.log2(x), lambda x: np.log2(x),
                      lambda x: 1. / (x * np.log(2.)))
    # rint — forward-only (zero gradient almost everywhere)
    rounding("rint", lambda x: mx.sym.rint(x), lambda x: np.rint(x))
    # fix — forward-only
    rounding("fix", lambda x: mx.sym.fix(x), lambda x: np.fix(x))
def test_special_functions_using_scipy():
    """Check gamma and gammaln against scipy references.

    NOTE(review): this is a duplicate definition — it shadows an earlier,
    more complete `test_special_functions_using_scipy` (which also covers
    erf/erfinv) defined above in this file; only this one is collected by
    the test runner.  One of the two should be deleted.
    """
    try:
        from scipy import special as scipy_special
    except ImportError:
        # Fix: was a bare `except:`, which would also hide unrelated errors.
        print("Could not import scipy. Skipping unit tests for special functions")
        return
    # gamma: d/dx gamma(x) = gamma(x) * psi(x)
    mathematical_core("gamma", lambda x: mx.sym.gamma(x), lambda x: scipy_special.gamma(x),
                     lambda x: scipy_special.gamma(x) * scipy_special.psi(x), 0.5, 0.5)
    # gammaln: d/dx ln(gamma(x)) = psi(x)
    mathematical_core("gammaln", lambda x: mx.sym.gammaln(x), lambda x: scipy_special.gammaln(x),
                     lambda x: scipy_special.psi(x), 0.5, 0.5)
def test_clip():
    """clip: forward clamps to [-0.6, 0.6]; gradient is 1 inside the range
    and 0 where the input was clipped."""
    data = mx.symbol.Variable('data')
    shape = (30, 30)
    values = np.random.uniform(-1, 1, shape).astype('float32')
    clipped = mx.sym.clip(data, a_max=0.6, a_min=-0.6)
    check_symbolic_forward(clipped, [values], [np.clip(values, -0.6, 0.6)])
    pass_through = np.where((values <= 0.6) & (values >= -0.6), [1], [0])
    check_symbolic_backward(clipped, [values], [np.ones(shape)], [pass_through])
def test_init():
    """Test constant-fill initializers (zeros/ones) and the arange family."""
    def test_basic_val_init(sym_func, np_func, shape, dtype):
        """Compare an mxnet fill symbol against its numpy counterpart,
        including the output dtype."""
        x = sym_func(shape=shape, dtype=dtype)
        exe = x._bind(default_context(), args=[], args_grad=[])
        exe.forward(is_train=True)
        assert_almost_equal(exe.outputs[0], np_func(shape=shape, dtype=dtype))
        assert exe.outputs[0].asnumpy().dtype == dtype
    def test_arange():
        """mx.nd.arange with random repeat counts vs np.arange + np.repeat."""
        # General Random Tests
        dtype_list = [np.float32, np.float64, np.int32, np.uint8]
        config_list = [(10,),
                       (0, 10),
                       (5, 100, 4),
                       (50, -50, -2),
                       (-100, 100, 1),
                       (1.3, 456.6, 1.3)]
        for dtype in dtype_list:
            for config in config_list:
                repeats = random.choice([1, 3])
                np_out = np.repeat(np.arange(*config, dtype=dtype), repeats)
                nd_out = mx.nd.arange(*config, repeat=repeats, dtype=dtype)
                assert_almost_equal(np_out, nd_out)
    def test_arange_inferstop():
        """arange with stop=None must infer its length from the consumer shape."""
        s = mx.sym.arange(start=0, stop=None, infer_range=True)
        s = mx.sym.elemwise_add(s, mx.sym.zeros(shape=[5]))
        exe = s._bind(ctx=mx.cpu(), args={})
        exe.forward()
        assert_almost_equal(exe.outputs[0], np.array([0,1,2,3,4]))
    def test_arange_like():
        """contrib.arange_like with an explicit axis yields 0..shape[axis]-1."""
        shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
        axis_list = [0, -1]
        for sh in shape_list:
            for axis in axis_list:
                val = np.random.rand(*sh)
                data = mx.nd.array(val)
                nd_out = mx.nd.contrib.arange_like(data, start=0, axis=axis)
                np_out = np.arange(start=0, stop=sh[axis])
                assert_almost_equal(nd_out.asnumpy(), np_out)
    def test_arange_like_without_axis():
        """Without an axis, arange_like covers every element, keeping the shape."""
        shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
        for sh in shape_list:
            val = np.random.rand(*sh)
            data = mx.nd.array(val)
            nd_out = mx.nd.contrib.arange_like(data, start=0)
            np_out = np.arange(start=0, stop=val.size)
            assert_almost_equal(nd_out.asnumpy(), np_out.reshape(sh))
    # run all sub-tests
    test_basic_val_init(mx.sym.zeros, np.zeros, (3, 4), np.float32)
    test_basic_val_init(mx.sym.ones, np.ones, 3, np.int32)
    test_basic_val_init(mx.sym.ones, np.ones, (2, 2, 3), np.float16)
    test_arange()
    test_arange_inferstop()
    test_arange_like()
    test_arange_like_without_axis()
def test_order():
    """Test sort/argsort/topk/argmax/argmin against a numpy reference over a
    (5, 5, 5, 5) permutation tensor and a large (100, 300096) matrix."""
    ctx = default_context()
    def gt_topk(dat, axis, ret_typ, k, is_ascend):
        """Numpy reference for topk: returns values, indices, or a 0/1 mask
        marking the selected elements."""
        if ret_typ == "indices":
            if is_ascend:
                indices = np.arange(k)
            else:
                indices = np.arange(-1, -k-1, -1)
            ret = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
        elif ret_typ == "value":
            if is_ascend:
                indices = np.arange(k)
            else:
                indices = np.arange(-1, -k-1, -1)
            ret = np.take(np.sort(dat, axis=axis), axis=axis, indices=indices, mode='wrap')
        else:
            # mask output is only implemented for the fixed 5^4 tensor
            assert dat.shape == (5, 5, 5, 5)
            assert axis is None or axis == 1
            ret = np.zeros(dat.shape)
            if is_ascend:
                indices = np.arange(k)
            else:
                indices = np.arange(-1, -k-1, -1)
            gt_argsort = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
            if axis is None:
                ret.ravel()[gt_argsort] = 1
            else:
                # NOTE(review): the loop variable k shadows the parameter k here;
                # harmless (the parameter was already consumed above) but confusing.
                for i in range(5):
                    for j in range(5):
                        for k in range(5):
                            ret[i, gt_argsort[i, :, j, k], j, k] = 1
        return ret
    # a shuffled permutation of 0..5^4-1, so all values are distinct
    dshape = (5, 5, 5, 5)
    a_npy = np.arange(np.prod(dshape)).astype(np.float32)
    np.random.shuffle(a_npy)
    a_npy = a_npy.reshape(dshape)
    a = mx.sym.Variable('a')
    def get_large_matrix():
        """100 independently shuffled rows of 0..300095 (distinct per row)."""
        data = np.array([np.arange(300096).astype(np.float32)])
        data = np.repeat(data, 100, axis=0)
        np.apply_along_axis(np.random.shuffle, 1, data)
        return data
    large_matrix_npy = get_large_matrix()
    for axis in [1, 3, None]:
        for is_ascend in [True, False]:
            b = mx.sym.sort(a, axis=axis, is_ascend=is_ascend)
            if axis is None:
                out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=a_npy.size, is_ascend=is_ascend)
            else:
                out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=5, is_ascend=is_ascend)
            check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, rtol=1e-2, ctx=ctx)
            check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
    # topk(indices) on the large matrix: zero gradient, indices match reference
    b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5)
    check_symbolic_backward(sym=b, location={'a': large_matrix_npy},
                            out_grads=[np.random.normal(size=(100, 5))],
                            expected=[np.zeros((100, 300096))])
    check_symbolic_forward(b, location={'a': large_matrix_npy},
                           expected=[gt_topk(dat=large_matrix_npy, axis=1,
                                             ret_typ="indices", k=5,
                                             is_ascend=is_ascend)])
    # argsort: index output, zero gradient
    b = mx.sym.argsort(a, axis=1, is_ascend=False)
    check_symbolic_backward(sym=b, location={'a': a_npy},
                            out_grads=[np.random.normal(size=(5, 5, 5, 5))],
                            expected=[np.zeros((5, 5, 5, 5))])
    check_symbolic_forward(b, location={'a': a_npy},
                           expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=5,
                                             is_ascend=False)])
    # argmax == descending top-1 index
    b = mx.sym.argmax(a, axis=1, keepdims=True)
    check_symbolic_backward(sym=b, location={'a': a_npy},
                            out_grads=[np.random.normal(size=(5, 5, 5, 5))],
                            expected=[np.zeros((5, 5, 5, 5))])
    check_symbolic_forward(b, location={'a': a_npy},
                           expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
                                             is_ascend=False)])
    # argmin == ascending top-1 index
    b = mx.sym.argmin(a, axis=1, keepdims=True)
    check_symbolic_backward(sym=b, location={'a': a_npy},
                            out_grads=[np.random.normal(size=(5, 5, 5, 5))],
                            expected=[np.zeros((5, 5, 5, 5))])
    check_symbolic_forward(b, location={'a': a_npy},
                           expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
                                             is_ascend=True)])
    # topk value/indices/mask over all float dtypes
    for dtype in [np.float16, np.float32, np.float64]:
        dshape = (5, 5, 5, 5)
        a_npy = np.arange(np.prod(dshape)).astype(dtype)
        np.random.shuffle(a_npy)
        a_npy = a_npy.reshape(dshape)
        a = mx.sym.Variable('a')
        for axis in [1, 3, None]:
            K = [1, 3, 5, 7] if axis is None else [1, 3, 5]
            for k in K:
                for is_ascend in [True, False]:
                    b = mx.sym.topk(a, axis=axis, is_ascend=is_ascend, ret_typ="value", k=k)
                    out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=k, is_ascend=is_ascend)
                    check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, rtol=1e-2, ctx=ctx)
                    check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
        b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5)
        check_symbolic_backward(sym=b, location={'a': large_matrix_npy},
                                out_grads=[np.random.normal(size=(100, 5))],
                                expected=[np.zeros((100, 300096))])
        check_symbolic_forward(b, location={'a': large_matrix_npy},
                               expected=[gt_topk(dat=large_matrix_npy, axis=1,
                                                 ret_typ="indices", k=5, is_ascend=is_ascend)])
        b = mx.sym.topk(a, axis=3, is_ascend=is_ascend, ret_typ="indices", k=3)
        check_symbolic_backward(sym=b, location={'a': a_npy},
                                out_grads=[np.random.normal(size=(5, 5, 5, 3))],
                                expected=[np.zeros((5, 5, 5, 5))])
        check_symbolic_forward(b, location={'a': a_npy},
                               expected=[gt_topk(dat=a_npy, axis=3, ret_typ="indices", k=3,
                                                 is_ascend=False)])
        b = mx.sym.topk(a, axis=1, is_ascend=True, ret_typ="mask", k=3)
        check_symbolic_backward(sym=b, location={'a': a_npy},
                                out_grads=[np.random.normal(size=(5, 5, 5, 5))],
                                expected=[np.zeros((5, 5, 5, 5))])
        check_symbolic_forward(b, location={'a': a_npy},
                               expected=[gt_topk(dat=a_npy, axis=1, ret_typ="mask", k=3,
                                                 is_ascend=True)])
def test_blockgrad():
    """BlockGrad is identity on forward and must not fail on backward."""
    inp = mx.sym.Variable('a')
    blocked = mx.sym.BlockGrad(inp)
    exe = blocked._simple_bind(ctx=default_context(), a=(10, 10))
    values = np.random.rand(10, 10)
    exe.forward(is_train=True, a=values)
    assert_almost_equal(exe.outputs[0], values)
    exe.backward()  # raises if BlockGrad's backward is broken
def test_take_autograd_req():
    """Gather every element of a (2, 8) matrix one-by-one via chained take()
    calls under autograd; the gradient of the sum must be all ones, which
    exercises gradient accumulation ('add' req) across the repeated takes."""
    row_len = 2
    col_len = 8
    shape = (row_len, col_len)
    sc = mx.nd.random.uniform(-1.0, 1.0, shape=shape, dtype="float32")
    sc.attach_grad()
    # i/j are NDArray indices mutated in-place while recording
    i = mx.nd.array([0], dtype="int64")
    j = mx.nd.array([0], dtype="int64")
    with mx.autograd.record(train_mode=True):
        xs = []
        for _ in range(row_len):
            x_i = []
            for _ in range(col_len):
                # take row i, then element j of that row
                x_ij = sc.take(i).squeeze(axis=0).take(j).squeeze(axis=0)
                x_i.append(x_ij)
                j = j + 1
            i = i + 1
            j = j - col_len # reset j
            xs.append(mx.nd.stack(*x_i))
        x = mx.nd.stack(*xs)
        x = x.sum()
    x.backward()
    # every element contributed exactly once to the sum
    assert_almost_equal(np.ones(sc.grad.shape), sc.grad)
@pytest.mark.parametrize('mode,out_of_range', [
    ('clip', True),
    ('wrap', True),
    ('raise', False)
])
@pytest.mark.parametrize('data_ndim', range(1, 5))
@pytest.mark.parametrize('idx_ndim', range(1, 4))
def test_take(mode, out_of_range, data_ndim, idx_ndim):
    """take() forward and backward vs np.take for all clip/wrap/raise modes,
    over random data/index shapes and every axis (including negatives)."""
    def grad_helper(grad_in, axis, idx):
        """Accumulate 1.0 into grad_in at index idx along the given axis."""
        if axis == 0:
            if axis == len(grad_in.shape) - 1:
                grad_in[idx] += 1.0
            else:
                grad_in[idx, :] += 1.0
        elif axis == 1:
            if axis == len(grad_in.shape) - 1:
                grad_in[:, idx] += 1.0
            else:
                grad_in[:, idx, :] += 1.0
        elif axis == 2:
            if axis == len(grad_in.shape) - 1:
                grad_in[:, :, idx] += 1.0
            else:
                grad_in[:, :, idx, :] += 1.0
        elif axis == 3:
            if axis == len(grad_in.shape) - 1:
                grad_in[:, :, :, idx] += 1.0
            else:
                grad_in[:, :, :, idx, :] += 1.0
        elif axis == 4:
            grad_in[:, :, :, :, idx] += 1.0
        else:
            raise ValueError("axis %d is not supported..." % axis)
    for axis in range(-data_ndim, data_ndim):
        data_shape = ()
        for _ in range(data_ndim):
            data_shape += (np.random.randint(low=1, high=5), )
        idx_shape = ()
        for _ in range(idx_ndim):
            idx_shape += (np.random.randint(low=1, high=5), )
        data = mx.sym.Variable('a')
        idx = mx.sym.Variable('indices')
        idx = mx.sym.BlockGrad(idx)  # no gradient flows to the indices
        result = mx.sym.take(a=data, indices=idx, axis=axis, mode=mode)
        exe = result._simple_bind(default_context(), a=data_shape,
                                  indices=idx_shape)
        data_real = np.random.normal(size=data_shape).astype('float32')
        if out_of_range:
            idx_real = np.random.randint(low=-data_shape[axis], high=data_shape[axis], size=idx_shape)
            if mode == 'raise':
                # force every index out of bounds so the op must raise
                idx_real[idx_real == 0] = 1
                idx_real *= data_shape[axis]
        else:
            idx_real = np.random.randint(low=0, high=data_shape[axis], size=idx_shape)
        if axis < 0:
            axis += len(data_shape)
        grad_out = np.ones((data_shape[0:axis] if axis > 0 else ()) + idx_shape + (data_shape[axis+1:] if axis < len(data_shape) - 1 else ()), dtype='float32')
        grad_in = np.zeros(data_shape, dtype='float32')
        exe.arg_dict['a'][:] = mx.nd.array(data_real)
        exe.arg_dict['indices'][:] = mx.nd.array(idx_real)
        exe.forward(is_train=True)
        if out_of_range and mode == 'raise':
            try:
                mx_out = exe.outputs[0].asnumpy()
            except MXNetError as e:
                return
            else:
                # Did not raise exception
                assert False, "did not raise %s" % MXNetError.__name__
        assert_almost_equal(exe.outputs[0], np.take(data_real, idx_real, axis=axis, mode=mode))
        for i in np.nditer(idx_real):
            if mode == 'clip':
                # NOTE(review): upper clip bound should arguably be
                # data_shape[axis] - 1 (last valid index); harmless here since
                # idx_real never reaches data_shape[axis], only negatives clip.
                i = np.clip(i, 0, data_shape[axis])
            grad_helper(grad_in, axis, i)
        exe.backward([mx.nd.array(grad_out)])
        assert_almost_equal(exe.grad_dict['a'], grad_in)
def test_grid_generator():
    """GridGenerator forward/backward for both transform types.

    'affine' maps a 6-element affine matrix to a sampling grid; with the
    identity matrix the (de-normalized) grid must equal a meshgrid.
    'warp' offsets an identity grid by a flow field.  Both also verify the
    grad_req='add' accumulation path.
    """
    # transform_type = affine
    test_case = [(20,21),(4,3),(6,12),(15,17)]
    for target_shape in test_case:
        affine_matrix = mx.sym.Variable('affine')
        grid = mx.sym.GridGenerator(data=affine_matrix,transform_type='affine', target_shape=target_shape)
        exe = grid._simple_bind(ctx=default_context(), affine=(1,6), grad_req='write')
        # check forward with the identity affine transform
        exe.arg_dict['affine'][:] = np.array([[1.0,0,0,0,1.0,0]])
        exe.forward(is_train=True)
        output = exe.outputs[0]
        # de-normalize from [-1, 1] to pixel coordinates
        output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
        output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
        xv, yv = np.meshgrid(np.arange(target_shape[0]), np.arange(target_shape[1]))
        assert_almost_equal(output[0,0], yv.T)
        assert_almost_equal(output[0,1], xv.T)
        # check backward: grad of affine params is out_grad times the
        # homogeneous normalized coordinates [x_norm, y_norm, 1]
        out_grad = np.random.normal(size=(1,2)+target_shape)
        exe.backward(mx.nd.array(out_grad))
        tmp = np.zeros((3,target_shape[0]*target_shape[1]))
        tmp[0] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) % target_shape[1]) * (2.0 / (target_shape[1]-1))
        tmp[1] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) // target_shape[1]) * (2.0 / (target_shape[0]-1))
        tmp[2] = 1
        grad_est = np.dot(out_grad[0].reshape(2,target_shape[0]*target_shape[1]),tmp.T).reshape(1,6)
        assert_almost_equal(exe.grad_dict['affine'], grad_est)
        # check addto (grad_req='add' accumulates onto the existing gradient)
        exe = grid._simple_bind(ctx=default_context(), affine=(1,6), grad_req='add')
        grid_grad_npy = np.random.normal(size=exe.grad_dict['affine'].shape)
        exe.grad_dict['affine'][:] = grid_grad_npy
        exe.arg_dict['affine'][:] = np.array([[1.0, 0, 0, 0, 1.0, 0]])
        exe.forward(is_train=True)
        exe.backward(mx.nd.array(out_grad))
        assert_almost_equal(exe.grad_dict['affine'], grad_est + grid_grad_npy)
    # transform_type = warp
    test_case = [(12,21),(4,3),(6,12)]
    for target_shape in test_case:
        flow = mx.sym.Variable('flow')
        grid = mx.sym.GridGenerator(data=flow,transform_type='warp', target_shape=target_shape)
        exe = grid._simple_bind(ctx=default_context(), flow=(1,2)+target_shape, grad_req='write')
        # check forward: constant flow of 1 shifts the identity grid by one pixel
        exe.arg_dict['flow'][:] = np.ones((1,2)+target_shape)
        exe.forward(is_train=True)
        output = exe.outputs[0].asnumpy()
        output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
        output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
        xv, yv = np.meshgrid(np.arange(target_shape[0])+1, np.arange(target_shape[1])+1)
        assert_almost_equal(output[0,0], yv.T)
        assert_almost_equal(output[0,1], xv.T)
        # check backward: flow gradient is out_grad rescaled by normalization
        out_grad = np.random.normal(size=(1,2)+target_shape)
        exe.backward(mx.nd.array(out_grad))
        grad_est = np.zeros((1,2)+target_shape)
        grad_est[0,0] = out_grad[0,0] / ((target_shape[1]-1.0) / 2.0)
        grad_est[0,1] = out_grad[0,1] / ((target_shape[0]-1.0) / 2.0)
        assert_almost_equal(exe.grad_dict['flow'], grad_est, rtol=1e-3)
        # check addto
        exe_add = grid._simple_bind(ctx=default_context(), flow=(1, 2) + target_shape, grad_req='add')
        flow_grad_npy = np.random.normal(size=exe_add.grad_dict['flow'].shape)
        exe_add.arg_dict['flow'][:] = np.ones((1, 2) + target_shape)
        exe_add.grad_dict['flow'][:] = flow_grad_npy
        exe_add.forward(is_train=True)
        exe_add.backward(mx.nd.array(out_grad))
        assert_almost_equal(exe_add.grad_dict['flow'], grad_est + flow_grad_npy, rtol=1e-3, atol=1e-5)
def test_index2d():
    """batch_take picks one random column per row; compare with numpy fancy
    indexing over 30 random matrix sizes."""
    for _ in range(30):
        rows = np.random.randint(1, 100)
        cols = np.random.randint(1, 500)
        data = mx.random.uniform(-1, 1, shape=(rows, cols), ctx=default_context())
        idx = mx.nd.array(np.random.randint(0, cols, size=rows), ctx=default_context(), dtype='int32')
        taken = mx.nd.batch_take(data, idx)
        assert_almost_equal(taken, data.asnumpy()[np.arange(rows), idx.asnumpy()])
def test_cast():
    """Cast between int32/float32/float16 in every direction: forward casts
    src->dst, backward casts the incoming gradient dst->src."""
    for src_dtype in [np.int32, np.float32, np.float16]:
        for dst_dtype in [np.float32, np.int32, np.float16]:
            x = mx.sym.Variable('x', dtype=src_dtype)
            casted = mx.sym.Cast(x, dtype=dst_dtype)
            exe = casted._simple_bind(ctx=default_context(), x=(10, 10))
            assert exe.arg_arrays[0].dtype == src_dtype
            data = np.random.uniform(-10, 10, size=(10, 10))
            exe.arg_arrays[0][:] = data
            exe.forward(is_train=True)
            assert exe.outputs[0].dtype == dst_dtype
            exe.backward(mx.nd.array(data, dtype=dst_dtype, ctx=default_context()))
            assert_almost_equal(exe.outputs[0], data.astype(src_dtype).astype(dst_dtype), rtol=1e-3, atol=1e-5)
            assert_almost_equal(exe.grad_arrays[0], data.astype(dst_dtype).astype(src_dtype), rtol=1e-3, atol=1e-5)
def get_cast_op_data():
    """Yield float test values probing fp32->fp16 rounding, plus a final NaN.

    Values lie on representable float16 mantissa steps and half-way between
    them (nudged by one fp32 ulp either side), swept across the full float32
    exponent range, including subnormals, for both signs.
    """
    FP16_FRACTION_BITS = 10
    FP32_FRACTION_BITS = 23
    FP32_EXP_MIN = -126
    FP32_EXP_MAX = 127
    denominator = 2 ** (FP16_FRACTION_BITS + 1)
    exponents = range(FP32_EXP_MIN - FP32_FRACTION_BITS - 1, FP32_EXP_MAX + 2)
    for sign_bit in (0, 1):
        for exponent in exponents:
            for numerator in range(denominator):
                fraction = numerator / float(denominator)
                for nudge in (-1.0, 0.0, 1.0):
                    # shift by one fp32 ulp around the half-way point
                    small_delta = nudge / 2 ** FP32_FRACTION_BITS
                    yield (-1.0) ** sign_bit * 2.0 ** exponent * (1.0 + fraction + small_delta)
    # NaN must survive the cast as well
    yield np.nan
# Test requires all platforms to round float32->float16 with same round-to-nearest-even policy.
def test_cast_float32_to_float16():
    """Bitwise-compare Cast/amp_cast fp32->fp16 against numpy's rounding over
    values that straddle fp16 representability boundaries (incl. NaN)."""
    input_np = np.array(list(get_cast_op_data())).astype(np.float32)
    # The intermediate cast to np.float64 below gets around a numpy rounding bug that is fixed
    # as of numpy 1.17 by PR https://github.com/numpy/numpy/pull/12722
    expected_output = input_np.astype(np.float64).astype(np.float16)
    def check_cast(op, input_np, expected_output):
        """Run one cast op symbol and compare element-wise (NaN == NaN)."""
        x = mx.sym.Variable('x', dtype=np.float32)
        sym = op(x, dtype=np.float16)
        ctx = default_context()
        exe = sym._bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float32, ctx=ctx)})
        assert exe.arg_arrays[0].dtype == np.float32
        exe.forward(is_train=True)
        assert exe.outputs[0].dtype == np.float16
        sym_output = exe.outputs[0].asnumpy()
        for fp32_val, model_fp16_val, np_fp16_val in zip(input_np, sym_output, expected_output):
            assert (model_fp16_val == np_fp16_val) or \
                   (np.isnan(model_fp16_val) and np.isnan(np_fp16_val)), \
                'fp32->fp16 cast mismatch: with fp32 value {}, model_fp16 = {}, numpy_fp16 = {}'.format(
                    fp32_val, model_fp16_val, np_fp16_val)
    check_cast(mx.sym.Cast, input_np, expected_output)
    check_cast(mx.sym.amp_cast, input_np, expected_output)
def test_amp_multicast():
    """amp_multicast must widen mixed fp16/fp32 inputs to a common fp32 type
    and preserve values (including NaN).  GPU-only; skipped on CPU."""
    if default_context().device_type == 'cpu':
        return
    x = mx.sym.Variable('x', dtype=np.float16)
    y = mx.sym.Variable('y', dtype=np.float32)
    z = mx.sym.Variable('z', dtype=np.float16)
    ctx = default_context()
    res = mx.sym.amp_multicast(x, y, z, num_outputs=3)
    exe = res._bind(ctx, {'x': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx),
                          'y': mx.nd.random.uniform(shape=(3, 3), dtype=np.float32, ctx=ctx),
                          'z': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx)})
    exe.forward(is_train=True)
    # every output must be widened to float32
    out1, out2, out3 = exe.outputs
    assert out1.asnumpy().dtype == np.float32
    assert out2.asnumpy().dtype == np.float32
    assert out3.asnumpy().dtype == np.float32
    def check_amp_multicast(input_np, expected_output):
        """Feed identical data through all three inputs and compare the first
        output element-wise against the fp32 upcast (NaN == NaN)."""
        x = mx.sym.Variable('x', dtype=np.float16)
        y = mx.sym.Variable('y', dtype=np.float32)
        z = mx.sym.Variable('z', dtype=np.float16)
        ctx = default_context()
        res = mx.sym.amp_multicast(x, y, z, num_outputs=3)
        exe = res._bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float16, ctx=ctx),
                              'y': mx.nd.array(input_np, dtype=np.float32, ctx=ctx),
                              'z': mx.nd.array(input_np, dtype=np.float16, ctx=ctx)})
        exe.forward(is_train=True)
        sym_output = exe.outputs[0].asnumpy()
        for fp32_val, model_fp16_val, np_fp16_val in zip(input_np, sym_output, expected_output):
            assert (model_fp16_val == np_fp16_val) or \
                   (np.isnan(model_fp16_val) and np.isnan(np_fp16_val)), \
                'fp32->fp16 cast mismatch: with fp32 value {}, model_fp16 = {}, numpy_fp16 = {}'.format(
                    fp32_val, model_fp16_val, np_fp16_val)
    input_np = np.array(list(get_cast_op_data()), dtype=np.float16)
    expected_output = input_np.astype(np.float32)
    check_amp_multicast(input_np, expected_output)
def test_all_finite():
    """all_finite / multi_all_finite output 0 iff any input contains non-finite
    values, otherwise 1."""
    data = mx.sym.Variable("data", dtype=np.float32)
    data2 = mx.sym.Variable("data2", dtype=np.float32)
    finite_arr = mx.nd.array([[0, 0]])
    inf_arr = mx.nd.array([[np.inf, np.inf]])
    ctx = default_context()
    def run(sym, feed):
        # bind, run inference-mode forward, and return the scalar flag
        exe = sym._bind(ctx, feed)
        exe.forward(is_train=False)
        return exe.outputs[0].asnumpy()[0]
    single = mx.sym.all_finite(data)
    assert run(single, {'data': inf_arr}) == 0
    assert run(single, {'data': finite_arr}) == 1
    multi = mx.sym.multi_all_finite(data, data2, num_arrays=2)
    assert run(multi, {'data': finite_arr, 'data2': inf_arr}) == 0
    multi = mx.sym.multi_all_finite(data, data2, num_arrays=2)
    assert run(multi, {'data': finite_arr, 'data2': finite_arr}) == 1
def test_repeat():
    """repeat(): forward vs np.repeat, hand-computed backward sums, and a
    numeric gradient check."""
    def test_repeat_forward():
        """Compare against np.repeat for 1-6 dims, flattened and per-axis."""
        ndim_max = 6 # max number of dims of the ndarray
        size_max = 10 # max number of elements in each dim
        repeats = 3
        for ndim in range(1, ndim_max+1):
            shape = ()
            for _ in range(0, ndim):
                shape += (np.random.randint(1, size_max+1), )
            a = np.random.random_sample(size=shape)
            aa = np.repeat(a, repeats)
            b = mx.nd.array(a, ctx=default_context())
            bb = mx.nd.repeat(b, repeats)
            assert_almost_equal(aa, bb)
            for axis in range(0, ndim):
                aa = np.repeat(a, repeats, axis)
                bb = mx.nd.repeat(b, repeats, axis)
                assert_almost_equal(aa, bb)
    def test_repeat_backward(axis):
        """Gradient of repeat sums each group of `repeats` out-grad entries
        back onto the originating input element."""
        data = mx.sym.Variable('data')
        n1 = 3
        n2 = 4
        shape = (n1, n2)
        data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
        arr_data = mx.nd.array(data_tmp)
        arr_grad = mx.nd.empty(shape)
        repeats = 2
        test = mx.sym.repeat(data, repeats=repeats, axis=axis)
        exe = test._bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
        npout_grad = np.random.randint(0, 10, n1 * n2 * repeats)
        if axis == 0:
            npout_grad = npout_grad.reshape(n1 * repeats, n2)
        elif axis == 1:
            npout_grad = npout_grad.reshape(n1, n2 * repeats)
        else:
            raise RuntimeError("Invalid axis value")
        out_grad = mx.nd.array(npout_grad)
        exe.backward(out_grad)
        # reference gradient: sum over each repeated group along the axis
        expected_grad = np.zeros(shape)
        if axis == 0:
            for i in range(shape[0]):
                for j in range(shape[1]):
                    k = i * repeats
                    expected_grad[i][j] = sum(npout_grad[k:k + repeats, j])
        elif axis == 1:
            for j in range(shape[1]):
                for i in range(shape[0]):
                    k = j * repeats
                    expected_grad[i][j] = sum(npout_grad[i, k:k + repeats])
        else:
            raise RuntimeError("Invalid axis value")
        assert_almost_equal(expected_grad, arr_grad, rtol=1e-3)
    def test_repeat_numeric_gradient():
        """Finite-difference gradient check along axis 0."""
        data = mx.sym.Variable('data')
        n1 = 3
        n2 = 4
        shape = (n1, n2)
        data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
        repeats = 2
        test = mx.sym.repeat(data, repeats=repeats, axis=0)
        check_numeric_gradient(test, [data_tmp], numeric_eps=1e-3, rtol=1e-2)
    test_repeat_forward()
    test_repeat_backward(axis=0)
    test_repeat_backward(axis=1)
    test_repeat_numeric_gradient()
def test_reverse():
    """reverse along axes [1, 2] must equal numpy's ::-1 slicing, both
    forward and backward, and pass a numeric-gradient check."""
    shape = (5, 5, 5)
    sample = np.random.uniform(-1, 1, shape)
    out_grad = np.random.uniform(-1, 1, shape)
    sym = mx.sym.reverse(mx.symbol.Variable('data'), axis=[1, 2])
    check_numeric_gradient(sym, [sample], numeric_eps=2E-2)
    # Reversing is its own inverse, so the gradient is the reversed ograd.
    check_symbolic_forward(sym, [sample], [sample[:, ::-1, ::-1]])
    check_symbolic_backward(sym, [sample], [out_grad], [out_grad[:, ::-1, ::-1]])
def test_tile():
    """Tests mx.nd.tile against np.tile: random shapes/reps, zero-size
    tensors and empty reps, a hand-computed backward for a 2-D case, a
    numeric-gradient check, and rejection of invalid reps."""
    def test_normal_case():
        # Random-shape forward comparison with np.tile; reps may be shorter
        # than the array rank (numpy broadcasts it on the left).
        ndim_min = 1
        ndim_max = 5  # max number of dims of the ndarray
        size_max = 10  # max number of elements in each dim
        length_max = 3  # max length of reps
        rep_max = 10  # max number of tiling in each dim
        for ndim in range(ndim_min, ndim_max+1):
            shape = []
            for _ in range(1, ndim+1):
                shape.append(np.random.randint(1, size_max+1))
            shape = tuple(shape)
            a = np.random.randint(0, 100, shape)
            b = mx.nd.array(a, dtype=a.dtype)
            reps_len = np.random.randint(1, length_max+1)
            reps_tuple = ()
            for _ in range(1, reps_len):
                reps_tuple += (np.random.randint(1, rep_max), )
            reps_array = np.asarray(reps_tuple)
            a_tiled = np.tile(a, reps_array)
            b_tiled = mx.nd.tile(b, reps_tuple).asnumpy()
            assert same(a_tiled, b_tiled)
    def test_empty_tensor():
        # Tiling a tensor with a zero-sized axis needs np_shape semantics.
        shape = (2, 3, 0, 4)
        with mx.np_shape():
            a = np.array([], dtype=np.int32).reshape(shape)
            b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
            reps = (2, 4, 6)
            a_tiled = np.tile(a, reps)
            b_tiled = mx.nd.tile(b, reps).asnumpy()
            assert same(a_tiled, b_tiled)
    def test_empty_reps():
        # An empty reps tuple is a plain copy.
        a = np.array([[2, 3, 4], [5, 6, 7]], dtype=np.int32)
        b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
        a_tiled = np.tile(a, ())
        b_tiled = mx.nd.tile(b, ()).asnumpy()
        assert same(a_tiled, b_tiled)
    def test_tile_backward():
        # Backward of tile: each input cell receives the sum of the output
        # gradients at all of its copies.
        data = mx.sym.Variable('data')
        n1 = 2
        n2 = 2
        shape = (n1, n2)
        data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
        arr_data = mx.nd.array(data_tmp)
        arr_grad = mx.nd.empty(shape)
        reps1 = 2
        reps2 = 2
        reps = (reps1, reps2)
        test = mx.sym.tile(data, reps=reps)
        exe = test._bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
        npout_grad = np.random.randint(0, 10, n1 * n2 * reps1 * reps2).reshape(n1 * reps1, n2 * reps2)
        out_grad = mx.nd.array(npout_grad)
        exe.backward(out_grad)
        expected_grad = np.zeros(shape)
        # The copies of input cell (i, j) sit at strides (n1, n2)... the
        # tiled layout places them at steps of reps1/reps2 in each axis.
        for i in range(shape[0]):
            for j in range(shape[1]):
                expected_grad[i][j] += sum(sum(npout_grad[i:(n1 * reps1):reps1, j:(n2 * reps2):reps2]))
        assert_almost_equal(expected_grad, arr_grad, rtol=1e-3)
    def test_tile_numeric_gradient():
        # Finite-difference gradient check for tile.
        data = mx.sym.Variable('data')
        n1 = 2
        n2 = 2
        shape = (n1, n2)
        data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
        reps1 = 2
        reps2 = 2
        reps = (reps1, reps2)
        test = mx.sym.tile(data, reps=reps)
        check_numeric_gradient(test, [data_tmp], numeric_eps=1e-2, rtol=1e-2)
    def test_invalid_reps():
        # Negative or zero entries in reps must raise MXNetError.
        data = mx.nd.arange(16).reshape((4, 4))
        assert_exception(mx.nd.tile, MXNetError, data, (1, 2, -3))
        assert_exception(mx.nd.tile, MXNetError, data, (1, 0, 3))
    test_normal_case()
    # Zero-size shapes require numpy-shape semantics.
    with mx.np_shape():
        test_empty_tensor()
    test_empty_reps()
    test_tile_backward()
    test_tile_numeric_gradient()
    test_invalid_reps()
def test_one_hot():
    """mx.nd.one_hot against a hand-built numpy reference: several index
    dtypes, a zero-size index tensor, and depth == 0."""
    def test_normal_case(index_type=np.int32):
        # Random indices across ranks 1..6, including out-of-range ones,
        # which must produce an all-off row.
        ndim_max = 6
        dim_size_max = 20
        depth = int(dim_size_max / 2)
        on_value = 1
        off_value = 0
        for ndim in range(1, ndim_max+1):
            shape = ()
            for _ in range(1, ndim+1):
                shape += (np.random.randint(1, dim_size_max+1), )
            indices = np.random.randint(-dim_size_max, dim_size_max+1,
                                        size=np.prod(shape)).reshape(shape)
            mx_one_hot_array = mx.nd.one_hot(
                mx.nd.array(indices, ctx=default_context(), dtype=index_type),
                depth=depth, dtype=np.int32)
            # Reference: one row per flattened index, hot only when the
            # index lies inside [0, depth).
            expected_array = np.zeros((np.prod(shape), depth), dtype=np.int32)
            expected_array[:] = off_value
            indices_1d = indices.flatten()
            row = 0
            for idx in indices_1d:
                if 0 <= idx < depth:
                    expected_array[row, idx] = on_value
                row += 1
            expected_array = expected_array.reshape(shape + (depth, ))
            one_hot_array = mx_one_hot_array.asnumpy()
            assert same(expected_array, one_hot_array)
    def test_empty_indices():
        # A zero-size index tensor yields a zero-size output (np_shape mode).
        shape = (2, 0, 9, 3)
        with mx.np_shape():
            indices = np.array([]).reshape(shape)
            depth = 10
            mx_one_hot_array = mx.nd.one_hot(
                mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
                depth=depth, dtype=np.int32
            ).asnumpy()
            expected_array = np.array([], dtype=np.int32).reshape(shape + (depth,))
            assert same(expected_array, mx_one_hot_array)
    def test_zero_depth():
        # depth == 0 yields a trailing zero-sized axis.
        shape = (2, 4, 9, 3)
        indices = np.ones(shape)
        depth = 0
        mx_one_hot_array = mx.nd.one_hot(
            mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
            depth=depth, dtype=np.int32).asnumpy()
        expected_array = np.array([], dtype=np.int32).reshape(shape + (depth, ))
        assert same(expected_array, mx_one_hot_array)
    test_normal_case(index_type=np.int32)
    test_normal_case(index_type=np.float64)
    test_normal_case(index_type=np.float32)
    test_normal_case(index_type=np.float16)
    # Zero-size outputs require numpy-shape semantics.
    with mx.np_shape():
        test_empty_indices()
        test_zero_depth()
def test_where():
    """Tests mx.sym/nd.where: forward against a numpy reference, backward
    under 'write' and 'add' grad requests, numeric gradients, rejection of
    mismatched shapes, and the 1-D (row-selecting) condition form.

    Fixes: ``np.int`` (removed in NumPy 1.24; it was an alias for builtin
    ``int``) replaced by ``int``; the vacuous assertion in ``test_1d_cond``
    (``expect_out.all() == out.all()`` compares two scalars and passed even
    for differing arrays) replaced by an elementwise comparison.
    """
    def get_forward_expected_output(condition, x, y):
        # Numpy reference: elementwise select when shapes match; row select
        # when condition is a vector of length x.shape[0].
        original_shape = x.shape
        out = np.zeros(original_shape)
        if condition.shape == x.shape:
            for index, c in np.ndenumerate(condition):
                if c != 0:
                    out[index] = x[index]
                else:
                    out[index] = y[index]
        elif condition.shape == (x.shape[0], ):
            s = x.shape
            m = s[0]
            n = int(np.prod(s)/s[0])
            x2d = x.reshape((m, n))
            y2d = y.reshape((m, n))
            out = out.reshape((m, n))
            for i in range(0, m):
                if condition[i] != 0:
                    for j in range(0, n):
                        out[i, j] = x2d[i, j]
                else:
                    for j in range(0, n):
                        out[i, j] = y2d[i, j]
        else:
            raise RuntimeError("Invalid condition shape for where op")
        out = out.reshape(original_shape)
        return out
    def get_forward_inputs_same_shape(shape):
        # x in [1, 5], y in [7, 10] so the two sources never overlap.
        condition_np = np.random.randint(0, 2, np.prod(shape)).reshape(shape)
        x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
        y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
        return condition_np, x_np, y_np
    def get_forward_inputs_condition_vector(shape):
        # Same as above but with a 1-D condition over the leading axis.
        condition_np = np.random.randint(0, 2, shape[0])
        x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
        y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
        return condition_np, x_np, y_np
    def get_backward_input(shape):
        return np.random.randint(20, 30, np.prod(shape)).reshape(shape)
    def get_backward_expected_outputs(grad_in, condition):
        # Gradient routing: grad flows to x where condition holds, to y
        # elsewhere; condition itself always receives zero gradient.
        shape = grad_in.shape
        grad_cond = np.zeros(condition.shape)
        grad_x = np.empty(shape)
        grad_y = np.empty(shape)
        for index, c in np.ndenumerate(condition):
            if 0 != c:
                grad_x[index] = grad_in[index]
                grad_y[index] = 0
            else:
                grad_x[index] = 0
                grad_y[index] = grad_in[index]
        return grad_cond, grad_x, grad_y
    def test_where_helper(shape, same_shape):
        if same_shape:
            condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
        else:
            condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
        out_expected = get_forward_expected_output(condition_np, x_np, y_np)
        grad_in_np = get_backward_input(shape)
        grad_expected_cond, grad_expected_x, grad_expected_y\
            = get_backward_expected_outputs(grad_in_np, condition_np)
        condition = mx.sym.Variable('condition')
        x = mx.sym.Variable('x')
        y = mx.sym.Variable('y')
        # np.int was just an alias for the builtin int (removed NumPy 1.24).
        grad_in_mx = mx.nd.array(grad_in_np, dtype=int)
        where_sym = mx.sym.where(condition, x, y)
        # test req='write'
        where_exe_write = where_sym._simple_bind(ctx=default_context(),
                                                 condition=condition_np.shape,
                                                 x=x_np.shape, y=y_np.shape,
                                                 grad_req='write')
        # test forward req='write'
        outputs = where_exe_write.forward(is_train=True, condition=condition_np,
                                          x=x_np, y=y_np)
        assert same(outputs[0].asnumpy(), out_expected)
        # test backward req='write'
        where_exe_write.backward(grad_in_mx.astype('float32'))
        assert same(where_exe_write.grad_dict['x'].asnumpy(), grad_expected_x)
        assert same(where_exe_write.grad_dict['y'].asnumpy(), grad_expected_y)
        assert same(where_exe_write.grad_dict['condition'].asnumpy(), grad_expected_cond)
        # test req='add': gradients must accumulate onto the preset values
        x_grad_init = np.random.randint(30, 40, np.prod(shape)).reshape(shape)
        y_grad_init = np.random.randint(40, 50, np.prod(shape)).reshape(shape)
        where_exe_add = where_sym._simple_bind(ctx=default_context(),
                                               condition=condition_np.shape,
                                               x=x_np.shape, y=y_np.shape,
                                               grad_req='add')
        where_exe_add.grad_dict['x'][:] = x_grad_init
        where_exe_add.grad_dict['y'][:] = y_grad_init
        # test forward req='add'
        outputs = where_exe_add.forward(is_train=True, condition=condition_np, x=x_np, y=y_np)
        assert same(outputs[0].asnumpy(), out_expected)
        # test backward req='add'
        where_exe_add.backward(grad_in_mx.astype('float32'))
        x_ograd = where_exe_add.grad_dict['x'].asnumpy()
        y_ograd = where_exe_add.grad_dict['y'].asnumpy()
        assert same(x_ograd, grad_expected_x+x_grad_init)
        assert same(y_ograd, grad_expected_y+y_grad_init)
    def test_where_numeric_gradient(shape, same_shape):
        # Finite-difference check w.r.t. x and y (condition is not
        # differentiable).
        condition = mx.sym.Variable('condition')
        x = mx.sym.Variable('x')
        y = mx.sym.Variable('y')
        where_sym = mx.sym.where(condition, x, y)
        if same_shape:
            condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
        else:
            condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
        check_numeric_gradient(where_sym, [condition_np, x_np, y_np], grad_nodes=['x', 'y'])
    def test_invalid_shape():
        # A condition whose length matches neither x's shape nor x.shape[0]
        # must raise, in both symbolic and imperative APIs.
        condition = mx.sym.Variable('condition')
        x = mx.sym.Variable('x')
        y = mx.sym.Variable('y')
        where_sym = mx.sym.where(condition, x, y)
        assert_exception(lambda: where_sym.eval(x=mx.nd.array([[2,3],[4,5],[6,7]]),
                                                y=mx.nd.array([[8,9],[10,11],[12,13]]),
                                                condition=mx.nd.array([1,0])), MXNetError)
        assert_exception(lambda: mx.nd.where(x=mx.nd.array([[2,3],[4,5],[6,7]]),
                                             y=mx.nd.array([[8,9],[10,11],[12,13]]),
                                             condition=mx.nd.array([1,0])), MXNetError)
    def test_1d_cond():
        cond = mx.nd.array([1, 0, 1])
        x = mx.nd.array([[2, 3], [4, 5], [6, 7]])
        y = mx.nd.array([[7, 8], [9, 10], [10, 11]])
        expect_out = np.array([[2, 3], [9, 10], [6, 7]])
        out = mx.nd.where(cond, x, y).asnumpy()
        # Fixed: the old `expect_out.all() == out.all()` compared two
        # scalar truth values and passed vacuously.
        assert (expect_out == out).all()
    test_where_helper((5, 9), True)
    test_where_helper((5, 9), False)
    test_where_helper((5, 7, 9), True)
    test_where_helper((5, 7, 9), False)
    test_where_helper((10, 8, 15, 3), True)
    test_where_helper((10, 8, 15, 3), False)
    test_where_numeric_gradient((5, 9), True)
    test_where_numeric_gradient((5, 9), False)
    test_where_numeric_gradient((5, 7, 9), True)
    test_where_numeric_gradient((5, 7, 9), False)
    test_invalid_shape()
    test_1d_cond()
def test_softmin():
    """softmin(x) == softmax(-x): forward and backward (all grad_req
    modes) plus, for fp32/fp64, a numeric-gradient check."""
    for ndim in range(1, 5):
        for dtype in [np.float16, np.float32, np.float64]:
            is_fp16 = dtype is np.float16
            rtol, atol = (1e-2, 5e-3) if is_fp16 else (1e-3, 1e-3)
            shape = np.random.randint(1, 5, size=ndim)
            axis = np.random.randint(-ndim, ndim)
            data = np.random.uniform(-2, 2, size=shape).astype(dtype)
            if is_fp16:
                # Shrink fp16 inputs to keep exp() well-conditioned.
                data = data / 10
            sym = mx.sym.softmin(axis=axis)
            fwd_ref = np_softmax(-data, axis=axis)
            bwd_ref = np.zeros(shape)  # grad of sum(softmin) is identically 0
            check_symbolic_forward(sym, [data], [fwd_ref], atol=atol, dtype=dtype)
            for req in ['null', 'add', 'write']:
                check_symbolic_backward(sym, [data], [np.ones(fwd_ref.shape)],
                                        [bwd_ref], rtol=rtol, atol=atol,
                                        grad_req=req, dtype=dtype)
            if not is_fp16:
                check_numeric_gradient(sym, [data], rtol=rtol, atol=atol, dtype=dtype)
def test_new_softmax():
    """softmax forward/backward against the numpy reference for random
    shapes and axes of rank 1-4."""
    for ndim in range(1, 5):
        shape = np.random.randint(1, 5, size=ndim)
        axis = np.random.randint(-ndim, ndim)
        data = np.random.uniform(-2, 2, size=shape)
        sym = mx.sym.softmax(axis=axis)
        fwd_ref = np_softmax(data, axis=axis)
        bwd_ref = np.zeros(shape)  # grad of sum(softmax) is identically 0
        check_symbolic_forward(sym, [data], [fwd_ref])
        for grad_req in ('null', 'add', 'write'):
            check_symbolic_backward(sym, [data], [np.ones(fwd_ref.shape)], [bwd_ref],
                                    rtol=1e-2, atol=1e-3, grad_req=grad_req)
        check_numeric_gradient(sym, [data], rtol=1e-2, atol=1e-3)
def test_softmax_with_temperature():
    """Temperature-scaled softmax over axis 0 against the numpy reference
    for integer temperatures 1..10."""
    for ndim in range(1, 5):
        shape = np.random.randint(1, 5, size=ndim)
        data = np.random.uniform(-2, 2, size=shape)
        for temperature in range(1, 11):
            sym = mx.sym.softmax(axis=0, temperature=temperature)
            fwd_ref = np_softmax(data, axis=0, temperature=temperature)
            bwd_ref = np.zeros(shape)
            check_symbolic_forward(sym, [data], [fwd_ref], rtol=0.05, atol=1e-3)
            check_symbolic_backward(sym, [data], [np.ones(shape)], [bwd_ref],
                                    rtol=0.05, atol=1e-3)
            check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3)
def test_log_softmax():
    """log_softmax (addressed via a negative axis) against
    log(np_softmax), plus a numeric-gradient check."""
    for ndim in range(1, 5):
        for _ in range(5):
            shape = np.random.randint(1, 5, size=ndim)
            axis = np.random.randint(0, ndim)
            data = np.random.uniform(-2, 2, size=shape)
            sym = mx.sym.log_softmax(axis=axis - ndim)
            # The +1e-20 only guards log(0) in the numpy reference.
            reference = np.log(np_softmax(data, axis=axis) + 1e-20)
            check_symbolic_forward(sym, [data], [reference], rtol=1e-3, atol=1e-4)
            check_numeric_gradient(sym, [data], rtol=1e-1, atol=1e-2)
def test_softmax_with_large_inputs():
    """softmax over a size-1 axis must return exactly 1 even for inputs at
    the extremes of the fp32 range (no NaN/Inf from overflowing exp)."""
    def run_case(input_data, expected):
        data = mx.sym.Variable('data')
        out = data.softmax(axis=1)  # axis 1 has size 1 -> all-ones result
        exe = out._bind(default_context(), args={'data': input_data})
        exe.forward()[0].wait_to_read()
        result = exe.outputs[0][0][0][0]
        assert_almost_equal(result, expected, rtol=1e-5, atol=1e-5)
    for value in (-1e30, 1e30, -3.4e38, 3.4e38):
        run_case(mx.nd.array([[[[value, value]]]]), np.array([1.0, 1.0]))
@with_environment('MXNET_SAFE_ACCUMULATION', '1')
def test_softmax_dtype():
    """With MXNET_SAFE_ACCUMULATION enabled, lower-precision softmax
    variants must closely match a higher-precision reference, both in the
    forward output and the input gradient."""
    def check_dtypes_almost_equal(op_name,
                                  atol, rtol,
                                  grad_atol, grad_rtol,
                                  idtype, ref_dtype, odtype=None):
        # Run the same op on the same data cast to idtype and ref_dtype and
        # compare outputs and gradients within the given tolerances.
        op = getattr(mx.nd, op_name)
        input_data = mx.random.uniform(shape=(100, 500))
        dtype_input = input_data.astype(idtype)
        ref_input = input_data.astype(ref_dtype)
        dtype_input.attach_grad()
        ref_input.attach_grad()
        with mx.autograd.record():
            dtype_softmax = op(dtype_input, axis=-1, dtype=odtype)
            ref_softmax = op(ref_input, axis=-1, dtype=odtype)
        assert_almost_equal(dtype_softmax, ref_softmax, rtol=rtol, atol=atol)
        dtype_softmax.backward()
        ref_softmax.backward()
        assert_almost_equal(dtype_input.grad, ref_input.grad, rtol=grad_rtol, atol=grad_atol)
    # fp16-vs-fp32 and fp32-vs-fp64, with and without an explicit output dtype.
    check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
    check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
    check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
    check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
    check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
    check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
    check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
    check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
    # log_softmax needs looser tolerances, especially for fp16.
    check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
                              'float16', 'float32')
    check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
                              'float16', 'float32', 'float32')
    check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
                              'float32', 'float64')
    check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
                              'float32', 'float64', 'float64')
def test_softmax_with_length():
    """softmax with use_length=True: positions beyond each sequence's
    length must receive zero probability and zero gradient."""
    def np_softmax_with_length(data, length):
        # Reference: softmax over axis 1 restricted to the first
        # length[i, j] entries of each (i, :, j) slice; the rest stay 0.
        res = np.zeros(data.shape)
        for i in range(length.shape[0]):
            for j in range(length.shape[1]):
                leng = int(length[i, j])
                res[i, 0:leng, j] = np_softmax(data[i, 0:leng, j])
        return res
    ndim = 3
    shape = rand_shape_nd(ndim, dim=10)
    # length has one entry per (axis0, axis2) pair; axis 1 is softmaxed.
    len_shape = list(shape)
    del len_shape[1]
    len_shape = tuple(len_shape)
    for dtype in [np.float16, np.float32, np.float64]:
        mx_data = rand_ndarray(shape, dtype=dtype)
        np_data = mx_data.asnumpy()
        np_length = np.random.randint(1, shape[1] + 1, len_shape)
        mx_length = mx.nd.array(np_length, dtype=np.int32)
        np_out = np_softmax_with_length(np_data, np_length)
        data = mx.sym.Variable("data")
        length = mx.sym.Variable("length")
        mx_sym = mx.sym.softmax(data=data, length=length, use_length=True, axis=1)
        location = {"data": mx_data, "length": mx_length}
        rtol = 1e-2 if dtype == np.float16 else 1e-3
        atol = 1e-4 if dtype == np.float16 else 1e-5
        check_symbolic_forward(mx_sym, location, [np_out], rtol=rtol, atol=atol, dtype="asnumpy")
        # length is integral and therefore gets an all-zero gradient.
        check_symbolic_backward(mx_sym, location, [np.ones(shape, dtype=dtype)],
                                [np.zeros(shape), np.zeros(len_shape, dtype=np.int32)],
                                rtol=1e-2, atol=2e-3 if dtype == np.float16 else 1e-3, dtype="asnumpy")
def np_softmax(x, axis=-1, temperature=1.0, normalize=True):
    """NumPy reference softmax along *axis* with optional temperature.

    When *normalize* is True the input is first shifted by its max along
    *axis* for numerical stability (mathematically a no-op).
    """
    shifted = x - np.max(x, axis=axis, keepdims=True) if normalize else x
    exps = np.exp(shifted / temperature)
    return exps / np.sum(exps, axis=axis, keepdims=True)
def np_masked_softmax(data, mask, axis=-1, temperature=1.0, normalize=True):
    """NumPy reference masked softmax: masked-out slots are filled with a
    large negative sentinel before the softmax, then zeroed in the output."""
    # fp16 cannot represent -1e18; use a smaller sentinel there.
    fill_value = -1e4 if data.dtype == np.float16 else -1e18
    masked_data = np.where(mask, data, fill_value)
    probs = np_softmax(masked_data, axis=axis, temperature=temperature,
                       normalize=normalize)
    return probs * mask
def np_masked_softmax_grad(out, grad_out, axis=-1, temperature=1.0):
    """NumPy reference softmax gradient: out * (g - sum(out * g)) / T."""
    weighted_sum = np.sum(out * grad_out, axis=axis, keepdims=True)
    return out * (grad_out - weighted_sum) / temperature
def np_masked_log_softmax_grad(out, grad_out, mask, axis=-1, temperature=1.0):
    """NumPy reference log-softmax gradient with masked slots zeroed both
    in the incoming gradient and in the result."""
    masked_grad = np.where(mask, grad_out, 0)
    grad_sum = np.sum(masked_grad, axis=axis, keepdims=True)
    raw = (masked_grad - np.exp(out) * grad_sum) / temperature
    return np.where(mask, raw, 0)
@pytest.mark.parametrize('dtype', [np.float16, np.float32, np.float64])
@pytest.mark.parametrize('axis', [0, -1, -2, -3])
@pytest.mark.parametrize('ndims', [3, 4, 5])
@pytest.mark.parametrize('n_broadcast_axis', [0, 1, 2])
@pytest.mark.parametrize('temperature', [1, 5, 9, 11])
@pytest.mark.parametrize('normalize', [True])
@pytest.mark.flaky
def test_masked_softmax(dtype, axis, ndims, n_broadcast_axis, temperature, normalize):
    """masked_softmax forward/backward against the numpy reference, with the
    mask broadcast along up to *n_broadcast_axis* randomly chosen axes.

    Fix: ``np.bool`` (an alias of the builtin ``bool``, removed in
    NumPy 1.24) replaced with ``bool`` — identical behavior.
    """
    n_broadcast_axis = min(n_broadcast_axis, ndims - 1)
    shape = rand_shape_nd(ndims, dim=10)
    mx_data = rand_ndarray(shape, dtype=dtype)
    # Pick n_broadcast_axis distinct axes whose mask dimension collapses to 1.
    bcst_dims = []
    while len(bcst_dims) < n_broadcast_axis:
        ax = np.random.randint(0, ndims)
        if ax not in bcst_dims:
            bcst_dims.append(ax)
    shape_mask = list(shape)
    for i in bcst_dims:
        shape_mask[i] = 1
    np_data = mx_data.asnumpy()
    np_mask = np.random.randint(0, 2, shape_mask)
    mx_mask = mx.nd.array(np_mask, dtype=bool)
    mx_grad = rand_ndarray(shape, dtype=dtype)
    np_grad = mx_grad.asnumpy()
    np_out = np_masked_softmax(np_data, np_mask, axis,
                               temperature, normalize)
    np_grad_out = np_masked_softmax_grad(np_out, np_grad,
                                         axis, temperature)
    data = mx.sym.Variable("data")
    mask = mx.sym.Variable("mask")
    mx_sym = mx.sym.masked_softmax(data=data, mask=mask,
                                   temperature=temperature, axis=axis,
                                   normalize=normalize)
    location = {"data": mx_data, "mask": mx_mask}
    rtol = 1e-2 if dtype == np.float16 else 1e-3
    atol = 1e-4 if dtype == np.float16 else 1e-5
    check_symbolic_forward(mx_sym, location, [np_out], rtol=rtol, atol=atol,
                           dtype="asnumpy", equal_nan=True)
    # The (boolean) mask always receives a zero gradient.
    check_symbolic_backward(mx_sym, location, [mx_grad],
                            [np_grad_out, np.zeros(shape, dtype=bool)],
                            rtol=1e-2, atol=2e-3 if dtype == np.float16 else 1e-3,
                            dtype="asnumpy", equal_nan=True)
@pytest.mark.parametrize('dtype', ['float32'])
@pytest.mark.parametrize('ndims', [1, 2, 3, 4, 5])
def test_masked_log_softmax(dtype, ndims):
    """masked_log_softmax forward (masked slots -> -inf) and backward
    against the numpy reference.

    Fix: ``np.bool`` (an alias of the builtin ``bool``, removed in
    NumPy 1.24) replaced with ``bool`` — identical behavior.
    """
    shape = np.random.randint(1, 5, size=ndims)
    axis = np.random.randint(0, ndims)
    mx_data = rand_ndarray(shape, dtype=dtype)
    np_data = mx_data.asnumpy()
    np_mask = np.random.randint(0, 2, shape)
    mx_mask = mx.nd.array(np_mask, dtype=bool)
    mx_grad = rand_ndarray(shape, dtype=dtype)
    np_grad = mx_grad.asnumpy()
    # Reference: log of the masked softmax; masked slots become -inf.
    np_out = np.log(np_masked_softmax(np_data, np_mask, axis)+1e-20) * np_mask
    np_out_inf = np.where(np_mask, np_out, -np.inf)
    np_grad_out = np_masked_log_softmax_grad(np_out, np_grad, np_mask, axis)
    data = mx.sym.Variable("data")
    mask = mx.sym.Variable("mask")
    mx_sym = mx.sym.masked_log_softmax(data=data, mask=mask, axis=axis-ndims)
    location = {"data": mx_data, "mask": mx_mask}
    rtol = 1e-2 if dtype == np.float16 else 1e-3
    atol = 1e-4 if dtype == np.float16 else 1e-5
    check_symbolic_forward(mx_sym, location, [np_out_inf], rtol=rtol, atol=atol, dtype="asnumpy")
    # The (boolean) mask always receives a zero gradient.
    check_symbolic_backward(mx_sym, location, [mx_grad],
                            [np_grad_out, np.zeros(shape, dtype=bool)],
                            rtol=1e-2, atol=2e-3 if dtype == np.float16 else 1e-3,
                            dtype="asnumpy", equal_nan=True)
def test_pick():
    """mx.nd.pick in 'clip' and 'wrap' modes versus numpy advanced
    indexing, plus a numeric-gradient check w.r.t. the data."""
    def test_pick_helper(index_type=np.int32):
        for mode in ['clip', 'wrap']:
            ndim = np.random.randint(1, 5)
            bshape = np.random.randint(1, 10, size=ndim)
            axis = np.random.randint(0, ndim)
            sshape = bshape.copy()
            sshape[axis] = 1
            data = np.random.uniform(-1, 1, size=bshape)
            # 'wrap' allows out-of-range indices (taken modulo the axis
            # length); 'clip' is only fed in-range indices here.
            if mode == 'wrap':
                index = np.random.randint(-2*bshape[axis], 2*bshape[axis], size=sshape)
            else:
                index = np.random.randint(0, bshape[axis], size=sshape)
            # Build a numpy advanced-indexing tuple equivalent to pick:
            # the picked index along `axis`, broadcast aranges elsewhere.
            exp = []
            for i in range(ndim):
                if i == axis:
                    if mode == 'wrap':
                        exp.append(index % bshape[axis])
                    else:
                        exp.append(index)
                else:
                    ishape = [1 for _ in range(ndim)]
                    ishape[i] = bshape[i]
                    exp.append(np.arange(bshape[i]).reshape(ishape))
            expected = data[exp]
            data = mx.nd.array(data, dtype='float32')
            index = mx.nd.array(index, dtype=index_type)
            out = mx.nd.pick(data, index, axis=axis, keepdims=True, mode=mode)
            assert_almost_equal(out.asnumpy(), expected)
            # Numeric gradient w.r.t. data only (index isn't differentiable).
            data_holder = data
            index_holder = index
            data = mx.sym.Variable('data')
            index = mx.sym.Variable('index')
            sym = mx.sym.pick(data, index, axis=axis, keepdims=True, mode=mode)
            check_numeric_gradient(sym, [data_holder, index_holder], grad_nodes=['data'])
    test_pick_helper(np.int32)
    test_pick_helper(np.float32)
def check_ctc_loss(acts, labels, loss_truth, contrib=False):
    """Run CTC loss forward in train and eval modes, compare against the
    ground truth (when given), and numerically check the input gradient."""
    input_sym = mx.sym.Variable('input')
    label_sym = mx.sym.Variable('labels')
    ctc_op = mx.sym.contrib.ctc_loss if contrib else mx.sym.ctc_loss
    ctc = ctc_op(input_sym, label_sym)
    ctx = default_context()
    exe = ctc._bind(ctx=ctx, args=[mx.nd.array(acts, ctx=ctx),
                                   mx.nd.array(labels, ctx=ctx)])
    # The loss must not depend on whether gradients are being recorded.
    exe.forward(is_train=True)
    loss_train = exe.outputs[0].copy()
    exe.forward(is_train=False)
    loss_eval = exe.outputs[0]
    assert_almost_equal(loss_train, loss_eval)
    # Compare against the reference loss, if one was supplied.
    if loss_truth is not None:
        assert_almost_equal(loss_train, loss_truth)
    # Finite-difference check of d(loss)/d(input).
    check_numeric_gradient(ctc, [acts, labels], grad_nodes=['input'],
                           rtol=0.05, atol=1e-3)
def test_ctc_loss():
    """CTC loss against reference values from Torch's WarpCTC, for both the
    stock and the contrib op, with float and integer label dtypes."""
    # Test 1: check that batches are same + check against Torch WarpCTC
    acts = np.array([
        [[1.2, 3.4, 1.2, -0.1, -2.34], [1.2, 3.4, 1.2, -0.1, -2.34]],
        [[0.1, 0.2, 0.3, 0.22, 0.123], [0.1, 0.2, 0.3, 0.22, 0.123]],
        [[-15, -14, -13, -12, -11], [-15, -14, -13, -12, -11]]],
        dtype=np.float32)
    labels = np.array([[2, 3, 0], [2, 3, 0]])
    true_loss = np.array([4.04789, 4.04789], dtype=np.float32)  # from Torch
    for contrib in [False, True]:
        check_ctc_loss(acts, labels, true_loss, contrib=contrib)
    # Test 2: distinct sequences per batch element
    acts2 = np.array([
        [[-5, -4, -3, -2, -1], [1.2, 3.4, 1.2, -0.1, -2.34]],
        [[-10, -9, -8, -7, -6], [0.1, 0.2, 0.3, 0.22, 0.123]],
        [[-15, -14, -13, -12, -11], [-15, -14.2, -13.5, -12.2, -11.22]]], dtype=np.float32)
    labels2 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.float32)
    true_loss = np.array([7.3557, 5.4091], dtype=np.float32)  # from Torch
    for contrib in [False, True]:
        check_ctc_loss(acts2, labels2, true_loss, contrib=contrib)
    # Test 3: integer-typed labels must give the same losses
    labels3 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.int32)
    true_loss = np.array([7.3557, 5.4091], dtype=np.float32)  # from Torch
    for contrib in [False, True]:
        check_ctc_loss(acts2, labels3, true_loss, contrib=contrib)
def test_ctc_loss_with_large_classes():
    """CTC loss smoke test with a large (6000-class) vocabulary against
    precomputed loss values."""
    ctx = default_context()
    num_classes = 6000
    seq_len = 8
    batch_size = 2
    # Deterministic activations: column i is arange(num_classes) rolled by
    # i, scaled by 1/13; columns are laid out then reshaped to (T, N, C).
    columns = [np.roll(np.arange(num_classes, dtype=np.float32),
                       i).reshape(num_classes, 1) / 13
               for i in range(seq_len * batch_size)]
    data = np.hstack(columns).reshape(seq_len, batch_size, num_classes)
    label = np.array([
        [100, 200, 300, 400, 500, 0, 0, 0],
        [1000, 2000, 3000, 4000, 0, 5000, 0, 0]], dtype=np.int32)
    loss = mx.nd.ctc_loss(data=mx.nd.array(data), label=mx.nd.array(label))
    expected_loss = np.array([688.02826, 145.34462])
    assert_almost_equal(loss, expected_loss)
def test_ctc_loss_grad():
    """CTC loss values and input gradients against TensorFlow reference
    numbers, for blank at 'first'/'last' and stock/contrib ops."""
    def check_ctc_loss_grad(blank_label, contrib=False):  # from tf
        vocab_size = 5
        max_label_len = 5
        # Labels are padded with -1 when blank is 'last', 0 when 'first'.
        padding_mask = -1+ (blank_label=='first')
        # Reference per-frame probabilities and loss gradients (TF values).
        targets_0 = [0, 1, 2, 1, 0]
        loss_log_prob_0 = -3.34211
        input_prob_matrix_0 = np.asarray(
            [[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
             [0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
             [0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
             [0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
             [0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
            dtype=np.float32)
        gradient_log_prob_0 = np.asarray(
            [[-0.366234, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
             [0.111121, -0.411608, 0.278779, 0.0055756, 0.00569609, 0.010436],
             [0.0357786, 0.633813, -0.678582, 0.00249248, 0.00272882, 0.0037688],
             [0.0663296, -0.356151, 0.280111, 0.00283995, 0.0035545, 0.00331533],
             [-0.541765, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
            dtype=np.float32)
        targets_1 = [0, 1, 1, 0]
        loss_log_prob_1 = -5.42262
        input_prob_matrix_1 = np.asarray(
            [[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
             [0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
             [0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
             [0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
             [0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
            dtype=np.float32)
        gradient_log_prob_1 = np.asarray(
            [[-0.69824, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
             [0.24082, -0.602467, 0.0557226, 0.0546814, 0.0557528, 0.19549],
             [0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, -0.797544],
             [0.280884, -0.570478, 0.0326593, 0.0339046, 0.0326856, 0.190345],
             [-0.576714, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
            dtype=np.float32)
        # Stack per-time-step batches; the two NaN-filled extra time steps
        # beyond the declared sequence lengths must not affect loss/grad.
        inputs = [
            np.vstack(
                [input_prob_matrix_0[t, :], input_prob_matrix_1[t, :]])
            for t in range(5)
        ] + 2 * [np.nan * np.ones((2, vocab_size+1), np.float32)]
        inputs = np.log(np.asarray(inputs, dtype=np.float32))
        grad_truth = np.array([
            np.vstack(
                [gradient_log_prob_0[t, :], gradient_log_prob_1[t, :]])
            for t in range(5)
        ] + 2 * [np.zeros((2, vocab_size+1), np.float32)])
        # TF reference data places blank last; rotate classes when testing
        # the blank-first convention.
        if blank_label == 'first':
            inputs = np.roll(inputs, 1, axis=2)
            grad_truth = np.roll(grad_truth, 1, axis=2)
        labels = (np.asarray([x + [padding_mask]*(max_label_len-len(x))
                              for x in [targets_0, targets_1]])+(blank_label == 'first'))
        seq_lens = np.array([5, 5], dtype=np.int32)
        label_lens = np.array([5, 4], dtype=np.int32)
        loss_truth = np.array([-loss_log_prob_0, -loss_log_prob_1], np.float32)
        with default_context():
            data = mx.nd.array(inputs)
            label = mx.nd.array(labels)
            data.attach_grad()
            with mx.autograd.record():
                if contrib:
                    l = mx.contrib.ndarray.CTCLoss(data, label,
                                                   use_data_lengths=True,
                                                   use_label_lengths=True,
                                                   data_lengths=mx.nd.array(seq_lens),
                                                   label_lengths=mx.nd.array(label_lens),
                                                   blank_label=blank_label)
                else:
                    l = mx.ndarray.CTCLoss(data, label,
                                           use_data_lengths=True,
                                           use_label_lengths=True,
                                           data_lengths=mx.nd.array(seq_lens),
                                           label_lengths=mx.nd.array(label_lens),
                                           blank_label=blank_label)
            l.backward()
            assert_almost_equal(l, loss_truth, atol=1e-5, rtol=1e-5)
            assert_almost_equal(data.grad, grad_truth, atol=1e-5, rtol=1e-5)
    for contrib in [False, True]:
        for label in ['first', 'last']:
            check_ctc_loss_grad(label, contrib=contrib)
def test_quantization_op():
    """int8 quantize -> dequantize round trip against precomputed codes and
    reconstructed values.

    Fix: removed leftover debug ``print()`` calls.
    """
    min0 = mx.nd.array([0.0])
    max0 = mx.nd.array([1.0])
    a = mx.nd.array([[0.1392, 0.5928], [0.6027, 0.8579]])
    qa, min1, max1 = mx.nd.contrib.quantize(a, min0, max0, out_type='int8')
    a_ = mx.nd.contrib.dequantize(qa, min1, max1, out_type='float32')
    # Expected int8 codes and the fp32 values they dequantize back to.
    qa_real = mx.nd.array([[18, 75], [77, 109]])
    a_real = mx.nd.array([[0.14173228, 0.5905512], [0.6062992, 0.8582677]])
    assert same(qa.asnumpy(), qa_real.asnumpy())
    assert_almost_equal(a_.asnumpy(), a_real.asnumpy(), rtol=1e-2)
def test_index_copy():
    """contrib.index_copy forward and gradients: first w.r.t. the copied
    tensor only, then w.r.t. both the destination and the copied tensor."""
    x = mx.nd.zeros((5, 3))
    t = mx.nd.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    index = mx.nd.array([0, 4, 2], dtype=np.int64)
    expected_out = mx.nd.array([[1, 2, 3], [0, 0, 0], [7, 8, 9],
                                [0, 0, 0], [4, 5, 6]])
    # Rows of x overwritten by the copy get zero grad; others pass through.
    expected_x_grad = mx.nd.array([[0, 0, 0], [1, 1, 1], [0, 0, 0],
                                   [1, 1, 1], [0, 0, 0]])
    expected_t_grad = mx.nd.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])

    # Pass 1: gradient w.r.t. t only.
    t.attach_grad()
    with mx.autograd.record():
        out = mx.nd.contrib.index_copy(x, index, t)
    out.backward()
    assert same(out.asnumpy(), expected_out.asnumpy())
    assert same(t.grad.asnumpy(), expected_t_grad.asnumpy())

    # Pass 2: gradients w.r.t. both x and t.
    x.attach_grad()
    t.attach_grad()
    with mx.autograd.record():
        out = mx.nd.contrib.index_copy(x, index, t)
    out.backward()
    assert same(out.asnumpy(), expected_out.asnumpy())
    assert same(x.grad.asnumpy(), expected_x_grad.asnumpy())
    assert same(t.grad.asnumpy(), expected_t_grad.asnumpy())
def test_boolean_mask():
    """contrib.boolean_mask: basic forward/backward, a zero-size output
    (np_shape mode), and gradient accumulation under grad_req='add'."""
    data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
    index = mx.nd.array([0, 1, 0])
    data.attach_grad()
    with mx.autograd.record():
        out = mx.nd.contrib.boolean_mask(data, index)
    out.backward()
    data.grad.wait_to_read()
    # Only the selected row survives; only it receives gradient.
    expected = np.array([[4, 5, 6]])
    expected_grad = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]])
    assert same(out.asnumpy(), expected)
    assert same(data.grad.asnumpy(), expected_grad)
    # test 0-size output
    prev_np_shape = mx.set_np_shape(True)
    try:
        data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
        index = mx.nd.array([0, 0, 0])
        data.attach_grad()
        with mx.autograd.record():
            out = mx.nd.contrib.boolean_mask(data, index)
        out.backward()
        data.grad.wait_to_read()
        expected = np.zeros((0, 3))
        expected_grad = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
        assert same(out.asnumpy(), expected)
        assert same(data.grad.asnumpy(), expected_grad)
    finally:
        # Restore the previous shape semantics whatever happens above.
        mx.set_np_shape(prev_np_shape)
    # test gradient
    shape = (100, 30)
    a = mx.nd.random.randint(0, 100, shape=shape)
    a.attach_grad()
    # Two masks over the rows; backward T times with grad_req='add' must
    # accumulate T * (bi + ci) broadcast over the row.
    bi = mx.nd.random.randint(0, 100, shape=shape[0:1]) > 50
    ci = mx.nd.random.randint(0, 100, shape=shape[0:1]) < 50
    mx_grad = mx.nd.zeros_like(a)
    mx.autograd.mark_variables([a], [mx_grad], grad_reqs='add')
    T = 3
    for _ in range(T):
        with mx.autograd.record():
            b = mx.nd.contrib.boolean_mask(a, bi)
            c = mx.nd.contrib.boolean_mask(a, ci)
            su = b.sum() + c.sum()
            su.backward()
    grad = (bi + ci).asnumpy().reshape((-1,) + (1,) * (len(shape)-1))
    grad = np.tile(grad, (1,) + shape[1:])
    # T times
    grad *= T
    assert_allclose(a.grad.asnumpy(), grad)
    a_np = a.asnumpy()
    assert same(b.asnumpy(), a_np[bi.asnumpy().astype('bool')])
    assert same(c.asnumpy(), a_np[ci.asnumpy().astype('bool')])
def test_div_sqrt_dim():
    """contrib.div_sqrt_dim divides by the square root of the last-axis
    size; check forward against numpy and the numeric gradient."""
    shape = (5, 10, 8)
    sample = np.random.normal(0, 1, shape)
    sym = mx.sym.contrib.div_sqrt_dim(mx.symbol.Variable('data'))
    check_numeric_gradient(sym, [sample], numeric_eps=1E-2)
    check_symbolic_forward(sym, [sample], [sample / np.sqrt(shape[-1])])
def bad_input_finder(f, f_grad, dtype):
    """Return a vectorized predicate that flags inputs likely to fail a
    check_numeric_gradient tolerance test for *f*: values within eps/2 of
    the origin (discontinuity risk) or where the central finite difference
    disagrees with the analytic gradient *f_grad* beyond the default rtol
    for *dtype*."""
    eps = default_numeric_eps()[np.dtype(dtype)]
    rtol = default_rtols()[np.dtype(dtype)]

    def is_problem_input(x):
        if abs(x) < eps / 2:
            return True
        fd_gradient = (f(x + eps / 2) - f(x - eps / 2)) / eps
        return abs(fd_gradient / f_grad(x) - 1) > rtol

    return np.vectorize(is_problem_input)
def test_reciprocal_op():
    """Forward and numeric-gradient check for mx.sym.reciprocal."""
    samples = np.random.rand(3, 4).astype(np.float32) * 10 - 5
    # Avoid possible division by 0 errors and finite difference method
    # inaccuracies by replacing problem inputs with 1.0.
    problem_mask = bad_input_finder(np.reciprocal,
                                    lambda x: -np.reciprocal(x)**2, np.float32)(samples)
    samples[problem_mask] = 1.0
    var = mx.symbol.Variable('data')
    sym = mx.sym.reciprocal(var)
    check_numeric_gradient(sym, [samples])
    check_symbolic_forward(sym, [samples], [np.reciprocal(samples)])
def test_cbrt_op():
    """Forward and numeric-gradient check for mx.sym.cbrt."""
    samples = np.random.rand(3, 4).astype(np.float32) * 10 - 5
    # Avoid possible division by 0 errors and finite difference method
    # inaccuracies by replacing problem inputs with 1.0.
    problem_mask = bad_input_finder(np.cbrt,
                                    lambda x: 1./(3 * np.cbrt(x)**2), np.float32)(samples)
    samples[problem_mask] = 1.0
    var = mx.symbol.Variable('data')
    sym = mx.sym.cbrt(var)
    check_numeric_gradient(sym, [samples])
    check_symbolic_forward(sym, [samples], [np.cbrt(samples)])
def test_rcbrt_op():
    """Forward and numeric-gradient check for mx.sym.rcbrt (1/cbrt)."""
    samples = np.random.rand(3, 4).astype(np.float32) * 10 - 5
    # Avoid possible division by 0 errors and finite difference method
    # inaccuracies by replacing problem inputs with 1.0.
    problem_mask = bad_input_finder(lambda x: 1./np.cbrt(x),
                                    lambda x: -1./(3 * np.cbrt(x)**4), np.float32)(samples)
    samples[problem_mask] = 1.0
    var = mx.symbol.Variable('data')
    sym = mx.sym.rcbrt(var)
    check_numeric_gradient(sym, [samples])
    check_symbolic_forward(sym, [samples], [1/np.cbrt(samples)])
def test_custom_op():
    """End-to-end tests for mx.operator.CustomOp / CustomOpProp.

    Covers: a 'sqr' op handling both default (dense) and CSR storage with an
    aux state and explicit storage-type inference; numeric-gradient checks
    through dtype casts; a 'mult' op relying on the default storage-type
    inference (backward-compatibility check); a 'mult_no_grad' op registered
    with need_top_grad=False; and a 'no_input_op' that takes no inputs.
    """
    # Element-wise square; the dense path writes aux[0] = 1 so backward can
    # verify that aux state set in forward is visible there.
    class Sqr(mx.operator.CustomOp):
        def forward(self, is_train, req, in_data, out_data, aux):
            if in_data[0].stype == 'default':
                aux[0][:] = 1
                self.assign(out_data[0], req[0], in_data[0]*in_data[0])
            else:
                # CSR path: square the stored values and rebuild the CSR array
                # with the original sparsity pattern.
                inp = in_data[0]
                csr_m = inp.data * inp.data
                out = mx.nd.sparse.csr_matrix((csr_m, inp.indices, inp.indptr), shape=inp.shape)
                self.assign(out_data[0], req[0], out)
                if (in_data[0].stype == 'csr'):
                    assert(isinstance(out_data[0], mx.nd.sparse.CSRNDArray))
        def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
            # d(x*x)/dx = 2x, chained with the incoming gradient.
            self.assign(in_grad[0], req[0], 2 * mx.nd.sparse.elemwise_mul(in_data[0], out_grad[0]))
            if in_data[0].stype == 'default':
                assert (aux[0].asnumpy() == 1).all()
    @mx.operator.register("sqr")
    class SqrProp(mx.operator.CustomOpProp):
        def __init__(self):
            super(SqrProp, self).__init__(need_top_grad=True)
        def list_arguments(self):
            return ['data']
        def list_outputs(self):
            return ['output']
        def list_auxiliary_states(self):
            return ['aux']
        def infer_shape(self, in_shape):
            # Output and aux share the input's shape.
            return in_shape, [in_shape[0]], [in_shape[0]]
        def infer_type(self, in_type):
            return in_type, [in_type[0]], [in_type[0]]
        def infer_storage_type(self, in_stype):
            # Keep dense inputs dense; any other storage is treated as CSR.
            if in_stype[0] == 'default':
                return ['default'], ['default'], ['default']
            return ['csr'], ['csr'], ['csr']
        def infer_storage_type_backward(self, ograd_stype, in_stype,
                                        out_stype, igrad_stype, aux_stype):
            if in_stype[0] == 'default':
                return ['default'], ['default'], ['default'], ['default'], ['default']
            return ['default'], ['csr'], ['csr'], ['csr'], ['csr']
        def create_operator(self, ctx, shapes, dtypes):
            return Sqr()
    # Dense path: numeric gradient, including through float64/float32 casts.
    data = mx.symbol.Variable('data')
    aux = mx.symbol.Variable('aux')
    op = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
    x = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
    aux = mx.nd.zeros_like(x)
    check_numeric_gradient(op, [x], [aux])
    data = mx.symbol.cast(data, dtype='float64')
    op = mx.symbol.cast(op, dtype='float32')
    check_numeric_gradient(op, [x], [aux])
    # Sparse (CSR) path through the symbolic API.
    data = mx.symbol.Variable('data', stype='csr')
    aux = mx.symbol.Variable('aux')
    op2 = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
    x = x.tostype('csr')
    aux = mx.nd.zeros_like(x)
    check_numeric_gradient(op2, [x], [aux], grad_stype_dict={"data": "csr"})
    # Imperative CSR forward/backward against the reference sparse square.
    x2 = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
    x2 = x2.tostype('csr')
    aux2 = mx.nd.zeros_like(x2)
    x2.attach_grad()
    with mx.autograd.record():
        output = mx.nd.Custom(x2, aux2, name='sqr', op_type='sqr')
    output.backward()
    expected_output = mx.nd.sparse.square(x2)
    expected_grad = 2 * x2
    rtol = 1e-4
    atol = 1e-6
    assert_almost_equal(output, expected_output, rtol=rtol, atol=atol)
    assert_almost_equal(x2.grad, expected_grad, rtol=rtol, atol=atol)
    # test for backward compatibility, i.e. the correctness of default implementation of
    # infer storage in custom operator
    class Mult(mx.operator.CustomOp):
        def forward(self, is_train, req, in_data, out_data, aux):
            self.assign(out_data[0], req[0], in_data[0]*in_data[1])
        def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
            # NOTE(review): gradients ignore out_grad here; the asserts below
            # only compare against the raw inputs.
            self.assign(in_grad[0], req[0], in_data[1])
            self.assign(in_grad[1], req[1], in_data[0])
    @mx.operator.register("mult")
    class MultProp(mx.operator.CustomOpProp):
        def __init__(self):
            super(MultProp, self).__init__(need_top_grad=True)
        def list_arguments(self):
            return ['lhs', 'rhs']
        def list_outputs(self):
            return ['output']
        def infer_shape(self, in_shape):
            return in_shape, [in_shape[0]], []
        def create_operator(self, ctx, shapes, dtypes):
            return Mult()
    lhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
    rhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
    lhs.attach_grad()
    rhs.attach_grad()
    with mx.autograd.record():
        y = mx.nd.Custom(lhs, rhs, name='mult', op_type='mult')
    y.backward()
    assert_almost_equal(rhs, lhs.grad, rtol=rtol, atol=atol)
    assert_almost_equal(lhs, rhs.grad, rtol=rtol, atol=atol)
    # Same op but registered with need_top_grad=False and a pass-through
    # infer_storage_type_backward.
    class MultNoGrad(mx.operator.CustomOp):
        def forward(self, is_train, req, in_data, out_data, aux):
            self.assign(out_data[0], req[0], in_data[0]*in_data[1])
        def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
            self.assign(in_grad[0], req[0], in_data[1])
            self.assign(in_grad[1], req[1], in_data[0])
    @mx.operator.register("mult_no_grad")
    class MultNoGradProp(mx.operator.CustomOpProp):
        def __init__(self):
            super(MultNoGradProp, self).__init__(need_top_grad=False)
        def list_arguments(self):
            return ['lhs', 'rhs']
        def list_outputs(self):
            return ['output']
        def infer_shape(self, in_shape):
            return in_shape, [in_shape[0]], []
        def create_operator(self, ctx, shapes, dtypes):
            return MultNoGrad()
        def infer_storage_type_backward(self, ograd_stype, in_stype, out_stype, igrad_stype, aux_stype):
            return ograd_stype, in_stype, out_stype, igrad_stype, aux_stype
    with mx.autograd.record():
        y2 = mx.nd.Custom(lhs, rhs, name="mult_no_grad", op_type="mult_no_grad")
    y2.backward()
    assert_almost_equal(rhs, lhs.grad, rtol=rtol, atol=atol)
    assert_almost_equal(lhs, rhs.grad, rtol=rtol, atol=atol)
    # Op with no inputs: output shape comes entirely from constructor kwargs.
    class NoInputOp(mx.operator.CustomOp):
        def __init__(self, length, depth):
            super(NoInputOp, self).__init__()
            self.output = np.ones(shape=(length, depth), dtype=np.float32)
        def forward(self, is_train, req, in_data, out_data, aux):
            self.assign(out_data[0], req[0], self.output)
        def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
            pass
    @mx.operator.register("no_input_op")
    class NoInputOpProp(mx.operator.CustomOpProp):
        def __init__(self, length, depth):
            super(NoInputOpProp, self).__init__()
            self.length = int(length)
            self.depth = int(depth)
        def list_arguments(self):
            return []
        def list_outputs(self):
            return ['output']
        def infer_shape(self, in_shape):
            return [], [(self.length, self.depth)], []
        def infer_type(self, in_type):
            return [], [np.float32], []
        def create_operator(self, ctx, shapes, dtypes):
            return NoInputOp(length=self.length, depth=self.depth)
    with mx.autograd.record():
        x = mx.nd.Custom(length=10, depth=10, op_type="no_input_op")
    assert_almost_equal(x, np.ones(shape=(10, 10), dtype=np.float32))
@pytest.mark.skip(reason="Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/17467")
def test_custom_op_fork():
    """Verify a registered custom op still works in a forked child process
    (regression test; skipped on Windows, which has no fork)."""
    # test custom operator fork
    # see https://github.com/apache/incubator-mxnet/issues/14396
    class AdditionOP(mx.operator.CustomOp):
        def __init__(self):
            super(AdditionOP, self).__init__()
        def forward(self, is_train, req, in_data, out_data, aux):
            out_data[0][:] = in_data[0] + in_data[1]
        def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
            # d(a+b)/da = d(a+b)/db = 1, so both grads are the output grad.
            in_grad[0][:] = out_grad[0]
            in_grad[1][:] = out_grad[0]
    @mx.operator.register("AdditionOP")
    class AdditionOPProp(mx.operator.CustomOpProp):
        def __init__(self):
            super(AdditionOPProp, self).__init__()
        def list_arguments(self):
            return ['a', 'b']
        def list_outputs(self):
            return ['output']
        def infer_shape(self, in_shape):
            return in_shape, [in_shape[0]]
        def create_operator(self, ctx, shapes, dtypes):
            return AdditionOP()
    if not sys.platform.startswith('win'):  # no fork in windows
        def custom_add():
            a = mx.nd.array([1, 2, 3])
            b = mx.nd.array([4, 5, 6])
            c = mx.nd.Custom(a, b, op_type='AdditionOP')
            assert_almost_equal((a + b).asnumpy(), c.asnumpy())
        # Run once in the parent, then again in a forked child; the child must
        # exit cleanly within 5 seconds.
        custom_add()
        from multiprocessing import Process
        p = Process(target=custom_add)
        p.daemon = True
        p.start()
        p.join(5)
        assert not p.is_alive() and p.exitcode == 0
def _build_dot_custom(fun_forward, name):
    """Register a custom op `name` whose forward is `fun_forward(in_data, out_data)`.

    The op takes two arguments 'a' and 'b', infers the output shape as
    (a.shape[0], b.shape[1]) (a matrix product shape), and has a no-op
    backward.  Registration is a side effect; nothing is returned.
    """
    class Dot(mx.operator.CustomOp):
        def __init__(self):
            super(Dot, self).__init__()
        def forward(self, is_train, req, in_data, out_data, aux):
            # Delegate entirely to the caller-supplied forward so tests can
            # inject failures at different stages.
            fun_forward(in_data, out_data)
        def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
            pass
    @mx.operator.register(name)
    class DotProp(mx.operator.CustomOpProp):
        def __init__(self):
            super(DotProp, self).__init__()
        def list_arguments(self):
            return ['a', 'b']
        def list_outputs(self):
            return ['output']
        def infer_shape(self, in_shape):
            return in_shape, [(in_shape[0][0], in_shape[1][1])]
        def create_operator(self, ctx, shapes, dtypes):
            return Dot()
def test_custom_op_exc():
    """Exceptions raised inside custom ops must surface as MXNetError,
    regardless of where they occur: in Python callback code, when pushing
    the op to the engine, or during real (CPU) execution.
    """
    # test except handling
    # see https://github.com/apache/incubator-mxnet/pull/14693
    # 1. error in python code
    def custom_exc1():
        def f(in_data, out_data):
            assert False
            out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
        _build_dot_custom(f, 'Dot1')
        a = mx.nd.zeros((4, 1))
        b = mx.nd.zeros((1, 4))
        c = mx.nd.Custom(a, b, op_type='Dot1')
        c.wait_to_read()
    pytest.raises(MXNetError, custom_exc1)
    # 2. error in pushing operator to engine
    def custom_exc2():
        def f(in_data, out_data):
            out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
        _build_dot_custom(f, 'Dot2')
        a = mx.nd.zeros((4, 2))
        b = mx.nd.zeros((1, 4))
        # trigger error by invalid input shapes of operands
        c = mx.nd.Custom(a, b, op_type='Dot2')
        c.wait_to_read()
    pytest.raises(MXNetError, custom_exc2)
    # 3. error in real execution
    if default_context().device_type == 'cpu':
        # Variant with an explicit wait inside the forward callback.
        def custom_exc3():
            def f(in_data, out_data):
                dot = mx.nd.dot(in_data[0], in_data[1])
                # input to Cholesky factorization should be
                # symmetric positive-definite, error will be
                # triggered in op execution on cpu
                out_data[0][:] = mx.nd.linalg.potrf(dot)
                out_data[0].wait_to_read()
            _build_dot_custom(f, 'Dot3')
            a = mx.nd.zeros((2, 1))
            b = mx.nd.zeros((1, 2))
            c = mx.nd.Custom(a, b, op_type='Dot3')
            c.wait_to_read()
        pytest.raises(MXNetError, custom_exc3)
        # Same failure, but without waiting inside the callback.
        def custom_exc4():
            def f(in_data, out_data):
                dot = mx.nd.dot(in_data[0], in_data[1])
                # input to Cholesky factorization should be
                # symmetric positive-definite, error will be
                # triggered in op execution on cpu
                out_data[0][:] = mx.nd.linalg.potrf(dot)
            _build_dot_custom(f, 'Dot4')
            a = mx.nd.zeros((2, 1))
            b = mx.nd.zeros((1, 2))
            c = mx.nd.Custom(a, b, op_type='Dot4')
            c.wait_to_read()
        pytest.raises(MXNetError, custom_exc4)
def test_psroipooling():
    """Numeric-gradient checks for contrib.PSROIPooling over a grid of ROI
    counts, class/group counts and image sizes, w.r.t. the image data."""
    for num_rois in [1, 2]:
        for num_classes, num_group in itertools.product([2, 3], [2, 3]):
            for image_height, image_width in itertools.product([168, 224], [168, 224]):
                for grad_nodes in [['im_data']]:
                    spatial_scale = 0.0625
                    # builtin int() instead of np.int(): the np.int alias was
                    # deprecated in NumPy 1.20 and removed in NumPy 1.24, so
                    # np.int(...) raises AttributeError on modern NumPy.
                    feat_height = int(image_height * spatial_scale)
                    feat_width = int(image_width * spatial_scale)
                    im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
                    # ROIs: [batch_idx, x0, y0, x1, y1], sorted so x0 <= x1, y0 <= y1.
                    rois_data = np.zeros([num_rois, 5])
                    rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1))
                    rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1))
                    im_data_var = mx.symbol.Variable(name="im_data")
                    rois_data_var = mx.symbol.Variable(name="rois_data")
                    op = mx.sym.contrib.PSROIPooling(data=im_data_var, rois=rois_data_var, spatial_scale=spatial_scale,
                                                     group_size=num_group, pooled_size=num_group,
                                                     output_dim=num_classes, name='test_op')
                    rtol, atol = 1e-2, 1e-3
                    check_numeric_gradient(op, [im_data, rois_data], rtol=rtol, atol=atol,
                                           grad_nodes=grad_nodes)
def test_psroipooling_with_type():
    """Cross-dtype consistency check for contrib.PSROIPooling on CPU
    (float64, float32, float16); ROI gradients are not requested."""
    arg_params = {
        'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
    # plain psroipooling
    sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool')
    # One context entry per dtype, identical apart from type_dict.
    ctx_list = [{'ctx': mx.cpu(0),
                 'psroipool_data': (1, 18, 14, 14),
                 'psroipool_rois': (2, 5),
                 'type_dict': {'psroipool_data': dt, 'psroipool_rois': dt}}
                for dt in (np.float64, np.float32, np.float16)]
    check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write',
                                               'psroipool_rois': 'null'}, arg_params=arg_params)
@pytest.mark.parametrize('num_batch', [1, 2])
@pytest.mark.parametrize('num_channel_data_deformable_group', itertools.product([4, 8], [1, 2]))
@pytest.mark.parametrize('input_height_width', itertools.product([5, 6], [5, 6]))
@pytest.mark.parametrize('dilate', [(1, 1), (2, 2)])
@pytest.mark.parametrize('grad_nodes', [['im_data'], ['offset_data'], ['weight']])
def test_deformable_convolution(num_batch, num_channel_data_deformable_group, input_height_width,
                                dilate, grad_nodes):
    """Numeric-gradient check for npx.deformable_convolution (3x3 kernel,
    stride 1, pad == dilate so output size equals input size); runs only
    when the default context is a GPU."""
    num_channel_data, num_deformable_group = num_channel_data_deformable_group
    input_height, input_width = input_height_width
    output_height = input_height
    output_width = input_width
    im_data = np.random.rand(num_batch, num_channel_data, input_height, input_width)
    # Offsets: 2 coordinates per 3x3 kernel tap per deformable group,
    # kept in (0.1, 0.9) to stay away from interpolation discontinuities.
    offset_data = \
        np.random.rand(num_batch, num_deformable_group * 3 * 3 * 2, output_height, output_width)\
        * 0.8 + 0.1
    weight = np.random.normal(0, 0.001, (num_channel_data, num_channel_data, 3, 3))
    bias = np.zeros(num_channel_data)
    im_data_var = mx.symbol.Variable(name="im_data").as_np_ndarray()
    offset_data_var = mx.symbol.Variable(name="offset_data").as_np_ndarray()
    weight_var = mx.symbol.Variable(name="weight").as_np_ndarray()
    bias_var = mx.symbol.Variable(name="bias").as_np_ndarray()
    op = mx.sym.npx.deformable_convolution(name='test_op', data=im_data_var,
                                           offset=offset_data_var,
                                           weight=weight_var, bias=bias_var,
                                           num_filter=num_channel_data, pad=dilate,
                                           kernel=(3, 3), stride=(1, 1), dilate=dilate,
                                           num_deformable_group=num_deformable_group)
    if grad_nodes[0] == 'offset_data':
        # wider tolerance needed for coordinate differential
        rtol, atol = 1.0, 1e-2
    else:
        rtol, atol = 0.05, 1e-3
    # By now we only have gpu implementation
    if default_context().device_type == 'gpu':
        check_numeric_gradient(op, [im_data, offset_data, weight, bias], rtol=rtol, atol=atol,
                               grad_nodes=grad_nodes, ctx=mx.gpu(0), numeric_eps=1.0/64)
def _validate_sample_location(input_rois, input_offset, spatial_scale, pooled_w, pooled_h, sample_per_part, part_size, output_dim, num_classes, trans_std, feat_h, feat_w):
    """Return a copy of `input_offset` adjusted so that every bilinear
    sampling location of deformable PSROIPooling is differentiable.

    Simulates the forward sampling loop; whenever a sample lands within
    1e-3 of an integer grid line (where bilinear interpolation is not
    differentiable), the offsets for that bin are re-drawn at random and
    the whole bin is re-checked until it passes.  `input_rois` rows are
    assumed to be [img_idx, x0, y0, x1, y1] in image coordinates.
    """
    num_rois = input_rois.shape[0]
    output_offset = input_offset.copy()
    # simulate deformable psroipooling forward function
    for roi_idx in range(num_rois):
        sub_rois = input_rois[roi_idx, :].astype(np.float32)
        img_idx, x0, y0, x1, y1 = int(sub_rois[0]), sub_rois[1], sub_rois[2], sub_rois[3], sub_rois[4]
        # ROI corners mapped into feature-map coordinates (the -0.5 centers
        # the ROI on pixel centers).
        roi_start_w = round(x0) * spatial_scale - 0.5
        roi_start_h = round(y0) * spatial_scale - 0.5
        roi_end_w = round(x1 + 1) * spatial_scale - 0.5
        roi_end_h = round(y1 + 1) * spatial_scale - 0.5
        roi_w, roi_h = roi_end_w - roi_start_w, roi_end_h - roi_start_h
        bin_size_w, bin_size_h = roi_w / pooled_w, roi_h / pooled_h
        sub_bin_size_w, sub_bin_size_h = bin_size_w / sample_per_part, bin_size_h / sample_per_part
        for c_top in range(output_dim):
            channel_each_cls = output_dim / num_classes
            class_id = int(c_top / channel_each_cls)
            for ph in range(pooled_h):
                for pw in range(pooled_w):
                    part_h = int(math.floor(float(ph) / pooled_h * part_size))
                    part_w = int(math.floor(float(pw) / pooled_w * part_size))
                    # Per-class learned offsets, scaled by trans_std.
                    trans_x = input_offset[roi_idx, class_id * 2, part_h, part_w] * trans_std
                    trans_y = input_offset[roi_idx, class_id * 2 + 1, part_h, part_w] * trans_std
                    bin_h_start, bin_w_start = ph * bin_size_h + roi_start_h, pw * bin_size_w + roi_start_w
                    need_check = True
                    while need_check:
                        pass_check = True
                        for ih in range(sample_per_part):
                            for iw in range(sample_per_part):
                                h = bin_h_start + trans_y * roi_h + ih * sub_bin_size_h
                                w = bin_w_start + trans_x * roi_w + iw * sub_bin_size_w
                                # Samples outside the feature map are skipped
                                # by the op, so they cannot be problematic.
                                if w < -0.5 or w > feat_w - 0.5 or h < -0.5 or h > feat_h - 0.5:
                                    continue
                                w = min(max(w, 0.1), feat_w - 1.1)
                                h = min(max(h, 0.1), feat_h - 1.1)
                                # if the following condiiton holds, the sampling location is not differentiable
                                # therefore we need to re-do the sampling process
                                if h - math.floor(h) < 1e-3 or math.ceil(h) - h < 1e-3 or w - math.floor(w) < 1e-3 or math.ceil(w) - w < 1e-3:
                                    trans_x, trans_y = random.random() * trans_std, random.random() * trans_std
                                    pass_check = False
                                    break
                            if not pass_check:
                                break
                        if pass_check:
                            # Store the (possibly re-drawn) offsets back in
                            # un-scaled form, matching the input convention.
                            output_offset[roi_idx, class_id * 2 + 1, part_h, part_w] = trans_y / trans_std
                            output_offset[roi_idx, class_id * 2, part_h, part_w] = trans_x / trans_std
                            need_check = False
    return output_offset
@pytest.mark.skip(reason="Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/11713")
def test_deformable_psroipooling():
    """Numeric-gradient checks for contrib.DeformablePSROIPooling (GPU only),
    with offsets pre-validated so every sampling location is differentiable."""
    sample_per_part = 4
    trans_std = 0.1
    for num_rois in [1, 2]:
        for num_classes, num_group in itertools.product([2, 3], [2, 3]):
            for image_height, image_width in itertools.product([160, 224], [160, 224]):
                for grad_nodes in [['im_data'], ['offset_data']]:
                    spatial_scale = 0.0625
                    stride = int(1 / spatial_scale)
                    # builtin int() instead of np.int(): the np.int alias was
                    # deprecated in NumPy 1.20 and removed in NumPy 1.24, so
                    # np.int(...) raises AttributeError on modern NumPy.
                    feat_height = int(image_height * spatial_scale)
                    feat_width = int(image_width * spatial_scale)
                    im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
                    # ROIs kept one stride away from the image border.
                    rois_data = np.zeros([num_rois, 5])
                    rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1 - 2 * stride)) + stride
                    rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1 - 2 * stride)) + stride
                    offset_data = np.random.rand(num_rois, 2*num_classes, num_group, num_group)
                    # at certain points, the bilinear interpolation function may be non-differentiable
                    # to avoid this, we check whether the input locates on the valid points
                    offset_data = _validate_sample_location(rois_data, offset_data, spatial_scale, num_group, num_group,
                                                            sample_per_part, num_group, num_classes, num_classes, trans_std, feat_height, feat_width)
                    im_data_var = mx.symbol.Variable(name="im_data")
                    rois_data_var = mx.symbol.Variable(name="rois_data")
                    offset_data_var = mx.symbol.Variable(name="offset_data")
                    op = mx.sym.contrib.DeformablePSROIPooling(data=im_data_var, rois=rois_data_var,
                                                               trans=offset_data_var, spatial_scale=spatial_scale,
                                                               sample_per_part=4, group_size=num_group,
                                                               pooled_size=num_group, output_dim=num_classes,
                                                               trans_std=0.1, no_trans=False, name='test_op')
                    rtol, atol = 1e-2, 1e-3
                    # By now we only have gpu implementation
                    if default_context().device_type == 'gpu':
                        check_numeric_gradient(op, [im_data, rois_data, offset_data], rtol=rtol, atol=atol,
                                               grad_nodes=grad_nodes, ctx=mx.gpu(0))
def _gemm_test_helper(dtype, grad_check, rtol_fw = None, atol_fw = None,
                      rtol_bw = None, atol_bw = None, num_eps = None):
    """Forward (and, when `grad_check == 1`, numeric-gradient) checks for
    linalg.gemm and linalg.gemm2 in `dtype`.

    Covers all four transpose_a/transpose_b combinations, batch mode
    (3x-replicated inputs), and non-default `axis` values.  Tolerances and
    the finite-difference step default to None (helper defaults apply).
    """
    def np_random_data(shape, dtype=np.float32):
        return np.random.uniform(low=-0.5,
                                 high=0.5, size=shape).astype(dtype)
    data1 = mx.symbol.Variable('data1')
    data2 = mx.symbol.Variable('data2')
    data3 = mx.symbol.Variable('data3')
    # Shorthands: forward check, numeric gradient check, and a replicator
    # that tiles an (m, n) matrix into a (3, 1, m, n) batch.
    check_fw = lambda sym, location, expected :\
        check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
                               atol=atol_fw, dtype=dtype)
    check_grad = lambda sym, location:\
        check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
                               atol=atol_bw, dtype=dtype)
    rep_3x = lambda a, m, n :\
        np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
    shape1 = (2, 3)
    shape2 = (3, 2)
    shape3 = (3, 3)
    shape4 = (2, 2)
    data_in1 = np_random_data(shape1, dtype)
    data_in2 = np_random_data(shape2, dtype)
    data_in3 = np_random_data(shape3, dtype)
    data_in4 = np_random_data(shape4, dtype)
    # Check all transpositions of gemm operator.
    data_in1_t = np.transpose(data_in1)
    data_in2_t = np.transpose(data_in2)
    res_gemm = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
    test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
    check_fw(test_gemm, [data_in1, data_in2, data_in4], [res_gemm])
    if grad_check == 1:
        check_grad(test_gemm, [data_in1, data_in2, data_in4])
    res_gemm = 4. * np.dot(data_in1_t, data_in2_t) + 7. * data_in3
    test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
                                   transpose_a=True, transpose_b=True)
    check_fw(test_gemm, [data_in1, data_in2, data_in3], [res_gemm])
    if grad_check == 1:
        check_grad(test_gemm, [data_in1, data_in2, data_in3])
    res_gemm = 4. * np.dot(data_in1_t, data_in1) + 7. * data_in3
    test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
                                   transpose_a=True)
    check_fw(test_gemm, [data_in1, data_in1, data_in3], [res_gemm])
    if grad_check == 1:
        check_grad(test_gemm, [data_in1, data_in1, data_in3])
    res_gemm = 4. * np.dot(data_in1, data_in1_t) + 7. * data_in4
    test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
                                   transpose_b=True)
    check_fw(test_gemm, [data_in1, data_in1, data_in4], [res_gemm])
    if grad_check == 1:
        check_grad(test_gemm, [data_in1, data_in1, data_in4])
    # Check batch of gemm.
    a = rep_3x(data_in1, 2, 3)
    b = rep_3x(data_in2, 3, 2)
    c = rep_3x(data_in4, 2, 2)
    r = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
    r = rep_3x(r, 2, 2)
    test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
    check_fw(test_gemm, [a, b, c], [r])
    if grad_check == 1:
        check_grad(test_gemm, [a, b, c])
    # Check for different axis that describes matrix rows.
    a2 = np.copy(np.swapaxes(a, 0, 2))
    b2 = np.copy(np.swapaxes(b, 0, 2))
    c2 = np.copy(np.swapaxes(c, 0, 2))
    r2 = np.copy(np.swapaxes(r, 0, 2))
    test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = 0)
    check_fw(test_gemm, [a2, b2, c2], [r2])
    if grad_check == 1:
        check_grad(test_gemm, [a2, b2, c2])
    a2 = np.copy(np.swapaxes(a, 1, 2))
    b2 = np.copy(np.swapaxes(b, 1, 2))
    c2 = np.copy(np.swapaxes(c, 1, 2))
    r2 = np.copy(np.swapaxes(r, 1, 2))
    test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = -3)
    check_fw(test_gemm, [a2, b2, c2], [r2])
    if grad_check == 1:
        check_grad(test_gemm, [a2, b2, c2])
    # Check gemm2 operator same way as gemm.
    res_gemm = 4. * np.dot(data_in1, data_in2)
    test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
    check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
    if grad_check == 1:
        check_grad(test_gemm, [data_in1, data_in2])
    res_gemm = 4. * np.dot(data_in1_t, data_in2_t)
    test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True,
                                    transpose_b=True)
    check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
    if grad_check == 1:
        check_grad(test_gemm, [data_in1, data_in2])
    res_gemm = 4. * np.dot(data_in1_t, data_in1)
    test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True)
    check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
    if grad_check == 1:
        check_grad(test_gemm, [data_in1, data_in1])
    res_gemm = 4. * np.dot(data_in1, data_in1_t)
    test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_b=True)
    check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
    if grad_check == 1:
        check_grad(test_gemm, [data_in1, data_in1])
    # Check batch of gemm2.
    a = rep_3x(data_in1, 2, 3)
    b = rep_3x(data_in2, 3, 2)
    r = rep_3x(4. * np.dot(data_in1, data_in2), 2, 2)
    test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
    check_fw(test_gemm, [a, b], [r])
    if grad_check == 1:
        check_grad(test_gemm, [a, b])
    a2 = np.copy(np.swapaxes(a, 0, 2))
    b2 = np.copy(np.swapaxes(b, 0, 2))
    r2 = np.copy(np.swapaxes(r, 0, 2))
    test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = 0)
    check_fw(test_gemm, [a2, b2], [r2])
    if grad_check == 1:
        check_grad(test_gemm, [a2, b2])
    a2 = np.copy(np.swapaxes(a, 1, 2))
    b2 = np.copy(np.swapaxes(b, 1, 2))
    r2 = np.copy(np.swapaxes(r, 1, 2))
    test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = -3)
    check_fw(test_gemm, [a2, b2], [r2])
    if grad_check == 1:
        check_grad(test_gemm, [a2, b2])
# Test gemm separately from other la-operators.
def test_gemm():
    """Run the gemm helper in float64, and in float32 with TensorOp math
    conversion disabled (and, on GPU, enabled as well)."""
    env_key = 'MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION'
    _gemm_test_helper(np.float64, True)
    with environment(env_key, '0'):
        _gemm_test_helper(np.float32, True)
    if default_context().device_type == 'gpu':
        with environment(env_key, '1'):
            _gemm_test_helper(np.float32, True)
# Helper functions for test_laop
def _make_symm_symbol(a, ndims):
    """Symmetrize symbol `a` over its last two axes: 0.5 * (a + a^T)."""
    assert ndims >= 2
    # Permutation that swaps the last two axes and leaves the rest alone.
    axes = list(range(ndims - 2)) + [ndims - 1, ndims - 2]
    return 0.5 * (a + mx.sym.transpose(a, axes=tuple(axes)))
def _make_triangle_symm(a, ndims, m, lower, dtype=np.float32):
    """Mask symbol `a` (last two dims m x m) down to its lower (or, with
    lower=False, upper) triangle, diagonal included.

    The mask is built from stacked one-hot rows: row j keeps columns
    0..j.  For the upper triangle the lower mask is transposed.
    """
    assert ndims >= 2
    # The last two dimensions must both be m
    # Create mask for lower triangle and diagonal
    index = mx.sym.arange(start=0, stop=m, step=1, dtype=np.int32)
    lt_mask = mx.sym.one_hot(index, depth=m, dtype=dtype)
    for j in range(1, m):
        # Shift the one-hot block down by j rows and accumulate, extending
        # each row's kept columns by one sub-diagonal at a time.
        part1 = mx.sym.zeros(shape=(j, m), dtype=dtype)
        index = mx.sym.arange(start=0, stop=m-j, step=1, dtype=np.int32)
        part2 = mx.sym.one_hot(index, depth=m, dtype=dtype)
        lt_mask = lt_mask + mx.sym.concat(*[part1, part2], dim=0)
    if not lower:
        # Upper triangle: transpose the lower-triangular mask.
        lt_mask = mx.sym.reshape(lt_mask, shape=(m, m))
        lt_mask = mx.sym.transpose(lt_mask, axes=(1, 0))
    # Broadcast the (m, m) mask across all leading batch dimensions.
    shp = tuple([1]*(ndims-2) + [m, m])
    lt_mask = mx.sym.reshape(lt_mask, shape=shp)
    return mx.sym.broadcast_mul(a, lt_mask)
# @ankkhedia: Getting rid of fixed seed as flakiness could not be reproduced
# tracked at https://github.com/apache/incubator-mxnet/issues/11718
@xfail_when_nonstandard_decimal_separator
def test_laop():
    """Forward/backward checks for the linalg ops potrf, potri, trsm, trmm
    and sumlogdiag, in both lower and upper triangular modes, first on
    trivial batched 1x1 matrices and then on a fixed 4x4 Cholesky example.
    """
    dtype = np.float64
    rtol_fw = 1e-7
    atol_fw = 1e-9
    num_eps = 2e-6
    rtol_bw = 1e-5
    atol_bw = 1e-5
    # enable numerical checking of gradients
    grad_check = 1
    data1 = mx.symbol.Variable('data1')
    data2 = mx.symbol.Variable('data2')
    # Tile an (m, n) matrix into a (3, 1, m, n) batch.
    rep_3x = lambda a, m, n :\
        np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
    def check_fw_grad(sym, location, expected):
        # Forward check, plus numeric gradient when enabled.
        check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
                               atol=atol_fw, dtype=dtype)
        if grad_check == 1:
            check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
                                   atol=atol_bw, dtype=dtype)
    # Fixed SPD matrix with known Cholesky factor `trian` and inverse `inv`.
    matrix = np.array([[9., 3., -6., 12.],
                       [3., 26., -7., -11.],
                       [-6., -7., 9., 7.],
                       [12., -11., 7., 65.]])
    trian = np.array([[3., 0., 0., 0.],
                      [1., 5., 0., 0.],
                      [-2., -1., 2., 0.],
                      [4., -3., 6., 2.]])
    # NOTE(review): `pow` shadows the builtin of the same name inside this test.
    pow = np.array([[2., 1., 1., 1.],
                    [1., 4., 1., 1.],
                    [1., 1., 8., 1.],
                    [1., 1., 1., 16.]])
    inv = np.array([[8.95/3., 0.05/3., 2.65, -2.5/3.],
                    [0.05/3., 0.05, 0.05, 0.],
                    [2.65, 0.05, 2.5, -0.75],
                    [-2.5/3., 0., -0.75, 0.25]])
    ident = np.eye(4)
    shape = (4, 4, 1, 1)
    ones = mx.nd.ones(shape).asnumpy()
    for lower in [True, False]:
        upper = not lower
        # Tests with trivial 1x1 matrices.
        data_in = np.random.uniform(1, 10, shape)
        # test potrf
        # Note: Have to symmetrize input, for gradient test to work
        res_potrf = np.sqrt(data_in)
        test_potrf = mx.sym.linalg.potrf(data1, lower=lower)
        check_fw_grad(test_potrf, [data_in], [res_potrf])
        # test potri
        res_potri = np.divide(ones, data_in * data_in)
        test_potri = mx.sym.linalg.potri(data1, lower=lower)
        check_fw_grad(test_potri, [data_in], [res_potri])
        # test trsm
        trian_in = data_in * 7.
        test_trsm = mx.sym.linalg.trsm(data1, data2, alpha=7., lower=lower)
        check_fw_grad(test_trsm, [trian_in, data_in], [ones])
        # test trmm
        trian_in = np.divide(ones, trian_in)
        test_trmm = mx.sym.linalg.trmm(data1, data2, alpha=7., transpose=True,
                                       rightside=True, lower=lower)
        check_fw_grad(test_trmm, [trian_in, data_in], [ones])
        # test sumlogdiag
        res_sumlogdiag = np.reshape(np.log(data_in), (4, 4))
        test_sumlogdiag = mx.sym.linalg.sumlogdiag(data1)
        check_fw_grad(test_sumlogdiag, [data_in], [res_sumlogdiag])
        # more elaborate example of Cholesky factorization
        low_trian = trian
        if upper:
            trian = np.transpose(trian)
        # test potrf
        test_potrf = mx.sym.linalg.potrf(_make_symm_symbol(data1, ndims=4), lower=lower)
        a = rep_3x(matrix, 4, 4)
        r = rep_3x(trian, 4, 4)
        check_fw_grad(test_potrf, [a], [r])
        #test potri
        data1_ltri = _make_triangle_symm(
            data1, ndims=4, m=4, lower=lower, dtype=dtype)
        test_potri = mx.sym.linalg.potri(data1_ltri, lower=lower)
        a = rep_3x(trian, 4, 4)
        r = rep_3x(inv, 4, 4)
        check_fw_grad(test_potri, [a], [r])
        # test trsm
        test_trsm = mx.sym.linalg.trsm(data1_ltri, data2, alpha=7., transpose=upper, lower=lower)
        b = rep_3x(matrix, 4, 4)
        r = rep_3x(7. * np.transpose(low_trian), 4, 4)
        check_fw_grad(test_trsm, [a, b], [r])
        test_trsm2 = mx.sym.linalg.trsm(
            data1_ltri, data2, alpha=-2., rightside=True, transpose=lower, lower=lower)
        r = rep_3x(-2. * low_trian, 4, 4)
        check_fw_grad(test_trsm2, [a, b], [r])
        test_trsm3 = mx.sym.linalg.trsm(
            data1_ltri, data2, alpha=0.5, transpose=lower, lower=lower)
        b = rep_3x(np.transpose(low_trian), 4, 4)
        r = rep_3x(0.5 * ident, 4, 4)
        check_fw_grad(test_trsm3, [a, b], [r])
        test_trsm4 = mx.sym.linalg.trsm(
            data1_ltri, data2, alpha=-0.5, rightside=True, transpose=upper, lower=lower)
        b = rep_3x(low_trian, 4, 4)
        r = rep_3x(-0.5 * ident, 4, 4)
        check_fw_grad(test_trsm4, [a, b], [r])
        # test trmm
        test_trmm = mx.sym.linalg.trmm(
            data1_ltri, data2, alpha=7., transpose=True, rightside=True, lower=lower)
        a = [a, rep_3x(matrix, 4, 4)]
        r = rep_3x(7. * np.dot(matrix, trian.T), 4, 4)
        check_fw_grad(test_trmm, a, [r])
        test_trmm2 = mx.sym.linalg.trmm(data1_ltri, data2, alpha=-2., lower=lower)
        r = rep_3x(-2. * np.dot(trian, matrix), 4, 4)
        check_fw_grad(test_trmm2, a, [r])
        test_trmm3 = mx.sym.linalg.trmm(data1_ltri, data2, rightside=True, lower=lower)
        r = rep_3x(np.dot(matrix, trian), 4, 4)
        check_fw_grad(test_trmm3, a, [r])
        test_trmm4 = mx.sym.linalg.trmm(
            data1_ltri, data2, alpha=1.2, transpose=True, lower=lower)
        r = rep_3x(1.2 * np.dot(trian.T, matrix), 4, 4)
        check_fw_grad(test_trmm4, a, [r])
        # test sumlogdiag
        r = np.reshape(np.tile(10. * np.log(np.array([2.])), 3), (3,))
        check_fw_grad(test_sumlogdiag, [rep_3x(pow, 4, 4)], [r])
# Tests for operators linalg.syrk, linalg.gelqf
def _gelqf_combined_symbol(a):
    """Group symbol producing (Q*Q^T, L*Q) from the LQ factorization of `a`."""
    q_sym, l_sym = mx.sym.linalg.gelqf(a)
    qqt = mx.sym.linalg.syrk(q_sym, transpose=False, alpha=1., name='Q_times_Qt')
    lq = mx.sym.linalg.trmm(l_sym, q_sym, alpha=1., name='L_times_Q')
    return mx.sym.Group([qqt, lq])
# NOTE: If we leave the unused output dangling, things break if dtype=np.float64. Namely, the
# backward gradient for the unused output is of dtype np.float32 then.
# ==> Very annoying!
def _gelqf_first_output(a):
    """Return Q from gelqf(a), tying L into the graph via a gradient-blocked
    zero term so the unused output is not left dangling (see NOTE above)."""
    q_sym, l_sym = mx.sym.linalg.gelqf(a)
    zero_term = mx.sym.sum(mx.sym.BlockGrad(l_sym), axis=(), keepdims=True) * 0.0
    return mx.sym.broadcast_add(q_sym, zero_term)
def _gelqf_second_output(a):
    """Return L from gelqf(a), tying Q into the graph via a gradient-blocked
    zero term so the unused output is not left dangling."""
    q_sym, l_sym = mx.sym.linalg.gelqf(a)
    zero_term = mx.sym.sum(mx.sym.BlockGrad(q_sym), axis=(), keepdims=True) * 0.0
    return mx.sym.broadcast_add(l_sym, zero_term)
def _syevd_combined_symbol(a):
    """Group symbol producing (U*U^T, U^T*diag(lam)*U) from syevd(a)."""
    u_sym, lam_sym = mx.sym.linalg.syevd(a)
    uut = mx.sym.linalg.syrk(u_sym, transpose=False, alpha=1., name='U_times_Ut')
    # Scale the rows of U by the eigenvalues (lam reshaped to a column).
    scaled_u = mx.sym.broadcast_mul(mx.sym.reshape(lam_sym, shape=(-2, 1)), u_sym)
    ut_lam_u = mx.sym.linalg.gemm2(u_sym, scaled_u, alpha=1., transpose_a=True,
                                   transpose_b=False, name='Ut_L_U')
    return mx.sym.Group([uut, ut_lam_u])
def test_laop_2():
    """Forward/backward checks for linalg.syrk (both transpose modes, single
    and 3x-batched inputs) and linalg.gelqf (Q orthonormality, L*Q
    reconstruction, and gradients of each output separately).  The gelqf
    part runs on CPU only (GPU would need cuda8; builds use cuda 7.5).
    """
    dtype = np.float64
    rtol_fw = 1e-7
    atol_fw = 1e-9
    num_eps = 1e-6
    rtol_bw = 1e-5
    atol_bw = 1e-6
    # enable numerical checking of gradients
    grad_check = 1
    data1 = mx.symbol.Variable('data1')
    # Shorthands: forward check, numeric gradient check, and a replicator
    # that tiles an (m, n) matrix into a (3, 1, m, n) batch.
    check_fw = lambda sym, location, expected :\
        check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
                               atol=atol_fw, dtype=dtype)
    check_grad = lambda sym, location:\
        check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
                               atol=atol_bw, dtype=dtype)
    rep_3x = lambda a, m, n :\
        np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
    # Tests for linalg.syrk
    mnalpha_lst = [(2, 3, 1.), (5, 3, -2.), (1, 6, 5.), (3, 3, 0.5), (4, 1, 10.), (1, 1, 1.)]
    for m, n, alpha in mnalpha_lst:
        #print('syrk: m={}, n={}, alpha={}'.format(m, n, alpha))
        data_in1 = np.random.uniform(1, 10, (m, n))
        res_syrk1 = alpha * np.dot(data_in1, data_in1.T)
        test_syrk1 = mx.sym.linalg.syrk(data1, transpose=False, alpha=alpha)
        check_fw(test_syrk1, [data_in1], [res_syrk1])
        if grad_check == 1:
            check_grad(test_syrk1, [data_in1])
        res_syrk2 = alpha * np.dot(data_in1.T, data_in1)
        test_syrk2 = mx.sym.linalg.syrk(data1, transpose=True, alpha=alpha)
        check_fw(test_syrk2, [data_in1], [res_syrk2])
        if grad_check == 1:
            check_grad(test_syrk2, [data_in1])
        # Batch mode (3x the same thing)
        a_batch = rep_3x(data_in1, m, n)
        r1_batch = rep_3x(res_syrk1, m, m)
        check_fw(test_syrk1, [a_batch], [r1_batch])
        if grad_check == 1:
            check_grad(test_syrk1, [a_batch])
        r2_batch = rep_3x(res_syrk2, n, n)
        check_fw(test_syrk2, [a_batch], [r2_batch])
        if grad_check == 1:
            check_grad(test_syrk2, [a_batch])
    # Tests for linalg.gelqf
    # Currently disabled on GPU as they need cuda8
    # and MxNet builds use cuda 7.5
    if not (default_context() == mx.cpu()):
        return
    test_gelqf2 = _gelqf_combined_symbol(data1)  # Outputs (dot(Q, Q.T), dot(L, Q))
    test_gelqf_q = _gelqf_first_output(data1)  # Output Q (L is not dangling)
    test_gelqf_l = _gelqf_second_output(data1)  # Output L (Q is not dangling)
    mn_lst = [(4, 4), (1, 1), (5, 20), (1, 10), (15, 50)]
    for m, n in mn_lst:
        #print('gelqf: m={}, n={}'.format(m, n))
        data_in1 = np.random.normal(0., 10., (m, n))
        # Q*Q^T must be the identity; L*Q must reconstruct the input.
        res_eye = np.eye(m)
        res_a = data_in1
        check_fw(test_gelqf2, [data_in1], [res_eye, res_a])
        if grad_check == 1:
            # A => Q
            check_grad(test_gelqf_q, [data_in1])
            # A => L
            check_grad(test_gelqf_l, [data_in1])
        # Batch mode (3x the same thing)
        a_batch = rep_3x(data_in1, m, n)
        reye_batch = rep_3x(res_eye, m, m)
        ra_batch = a_batch
        check_fw(test_gelqf2, [a_batch], [reye_batch, ra_batch])
        if grad_check == 1:
            # A => Q
            check_grad(test_gelqf_q, [a_batch])
            # A => L
            check_grad(test_gelqf_l, [a_batch])
# Tests for operator linalg.syevd
def _syevd_first_output(a):
    """Return the eigenvector output U of syevd(a).

    The eigenvalue output is folded in as a blocked, zero-valued term so
    that neither output is left dangling during backward.
    """
    vecs, vals = mx.sym.linalg.syevd(a)
    zero_term = mx.sym.sum(mx.sym.BlockGrad(vals), axis=(), keepdims=True) * 0.0
    return mx.sym.broadcast_add(vecs, zero_term)
def _syevd_second_output(a):
    """Return the eigenvalue output lam of syevd(a).

    The eigenvector output is folded in as a blocked, zero-valued term so
    that neither output is left dangling during backward.
    """
    vecs, vals = mx.sym.linalg.syevd(a)
    zero_term = mx.sym.sum(mx.sym.BlockGrad(vecs), axis=(), keepdims=True) * 0.0
    return mx.sym.broadcast_add(vals, zero_term)
def _syevd_forward(a):
    """Numpy reference forward pass for syevd.

    Returns (u, lam) with eigenvalues sorted ascending and the matching
    eigenvectors as the rows of u, each sign-normalized for determinism.
    """
    vals, vecs = np.linalg.eig(a)
    order = np.argsort(vals)
    u = vecs[:, order].T
    # Fix each eigenvector's sign so the result is unique.
    for row in range(u.shape[0]):
        _syevd_forw_eigvec_sign(u[row])
    return u, vals[order]
def _syevd_forw_eigvec_sign(v):
ind = np.argmax(np.abs(v))
if v[ind] < 0.:
v[:] = -v
def _syevd_backward(grad_u, grad_l, u, l):
n = l.size
assert grad_l.size == n
assert grad_u.shape == (n, n)
assert u.shape == (n, n)
temp = np.dot(grad_u, u.T)
temp2 = np.diag(grad_l)
for i in range(1, n):
for j in range(0, i):
denom = 2. * (l[i] - l[j])
elem = (temp[i, j] - temp[j, i])/denom
temp2[i, j] = elem
temp2[j, i] = elem
temp3 = np.dot(u.T, temp2)
return np.dot(temp3, u)
# Seed set because the test is not robust enough to operate on random data
@pytest.mark.seed(1896893923)
def test_laop_3():
    """Forward/backward checks for linalg.syevd on symmetric input.

    Verifies U*U^T == I and U^T*diag(L)*U == A, compares the symbolic
    backward pass against the numpy reference (_syevd_backward), and runs
    numeric gradient checks on both outputs, in single and batch mode.
    """
    # Currently disabled on GPU as syevd needs cuda8
    # and MxNet builds use cuda 7.5
    if not (default_context() == mx.cpu()):
        return
    dtype = np.float64
    rtol_fw = 1e-6
    atol_fw = 1e-6
    num_eps = 1e-4
    rtol_bw = 1e-2
    atol_bw = 1e-2
    # enable numerical checking of gradients
    grad_check = 1
    data1 = mx.symbol.Variable('data1')
    check_fw = lambda sym, location, expected :\
        check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
                               atol=atol_fw, dtype=dtype)
    check_grad = lambda sym, location:\
        check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
                               atol=atol_bw, dtype=dtype)
    # Tile a single (m, n) matrix 3x along a new leading batch dimension.
    rep_3x = lambda a, m, n :\
        np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
    check_bw = lambda sym, location, out_grads, expected :\
        check_symbolic_backward(sym, location, out_grads, expected,
                                rtol=rtol_fw, atol=atol_fw, dtype=dtype)
    # Tests for linalg.syevd
    test_syevd2 = _syevd_combined_symbol(data1)  # Outputs (U U^T, U^T (diag L) U)
    data1_s2 = _make_symm_symbol(data1, ndims=2)
    test_syevd_u_2 = _syevd_first_output(data1_s2)
    test_syevd_l_2 = _syevd_second_output(data1_s2)
    data1_s4 = _make_symm_symbol(data1, ndims=4)
    test_syevd_u_4 = _syevd_first_output(data1_s4)
    test_syevd_l_4 = _syevd_second_output(data1_s4)
    n_lst = [4, 1, 2, 10, 14]
    for n in n_lst:
        #print('\n** syevd: n={}'.format(n))
        data_in1 = np.random.normal(0., 10., (n, n))
        # Symmetrize the random input; syevd requires a symmetric matrix.
        data_in1 = 0.5 * (data_in1 + data_in1.T)
        res_eye = np.eye(n)
        res_a = data_in1
        check_fw(test_syevd2, [data_in1], [res_eye, res_a])
        # Check backward
        grad_u = np.random.normal(0., 2., (n, n))
        grad_l = np.random.normal(0., 2., (n,))
        bw_u, bw_l = _syevd_forward(data_in1)
        grad_a = _syevd_backward(grad_u, grad_l, bw_u, bw_l)
        check_bw(mx.sym.linalg.syevd(data1), [data_in1], [grad_u, grad_l], [grad_a])
        if grad_check == 1:
            # A => U
            check_grad(test_syevd_u_2, [data_in1])
            # A => L
            check_grad(test_syevd_l_2, [data_in1])
        # Batch mode (3x the same thing)
        a_batch = rep_3x(data_in1, n, n)
        reye_batch = rep_3x(res_eye, n, n)
        ra_batch = a_batch
        check_fw(test_syevd2, [a_batch], [reye_batch, ra_batch])
        if grad_check == 1:
            # A => U
            check_grad(test_syevd_u_4, [a_batch])
            # A => L
            check_grad(test_syevd_l_4, [a_batch])
# @piyushghai - Removing the fixed seed for this test.
# Issue for flakiness is tracked at - https://github.com/apache/incubator-mxnet/issues/11721
def test_laop_4():
    """Forward check of linalg.syevd on a fixed 2x2 example, in both
    float64 and float32.

    Currently disabled on GPU as syevd needs cuda8 and MxNet builds use
    cuda 7.5.
    """
    if not (default_context() == mx.cpu()):
        return
    fw_rtol = 1e-6
    fw_atol = 1e-6
    sym_in = mx.symbol.Variable('data1')
    sym_syevd = mx.sym.linalg.syevd(sym_in)
    # Known eigendecomposition of [[1, 2], [2, 4]]: eigenvalues (0, 5).
    a_np = np.array([[1., 2.], [2., 4.]])
    u_np = np.array([[0.89442718, -0.44721359], [0.44721359, 0.89442718]])
    l_np = np.array([0., 5.])
    # Same fixed example, checked once per dtype (float64 first).
    for dt in (np.float64, np.float32):
        check_symbolic_forward(sym_syevd, [a_np], [u_np, l_np],
                               rtol=fw_rtol, atol=fw_atol, dtype=dt)
def test_laop_5():
    """Tests for diagonal and triangular matrix extraction and generation.

    Sweeps all small matrix sizes (n = 1..4), batched and non-batched
    layouts, and every legal diagonal offset; triangle extraction is
    validated by a full extract/rebuild round-trip.
    """
    data = mx.symbol.Variable('data')
    # test complete range of small matrices to cover corner cases
    for n in range(1, 5):
        # test batched and non-batched processing
        for b in range(3):
            shape = (n, n) if b == 0 else (b, n, n)
            data_in = np.random.uniform(1, 10, shape)
            # test all legal offsets of the diagonal
            for offs in range(1-n, n):
                # test extraction of diagonal
                test_diag = mx.sym.linalg.extractdiag(data, offset=offs)
                res_diag = np.diagonal(data_in, offset=offs) if b==0 else np.diagonal(data_in, axis1=1, axis2=2, offset=offs)
                check_symbolic_forward(test_diag, [data_in], [res_diag])
                check_numeric_gradient(test_diag, [data_in])
                # test generation of diagonal matrix
                test_diag2 = mx.sym.linalg.makediag(data, offset=offs)
                res_diag2 = None
                if b == 0:
                    res_diag2 = np.diagflat(res_diag, k=offs)
                else:
                    # Build the batched expectation one slice at a time.
                    for i in range(b):
                        res = np.reshape(np.diagflat(res_diag[i], k=offs), (1, n, n))
                        res_diag2 = res if res_diag2 is None else np.concatenate((res_diag2, res), axis=0)
                check_symbolic_forward(test_diag2, [res_diag], [res_diag2])
                check_numeric_gradient(test_diag2, [res_diag])
                # check both settings for parameter "lower" in case of zero offset
                lower_vals = [True] if offs != 0 else [True, False]
                for lower in lower_vals:
                    # test extraction of triangle by doing a full roundtrip as the intermediate extracted
                    # triangle has different orderings than numpy.
                    test_trian = mx.sym.linalg.extracttrian(data, offset=offs, lower=lower)
                    test_trian = mx.sym.linalg.maketrian(test_trian, offset=offs, lower=lower)
                    # Negative offsets always address the lower triangle;
                    # at offset 0 the "lower" flag decides.
                    extracts_lower = (offs < 0) or ((offs == 0) and lower)
                    res_trian = None
                    if b == 0:
                        res_trian = np.tril(data_in, offs) if extracts_lower else np.triu(data_in, offs)
                    else:
                        for i in range(b):
                            res = np.tril(data_in[i], offs) if extracts_lower else np.triu(data_in[i], offs)
                            res = np.reshape(res, (1, n, n))
                            res_trian = res if res_trian is None else np.concatenate((res_trian, res), axis=0)
                    check_symbolic_forward(test_trian, [data_in], [res_trian])
                    check_numeric_gradient(test_trian, [data_in])
# Tests for linalg.inverse
@pytest.mark.skip(reason="Test crashes https://github.com/apache/incubator-mxnet/issues/15975")
def test_laop_6():
    """Tests for linalg.inverse, linalg.det and linalg.slogdet.

    Uses the well-conditioned matrix I + v*v^T (determinant >= 1) so that
    the inverse and logdet are numerically stable.
    """
    dtype = np.float64
    rtol_fw = 1e-7
    atol_fw = 1e-9
    num_eps = 1e-6
    rtol_bw = 1e-4
    atol_bw = 1e-6
    data = mx.symbol.Variable('data')
    check_fw = lambda sym, location, expected:\
        check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
                               atol=atol_fw, dtype=dtype)
    check_grad = lambda sym, location:\
        check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
                               atol=atol_bw, dtype=dtype)
    ## det(I + dot(v, v.T)) = 1 + dot(v.T, v) >= 1, so it's always invertible;
    ## det is away from zero, so the value of logdet is stable
    v = np.random.random(4)
    a = np.eye(4) + np.outer(v, v)
    a = np.tile(a, (3, 1, 1))
    # Row-swap permutation: multiplying by it flips the determinant's sign.
    permute_mat = np.eye(4)[[1, 0, 2, 3]]
    # test matrix inverse
    r = np.eye(4)
    r = np.tile(r, (3, 1, 1))
    test_inverse = mx.sym.linalg.inverse(data)
    # A * inverse(A) must reproduce the identity.
    test_eye = mx.sym.linalg.gemm2(data, test_inverse)
    check_fw(test_eye, [a], [r])
    check_grad(test_inverse, [a])
    # test matrix determinant
    # det
    r = np.linalg.det(a)
    test_det = mx.sym.linalg.det(data)
    check_fw(test_det, [a], [r])
    check_grad(test_det, [a])
    # test slogdet
    r1 = np.array([1., 1., 1.])
    r2 = np.log(np.abs(np.linalg.det(a)))
    test_sign, test_logabsdet = mx.sym.linalg.slogdet(data)
    check_fw(test_sign, [a], [r1])
    check_fw(test_sign, [np.dot(a, permute_mat)], [-r1])
    check_fw(test_logabsdet, [a], [r2])
    check_grad(test_logabsdet, [a])
def test_stack():
    """Random sweep of mx.sym.stack against np.stack.

    Covers 1-5 input dimensions, 1-3 inputs, and both positive and
    negative axis values; each case is checked forward against numpy and
    with a numeric gradient check.
    """
    for _ in range(100):
        ndim = random.randint(1, 5)
        axis = random.randint(0, ndim)
        # Half the time, express the same axis as a negative index.
        if random.randint(0, 1):
            axis = axis - ndim - 1
        nin = random.randint(1, 3)
        dshape = [random.randint(1, 5) for _ in range(ndim)]
        inputs = [np.random.uniform(size=dshape) for _ in range(nin)]
        output = np.stack(inputs, axis=axis)
        sym_ins = [mx.sym.var('x%d'%i) for i in range(nin)]
        out = mx.sym.stack(*sym_ins, axis=axis)
        check_symbolic_forward(out, inputs, [output])
        check_numeric_gradient(out, inputs)
@pytest.mark.flaky
def test_dropout():
    """End-to-end checks for the Dropout operator.

    Covers output statistics per dropout ratio, 'always' mode, identity
    behavior in inference mode, axis-wise (broadcast) dropout, and
    gradient pass-through — each with cudnn both on and off.
    """
    def zero_count(array, ratio):
        # Count zeros; NaNs only appear (and count as dropped) at ratio 1.
        zeros = 0
        for i in array:
            if i == 0:
                zeros += 1
            elif math.isnan(i):
                assert ratio == 1  # Only valid for ratio = 1
                zeros += 1
        return zeros
    def check_correctness(executor, input, ratio):
        # Statistical sanity check: dropped fraction and rescaled sum.
        input = input.ravel()
        output = executor.outputs[0].asnumpy().ravel()
        input_sum = np.sum(input)
        output_sum = np.sum(output)
        # Make sure input zeroes are none (test data setup check)
        assert zero_count(input, ratio) == 0
        # count number of zeroes in output
        output_zeroes = zero_count(output, ratio)
        # Hopefully should be within ratio/2 %
        error = abs(output_sum - input_sum) / input_sum
        if ratio == 1.0:
            assert output_zeroes == len(input)
        elif ratio > 0.2:
            assert output_zeroes > 0
            assert error < (ratio/2)
        elif ratio == 0:
            assert output_zeroes == 0
    def check_dropout_ratio(ratio, shape, cudnn_off=True):
        # test dropout
        x = mx.sym.var('data')
        y = mx.sym.Dropout(x, p=ratio, cudnn_off=cudnn_off)
        exe = y._simple_bind(ctx=default_context(), data=shape)
        # Surviving units are scaled by 1/(1-p)... here expressed via the
        # kept values being 1/ratio at p=ratio on all-ones input.
        if ratio == 1:
            max_value = float('nan')
        else:
            max_value = 1 if ratio == 0 else 1/ratio
        if ratio == 1:
            min_value = float('nan')
        else:
            min_value = 1 if ratio == 0 else 0
        exe.arg_arrays[0][:] = 1
        exe.forward(is_train=True)
        if not math.isnan(max_value):
            assert exe.outputs[0].asnumpy().max() > 0
        else:
            assert math.isnan(exe.outputs[0].asnumpy().max())
        if not math.isnan(min_value):
            assert exe.outputs[0].asnumpy().min() == min_value
        else:
            assert math.isnan(exe.outputs[0].asnumpy().min())
        check_correctness(exe, exe.arg_arrays[0].asnumpy(), ratio)
        if ratio == 0.5:
            exe.backward([mx.nd.ones(shape)])
            assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
            # In inference mode dropout is the identity.
            exe.forward(is_train=False)
            assert (exe.outputs[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
            exe.backward([mx.nd.ones(shape)])
            assert (exe.grad_arrays[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
            # test permanent dropout
            x = mx.sym.var('data')
            y = mx.sym.Dropout(x, p=ratio, mode='always', cudnn_off=cudnn_off)
            exe = y._simple_bind(ctx=default_context(), data=shape)
            exe.arg_arrays[0][:] = 1
            exe.forward(is_train=True)
            assert exe.outputs[0].asnumpy().max() == max_value
            assert exe.outputs[0].asnumpy().min() == min_value
            exe.backward([mx.nd.ones(shape)])
            assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
            # 'always' mode drops even in inference mode.
            exe.forward(is_train=False)
            assert exe.outputs[0].asnumpy().max() == max_value
            assert exe.outputs[0].asnumpy().min() == min_value
            exe.backward([mx.nd.ones(shape)])
            assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
    def get_slice(x, axis, idx):
        # Index `idx` along `axis`, keeping all other axes whole.
        ix = ()
        for i in range(x.ndim):
            if i == axis:
                ix += (idx,)
            else:
                ix += (slice(None, None, None),)
        return x[ix]
    def check_dropout_axes(ratio, shape, axes, cudnn_off=True):
        # Axis-wise dropout must produce identical slices along each
        # broadcast axis (the mask is shared across those axes).
        compactshape = list(shape)
        for axis in axes:
            compactshape[axis] = 1
        compactx = mx.random.uniform(shape=tuple(compactshape))
        broadcastx = compactx.broadcast_to(shape)
        dropouty = mx.nd.Dropout(broadcastx, p=ratio, axes=axes, cudnn_off=cudnn_off)
        for axis in axes:
            target = get_slice(dropouty, axis, 0).asnumpy()
            for i in range(1, shape[axis]):
                assert(get_slice(dropouty, axis, i).asnumpy() == target).all()
    def check_passthrough(ratio, shape, cudnn_off=True):
        # test inference_mode forward and then backward
        a = mx.random.uniform(shape=shape)
        a.attach_grad()
        with mx.autograd.record(train_mode=False):
            b = mx.nd.Dropout(a, ratio, cudnn_off=cudnn_off)  # dropout acts as identity
        b.backward()
        assert_almost_equal(a.grad.asnumpy(), mx.nd.ones_like(b).asnumpy())
    shape = (100, 100)
    check_dropout_ratio(0.5, shape)
    check_dropout_ratio(0.0, shape)
    check_dropout_ratio(1.0, shape)
    check_dropout_ratio(0.75, shape)
    check_dropout_ratio(0.25, shape)
    check_dropout_ratio(0.5, shape, cudnn_off=False)
    check_dropout_ratio(0.0, shape, cudnn_off=False)
    check_dropout_ratio(1.0, shape, cudnn_off=False)
    check_dropout_ratio(0.75, shape, cudnn_off=False)
    check_dropout_ratio(0.25, shape, cudnn_off=False)
    check_passthrough(0.5, shape)
    check_passthrough(0.0, shape)
    check_passthrough(1.0, shape)
    check_passthrough(0.5, shape, cudnn_off=False)
    check_passthrough(0.0, shape, cudnn_off=False)
    check_passthrough(1.0, shape, cudnn_off=False)
    nshape = (10, 10, 10, 10)
    with mx.autograd.train_mode():
        check_dropout_axes(0.25, nshape, axes = (0,))
        check_dropout_axes(0.25, nshape, axes = (1,))
        check_dropout_axes(0.25, nshape, axes = (2,))
        check_dropout_axes(0.25, nshape, axes = (3,))
        check_dropout_axes(0.25, nshape, axes = (0, 1))
        check_dropout_axes(0.25, nshape, axes = (0, 2))
        check_dropout_axes(0.25, nshape, axes = (0, 3))
        check_dropout_axes(0.25, nshape, axes = (1, 2))
        check_dropout_axes(0.25, nshape, axes = (1, 3))
        check_dropout_axes(0.25, nshape, axes = (2, 3))
        check_dropout_axes(0.25, nshape, axes = (0, 1, 2))
        check_dropout_axes(0.25, nshape, axes = (0, 2, 3))
        check_dropout_axes(0.25, nshape, axes = (1, 2, 3))
        check_dropout_axes(0.25, nshape, axes = (0,), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (1,), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (2,), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (3,), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (0, 1), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (0, 2), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (0, 3), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (1, 2), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (1, 3), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (2, 3), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (0, 1, 2), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (0, 2, 3), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (1, 2, 3), cudnn_off=False)
@pytest.mark.skip(reason="test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/11290")
def test_scatter_gather_nd():
    """Round-trip checks for gather_nd, scatter_nd and _backward_gather_nd
    across several dtypes, including accumulation of duplicate indices."""
    def check(data, idx):
        # gather_nd forward must match numpy advanced indexing, and its
        # backward (scatter with accumulation) must match a numpy scatter.
        data.attach_grad()
        with mx.autograd.record():
            y = mx.nd.gather_nd(data, idx)
            y.backward(y)
        npidx = tuple(i.asnumpy() for i in idx)
        assert (data.asnumpy()[npidx] == y.asnumpy()).all()
        npdata = np.zeros_like(data.asnumpy())
        npdata[npidx] = y.asnumpy()
        assert (npdata == data.grad.asnumpy()).all()
        assert (mx.nd._internal._backward_gather_nd(y, idx, shape=data.shape).asnumpy() == data.grad.asnumpy()).all()
    for dtype in ['int32', 'int64', 'float16', 'float32', 'float64']:
        data = mx.nd.arange(360, dtype=dtype).reshape((3,4,5,6))
        idx = mx.nd.array([[1,1,2], [3, 3, 0], [3,2,1]], dtype='int32')
        check(data, idx)
        idx = mx.nd.array([[1,1,2], [3,3,0], [3,2,1], [5,2,4]], dtype='int32')
        check(data, idx)
        data = mx.nd.array([2, 3, 0], dtype=dtype)
        idx = mx.nd.array([[1, 1, 0], [0, 1, 0]], dtype='int32')
        assert (mx.nd.scatter_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [2, 3]]).all()
        # Duplicate indices: _backward_gather_nd accumulates (2 + 3 = 5),
        # unlike scatter_nd which overwrites.
        data = mx.nd.array([2, 3, 0], dtype=dtype)
        idx = mx.nd.array([[1, 1, 0], [1, 1, 0]], dtype='int32')
        assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [0, 5]]).all()
        data_npy = np.random.randint(0, 10, (100,))
        data = mx.nd.array(data_npy, dtype=dtype)
        idx = mx.nd.zeros(shape=(1, 100), dtype='int32')
        assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data_npy.sum())
        if dtype == 'int64':
            # Values past float64's 53-bit mantissa: accumulation must not
            # lose integer precision.
            data = mx.nd.array([2123162361283621, -31231236374787,
                                -112372937128970, -1378278798172378], dtype=dtype)
            idx = mx.nd.array([[0, 0, 0, 0]], dtype='int32')
            assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data.asnumpy().sum())
def test_gather_nd_check_bound():
    """gather_nd must raise IndexError for out-of-range indices and must
    wrap negative indices the same way numpy does."""
    def _fetch(arr, ind):
        # Force execution so the bound check actually runs.
        mx.nd.gather_nd(arr, ind).asnumpy()
    data = mx.nd.array([[0, 1, 2], [3, 4, 5]])
    # check if indices is out of bound
    # IndexError: index 3 is out of bounds for axis 1 with size 3
    # IndexError: index -5 is out of bounds for axis 1 with size 3
    for bad_indices in (mx.nd.array([[0, 1, 0], [0, 1, 3]]),
                        mx.nd.array([[0, 1, 0], [0, 1, -5]])):
        assertRaises(IndexError, _fetch, data, bad_indices)
    # check if the negative indices are wrapped correctly (-1 -> 2, -2 -> 1)
    wrapped = mx.nd.gather_nd(data, mx.nd.array([[0, 1, -1], [0, 1, -2]]))
    explicit = mx.nd.gather_nd(data, mx.nd.array([[0, 1, 1], [0, 1, 1]]))
    assert_almost_equal(wrapped, explicit, rtol=1e-5, atol=1e-5)
def compare_forw_backw_unary_op(
        name, forward_mxnet_call, forward_numpy_call,
        backward_numpy_call, shape, input_low, input_high, rtol, atol,
        dtype=np.float32):
    """Compare an mxnet unary operator against its numpy reference,
    forward and backward, at the given tolerances.

    `backward_numpy_call` must return the analytic derivative, which is
    multiplied by a random head gradient for the backward comparison.
    """
    op_name = 'unary_op={}, dtype={}'.format(name, dtype)
    data = mx.symbol.Variable(op_name + '_data', dtype=dtype)
    # Random input drawn from the caller-specified valid range.
    data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
    # Adding zeros_like keeps the expression tied to the input symbol.
    op_ex = mx.sym.broadcast_add(
        forward_mxnet_call(data), mx.sym.zeros_like(data),
        name=op_name)
    # Forward: mxnet output vs. numpy reference.
    check_symbolic_forward(op_ex, [data_np], [forward_numpy_call(data_np)],
                           rtol=rtol, atol=atol, dtype=dtype)
    # Backward: chain rule with a random head gradient.
    res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
    check_symbolic_backward(op_ex, [data_np], [res_grad],
                            [backward_numpy_call(data_np) * res_grad],
                            rtol=rtol, atol=atol, dtype=dtype)
def finite_diff_unary_op(
        name, forward_mxnet_call, shape, input_low, input_high, rtol, atol,
        num_eps):
    """Numeric-gradient (finite difference) check for a unary operator.

    Always runs in float64, since finite differencing is too inaccurate
    at lower precision.
    """
    dtype = np.float64
    data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
    data = mx.symbol.Variable('data', dtype=dtype)
    # Adding zeros_like keeps the expression tied to the input symbol.
    op_ex = mx.sym.broadcast_add(
        forward_mxnet_call(data), mx.sym.zeros_like(data),
        name='unary_op={}, dtype={}'.format(name, dtype))
    check_numeric_gradient(op_ex, [data_np], numeric_eps=num_eps, rtol=rtol,
                           atol=atol, dtype=dtype)
def np_smooth_l1(x, sigma):
    """Numpy reference for smooth-L1 loss: quadratic within 1/sigma^2 of
    zero, linear outside."""
    threshold = 1. / (sigma * sigma)
    magnitude = np.abs(x)
    quadratic = 0.5 * np.square(x * sigma)
    linear = magnitude - 0.5 * threshold
    return np.where(magnitude < threshold, quadratic, linear)
def np_smooth_l1_grad(x, sigma):
    """Derivative of np_smooth_l1: sigma^2 * x in the quadratic zone,
    sign(x) in the linear zone."""
    scale = sigma * sigma
    return np.where(np.abs(x) < 1. / scale, scale * x, np.sign(x))
# Tests for unary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
# Seed set because the test is not robust enough to operate on random data
@pytest.mark.seed(192837465)
def test_unary_math_operators():
    """Table-driven checks for unary math operators.

    Each table entry is [mxnet symbol builder, numpy forward reference,
    numpy derivative reference, input_low, input_high]; every operator is
    compared forward/backward per dtype and finite-difference checked in
    float64. scipy-backed entries (gamma, gammaln) are added only when
    scipy is importable.
    """
    have_scipy = True
    try:
        from scipy import special as scipy_special
    except:
        print("Could not import scipy. Skipping unit tests for special functions")
        have_scipy = False
    shape=(9, 10)
    dtype_l = [np.float64, np.float32, np.float16]
    rtol_l = [1e-7, 1e-6, 1e-2]
    rtol_less_l = [1e-6, 1e-5, 1e-2]
    atol_l = [1e-7, 1e-6, 1e-2]
    atol_less_l = [1e-6, 1e-5, 1e-2]
    rtol_fd = 1e-5
    atol_fd = 1e-6
    num_eps = 1e-6
    unary_ops = {
        'arccos' : [lambda x: mx.sym.arccos(x),
                    lambda x: np.arccos(x),
                    lambda x: -1. / np.sqrt(1. - x ** 2.),
                    -0.95, 0.95],
        'arccosh': [lambda x: mx.sym.arccosh(x),
                    lambda x: np.arccosh(x),
                    lambda x: 1. / np.sqrt(x ** 2 - 1.),
                    1.05, 10.0],
        'arcsin': [lambda x: mx.sym.arcsin(x),
                   lambda x: np.arcsin(x),
                   lambda x: 1. / np.sqrt(1. - x ** 2),
                   -0.95, 0.95],
        'arcsinh': [lambda x: mx.sym.arcsinh(x),
                    lambda x: np.arcsinh(x),
                    lambda x: 1. / np.sqrt(x**2 + 1.),
                    -5.0, 5.0],
        'arctan': [lambda x: mx.sym.arctan(x),
                   lambda x: np.arctan(x),
                   lambda x: 1. / (x ** 2. + 1.),
                   -5.0, 5.0],
        'arctanh': [lambda x: mx.sym.arctanh(x),
                    lambda x: np.arctanh(x),
                    lambda x: 1. / (1. - x ** 2),
                    -0.95, 0.95],
        'cbrt': [lambda x: mx.sym.cbrt(x),
                 lambda x: np.cbrt(x),
                 lambda x: 1. / (3. * np.cbrt(x) ** 2),
                 -10.0, 10.0],
        'cos': [lambda x: mx.sym.cos(x),
                lambda x: np.cos(x),
                lambda x: -np.sin(x),
                -5.0, 5.0],
        'cosh': [lambda x: mx.sym.cosh(x),
                 lambda x: np.cosh(x),
                 lambda x: np.sinh(x),
                 -2.0, 2.0],
        'exp': [lambda x: mx.sym.exp(x),
                lambda x: np.exp(x),
                lambda x: np.exp(x),
                -4.0, 4.0],
        'expm1': [lambda x: mx.sym.expm1(x),
                  lambda x: np.expm1(x),
                  lambda x: np.exp(x),
                  -0.1, 0.1],
        'log': [lambda x: mx.sym.log(x),
                lambda x: np.log(x),
                lambda x: 1. / x,
                0.01, 100.0],
        'log10': [lambda x: mx.sym.log10(x),
                  lambda x: np.log10(x),
                  lambda x: 1. / (x * np.log(10.)),
                  0.01, 100.0],
        'log2': [lambda x: mx.sym.log2(x),
                 lambda x: np.log2(x),
                 lambda x: 1. / (x * np.log(2.)),
                 0.01, 100.0],
        'log1p': [lambda x: mx.sym.log1p(x),
                  lambda x: np.log1p(x),
                  lambda x: 1. / (1. + x),
                  -0.1, 0.1],
        'rcbrt': [lambda x: mx.sym.rcbrt(x),
                  lambda x: 1. / np.cbrt(x),
                  lambda x: -1. / (3. * x * np.cbrt(x)),
                  0.01, 100.0],
        'reciprocal': [lambda x: mx.sym.reciprocal(x),
                       lambda x: 1. / x,
                       lambda x: -1. / (x ** 2),
                       0.01, 100.0],
        'relu': [lambda x: mx.sym.relu(x),
                 lambda x: np.maximum(x, 0.),
                 lambda x: 1. * (x > 0.),
                 -5.0, 5.0],
        'rsqrt': [lambda x: mx.sym.rsqrt(x),
                  lambda x: 1. / np.sqrt(x),
                  lambda x: -0.5 / (x * np.sqrt(x)),
                  0.01, 100.0],
        'sigmoid': [lambda x: mx.sym.sigmoid(x),
                    lambda x: 1. / (np.exp(-x) + 1.),
                    lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
                    -3.0, 3.0],
        'softsign': [lambda x: mx.sym.softsign(x),
                    lambda x: x / (1. + np.abs(x)),
                    lambda x: 1. / np.square(1. + np.abs(x)),
                    -3.0, 3.0],
        'sin': [lambda x: mx.sym.sin(x),
                lambda x: np.sin(x),
                lambda x: np.cos(x),
                -5.0, 5.0],
        'sinh': [lambda x: mx.sym.sinh(x),
                 lambda x: np.sinh(x),
                 lambda x: np.cosh(x),
                 -2.0, 2.0],
        'sqrt': [lambda x: mx.sym.sqrt(x),
                 lambda x: np.sqrt(x),
                 lambda x: 0.5 / np.sqrt(x),
                 0.01, 100.0],
        'tan': [lambda x: mx.sym.tan(x),
                lambda x: np.tan(x),
                lambda x: np.tan(x) ** 2 + 1.,
                -1.5, 1.5],
        'tanh': [lambda x: mx.sym.tanh(x),
                 lambda x: np.tanh(x),
                 lambda x: 1. - np.tanh(x) ** 2,
                 -4.0, 4.0],
        'smooth_l1_sig1': [lambda x: mx.sym.smooth_l1(x, scalar=1.),
                           lambda x: np_smooth_l1(x, 1.),
                           lambda x: np_smooth_l1_grad(x, 1.),
                           -2.0, 2.0],
        'smooth_l1_sig_default': [lambda x: mx.sym.smooth_l1(x),
                                  lambda x: np_smooth_l1(x, 1.),
                                  lambda x: np_smooth_l1_grad(x, 1.),
                                  -2.0, 2.0],
        'smooth_l1_sig2': [lambda x: mx.sym.smooth_l1(x, scalar=2.),
                           lambda x: np_smooth_l1(x, 2.),
                           lambda x: np_smooth_l1_grad(x, 2.),
                           -1.0, 1.0]
    }
    if have_scipy:
        unary_ops['gamma'] = [lambda x: mx.sym.gamma(x),
                              lambda x: scipy_special.gamma(x),
                              lambda x: scipy_special.gamma(x) * scipy_special.psi(x),
                              0.01, 5.0]
        unary_ops['gammaln'] = [lambda x: mx.sym.gammaln(x),
                                lambda x: scipy_special.gammaln(x),
                                lambda x: scipy_special.psi(x),
                                0.01, 20.0]
    # Loop over operators
    for name, op in unary_ops.items():
        # Loop over dtype's
        for ind in range(len(dtype_l)):
            dtype = dtype_l[ind]
            # gamma/gammaln get relaxed tolerances (less accurate kernels).
            if name == 'gammaln' or name == 'gamma':
                rtol = rtol_less_l[ind]
                atol = atol_less_l[ind]
            else:
                rtol = rtol_l[ind]
                atol = atol_l[ind]
            compare_forw_backw_unary_op(
                name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
                dtype)
        # Finite difference testing
        finite_diff_unary_op(
            name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
def compare_forw_backw_binary_op(
        name, forward_mxnet_call, forward_numpy_call,
        backward1_numpy_call, backward2_numpy_call, shape, input1_low,
        input1_high, input2_low, input2_high, rtol, atol, dtype=np.float32):
    """Compare an mxnet binary operator with its numpy reference, forward
    and backward (one analytic gradient reference per operand)."""
    op_name = 'binary_op={}, dtype={}'.format(name, dtype)
    data1 = mx.symbol.Variable(op_name + '_data1', dtype=dtype)
    data2 = mx.symbol.Variable(op_name + '_data2', dtype=dtype)
    # Random operands drawn from the caller-specified valid ranges.
    data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
    data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
    # Adding zeros_like keeps the expression tied to the first input symbol.
    op_ex = mx.sym.broadcast_add(
        forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
        name=op_name)
    # Forward: mxnet output vs. numpy reference.
    check_symbolic_forward(op_ex, [data1_np, data2_np],
                           [forward_numpy_call(data1_np, data2_np)],
                           rtol=rtol, atol=atol, dtype=dtype)
    # Backward: chain rule with a random head gradient, per operand.
    res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
    expected_grads = [backward1_numpy_call(data1_np, data2_np) * res_grad,
                      backward2_numpy_call(data1_np, data2_np) * res_grad]
    check_symbolic_backward(op_ex, [data1_np, data2_np], [res_grad],
                            expected_grads, rtol=rtol, atol=atol, dtype=dtype)
def finite_diff_binary_op(
        name, forward_mxnet_call, shape, input1_low, input1_high, input2_low,
        input2_high, rtol, atol, num_eps):
    """Numeric-gradient (finite difference) check for a binary operator.

    Always runs in float64, since finite differencing is too inaccurate
    at lower precision.
    """
    dtype = np.float64
    data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
    data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
    data1 = mx.symbol.Variable('data1', dtype=dtype)
    data2 = mx.symbol.Variable('data2', dtype=dtype)
    # Adding zeros_like keeps the expression tied to the first input symbol.
    op_ex = mx.sym.broadcast_add(
        forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
        name='binary_op={}, dtype={}'.format(name, dtype))
    check_numeric_gradient(op_ex, [data1_np, data2_np], numeric_eps=num_eps,
                           rtol=rtol, atol=atol, dtype=dtype)
# Tests for binary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
def test_binary_math_operators():
    """Table-driven checks for binary math operators.

    Each table entry is [mxnet symbol builder, numpy forward reference,
    numpy derivative w.r.t. operand 1, numpy derivative w.r.t. operand 2,
    low1, high1, low2, high2]; every operator is compared forward/backward
    per dtype and finite-difference checked in float64.
    """
    shape=(9, 10)
    dtype_l = [np.float64, np.float32, np.float16]
    rtol_l = [1e-7, 1e-6, 1e-2]
    atol_l = [1e-7, 1e-6, 1e-2]
    rtol_fd = 1e-5
    atol_fd = 1e-6
    num_eps = 1e-6
    binary_ops = {
        'hypot' : [lambda x, y: mx.sym.hypot(x, y),
                   lambda x, y: np.hypot(x, y),
                   lambda x, y: x / np.hypot(x, y),
                   lambda x, y: y / np.hypot(x, y),
                   -5.0, 5.0, -5.0, 5.0],
        'pow': [lambda x, y: mx.sym.pow(x, y),
                lambda x, y: np.power(x, y),
                lambda x, y: np.power(x, y - 1.) * y,
                lambda x, y: np.power(x, y) * np.log(x),
                0.2, 5.0, -4.0, 4.0],
        'power': [lambda x, y: mx.sym.power(x, y),
                  lambda x, y: np.power(x, y),
                  lambda x, y: np.power(x, y - 1.) * y,
                  lambda x, y: np.power(x, y) * np.log(x),
                  0.2, 5.0, -4.0, 4.0]
    }
    # Loop over operators
    for name, op in binary_ops.items():
        # Loop over dtype's
        for ind in range(len(dtype_l)):
            dtype = dtype_l[ind]
            compare_forw_backw_binary_op(
                name, op[0], op[1], op[2], op[3], shape, op[4], op[5], op[6],
                op[7], rtol_l[ind], atol_l[ind], dtype)
        # Finite difference testing
        finite_diff_binary_op(
            name, op[0], shape, op[4], op[5], op[6], op[7], rtol_fd, atol_fd,
            num_eps)
@pytest.mark.serial
def test_slice():
    """Checks for the slice operator: forward vs. numpy slicing, symbolic
    backward (gradient scattered back into the sliced region), and a
    numeric gradient check, including negative strides and None bounds."""
    def test_slice_forward_backward(a, index):
        a_np = a.asnumpy()
        # Translate the python slice tuple into begin/end/step lists.
        begin = []
        end = []
        step = []
        for slice_i in index:
            begin.append(slice_i.start)
            end.append(slice_i.stop)
            step.append(slice_i.step)
        b = mx.nd.slice(a, begin=begin, end=end, step=step)
        b_np = a_np[index]
        assert same(b.asnumpy(), b_np)
        data = mx.sym.Variable('data')
        slice_sym = mx.sym.slice(data, begin=begin, end=end, step=step)
        # The input gradient is the output gradient scattered back into
        # the sliced positions; everything else stays zero.
        expected_in_grad = np.zeros_like(a_np)
        expected_in_grad[index] = b_np
        check_symbolic_backward(slice_sym, [a_np], [b_np], [expected_in_grad])
    shape = (16, 14, 17, 20)
    arr = mx.nd.arange(np.prod(shape)).reshape(shape=shape)
    index_list = [(slice(None),), (slice(None), slice(None)), (slice(1, 10),), (slice(1, 10), slice(3, 9)),
                  (slice(1, 10), slice(2, 5), slice(3, 6), slice(7, 10)),
                  (slice(1, 10, 2), slice(2, 9, 3), slice(3, 6, 5), slice(7, 10, 2)),
                  (slice(None, None, -1), slice(None, None, -1), slice(None, None, -1)),
                  (slice(10, 0, -2), slice(5, 2, -1), slice(7, None, 3), slice(None, 12, 4))]
    for index in index_list:
        test_slice_forward_backward(arr, index)
    # check numeric gradient
    in_data = np.arange(36).reshape(2, 2, 3, 3)
    data = mx.sym.Variable('data')
    slice_sym = mx.sym.slice(data, begin=[0, None], end=[1, None], step=[2, -1])
    check_numeric_gradient(slice_sym, [in_data])
def test_slice_partial_infer():
    """Partial shape inference for slice/slice_axis with an unknown
    dimension, both in legacy shape semantics (unknown == 0) and numpy
    shape semantics (unknown == -1)."""
    def check_slice_partial_infer(data, begin, end, step, expected_out_shape):
        out = mx.sym.slice(data, begin=begin, end=end, step=step)
        assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
    def check_slice_axis_partial_infer(data, axis, begin, end, expected_out_shape):
        out = mx.sym.slice_axis(data, axis=axis, begin=begin, end=end)
        assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
    # Legacy shape semantics: 0 marks the unknown dimension.
    var1 = mx.sym.var(name="data", shape=(0, 20))
    check_slice_partial_infer(var1, (None, None), (None, 10), [], (0, 10))
    check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (0, 5))
    check_slice_partial_infer(var1, (None, 3), (None, 10), [], (0, 7))
    check_slice_partial_infer(var1, (None, 3), (5, 10), [], (0, 7))
    check_slice_partial_infer(var1, (2, 3), (None, 10), [], (0, 7))
    check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (0, 7))
    check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (0, 3))
    var1 = mx.sym.var(name="data", shape=(10, 0))
    check_slice_axis_partial_infer(var1, 0, 0, 5, (5, 0))
    check_slice_axis_partial_infer(var1, 1, 0, 5, (10, 0))
    # Numpy shape semantics: -1 marks the unknown dimension.
    with mx.np_shape():
        var1 = mx.sym.var(name="data", shape=(-1, 20))
        check_slice_partial_infer(var1, (None, None), (None, 10), [], (-1, 10))
        check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (-1, 5))
        check_slice_partial_infer(var1, (None, 3), (None, 10), [], (-1, 7))
        check_slice_partial_infer(var1, (None, 3), (5, 10), [], (-1, 7))
        check_slice_partial_infer(var1, (2, 3), (None, 10), [], (-1, 7))
        check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (-1, 7))
        check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (-1, 3))
        var1 = mx.sym.var(name='data', shape=(10, -1))
        check_slice_axis_partial_infer(var1, 0, 0, 5, (5, -1))
        check_slice_axis_partial_infer(var1, 1, 0, 5, (10, -1))
def test_float16_min_max():
    """Test for issue: https://github.com/apache/incubator-mxnet/issues/9007

    min/max reductions must survive float16's extreme representable values.
    """
    fp16_info = np.finfo('float16')
    arr = mx.nd.array([fp16_info.min, fp16_info.max], dtype='float16')
    assert arr.dtype == np.float16
    assert mx.nd.min(arr).asscalar() == fp16_info.min
    assert mx.nd.max(arr).asscalar() == fp16_info.max
@mx.use_np_shape
def test_zero_size_min_max():
    """min/max over an ndarray with a zero-sized axis must raise MXNetError.

    The inner helpers were renamed from `min`/`max` to avoid shadowing the
    Python builtins inside this function.
    """
    def _reduce_min():
        # Zero-size reduction: min has no identity element, so this must fail.
        mx.nd.zeros(shape=(5, 0)).min()
    def _reduce_max():
        mx.nd.zeros(shape=(5, 0)).max()
    pytest.raises(MXNetError, _reduce_min)
    pytest.raises(MXNetError, _reduce_max)
def test_squeeze_op():
    """Forward and gradient checks for squeeze against numpy.squeeze."""
    def verify_forward(shape, axis=None):
        src = mx.nd.random.uniform(low=-10.0, high=10.0, shape=shape)
        if axis is None:
            actual = mx.nd.squeeze(src).asnumpy()
            desired = np.squeeze(src.asnumpy())
        else:
            actual = mx.nd.squeeze(src, axis=axis).asnumpy()
            desired = np.squeeze(src.asnumpy(), axis=axis)
        if actual.shape == (1,):
            # as an exception, an all-ones shape like (1, 1, 1) is squeezed
            # to (1,) by mxnet rather than to a scalar
            desired = np.squeeze(src.asnumpy(), axis=tuple(range(1, len(shape))))
        assert same(actual, desired)

    # check forward: single axes, axis tuples, and the default (all axes)
    for ax in (0, 2, 4, (0, 4), (0, 2, 4), None):
        verify_forward((1, 5, 1, 3, 1), ax)
    verify_forward((1, 1, 1, 1))

    # check gradient via finite differences
    data = mx.symbol.Variable('data')
    data_tmp = np.ones((1, 2, 1, 3, 1))
    check_numeric_gradient(mx.sym.squeeze(data), [data_tmp])
    check_numeric_gradient(mx.sym.squeeze(data, axis=2), [data_tmp])
    check_numeric_gradient(mx.sym.squeeze(data, axis=(2, 4)), [data_tmp])
@pytest.mark.serial
def test_adaptive_avg_pool_op():
    """Check mx.nd.contrib.AdaptiveAvgPooling2D against a pure-python reference."""
    def py_adaptive_avg_pool(x, height, width):
        # 2D per frame adaptive avg pool
        def adaptive_avg_pool_frame(x, y):
            # Each output cell averages over its input window; the window
            # bounds use the same floor/ceil arithmetic as the operator,
            # so windows may be uneven when sizes do not divide evenly.
            isizeH, isizeW = x.shape
            osizeH, osizeW = y.shape
            for oh in range(osizeH):
                istartH = int(np.floor(1.0 * (oh * isizeH) / osizeH))
                iendH = int(np.ceil(1.0 * (oh + 1) * isizeH / osizeH))
                kH = iendH - istartH
                for ow in range(osizeW):
                    istartW = int(np.floor(1.0 * (ow * isizeW) / osizeW))
                    iendW = int(np.ceil(1.0 * (ow + 1) * isizeW / osizeW))
                    kW = iendW - istartW
                    xsum = 0
                    for ih in range(kH):
                        for iw in range(kW):
                            xsum += x[istartH+ih][istartW+iw]
                    y[oh][ow] = xsum / kH / kW
        # Pool every (batch, channel) frame independently.
        B,C,_,_ = x.shape
        y = np.empty([B,C,height, width], dtype=x.dtype)
        for b in range(B):
            for c in range(C):
                adaptive_avg_pool_frame(x[b][c], y[b][c])
        return y
    def check_adaptive_avg_pool_op(shape, output_height, output_width=None):
        # A scalar output_size means a square output; a pair means (H, W).
        x = mx.nd.random.uniform(shape=shape)
        if output_width is None:
            y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=output_height)
            npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_height)
        else:
            y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=(output_height, output_width))
            npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_width)
        assert_almost_equal(y.asnumpy(), npy)
    # Exercise every square and rectangular output size from 1 to 10.
    shape = (2, 2, 10, 10)
    for i in range(1, 11):
        check_adaptive_avg_pool_op(shape, i)
        for j in range(1, 11):
            check_adaptive_avg_pool_op(shape, i, j)
def test_bilinear_resize_op():
    """Check mx.nd.contrib.BilinearResize2D (forward, backward, and all resize
    modes) against pure-python bilinear-interpolation references."""
    def py_bilinear_resize(x, outputHeight, outputWidth):
        # Reference forward pass: classic align-corners bilinear interpolation
        # (scale factor uses (in-1)/(out-1)).
        batch, channel, inputHeight, inputWidth = x.shape
        if outputHeight == inputHeight and outputWidth == inputWidth:
            return x
        y = np.empty([batch, channel, outputHeight, outputWidth])
        rheight = 1.0 * (inputHeight - 1) / (outputHeight - 1) if outputHeight > 1 else 0.0
        rwidth = 1.0 * (inputWidth - 1) / (outputWidth - 1) if outputWidth > 1 else 0.0
        for h2 in range(outputHeight):
            h1r = 1.0 * h2 * rheight
            h1 = int(np.floor(h1r))
            h1lambda = h1r - h1
            # h1p/w1p guard against reading past the last row/column
            h1p = 1 if h1 < (inputHeight - 1) else 0
            for w2 in range(outputWidth):
                w1r = 1.0 * w2 * rwidth
                w1 = int(np.floor(w1r))
                w1lambda = w1r - w1
                w1p = 1 if w1 < (inputWidth - 1) else 0
                for b in range(batch):
                    for c in range(channel):
                        y[b][c][h2][w2] = (1-h1lambda)*((1-w1lambda)*x[b][c][h1][w1] + \
                                          w1lambda*x[b][c][h1][w1+w1p]) + \
                                          h1lambda*((1-w1lambda)*x[b][c][h1+h1p][w1] + \
                                          w1lambda*x[b][c][h1+h1p][w1+w1p])
        return y
    def py_bilinear_resize_backward(x, incoming_grads, mode='size'):
        # Reference backward pass: scatter each incoming gradient back to the
        # four source pixels it was interpolated from, weighted accordingly.
        data1 = np.zeros_like(x)
        data2 = incoming_grads
        batchsize = data1.shape[0]
        channels = data1.shape[1]
        height1 = data1.shape[2]
        width1 = data1.shape[3]
        height2 = data2.shape[2]
        width2 = data2.shape[3]
        rheight = float(height1 - 1) / (height2 - 1) if (height2 > 1) else 0
        rwidth = float(width1 - 1) / (width2 - 1) if (width2 > 1) else 0
        # special case: just copy
        if height1 == height2 and width1 == width2:
            data1 += data2
            return [data1]
        for h2 in range(0, height2):
            for w2 in range(0, width2):
                h1r = rheight * h2
                h1 = int(h1r)
                h1p = 1 if (h1 < height1 - 1) else 0
                h1lambda = h1r - h1
                h0lambda = 1 - h1lambda
                #
                w1r = rwidth * w2
                w1 = int(w1r)
                w1p = 1 if (w1 < width1 - 1) else 0
                w1lambda = w1r - w1
                w0lambda = 1 - w1lambda
                #
                for n in range(0, batchsize):
                    for c in range(0, channels):
                        d2val = data2[n][c][h2][w2]
                        data1[n][c][h1][w1] += h0lambda * w0lambda * d2val
                        data1[n][c][h1][w1 + w1p] += h0lambda * w1lambda * d2val
                        data1[n][c][h1 + h1p][w1] += h1lambda * w0lambda * d2val
                        data1[n][c][h1 + h1p][w1 + w1p] += h1lambda * w1lambda * d2val
        if mode == 'like':
            # 'like' mode has a second input that receives zero gradient
            return data1, np.zeros_like(incoming_grads)
        return [data1]
    def check_bilinear_resize_op(shape, height, width):
        # Exercise both the explicit (height, width) and the scale-factor APIs.
        x = mx.nd.random.uniform(shape=shape)
        y = mx.nd.contrib.BilinearResize2D(x, height=height, width=width)
        assert_almost_equal(y, py_bilinear_resize(x.asnumpy(), height, width))
        x_scale = width / shape[-1]
        y_scale = height / shape[-2]
        y = mx.nd.contrib.BilinearResize2D(x, scale_height=y_scale, scale_width=x_scale)
        assert_almost_equal(y.asnumpy(), py_bilinear_resize(x.asnumpy(), height, width))
    def check_bilinear_resize_align_corners_op():
        # Fixed 3x2 -> 6x4 upsample with hand-computed expected outputs for
        # both align_corners settings.
        img_shape = [1, 1, 3, 2]
        data = [64, 32, 32, 64, 50, 100]
        target_height = 6
        target_width = 4
        expected_data = {}
        # align_corners = False
        expected_data[0] = [
            64.000, 56.000, 40.000, 32.000, 56.000, 52.000, 44.000, 40.000, 40.000, 44.000, 52.000, 56.000,
            36.500, 45.625, 63.875, 73.000, 45.500, 56.875, 79.625, 91.000, 50.000, 62.500, 87.500, 100.000
        ]
        # align_corners = True
        expected_data[1] = [
            64.000, 53.333, 42.667, 32.000, 51.200, 49.067, 46.933, 44.800, 38.400, 44.800, 51.200, 57.600,
            35.600, 47.467, 59.333, 71.200, 42.800, 57.067, 71.333, 85.600, 50.000, 66.667, 83.333, 100.000
        ]
        x = np.array(data, dtype=np.float32).reshape(img_shape)
        x_nd = mx.nd.array(x)
        y0 = np.array(expected_data[0]).reshape((1, 1, target_height, target_width))
        y0_nd = mx.nd.contrib.BilinearResize2D(x_nd, height=target_height, width=target_width, mode='size', align_corners=False)
        assert_almost_equal(y0, y0_nd.asnumpy(), atol=1e-3)
        y1 = np.array(expected_data[1]).reshape((1, 1, target_height, target_width))
        y1_nd = mx.nd.contrib.BilinearResize2D(x_nd, height=target_height, width=target_width, mode='size', align_corners=True)
        assert_almost_equal(y1, y1_nd.asnumpy(), atol=1e-3)
    def check_bilinear_resize_modes_op(shape, scale_height=None, scale_width=None, shape_1=None, mode=None):
        # Compute the output size each mode should produce, then verify the
        # operator's shape, forward values, and backward gradients against
        # the python references.
        x = mx.nd.random.uniform(shape=shape)
        original_h = shape[2]
        original_w = shape[3]
        if mode == 'odd_scale':
            assert scale_height is not None and scale_width is not None
            new_h = int(original_h * scale_height) if (original_h % 2) == 0 else \
                int((original_h - 1) * scale_height) + 1
            new_w = int(original_w * scale_width) if (original_w % 2) == 0 \
                else int((original_w - 1) * scale_width) + 1
            y = mx.nd.contrib.BilinearResize2D(x, scale_height=scale_height,
                                               scale_width=scale_width,
                                               mode='odd_scale')
        elif mode == 'to_even_down':
            new_h = original_h if (original_h % 2) == 0 else original_h - 1
            new_w = original_w if (original_w % 2) == 0 else original_w - 1
            y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_down')
        elif mode == 'to_even_up':
            new_h = original_h if (original_h % 2) == 0 else original_h + 1
            new_w = original_w if (original_w % 2) == 0 else original_w + 1
            y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_up')
        elif mode == 'to_odd_down':
            new_h = original_h if (original_h % 2) == 1 else original_h - 1
            new_w = original_w if (original_w % 2) == 1 else original_w - 1
            y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_down')
        elif mode == 'to_odd_up':
            new_h = original_h if (original_h % 2) == 1 else original_h + 1
            new_w = original_w if (original_w % 2) == 1 else original_w + 1
            y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_up')
        elif mode == 'like':
            # 'like' takes its output size from a second input array
            x_1 = mx.nd.random.uniform(shape=shape_1)
            new_h = x_1.shape[2]
            new_w = x_1.shape[3]
            y = mx.nd.contrib.BilinearResize2D(x, x_1, mode='like')
        new_shape_desired = np.array([shape[0], shape[1], new_h, new_w], dtype='int')
        new_shape_got = np.array(y.shape, dtype='int')
        data_sym = mx.sym.var('data')
        data_np = x.asnumpy()
        expected = py_bilinear_resize(data_np, new_h, new_w)
        out_grads = np.ones([shape[0], shape[1], new_h, new_w])
        expected_backward = py_bilinear_resize_backward(data_np, out_grads, mode)
        assert_array_equal(new_shape_desired, new_shape_got, "Desired and got shapes are not equal. {} vs {}".format(
            str(new_shape_desired.tolist()), str(new_shape_got.tolist())))
        assert_almost_equal(y.asnumpy(), expected, 1e-3, 0)
        if mode != 'like':
            resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, None, scale_height=scale_height, scale_width=scale_width, mode=mode)
            check_symbolic_forward(resize_sym, [data_np], [expected], rtol=1e-3, atol=1e-5)
            check_symbolic_backward(resize_sym, [data_np], [out_grads], expected_backward, rtol=1e-3, atol=1e-5)
            check_numeric_gradient(resize_sym, [data_np], rtol=1e-2, atol=1e-4)
        else:
            data_sym_like = mx.sym.var('data_like')
            resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, data_sym_like, mode=mode)
            date_np_like = x_1.asnumpy()
            check_symbolic_forward(resize_sym, [data_np, date_np_like], [expected], rtol=1e-3, atol=1e-5)
            check_symbolic_backward(resize_sym, [data_np, date_np_like], [out_grads], expected_backward, rtol=1e-3, atol=1e-5)
            check_numeric_gradient(resize_sym, [data_np, date_np_like], rtol=1e-2, atol=1e-4)
    # Plain size-based resizes: down, identity, up, and rectangular.
    shape = (2, 2, 10, 10)
    check_bilinear_resize_op(shape, 5, 5)
    check_bilinear_resize_op(shape, 10, 10)
    check_bilinear_resize_op(shape, 15, 15)
    check_bilinear_resize_op(shape, 3, 7)
    check_bilinear_resize_op(shape, 13, 17)
    # Mode-based resizes on an even-sized input...
    shape = (2, 2, 20, 20)
    check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale')
    check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale')
    check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale')
    check_bilinear_resize_modes_op(shape, mode='to_even_down')
    check_bilinear_resize_modes_op(shape, mode='to_even_up')
    check_bilinear_resize_modes_op(shape, mode='to_odd_down')
    check_bilinear_resize_modes_op(shape, mode='to_odd_up')
    # ...and on an odd-sized input.
    shape = (2, 2, 21, 21)
    check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale')
    check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale')
    check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale')
    check_bilinear_resize_modes_op(shape, mode='to_even_down')
    check_bilinear_resize_modes_op(shape, mode='to_even_up')
    check_bilinear_resize_modes_op(shape, mode='to_odd_down')
    check_bilinear_resize_modes_op(shape, mode='to_odd_up')
    # 'like' mode in both directions, plus the align_corners fixture.
    shape_0 = (2, 2, 21, 21)
    shape_1 = (2, 2, 10, 10)
    check_bilinear_resize_modes_op(shape_0, shape_1=shape_1, mode='like')
    check_bilinear_resize_modes_op(shape_1, shape_1=shape_0, mode='like')
    check_bilinear_resize_align_corners_op()
def test_multi_proposal_op():
    """Verify contrib.MultiProposal: its batched output must match running
    contrib.Proposal per sample, and its backward must produce zero gradients."""
    # parameters
    feature_stride = 16
    scales = (8, 16, 32)
    ratios = (0.5, 1, 2)
    rpn_pre_nms_top_n = 12000
    rpn_post_nms_top_n = 2000
    threshold = 0.7
    rpn_min_size = 16
    batch_size = 20
    feat_len = (1000 + 15) // 16
    H, W = feat_len, feat_len
    num_anchors = len(scales) * len(ratios)
    count_anchors = H * W * num_anchors
    '''
    cls_prob: (batch_size, 2 * num_anchors, H, W)
    bbox_pred: (batch_size, 4 * num_anchors, H, W)
    im_info: (batch_size, 3)
    '''
    cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = np.float32)
    bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = np.float32)
    im_info = mx.nd.empty((batch_size, 3), dtype = np.float32)
    cls_prob = mx.nd.array(np.random.random(cls_prob.shape))
    bbox_pred = mx.nd.array(np.random.random(bbox_pred.shape))
    # Random per-image (height, width, scale) metadata.
    for i in range(batch_size):
        im_size = np.random.randint(100, feat_len * feature_stride, size = (2,))
        im_scale = np.random.randint(70, 100) / 100.0
        im_info[i, :] = [im_size[0], im_size[1], im_scale]
    def get_sub(arr, i):
        # Slice out sample i while keeping a leading batch axis of 1.
        new_shape = list(arr.shape)
        new_shape[0] = 1
        res = arr[i].reshape(new_shape)
        return res
    def check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
        # Run single-image Proposal per sample, then compare with the batched
        # MultiProposal output (boxes, batch indices, and scores).
        single_proposal = []
        single_score = []
        for i in range(batch_size):
            rois, score = mx.nd.contrib.Proposal(
                    cls_prob = get_sub(cls_prob, i),
                    bbox_pred = get_sub(bbox_pred, i),
                    im_info = get_sub(im_info, i),
                    feature_stride = feature_stride,
                    scales = scales,
                    ratios = ratios,
                    rpn_pre_nms_top_n = rpn_pre_nms_top_n,
                    rpn_post_nms_top_n = rpn_post_nms_top_n,
                    threshold = threshold,
                    rpn_min_size = rpn_min_size, output_score = True)
            single_proposal.append(rois)
            single_score.append(score)
        multi_proposal, multi_score = mx.nd.contrib.MultiProposal(
                cls_prob = cls_prob,
                bbox_pred = bbox_pred,
                im_info = im_info,
                feature_stride = feature_stride,
                scales = scales,
                ratios = ratios,
                rpn_pre_nms_top_n = rpn_pre_nms_top_n,
                rpn_post_nms_top_n = rpn_post_nms_top_n,
                threshold = threshold,
                rpn_min_size = rpn_min_size, output_score = True)
        single_proposal = mx.nd.stack(*single_proposal).reshape(multi_proposal.shape)
        single_score = mx.nd.stack(*single_score).reshape(multi_score.shape)
        single_proposal_np = single_proposal.asnumpy()
        multi_proposal_np = multi_proposal.asnumpy()
        single_score_np = single_score.asnumpy()
        multi_score_np = multi_score.asnumpy()
        # check rois x1,y1,x2,y2
        assert np.allclose(single_proposal_np[:, 1:], multi_proposal_np[:, 1:])
        # check rois batch_idx
        for i in range(batch_size):
            start = i * rpn_post_nms_top_n
            end = start + rpn_post_nms_top_n
            assert (multi_proposal_np[start:end, 0] == i).all()
        # check score
        assert np.allclose(single_score_np, multi_score_np)
    def check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
        # MultiProposal is non-differentiable: all input gradients must be zero.
        im_info_sym = mx.sym.Variable('im_info')
        cls_prob_sym = mx.sym.Variable('cls_prob')
        bbox_pred_sym = mx.sym.Variable('bbox_pred')
        sym = mx.sym.contrib.MultiProposal(
                cls_prob = cls_prob_sym,
                bbox_pred = bbox_pred_sym,
                im_info = im_info_sym,
                feature_stride = feature_stride,
                scales = scales,
                ratios = ratios,
                rpn_pre_nms_top_n = rpn_pre_nms_top_n,
                rpn_post_nms_top_n = rpn_post_nms_top_n,
                threshold = threshold,
                rpn_min_size = rpn_min_size, output_score = False)
        location = [cls_prob.asnumpy(), bbox_pred.asnumpy(), im_info.asnumpy()]
        expected = [np.zeros_like(e) for e in location]
        out_grads = [np.ones((rpn_post_nms_top_n, 5))]
        check_symbolic_backward(sym, location, out_grads, expected)
    check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
    check_forward(rpn_pre_nms_top_n, 1500)
    check_forward(1000, 500)
    check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
def test_quadratic_function():
    """Check contrib.quadratic forward/backward against a*x^2 + b*x + c."""
    def quad(x, a, b, c):
        return a * x ** 2 + b * x + c

    coeff_a = np.random.random_sample()
    coeff_b = np.random.random_sample()
    coeff_c = np.random.random_sample()
    data = mx.symbol.Variable('data')
    quad_sym = mx.sym.contrib.quadratic(data=data, a=coeff_a, b=coeff_b, c=coeff_c)
    for dtype in [np.float16, np.float32, np.float64]:
        # float16 needs a much looser tolerance
        tol = 1e-2 if dtype is np.float16 else 1e-5
        for ndim in range(1, 6):
            shape = rand_shape_nd(ndim, 5)
            data_np = np.random.randn(*shape).astype(dtype)
            expected = quad(data_np, coeff_a, coeff_b, coeff_c)
            backward_expected = 2 * coeff_a * data_np + coeff_b  # d/dx (ax^2+bx+c)
            # imperative forward
            output = mx.nd.contrib.quadratic(mx.nd.array(data_np), a=coeff_a, b=coeff_b, c=coeff_c)
            assert_almost_equal(output, expected, rtol=tol, atol=tol)
            # symbolic forward
            check_symbolic_forward(quad_sym, [data_np], [expected], rtol=tol, atol=tol)
            # symbolic backward against the analytic gradient
            check_symbolic_backward(quad_sym, [data_np], [np.ones(expected.shape)],
                                    [backward_expected], rtol=tol, atol=tol)
            # backward via finite differences
            check_numeric_gradient(quad_sym, [data_np], atol=0.001)
def allclose_function(contexts):
    """Compare mx.nd.contrib.allclose with numpy.allclose on each context in
    *contexts*, printing a detailed per-element report on any mismatch and
    asserting that every context agrees with numpy."""
    def getRandom(base, percent = 1.):
        # Jitter *base* by up to +/- percent% so tolerances vary per run.
        return base * (1 + percent * (2 * np.random.random_sample() - 1.) / 100)
    title = 'exp'
    for ctx in contexts:
        title += ' cpu' if ctx == mx.cpu() else ' gpu'
    title += '          nElem     shape'
    num_ctx = len(contexts)
    result = [False, False]
    for dtype in [np.float16, np.float32, np.float64]:
        rtol = getRandom(1e-2 if dtype is np.float16 else 1e-5)
        atol = getRandom(1e-4 if dtype is np.float16 else 1e-7)
        print('\nnumpy.{}: atol = {}  rtol = {}'.format(dtype.__name__, atol, rtol))
        print(title)
        for ndim in range(1, 10):
            shape = rand_shape_nd(ndim, 8)
            a_np = np.random.randn(*shape).astype(dtype)
            # b is a tiny perturbation of a, so closeness is borderline
            b_np = (a_np + np.random.randn(*shape).astype(dtype) / 10000000).astype(dtype)
            expected = np.allclose(a_np, b_np, rtol, atol)
            for n, ctx in enumerate(contexts):
                a_ctx = mx.nd.array(a_np, dtype = dtype, ctx=ctx)
                b_ctx = mx.nd.array(b_np, dtype = dtype, ctx=ctx)
                output = mx.nd.contrib.allclose(a_ctx, b_ctx, rtol=rtol, atol=atol)
                result[n] = output.asnumpy() == 1
                if expected != result[n]:
                    # Preparing the output of elements of the array, which are considered as "not close" AND
                    # corresponding elements of comparison CPU/GPU/Python vectors, which are considered as "close"
                    v_ctx = 'CPU' if ctx == mx.cpu() else 'GPU'
                    if expected:
                        # numpy said close, the context said not close
                        v_cmp = 'Python'
                        a_b = a_ctx.asnumpy()
                        b_b = b_ctx.asnumpy()
                        a_g = np.asarray(a_np)
                        b_g = np.asarray(b_np)
                    else:
                        # the context said close, numpy said not close
                        v_cmp = v_ctx
                        v_ctx = 'Python'
                        a_b = np.asarray(a_np)
                        b_b = np.asarray(b_np)
                        a_g = a_ctx.asnumpy()
                        b_g = b_ctx.asnumpy()
                    print('\n *** Violations found on %s, but not on %s side  ***' % (v_ctx, v_cmp))
                    frmt = "                 a[{0:d}]:               b[{0:d}]:" \
                           "          abs(a[{0:d}]-b[{0:d}]) - atol + rtol*abs(b[{0:d}]):"
                    # Define the indices of all violations and corresponding values of coordinates
                    bad_indexes = np.abs(a_b - b_b) >= atol + rtol * abs(b_b)
                    a_values = [a_b[bad_indexes], a_g[bad_indexes]]
                    b_values = [b_b[bad_indexes], b_g[bad_indexes]]
                    idx = np.asarray(np.where(bad_indexes == True))
                    idx = idx.reshape(1, idx.size)
                    idx_flat = np.asarray(np.where(bad_indexes.flatten() == True)).flatten()
                    for i in range(len(a_values[0])):
                        flat_idx = idx_flat[i]
                        print('{}:  index = {}   flat_index = {}'.format('%4d'%i, idx[i], flat_idx))
                        print(frmt.format(flat_idx))
                        for j in range(2):
                            diff = np.abs(a_values[j][i]-b_values[j][i]) - atol + rtol*abs(b_values[j][i])
                            print('{}:  {}  {}  {}'.format('%6s'%v_ctx, a_values[j][i], b_values[j][i], diff))
            # One summary line per shape (second result column only when two contexts).
            if num_ctx == 1:
                print('  {0:d}   {1:d}    {2:10d}   {3:}'.format(expected, result[0], np.prod(shape), shape))
            else:
                print('  {0:d}   {1:d}   {2:d}    {3:10d}   {4:}'.format(expected, result[0], result[1], np.prod(shape), shape))
            if expected != result[0] or num_ctx > 1 and expected != result[1]:
                assert False
@pytest.mark.serial
def test_allclose_function():
    """Run the allclose comparison on the default context only."""
    allclose_function([default_context()])
def test_histogram():
    """Compare mx.nd.histogram and mx.sym.histogram with numpy.histogram,
    using both a bin count + range and explicit bin edges."""
    for ndim in range(1, 6):
        shape = rand_shape_nd(ndim)
        x = rand_ndarray(shape, stype='default', dtype=np.float64)
        mx_bins = mx.nd.array([-1.0, 0.5, 2.0, 4.5, 50.0], dtype=np.float64)
        np_bins = mx_bins.asnumpy()
        bin_cnt = random.randint(2, 10)
        bin_range = (-2.5, 2.5)
        # uniform bins specified by count + range
        mx_histo1, mx_bins1 = mx.nd.histogram(x, bins=bin_cnt, range=bin_range)
        np_histo1, np_bins1 = np.histogram(x.asnumpy(), bin_cnt, range=bin_range)
        assert_almost_equal(mx_bins1, np_bins1)
        assert_almost_equal(mx_histo1, np_histo1, rtol=1e-3, atol=1e-5)
        # explicit bin edges
        mx_histo2, mx_bins2 = mx.nd.histogram(x, bins=mx_bins)
        np_histo2, np_bins2 = np.histogram(x.asnumpy(), np_bins)
        assert_almost_equal(mx_histo2, np_histo2, rtol=1e-3, atol=1e-5)
        assert_almost_equal(mx_bins2, np_bins2, rtol=1e-3, atol=1e-5)
        # symbolic path for both bin specifications
        data = mx.sym.Variable("data")
        bins = mx.sym.Variable("bins")
        histo1 = mx.sym.histogram(a=data, bins=bin_cnt, range=bin_range)
        histo2 = mx.sym.histogram(a=data, bins=bins)
        executor1 = histo1._bind(ctx=default_context(), args={"data" : x})
        executor1.forward(is_train=False)
        assert_almost_equal(np_histo1, executor1.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo1", "FORWARD_histo1"), equal_nan=False)
        executor2 = histo2._bind(ctx=default_context(), args={"data" : x, "bins" : mx_bins})
        executor2.forward(is_train=False)
        assert_almost_equal(np_histo2, executor2.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo2", "FORWARD_histo2"), equal_nan=False)
@pytest.mark.skip(reason="test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/13915")
def test_activation():
    """Check every mx.sym.Activation act_type against numpy references for the
    forward value and the analytic gradient, plus a finite-difference check."""
    shapes = [(9,), (9, 10), (9, 10, 10), (1, 9, 10, 10)]
    # Tolerances per dtype (float16 is much looser).
    dtype_l = [np.float64, np.float32, np.float16]
    rtol_l = [1e-7, 1e-6, 1e-2]
    atol_l = [1e-7, 1e-6, 1e-2]
    rtol_fd = 1e-5
    atol_fd = 1e-6
    num_eps = 1e-6
    # name -> [symbol builder, numpy forward, numpy gradient, input lo, input hi]
    unary_ops = {
        'relu': [lambda x: mx.sym.Activation(x, act_type='relu'),
                 lambda x: np.maximum(x, 0.),
                 lambda x: 1. * (x > 0.),
                 -5.0, 5.0],
        'sigmoid': [lambda x: mx.sym.Activation(x, act_type='sigmoid'),
                    lambda x: 1. / (np.exp(-x) + 1.),
                    lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
                    -3.0, 3.0],
        'tanh': [lambda x: mx.sym.Activation(x, act_type='tanh'),
                 lambda x: np.tanh(x),
                 lambda x: 1. - np.tanh(x) ** 2,
                 -4.0, 4.0],
        'softrelu': [lambda x: mx.sym.Activation(x, act_type='softrelu'),
                     lambda x: np.log(1. + np.exp(x)),
                     lambda x: 1. - 1 / (1 + np.exp(x)),
                     -3.0, 3.0],
        'softsign': [lambda x: mx.sym.Activation(x, act_type='softsign'),
                     lambda x: x / (1. + np.abs(x)),
                     lambda x: 1. / np.square(1. + np.abs(x)),
                     -3.0, 3.0],
    }
    # Loop over operators
    for name, op in unary_ops.items():
        # Loop over shapes
        for shape in shapes:
            # Loop over dtype's
            for ind in range(len(dtype_l)):
                dtype = dtype_l[ind]
                rtol = rtol_l[ind]
                atol = atol_l[ind]
                compare_forw_backw_unary_op(
                    name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
                    dtype)
            # Finite difference testing
            finite_diff_unary_op(
                name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
@pytest.mark.serial
def test_ravel():
    # check_symbolic_forward uses float arrays internally, which limits the
    # representable flat-index range: dim==4 with data values up to ~100 can
    # already hit precision issues, so keep the value range modest.
    a = mx.sym.Variable('a')
    for dim in [1, 2, 3, 4]:
        data = np.random.randint(50, size=(dim, 500))
        shape = tuple(np.add(np.amax(data, axis=1), [1]))
        ravel_npy = np.ravel_multi_index(data, shape)
        # Test the exact shape, then the same shape with its leading dim as -1.
        for shp in (shape, (-1,) + shape[1:]):
            fwd = mx.sym.ravel_multi_index(a, shape=shp)
            check_symbolic_forward(fwd, location={'a': data}, expected=[ravel_npy])
            inv = mx.sym.unravel_index(a, shape=shp)
            check_symbolic_forward(inv, location={'a': ravel_npy}, expected=[data])
def test_unravel_index():
    """mx.nd.unravel_index must match numpy.unravel_index for 1d/2d/3d index arrays."""
    unravel_shape = (2, 10)
    flat_size = np.prod(unravel_shape)
    for idx_shape in [(10,), (2, 10), (3, 4, 5)]:
        flat = np.random.randint(0, flat_size, size=idx_shape)
        expected = np.stack(np.unravel_index(flat, shape=unravel_shape), 0)
        got = mx.nd.unravel_index(mx.nd.array(flat), shape=unravel_shape)
        assert_array_equal(expected, got.asnumpy())
def test_context_num_gpus():
    """num_gpus() must report a non-negative count."""
    try:
        # Runs on both CPU-only and GPU hosts, so no exact count can be asserted.
        assert mx.context.num_gpus() >= 0
    except mx.MXNetError as e:
        # On a CPU-only host CUDA sometimes cannot determine the device count;
        # any non-CUDA error is a real failure.
        if "CUDA" not in str(e):
            raise
@pytest.mark.serial
def test_op_roi_align():
    """Check contrib.ROIAlign forward/backward against a pure-python reference
    (including position-sensitive mode), plus numeric-gradient checks."""
    T = np.float32  # all reference arithmetic is forced to float32
    def assert_same_dtype(dtype_a, dtype_b):
        '''
        Assert whether the two data type are the same
        Parameters
        ----------
        dtype_a, dtype_b: type
            Input data types to compare
        '''
        assert dtype_a == dtype_b,\
            TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b))
    def bilinear_interpolate(bottom, height, width, y, x):
        # Returns the interpolated value at (y, x) plus the list of
        # (row, col, weight) contributions needed for the backward pass.
        if y < -1.0 or y > height or x < -1.0 or x > width:
            return T(0.0), []
        x = T(max(0.0, x))
        y = T(max(0.0, y))
        x_low = int(x)
        y_low = int(y)
        if x_low >= width - 1:
            x_low = x_high = width - 1
            x = T(x_low)
        else:
            x_high = x_low + 1
        if y_low >= height - 1:
            y_low = y_high = height - 1
            y = T(y_low)
        else:
            y_high = y_low + 1
        ly = y - T(y_low)
        lx = x - T(x_low)
        hy = T(1.0) - ly
        hx = T(1.0) - lx
        v1 = bottom[y_low, x_low]
        v2 = bottom[y_low, x_high]
        v3 = bottom[y_high, x_low]
        v4 = bottom[y_high, x_high]
        w1 = hy * hx
        w2 = hy * lx
        w3 = ly * hx
        w4 = ly * lx
        assert_same_dtype(w1.dtype, T)
        assert_same_dtype(w2.dtype, T)
        assert_same_dtype(w3.dtype, T)
        assert_same_dtype(w4.dtype, T)
        val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
        assert_same_dtype(val.dtype, T)
        grad = [(y_low, x_low, w1), (y_low, x_high, w2),
                (y_high, x_low, w3), (y_high, x_high, w4)
                ]
        return val, grad
    def roialign_forward_backward(data, rois, pooled_size, spatial_scale, sampling_ratio,
                                  position_sensitive, dy):
        # Reference ROIAlign: average sampling_ratio^2 (or adaptive) bilinear
        # samples per output bin, accumulating data gradients along the way.
        N, C, H, W = data.shape
        R = rois.shape[0]
        PH, PW = pooled_size
        assert rois.ndim == 2,\
            ValueError(
                'The ndim of rois should be 2 rather than %d' % rois.ndim)
        assert rois.shape[1] == 5,\
            ValueError(
                'The length of the axis 1 of rois should be 5 rather than %d' % rois.shape[1])
        assert_same_dtype(data.dtype, T)
        assert_same_dtype(rois.dtype, T)
        # In position-sensitive mode each output channel reads from a
        # different slice of PH*PW input channels.
        C_out = C // PH // PW if position_sensitive else C
        out = np.zeros((R, C_out, PH, PW), dtype=T)
        dx = np.zeros_like(data)
        # ROIAlign does not backprop into rois; drois stays all-zero.
        drois = np.zeros_like(rois)
        for r in range(R):
            batch_ind = int(rois[r, 0])
            sw, sh, ew, eh = rois[r, 1:5] * T(spatial_scale)
            roi_w = T(max(ew - sw, 1.0))
            roi_h = T(max(eh - sh, 1.0))
            bin_h = roi_h / T(PH)
            bin_w = roi_w / T(PW)
            bdata = data[batch_ind]
            if sampling_ratio > 0:
                roi_bin_grid_h = roi_bin_grid_w = sampling_ratio
            else:
                # adaptive grid: roughly one sample per input pixel
                roi_bin_grid_h = int(np.ceil(roi_h / T(PH)))
                roi_bin_grid_w = int(np.ceil(roi_w / T(PW)))
            count = T(roi_bin_grid_h * roi_bin_grid_w)
            for c in range(C_out):
                for ph in range(PH):
                    for pw in range(PW):
                        val = T(0.0)
                        c_in = c * PH * PW + ph * PW + pw if position_sensitive else c
                        for iy in range(roi_bin_grid_h):
                            y = sh + T(ph) * bin_h + (T(iy) + T(0.5)) * \
                                bin_h / T(roi_bin_grid_h)
                            for ix in range(roi_bin_grid_w):
                                x = sw + T(pw) * bin_w + (T(ix) + T(0.5)) * \
                                    bin_w / T(roi_bin_grid_w)
                                v, g = bilinear_interpolate(
                                    bdata[c_in], H, W, y, x)
                                assert_same_dtype(v.dtype, T)
                                val += v
                                # compute grad
                                for qy, qx, qw in g:
                                    assert_same_dtype(qw.dtype, T)
                                    dx[batch_ind, c_in, qy, qx] += dy[r,
                                                                     c, ph, pw] * qw / count
                        out[r, c, ph, pw] = val / count
        assert_same_dtype(out.dtype, T)
        return out, [dx, drois]
    def test_roi_align_value(sampling_ratio=0, position_sensitive=False):
        # Compare operator forward output and autograd gradients against the
        # python reference on random rois.
        ctx = default_context()
        dtype = np.float32
        dlen = 224
        N, C, H, W = 5, 3, 16, 16
        R = 7
        pooled_size = (3, 4)
        C = C * pooled_size[0] * pooled_size[1] if position_sensitive else C
        spatial_scale = H * 1.0 / dlen
        data = mx.nd.array(
            np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype)
        center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
        wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
        batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx)
        pos = mx.nd.concat(center_xy - wh / 2, center_xy + wh / 2, dim=1)
        rois = mx.nd.concat(batch_ind, pos, dim=1)
        data.attach_grad()
        rois.attach_grad()
        with mx.autograd.record():
            output = mx.nd.contrib.ROIAlign(data, rois, pooled_size=pooled_size,
                                            spatial_scale=spatial_scale, sample_ratio=sampling_ratio,
                                            position_sensitive=position_sensitive)
        C_out = C // pooled_size[0] // pooled_size[1] if position_sensitive else C
        dy = mx.nd.random.uniform(-1, 1, (R, C_out) +
                                  pooled_size, ctx=ctx, dtype=dtype)
        output.backward(dy)
        real_output, [dx, drois] = roialign_forward_backward(data.asnumpy(), rois.asnumpy(), pooled_size,
                                                             spatial_scale, sampling_ratio,
                                                             position_sensitive, dy.asnumpy())
        assert_almost_equal(output, real_output, atol=1e-3)
        assert_almost_equal(data.grad, dx, atol=1e-3)
        assert_almost_equal(rois.grad, drois, atol=1e-3)
    # modified from test_roipooling()
    def test_roi_align_autograd(sampling_ratio=0):
        # Numeric gradient check for both 'write' and 'add' gradient modes.
        ctx = default_context()
        data = mx.symbol.Variable(name='data')
        rois = mx.symbol.Variable(name='rois')
        test = mx.symbol.contrib.ROIAlign(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1,
                                          sample_ratio=sampling_ratio)
        x1 = np.random.rand(4, 1, 12, 12).astype('float64')
        x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2],
                       [1, 3.1, 1.1, 5.2, 10.2]], dtype='float64')
        check_numeric_gradient(sym=test, location=[x1, x2],
                               grad_nodes={'data': 'write', 'rois': 'null'},
                               numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
        check_numeric_gradient(sym=test, location=[x1, x2],
                               grad_nodes={'data': 'add', 'rois': 'null'},
                               numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
    test_roi_align_value()
    test_roi_align_value(sampling_ratio=2)
    test_roi_align_value(position_sensitive=True)
    test_roi_align_autograd()
def test_op_rroi_align():
    """Check contrib.RROIAlign (rotated ROI align, forward only) against a
    pure-python reference on random rotated rois."""
    T = np.float32  # all reference arithmetic is forced to float32
    def assert_same_dtype(dtype_a, dtype_b):
        '''
        Assert whether the two data type are the same
        Parameters
        ----------
        dtype_a, dtype_b: type
            Input data types to compare
        '''
        assert dtype_a == dtype_b,\
            TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b))
    def bilinear_interpolate(bottom, height, width, y, x):
        # Forward-only bilinear sample at (y, x); out-of-range points return 0.
        if y < -1.0 or y > height or x < -1.0 or x > width:
            return T(0.0)
        x = T(max(0.0, x))
        y = T(max(0.0, y))
        x_low = int(x)
        y_low = int(y)
        if x_low >= width - 1:
            x_low = x_high = width - 1
            x = T(x_low)
        else:
            x_high = x_low + 1
        if y_low >= height - 1:
            y_low = y_high = height - 1
            y = T(y_low)
        else:
            y_high = y_low + 1
        ly = y - T(y_low)
        lx = x - T(x_low)
        hy = T(1.0) - ly
        hx = T(1.0) - lx
        v1 = bottom[y_low, x_low]
        v2 = bottom[y_low, x_high]
        v3 = bottom[y_high, x_low]
        v4 = bottom[y_high, x_high]
        w1 = hy * hx
        w2 = hy * lx
        w3 = ly * hx
        w4 = ly * lx
        assert_same_dtype(w1.dtype, T)
        assert_same_dtype(w2.dtype, T)
        assert_same_dtype(w3.dtype, T)
        assert_same_dtype(w4.dtype, T)
        val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
        assert_same_dtype(val.dtype, T)
        return val
    def rroialign_forward(data, rois, pooled_size, spatial_scale, sampling_ratio):
        # Reference forward pass. Each roi row is
        # (batch_idx, center_w, center_h, w, h, angle_degrees); sample points
        # are laid out in the roi's local frame then rotated into the image.
        N, C, H, W = data.shape
        R = rois.shape[0]
        PH, PW = pooled_size
        assert rois.ndim == 2,\
            ValueError(
                'The ndim of rois should be 2 rather than %d' % rois.ndim)
        assert rois.shape[1] == 6,\
            ValueError(
                'The length of the axis 1 of rois should be 6 rather than %d' % rois.shape[1])
        assert_same_dtype(data.dtype, T)
        assert_same_dtype(rois.dtype, T)
        out = np.zeros((R, C, PH, PW), dtype=T)
        for r in range(R):
            batch_ind = int(rois[r, 0])
            roi_center_w, roi_center_h, roi_w, roi_h = rois[r, 1:5] * T(spatial_scale)
            roi_theta = T(rois[r,5] * np.pi / 180.0)
            roi_w = T(max(roi_w, 1.0))
            roi_h = T(max(roi_h, 1.0))
            bin_h = roi_h / T(PH)
            bin_w = roi_w / T(PW)
            bdata = data[batch_ind]
            if sampling_ratio > 0:
                roi_bin_grid_h = roi_bin_grid_w = sampling_ratio
            else:
                # adaptive grid: roughly one sample per input pixel
                roi_bin_grid_h = int(np.ceil(roi_h / T(PH)))
                roi_bin_grid_w = int(np.ceil(roi_w / T(PW)))
            count = T(roi_bin_grid_h * roi_bin_grid_w)
            roi_start_h = T(-roi_h / 2.0)
            roi_start_w = T(-roi_w / 2.0)
            for c in range(C):
                for ph in range(PH):
                    for pw in range(PW):
                        val = T(0.0)
                        for iy in range(roi_bin_grid_h):
                            yy = roi_start_h + T(ph) * bin_h + (T(iy) + T(0.5)) * \
                                bin_h / T(roi_bin_grid_h)
                            for ix in range(roi_bin_grid_w):
                                xx = roi_start_w + T(pw) * bin_w + (T(ix) + T(0.5)) * \
                                    bin_w / T(roi_bin_grid_w)
                                # rotate (xx, yy) by roi_theta and translate to the roi center
                                x = xx * np.cos(roi_theta, dtype=T) + yy * np.sin(roi_theta, dtype=T) + roi_center_w
                                y = yy * np.cos(roi_theta, dtype=T) - xx * np.sin(roi_theta, dtype=T) + roi_center_h
                                v = bilinear_interpolate(
                                    bdata[c], H, W, y, x)
                                assert_same_dtype(v.dtype, T)
                                val += v
                        out[r, c, ph, pw] = val / count
        assert_same_dtype(out.dtype, T)
        return out
    def test_rroi_align_value(sampling_ratio=-1):
        ctx = default_context()
        # the operator has no GPU implementation yet
        if ctx.device_type == 'gpu':
            print('skipped testing rroi align for gpu since it is not supported yet')
            return
        dtype = np.float32
        dlen = 224
        N, C, H, W = 5, 3, 16, 16
        R = 7
        pooled_size = (3, 4)
        spatial_scale = H * 1.0 / dlen
        data = mx.nd.array(
            np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype)
        center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
        wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
        theta = mx.nd.random.uniform(0, 180, (R,1), ctx=ctx, dtype=dtype)
        batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx)
        pos = mx.nd.concat(center_xy, wh, theta, dim=1)
        rois = mx.nd.concat(batch_ind, pos, dim=1)
        output = mx.nd.contrib.RROIAlign(data, rois, pooled_size=pooled_size,
                                         spatial_scale=spatial_scale, sampling_ratio=sampling_ratio)
        real_output = rroialign_forward(data.asnumpy(), rois.asnumpy(), pooled_size,
                                        spatial_scale, sampling_ratio)
        assert_almost_equal(output.asnumpy(), real_output, atol=1e-3)
    test_rroi_align_value()
    test_rroi_align_value(sampling_ratio=2)
def test_diag():
    """Check mx.nd.diag / mx.sym.diag against numpy's diag/diagonal for
    1-d, 2-d and 4-d inputs, including numeric gradients and rejection of
    an out-of-range offset ``k``."""
    # Test 2d input
    h = np.random.randint(2,9)
    w = np.random.randint(2,9)
    a_np = np.random.random((h, w)).astype(np.float32)
    a = mx.nd.array(a_np).astype('float32')
    for k in [0, 1, -1, np.random.randint(-min(h,w) + 1, min(h,w))]:
        assert_almost_equal(mx.nd.diag(a, k=k), np.diag(a_np, k=k))
    # invalid k: an offset beyond every diagonal must raise
    k = max(h,w) + 1
    assertRaises(MXNetError, mx.nd.diag, a, k=k)
    # Test 2d backward, k=0
    data = mx.sym.Variable('data')
    diag_sym = mx.sym.diag(data=data)
    check_numeric_gradient(diag_sym, [a_np])
    # Test 2d backward, k=1
    data = mx.sym.Variable('data')
    diag_sym = mx.sym.diag(data=data, k=1)
    check_numeric_gradient(diag_sym, [a_np])
    # Test 2d backward, k=-1
    data = mx.sym.Variable('data')
    diag_sym = mx.sym.diag(data=data, k=-1)
    check_numeric_gradient(diag_sym, [a_np])
    # test 1d input (diag of a vector builds a matrix)
    d = np.random.randint(2,9)
    a_np = np.random.random((d))
    a = mx.nd.array(a_np)
    # k is random
    k = np.random.randint(-d,d)
    assert_almost_equal(mx.nd.diag(a, k=k), np.diag(a_np, k=k))
    # Test 1d backward, k=0
    data = mx.sym.Variable('data')
    diag_sym = mx.sym.diag(data=data)
    check_numeric_gradient(diag_sym, [a_np])
    # Test 1d backward, k=1
    data = mx.sym.Variable('data')
    diag_sym = mx.sym.diag(data=data, k=1)
    check_numeric_gradient(diag_sym, [a_np])
    # Test 1d backward, k=-1
    data = mx.sym.Variable('data')
    diag_sym = mx.sym.diag(data=data, k=-1)
    check_numeric_gradient(diag_sym, [a_np])
    # Test 4d input (diag acts like np.diagonal over axis1/axis2)
    x1 = np.random.randint(3,9)
    x2 = np.random.randint(3,9)
    x3 = np.random.randint(3,9)
    x4 = np.random.randint(3,9)
    a_np = np.random.random((x1, x2, x3, x4)).astype(np.float32)
    a = mx.nd.array(a_np).astype('float32')
    # k = 0, axis1=0, axis2=1
    r = mx.nd.diag(data=a, k=0, axis1=0, axis2=1)
    assert_almost_equal(r, np.diagonal(a_np, offset=0, axis1=0, axis2=1))
    # k = 1, axis1=1, axis2=0
    r = mx.nd.diag(data=a, k=1, axis1=1, axis2=0)
    assert_almost_equal(r, np.diagonal(a_np, offset=1, axis1=1, axis2=0))
    # k = -1, axis1=1, axis2=3
    r = mx.nd.diag(data=a, k=-1, axis1=1, axis2=3)
    assert_almost_equal(r, np.diagonal(a_np, offset=-1, axis1=1, axis2=3))
    # k = 2, axis1=-2, axis2=0 (negative axis indices are accepted)
    r = mx.nd.diag(data=a, k=2, axis1=-2, axis2=0)
    assert_almost_equal(r, np.diagonal(a_np, offset=2, axis1=-2, axis2=0))
    # Test 4d backward, k=0, axis1=3, axis2=0
    data = mx.sym.Variable('data')
    diag_sym = mx.sym.diag(data=data, k=0, axis1=3, axis2=0)
    check_numeric_gradient(diag_sym, [a_np])
    # Test 4d backward, k=1, axis1=1, axis2=2
    data = mx.sym.Variable('data')
    diag_sym = mx.sym.diag(data=data, k=1, axis1=1, axis2=2)
    check_numeric_gradient(diag_sym, [a_np])
    # Test 4d backward, k=-1, axis1=2, axis2=0
    data = mx.sym.Variable('data')
    diag_sym = mx.sym.diag(data=data, k=-1, axis1=2, axis2=0)
    check_numeric_gradient(diag_sym, [a_np])
    # Test 4d backward, k=-2, axis1=1, axis2=-1
    data = mx.sym.Variable('data')
    diag_sym = mx.sym.diag(data=data, k=-2, axis1=1, axis2=-1)
    check_numeric_gradient(diag_sym, [a_np])
@pytest.mark.serial
def test_depthtospace():
    """depth_to_space forward/backward against a numpy reshape+transpose
    reference, plus rejection of invalid shapes and block sizes."""
    def f(x, blocksize):
        # Reference implementation: move blocksize**2 channel groups into
        # the spatial dimensions via reshape -> transpose -> reshape.
        b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
        tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])
        tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])
        y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])
        return y

    block = random.randint(2, 4)
    rand_mul1 = random.randint(1, 4)
    n = random.randint(1, 5)
    c = block * block * rand_mul1  # channel count must be divisible by block**2
    h = random.randint(1, 5)
    w = random.randint(1, 5)
    shape_inp = (n, c, h, w)
    data = rand_ndarray(shape_inp, 'default')
    data_np = data.asnumpy()
    expected = f(data_np, block)
    output = mx.nd.depth_to_space(data, block)
    assert_almost_equal(output, expected, atol=1e-3, rtol=1e-3)
    shape_out = (n, c // (block ** 2), h * block, w * block)
    data = mx.sym.Variable('data')
    dts_sym = mx.sym.depth_to_space(data, block)
    check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
    check_symbolic_forward(dts_sym, [data_np], [expected])
    check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])

    def test_invalid_depth_dim():
        # Channel count not divisible by block**2 must be rejected.
        invalid_shape_inp = (n, block - 1, h, w)
        data = rand_ndarray(invalid_shape_inp, 'default')
        assertRaises(MXNetError, mx.nd.depth_to_space, data, block)

    def test_invalid_space_dim():
        # A zero-sized spatial dimension must be rejected.
        invalid_shape_inp = (n, block ** 2, 0, block + 1)
        data = rand_ndarray(invalid_shape_inp, 'default')
        assertRaises(MXNetError, mx.nd.depth_to_space, data, block)

    def test_invalid_block_size():
        # block == 0 shadows the outer block on purpose: size 0 is invalid.
        block = 0
        invalid_shape_inp = (n , c, h, w)
        data = rand_ndarray(invalid_shape_inp, 'default')
        assertRaises(MXNetError, mx.nd.depth_to_space, data, block)

    test_invalid_depth_dim()
    test_invalid_space_dim()
    test_invalid_block_size()
@pytest.mark.serial
def test_spacetodepth():
    """space_to_depth forward/backward against a numpy reshape+transpose
    reference, plus rejection of invalid shapes and block sizes."""
    def f(x, blocksize):
        # Reference implementation: fold blocksize x blocksize spatial
        # patches into the channel dimension.
        b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
        tmp = np.reshape(x, [b, c, h // blocksize, blocksize, w // blocksize, blocksize])
        tmp = np.transpose(tmp, [0, 3, 5, 1, 2, 4])
        y = np.reshape(tmp, [b, c * (blocksize**2), h // blocksize, w // blocksize])
        return y

    block = random.randint(2, 4)
    rand_mul1 = random.randint(1, 4)
    rand_mul2 = random.randint(1, 4)
    n = random.randint(1, 5)
    c = random.randint(1, 5)
    h = block * rand_mul1  # spatial dims must be divisible by the block size
    w = block * rand_mul2
    shape_inp = (n, c, h, w)
    data = rand_ndarray(shape_inp, 'default')
    data_np = data.asnumpy()
    expected = f(data_np, block)
    output = mx.nd.space_to_depth(data, block)
    assert_almost_equal(output, expected, atol=1e-3, rtol=1e-3)
    shape_out = (n, c * (block ** 2), h // block, w // block)
    data = mx.sym.Variable('data')
    dts_sym = mx.sym.space_to_depth(data, block)
    check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
    check_symbolic_forward(dts_sym, [data_np], [expected])
    check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])

    def test_invalid_space_dim():
        # Spatial dim not divisible by the block size must be rejected.
        invalid_shape_inp = (n , c, block - 1, w)
        data = rand_ndarray(invalid_shape_inp, 'default')
        assertRaises(MXNetError, mx.nd.space_to_depth, data, block)

    def test_invalid_block_size():
        # block == 0 shadows the outer block on purpose: size 0 is invalid.
        block = 0
        invalid_shape_inp = (n, c, h, w)
        data = rand_ndarray(invalid_shape_inp, 'default')
        assertRaises(MXNetError, mx.nd.space_to_depth, data, block)

    def test_invalid_depth_dim():
        # A zero-sized channel dimension must be rejected.
        invalid_shape_inp = (n, 0, h, w)
        data = rand_ndarray(invalid_shape_inp, 'default')
        assertRaises(MXNetError, mx.nd.space_to_depth, data, block)

    test_invalid_space_dim()
    test_invalid_block_size()
    test_invalid_depth_dim()
def test_softmax_cross_entropy():
    """Forward check of softmax_cross_entropy against a numpy reference.

    The operator's output (a single scalar for the whole batch) must match
    the hand-computed cross entropy of the softmax'd logits.
    """
    def reference_loss(probs, one_hot):
        # Cross entropy summed over the whole batch.
        return np.sum(-np.log(probs) * one_hot)

    data = mx.sym.Variable('data')
    label = mx.sym.Variable('label')
    sym = mx.sym.softmax_cross_entropy(data=data, label=label)
    num_labels = random.randint(100, 200)
    batch_size = random.randint(100, 200)
    logits = rand_ndarray((batch_size, num_labels), stype='default').asnumpy()
    probs = np_softmax(logits)
    labels = np.random.randint(0, num_labels, (batch_size, ))
    one_hot = np.zeros((batch_size, num_labels))
    one_hot[np.arange(batch_size), labels] = 1.
    expected = np.array([reference_loss(probs, one_hot)])
    check_symbolic_forward(sym, {'data' : logits, 'label' : labels},
                           [expected], rtol=1e-3, atol=1e-5)
def test_split_v2():
    """split_v2 forward/backward vs numpy.split at random axis/sections.

    Gradient of a split is simply the concatenation of the per-piece
    output gradients along the split axis.
    """
    ndim = random.randint(2, 6)
    shape = rand_shape_nd(ndim)
    axis = random.randint(-ndim, ndim - 1)
    axis_size = shape[axis]
    num_cuts = random.randint(0, axis_size - 1)
    indices = tuple(sorted(random.sample([i for i in range(1, axis_size)], num_cuts)))
    mx_data = rand_ndarray(shape)
    np_data = mx_data.asnumpy()
    expected = np.split(np_data, indices_or_sections=indices, axis=axis)
    sym = mx.sym.split_v2(mx.sym.Variable("data"), indices_or_sections=indices, axis=axis)
    check_symbolic_forward(sym, {"data": mx_data}, expected, rtol=1e-3, atol=1e-5)
    out_grad = [np.ones(piece.shape) for piece in expected]
    check_symbolic_backward(sym, {"data": mx_data}, out_grad,
                            [np.concatenate(out_grad, axis=axis)])
def test_moments():
    """moments() mean/variance vs numpy over a random subset of axes,
    with both keepdims settings, including symbolic forward and a
    numeric-gradient check on mean+var."""
    dim = random.randint(2, 5)
    shape = rand_shape_nd(dim, dim=5)
    axes = [i for i in range(dim)]
    test_dims = random.sample(axes, random.randint(1, dim))
    test_axes = tuple(sorted(test_dims))
    np_a = np.random.uniform(-1.0, 1.0, shape)
    a = mx.nd.array(np_a)
    for keepdims in [True, False]:
        eps = 1e-3
        # Nudge near-zero entries away from zero so the finite-difference
        # gradient stays numerically stable.  Note this mutates np_a after
        # `a` was created, so `a` and np_a may differ on those entries.
        np_a[abs(np_a) < eps] = 2 * eps
        np_mean = np.mean(np_a, axis=test_axes, keepdims=keepdims)
        np_var = np.var(np_a, axis=test_axes, keepdims=keepdims)
        mx_mean, mx_var = mx.nd.moments(a, keepdims=keepdims, axes=test_axes)
        N = np_a.size / np_mean.size  # NOTE(review): computed but unused below
        mx_sym = mx.sym.Variable("data")
        mx_moments = mx.sym.moments(mx_sym, axes=test_axes, keepdims=keepdims)
        # Reduce the two outputs to one symbol so a single gradient check covers both.
        mx_test_sym = mx.sym.elemwise_add(mx_moments[0], mx_moments[1])
        if len(np_mean.shape) == 0:
            # Full reduction without keepdims: align numpy's 0-d result
            # with MXNet's output shape.
            np_mean = np_mean.reshape(mx_mean.shape)
            np_var = np_var.reshape(mx_var.shape)
        assert np_mean.shape == mx_mean.shape
        assert np_var.shape == mx_var.shape
        check_symbolic_forward(mx_test_sym, [np_a], [np_mean + np_var], rtol=1e-3, atol=1e-5)
        check_numeric_gradient(mx_test_sym, [np_a], numeric_eps=eps, rtol=1e-2, atol=2e-4)
def test_invalid_kernel_size():
    """Correlation must raise when the kernel is too large for the input."""
    bad_kernel = 28
    lhs = mx.nd.array(np.random.rand(1, 1, 28, 28))
    rhs = mx.nd.array(np.random.rand(1, 1, 28, 28))
    assert_exception(mx.nd.Correlation, MXNetError, lhs, rhs,
                     kernel_size=bad_kernel)
def test_valid_kernel_size():
    """Correlation accepts a kernel that fits inside the input."""
    ok_kernel = 9
    lhs = mx.nd.array(np.random.rand(1, 1, 28, 28))
    rhs = mx.nd.array(np.random.rand(1, 1, 28, 28))
    mx.nd.Correlation(lhs, rhs, kernel_size=ok_kernel)
def test_valid_max_pooling_pad_type_same():
    """With the 'same' convention and zero pad, max pooling keeps an
    output length of ceil(input_len / stride)."""
    import math
    data = mx.nd.array(np.random.rand(1, 1, 10))
    stride = 2
    kernel = 2
    pooled = mx.nd.Pooling(data,
                           kernel=kernel,
                           stride=stride,
                           pad=(0, 0, 0),
                           pool_type='max',
                           name='pooling',
                           pooling_convention="same")
    assert math.ceil(data.shape[2] / stride) == pooled.shape[2]
def test_invalid_max_pooling_pad_type_same():
    """The 'same' pooling convention must reject an explicit non-zero pad."""
    data = mx.nd.array(np.random.rand(1, 1, 10))
    stride = 2
    kernel = 2
    pad = 2
    assert_exception(mx.nd.Pooling,
                     MXNetError,
                     data,
                     stride=stride,
                     kernel=kernel,
                     pad=pad,
                     pool_type='max',
                     name='pooling',
                     pooling_convention="same")
@pytest.mark.serial
def test_image_normalize():
    """image.normalize forward/backward for 3-D (CHW) and 4-D (NCHW) inputs
    with both per-channel tuple and scalar mean/std; the gradient of the
    op is 1/std per channel."""
    # Part 1 - Test 3D input with 3D mean/std
    shape_3d = (3, 28, 28)
    mean = (0, 1, 2)
    std = (3, 2, 1)
    data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
    data_expected_3d = data_in_3d.asnumpy()
    # NOTE(review): a[:][:][i] is equivalent to a[i] -- the chained [:]
    # slices are no-ops -- so this indexes channel i of the CHW array.
    data_expected_3d[:][:][0] = data_expected_3d[:][:][0] / 3.0
    data_expected_3d[:][:][1] = (data_expected_3d[:][:][1] - 1.0) / 2.0
    data_expected_3d[:][:][2] = data_expected_3d[:][:][2] - 2.0

    data = mx.symbol.Variable('data')
    img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)

    # check forward
    check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
                           rtol=1e-5, atol=1e-5)

    # Gradient is 1/std_dev
    grad_expected_3d = np.ones(shape_3d)
    grad_expected_3d[:][:][0] = 1 / 3.0
    grad_expected_3d[:][:][1] = 1 / 2.0
    grad_expected_3d[:][:][2] = 1 / 1.0

    # check backward
    check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
                            expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)

    # check backward using finite difference
    check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)

    # Part 2 - Test 4D input with 3D mean/std (same symbol, batched input)
    shape_4d = (2, 3, 28, 28)
    data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
    data_expected_4d = data_in_4d.asnumpy()
    data_expected_4d[0][:][:][0] = data_expected_4d[0][:][:][0] / 3.0
    data_expected_4d[0][:][:][1] = (data_expected_4d[0][:][:][1] - 1.0) / 2.0
    data_expected_4d[0][:][:][2] = data_expected_4d[0][:][:][2] - 2.0
    data_expected_4d[1][:][:][0] = data_expected_4d[1][:][:][0] / 3.0
    data_expected_4d[1][:][:][1] = (data_expected_4d[1][:][:][1] - 1.0) / 2.0
    data_expected_4d[1][:][:][2] = data_expected_4d[1][:][:][2] - 2.0

    # check forward
    check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
                           rtol=1e-5, atol=1e-5)

    # Gradient is 1/std_dev
    grad_expected_4d = np.ones(shape_4d)
    grad_expected_4d[0][:][:][0] = 1 / 3.0
    grad_expected_4d[0][:][:][1] = 1 / 2.0
    grad_expected_4d[0][:][:][2] = 1 / 1.0
    grad_expected_4d[1][:][:][0] = 1 / 3.0
    grad_expected_4d[1][:][:][1] = 1 / 2.0
    grad_expected_4d[1][:][:][2] = 1 / 1.0

    # check backward
    check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
                            expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)

    # check backward using finite difference
    check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)

    # Part 3 - Test 3D input with scalar mean/std (applied to all channels)
    shape_3d = (3, 28, 28)
    mean = 1.0
    std = 2.0
    data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
    data_expected_3d = data_in_3d.asnumpy()
    data_expected_3d[:][:][:] = (data_expected_3d[:][:][:] - 1.0) / 2.0

    data = mx.symbol.Variable('data')
    img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)

    # check forward
    check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
                           rtol=1e-5, atol=1e-5)

    # Gradient is 1/std_dev
    grad_expected_3d = np.ones(shape_3d)
    grad_expected_3d[:][:][:] = 1 / 2.0

    # check backward
    check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
                            expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)

    # check backward using finite difference
    check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)

    # Part 4 - Test 4D input with scalar mean/std
    shape_4d = (2, 3, 28, 28)
    data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
    data_expected_4d = data_in_4d.asnumpy()
    data_expected_4d[:][:][:][:] = (data_expected_4d[:][:][:][:] - 1.0) / 2.0

    # check forward
    check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
                           rtol=1e-5, atol=1e-5)

    # Gradient is 1/std_dev
    grad_expected_4d = np.ones(shape_4d)
    grad_expected_4d[:][:][:][:] = 1 / 2.0

    # check backward
    check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
                            expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)

    # check backward using finite difference
    check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
@pytest.mark.serial
def test_index_array():
    """contrib.index_array: forward emits each element's index vector and
    backward is identically zero.  Covers the default (all axes) and
    axes= variants, including rank-0 and zero-size inputs under np_shape."""
    def test_index_array_default():
        for shape in [(10,), (7, 5, 29), (5, 7, 11, 13, 17, 19)]:
            data = mx.symbol.Variable("data")
            index_array = mx.sym.contrib.index_array(data)
            input_array = np.ones(shape)
            # np.mgrid yields one index grid per axis; stacking them on the
            # trailing axis gives the expected (shape..., ndim) output.
            mgrid = np.mgrid[tuple(slice(0, x) for x in shape)]
            expected = np.stack(mgrid, axis=-1)
            check_symbolic_forward(index_array, [input_array], [expected])
            check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])

    @mx.use_np_shape
    def test_index_array_default_zero_dim():
        # Rank-0 input: the index vector is empty.
        data = mx.symbol.Variable("data")
        index_array = mx.sym.contrib.index_array(data)
        input_array = np.ones(())
        expected = np.zeros((0,))
        check_symbolic_forward(index_array, [input_array], [expected])
        check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])

    @mx.use_np_shape
    def test_index_array_default_zero_size():
        # Zero-size input: output keeps the shape with a trailing ndim axis.
        data = mx.symbol.Variable("data")
        index_array = mx.sym.contrib.index_array(data)
        input_array = np.ones((0, 0, 0))
        expected = np.zeros((0, 0, 0, 3))
        check_symbolic_forward(index_array, [input_array], [expected])
        check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])

    def test_index_array_select_axes():
        shape = (5, 7, 11, 13, 17, 19)
        for axes in [(3,), (4, 1), (5, 1, 3), (-1,), (-5, -1, -3)]:
            data = mx.symbol.Variable("data")
            index_array = mx.sym.contrib.index_array(data, axes=axes)
            input_array = np.ones(shape)
            mgrid = np.mgrid[tuple(slice(0, x) for x in shape)]
            # Only the requested axes' indices, in the requested order.
            expected = np.stack(mgrid, axis=-1)[..., axes]
            check_symbolic_forward(index_array, [input_array], [expected])
            check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])

    @mx.use_np_shape
    def test_index_array_select_axes_zero_size():
        data = mx.symbol.Variable("data")
        index_array = mx.sym.contrib.index_array(data, axes=(2, 1))
        input_array = np.ones((0, 0, 0, 0))
        expected = np.zeros((0, 0, 2))
        check_symbolic_forward(index_array, [input_array], [expected])
        check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])

    test_index_array_default()
    test_index_array_default_zero_dim()
    test_index_array_default_zero_size()
    test_index_array_select_axes()
    test_index_array_select_axes_zero_size()
def test_scalar_tensor_creation():
    """Scalar (rank-0) tensors are only creatable under np_shape semantics."""
    for creator in (mx.nd.zeros, mx.nd.ones):
        assertRaises(MXNetError, creator, shape=())
    with mx.np_shape():
        scalar = mx.nd.ones(shape=())
        assert same(scalar.asnumpy(), np.ones((), dtype=scalar.dtype))
def test_zero_size_tensor_creation():
    """Zero-size shapes are rejected unless np_shape semantics are active."""
    for creator in (mx.nd.zeros, mx.nd.ones):
        assertRaises(MXNetError, creator, shape=(0, 1, 3, 0))
    with mx.np_shape():
        arr = mx.nd.ones(shape=(0, 1, 0, 4))
        assert same(arr.asnumpy(), np.ones(shape=arr.shape, dtype=arr.dtype))
def test_concat_with_zero_size_tensor():
    """Concat handles zero-size inputs along the concat axis (np_shape mode)."""
    with mx.np_shape():
        # Zero-size inputs along the concat axis contribute nothing.
        pieces = [mx.nd.ones(s) for s in ((0, 8, 12), (3, 8, 12), (0, 8, 12))]
        assert mx.nd.Concat(pieces[0], pieces[1], pieces[2], dim=0).shape == (3, 8, 12)

        # All inputs empty in the leading dim; axis-1 sizes still add up.
        pieces = [mx.nd.ones(s) for s in ((0, 3, 10), (0, 4, 10), (0, 5, 10))]
        assert mx.nd.Concat(pieces[0], pieces[1], pieces[2], dim=1).shape == (0, 12, 10)
def test_np_shape_decorator():
    """mx.use_np_shape must preserve the wrapped function's __name__ and
    __doc__ and must work regardless of the ambient np_shape() state."""
    @mx.use_np_shape
    def check_scalar_one():
        """Generate scalar one tensor"""
        return mx.nd.ones(shape=())
    # The decorator must behave like functools.wraps: metadata preserved
    # and the scalar shape allowed even outside an np_shape() context.
    assert check_scalar_one.__name__ == "check_scalar_one"
    assert check_scalar_one.__doc__ == "Generate scalar one tensor"
    assert check_scalar_one().shape == ()
    for active in [True, False]:
        with mx.np_shape(active=active):
            assert check_scalar_one.__name__ == "check_scalar_one"
            assert check_scalar_one.__doc__ == "Generate scalar one tensor"
            assert check_scalar_one().shape == ()

    @mx.use_np_shape
    def check_concat(shape1, shape2, axis):
        data1 = mx.nd.ones(shape1)
        data2 = mx.nd.ones(shape2)
        ret = mx.nd.Concat(data1, data2, dim=axis)
        expected_ret = np.concatenate((data1.asnumpy(), data2.asnumpy()), axis=axis)
        assert ret.shape == expected_ret.shape

    check_concat((0, 3, 4), (5, 3, 4), 0)
    check_concat((8, 0, 5), (8, 7, 5), 1)
    check_concat((8, 0, 0), (8, 0, 0), 2)
    # NOTE(review): the loop variable is unused, so this just repeats the
    # same three checks -- presumably it was meant to wrap them in
    # np_shape(active=_) like the loop above.  TODO confirm intent.
    for _ in [True, False]:
        check_concat((0, 3, 4), (5, 3, 4), 0)
        check_concat((8, 0, 5), (8, 7, 5), 1)
        check_concat((8, 0, 0), (8, 0, 0), 2)
def test_add_n():
    """add_n over several arrays must equal elementwise accumulation,
    including when the output buffer aliases one of the inputs."""
    shape = (2, 2)
    n_inputs = 5
    arrays = [mx.nd.random.uniform(shape=shape) for _ in range(n_inputs)]
    expected = mx.nd.zeros(shape=shape)
    for arr in arrays:
        expected += arr
    # out=arrays[0] makes add_n write in place over its first input.
    actual = mx.nd.add_n(*arrays, out=arrays[0])
    assert_almost_equal(expected.asnumpy(), actual.asnumpy(), atol=1e-5)
def test_get_all_registered_operators():
    """The operator registry listing is a non-empty list of op names."""
    op_names = get_all_registered_operators()
    assert isinstance(op_names, list)
    assert len(op_names) > 0
    assert 'Activation' in op_names
def test_get_operator_arguments():
    """Introspection of the Activation op's argument metadata."""
    args = get_operator_arguments('Activation')
    assert isinstance(args, OperatorArguments)
    assert args.names == ['data', 'act_type']
    expected_types = [
        'NDArray-or-Symbol',
        "{'log_sigmoid', 'mish', 'relu', 'sigmoid', 'softrelu', 'softsign', 'tanh'}, required",
    ]
    assert args.types == expected_types
    assert args.narg == 2
def test_transpose_infer_shape_back():
    """Shape of a fully unknown symbol is inferred backward through
    transpose from the elementwise add with a known (2,3) operand."""
    known = mx.sym.ones(shape=[2, 3])
    unknown = mx.sym.ones(shape=[-1, -1])
    summed = known + mx.sym.transpose(unknown)
    exe = summed._bind(mx.cpu(), args={})
    outputs = exe.forward()
    assert outputs[0].shape == (2, 3)
def test_transpose_infer_shape_mixed():
    """Partially known shapes on both operands are combined through
    transpose: (2,-1) + transpose((3,-1)) must resolve to (2,3)."""
    lhs = mx.sym.ones(shape=[2, -1])
    rhs = mx.sym.ones(shape=[3, -1])
    summed = lhs + mx.sym.transpose(rhs)
    exe = summed._bind(mx.cpu(), args={})
    outputs = exe.forward()
    assert outputs[0].shape == (2, 3)
def test_sample_normal_default_shape():
    """Regression test for
    https://github.com/apache/incubator-mxnet/issues/16135: sample_normal
    with length-1 parameter arrays must yield shape (1,) for the default
    and empty shape arguments, and (1, 1) for an explicit size-1 shape."""
    mu = mx.nd.array([10.0])
    sigma = mx.nd.array([0.5])
    assert mx.nd.sample_normal(mu=mu, sigma=sigma).shape == (1,)
    assert mx.nd.sample_normal(mu=mu, sigma=sigma, shape=()).shape == (1,)
    assert mx.nd.sample_normal(mu=mu, sigma=sigma, shape=1).shape == (1, 1)
    assert mx.nd.sample_normal(mu=mu, sigma=sigma, shape=(1,)).shape == (1, 1)
def test_large_tensor_disabled_err_msg():
    """Creation and sampling ops must raise MXNetError when asked for a
    tensor larger than the default (large-tensor-disabled) index limit.

    NOTE(review): none of the nested check_* helpers are invoked here, so
    this test currently exercises nothing; invoking check_nd_array would
    also first allocate a ~4.3e9-element numpy array.  The helpers are
    left un-invoked to preserve behavior -- TODO confirm intent.
    """
    LARGE_X = 4300000000  # > 2**32 elements, past the default index limit
    shape = (2, LARGE_X)

    def check_nd_array():
        # Converting an oversized numpy array must fail.
        x = np.arange(0, LARGE_X)
        assertRaises(MXNetError, mx.nd.array, x)

    def check_nd_ones():
        assertRaises(MXNetError, mx.nd.ones, shape)

    def check_nd_zeros():
        assertRaises(MXNetError, mx.nd.zeros, shape)

    def check_nd_full():
        val = 1
        assertRaises(Exception, mx.nd.full, shape, val)

    def check_nd_arange():
        start = 0
        stop = LARGE_X
        assertRaises(Exception, mx.nd.arange, start, stop)

    def check_nd_random():
        shape = (2, LARGE_X)

        def check_random_exp():
            lam = 4
            assertRaises(MXNetError, mx.nd.random_exponential, lam, shape)

        def check_random_gamma():
            alpha = 9
            beta = 0.5
            assertRaises(MXNetError, mx.nd.random_gamma, alpha, beta, shape)

        def check_random_normal():
            loc = 0
            scale = 1
            assertRaises(MXNetError, mx.nd.random_normal, loc, scale, shape)

        def check_random_poisson():
            lam = 4
            # BUG FIX: previously passed an undefined name `alpha`;
            # random_poisson takes only the rate parameter.
            assertRaises(MXNetError, mx.nd.random_poisson, lam, shape)

        def check_random_randint():
            low = 0
            high = 1000000
            assertRaises(MXNetError, mx.nd.random_randint, low, high, shape)

        def check_random_uniform():
            low = 0
            high = 1
            # BUG FIX: previously referenced undefined `alpha`/`beta` and
            # defined an unused, misspelled `hight`; uniform takes low/high.
            assertRaises(MXNetError, mx.nd.random_uniform, low, high, shape)
def check_multihead_attention_selfatt(dtype):
    """Cross-check the fused interleaved_matmul_selfatt_{qk,valatt} ops
    against an unfused reference graph built from FullyConnected,
    reshape/transpose and batch_dot.

    Both graphs share one random parameter set; forward outputs, attention
    scores, and every gradient must agree within rtol=1e-2 / atol=1e-3.

    Parameters
    ----------
    dtype : str or numpy dtype
        Compute dtype ('float32', or 'float16' on GPU).
    """
    def convert_weight(F, q_weight, k_weight, v_weight, num_heads):
        # Interleave the separate per-head Q/K/V projection weights into
        # the single weight matrix layout the fused kernel expects.
        q_weight = F.reshape(q_weight, shape=(num_heads, -1, 0), reverse=True)
        k_weight = F.reshape(k_weight, shape=(num_heads, -1, 0), reverse=True)
        v_weight = F.reshape(v_weight, shape=(num_heads, -1, 0), reverse=True)
        all_weights = F.concat(q_weight, k_weight, v_weight, dim=-2)
        all_weights = F.reshape(all_weights, shape=(-1, 0), reverse=True)
        return all_weights

    def convert_bias(F, q_bias, k_bias, v_bias, num_heads):
        # Matching per-head interleave for the projection biases.
        q_bias = F.reshape(q_bias, shape=(num_heads, -1))
        k_bias = F.reshape(k_bias, shape=(num_heads, -1))
        v_bias = F.reshape(v_bias, shape=(num_heads, -1))
        all_bias = F.stack(q_bias, k_bias, v_bias, axis=1)
        all_bias = F.reshape(all_bias, shape=(-1,))
        return all_bias

    batch_size = 2
    qkv_length = 7  # length of a sequence
    qkv_dim = 9  # dimension of encoding
    num_heads = 3  # number of attention head
    head_dim = 5  # head size
    out_dim = 13 * num_heads
    qkv_units = num_heads * head_dim

    # One shared random parameter set, copied into both executors below.
    arg_params = {
        'qkv': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
        'q_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
        'k_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
        'v_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
        'q_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
        'k_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
        'v_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
        'out_weight': mx.nd.array(np.random.rand(*(out_dim, qkv_units)).astype(dtype) * 0.1, dtype=dtype),
        'out_bias': mx.nd.array(np.random.rand(*(out_dim,)).astype(dtype) * 0.1, dtype=dtype),
    }

    # ---- Optimized (fused) graph ----------------------------------------
    qkv = mx.sym.Variable('qkv')
    # 'sonde' is zeroed below and added to att_score so that the attention
    # scores appear as a bindable input/output of both graphs.
    sonde = mx.sym.Variable('sonde')
    q_weight = mx.sym.Variable('q_weight')
    k_weight = mx.sym.Variable('k_weight')
    v_weight = mx.sym.Variable('v_weight')
    q_bias = mx.sym.Variable('q_bias')
    k_bias = mx.sym.Variable('k_bias')
    v_bias = mx.sym.Variable('v_bias')
    out_weight = mx.sym.Variable('out_weight')
    out_bias = mx.sym.Variable('out_bias')
    qkv_weight = convert_weight(mx.sym, q_weight, k_weight, v_weight, num_heads)
    qkv_bias = convert_bias(mx.sym, q_bias, k_bias, v_bias, num_heads)
    qkv = mx.sym.transpose(qkv, axes=(1, 0, 2))  # to (seq, batch, dim) layout
    qkv_proj = mx.sym.FullyConnected(qkv, weight=qkv_weight, bias=qkv_bias, flatten=False,
                                     num_hidden=qkv_units * 3, no_bias=False)
    att_score = mx.sym.contrib.interleaved_matmul_selfatt_qk(
        qkv_proj, heads=num_heads)
    att_score = att_score + sonde
    weighted_value = mx.sym.contrib.interleaved_matmul_selfatt_valatt(
        qkv_proj, att_score, heads=num_heads)
    output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
                                   num_hidden=out_dim, no_bias=False)
    output = mx.sym.transpose(output, axes=(1, 0, 2))  # back to (batch, seq, dim)
    output = mx.sym.Group([output, att_score])
    executor = output._simple_bind(ctx=default_context(),
                                   qkv=(batch_size, qkv_length, qkv_dim),
                                   q_weight=(qkv_units, qkv_dim),
                                   q_bias=(qkv_units,),
                                   k_weight=(qkv_units, qkv_dim),
                                   k_bias=(qkv_units,),
                                   v_weight=(qkv_units, qkv_dim),
                                   v_bias=(qkv_units,),
                                   type_dict={'qkv': dtype,
                                              'q_weight': dtype,
                                              'k_weight': dtype,
                                              'v_weight': dtype,
                                              'q_bias': dtype,
                                              'k_bias': dtype,
                                              'v_bias': dtype,
                                              'sonde': dtype},
                                   grad_req='write')
    executor.copy_params_from(arg_params, {})
    executor.arg_dict['sonde'][:] = 0.
    executor.arg_dict['sonde'].wait_to_read()
    executor.forward(is_train=True)
    output_shape = executor.outputs[0].shape
    # One set of output gradients, reused for the reference graph below.
    output_grads = np.random.rand(*output_shape).astype(dtype) * 0.1
    output_opti = executor.outputs[0].asnumpy()
    att_score_opti = executor.outputs[1].asnumpy()
    executor.backward([mx.nd.array(output_grads, dtype=dtype),
                       mx.nd.zeros(att_score_opti.shape, dtype=dtype)])
    grads_opti = {k: v.asnumpy() for k, v in executor.grad_dict.items()}

    # ---- Reference (unfused) graph --------------------------------------
    qkv = mx.sym.Variable('qkv')
    sonde = mx.sym.Variable('sonde')
    q_weight = mx.sym.Variable('q_weight')
    k_weight = mx.sym.Variable('k_weight')
    v_weight = mx.sym.Variable('v_weight')
    q_bias = mx.sym.Variable('q_bias')
    k_bias = mx.sym.Variable('k_bias')
    v_bias = mx.sym.Variable('v_bias')
    out_weight = mx.sym.Variable('out_weight')
    out_bias = mx.sym.Variable('out_bias')
    q = mx.sym.FullyConnected(qkv, weight=q_weight, bias=q_bias, flatten=False,
                              num_hidden=qkv_units, no_bias=False)
    k = mx.sym.FullyConnected(qkv, weight=k_weight, bias=k_bias, flatten=False,
                              num_hidden=qkv_units, no_bias=False)
    v = mx.sym.FullyConnected(qkv, weight=v_weight, bias=v_bias, flatten=False,
                              num_hidden=qkv_units, no_bias=False)
    # Split heads: (B, L, units) -> (B*heads, L, head_dim).
    q = mx.sym.reshape(q, shape=(0, 0, num_heads, -1))
    q = mx.sym.transpose(q, axes=(0, 2, 1, 3))
    q = mx.sym.reshape(q, shape=(-1, 0, 0), reverse=True)
    k = mx.sym.reshape(k, shape=(0, 0, num_heads, -1))
    k = mx.sym.transpose(k, axes=(0, 2, 1, 3))
    k = mx.sym.reshape(k, shape=(-1, 0, 0), reverse=True)
    q = mx.sym.contrib.div_sqrt_dim(q)  # scale queries by 1/sqrt(head_dim)
    att_score = mx.sym.batch_dot(q, k, transpose_b=True)
    att_score = att_score + sonde
    v = mx.sym.reshape(v, shape=(0, 0, num_heads, -1))
    v = mx.sym.transpose(v, axes=(0, 2, 1, 3))
    v = mx.sym.reshape(v, shape=(-1, 0, 0), reverse=True)
    weighted_value = mx.sym.batch_dot(att_score, v)
    # Merge heads back: (B*heads, L, head_dim) -> (B, L, units).
    weighted_value = mx.sym.reshape(weighted_value, shape=(-1, num_heads, 0, 0),
                                    reverse=True)
    weighted_value = mx.sym.transpose(weighted_value, axes=(0, 2, 1, 3))
    weighted_value = mx.sym.reshape(weighted_value, shape=(0, 0, -1))
    output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
                                   num_hidden=out_dim, no_bias=False)
    output = mx.sym.Group([output, att_score])
    executor = output._simple_bind(ctx=default_context(),
                                   qkv=(batch_size, qkv_length, qkv_dim),
                                   type_dict={'qkv': dtype},
                                   grad_req='write')
    executor.copy_params_from(arg_params, {})
    executor.arg_dict['sonde'][:] = 0.
    executor.arg_dict['sonde'].wait_to_read()
    executor.forward(is_train=True)
    output_orig = executor.outputs[0].asnumpy()
    att_score_orig = executor.outputs[1].asnumpy()
    executor.backward([mx.nd.array(output_grads, dtype=dtype),
                       mx.nd.zeros(att_score_orig.shape, dtype=dtype)])
    grads_orig = {k : v.asnumpy() for k, v in executor.grad_dict.items()}

    # Fused and reference paths must agree in value and in every gradient.
    assert_allclose(att_score_orig, att_score_opti, rtol=1e-2, atol=1e-3)
    assert_allclose(output_orig, output_opti, rtol=1e-2, atol=1e-3)
    for k in grads_opti.keys():
        assert(grads_orig[k].dtype == grads_opti[k].dtype)
        assert(grads_orig[k].shape == grads_opti[k].shape)
        assert_allclose(grads_orig[k], grads_opti[k], rtol=1e-2, atol=1e-3)
@assert_raises_cuda_not_satisfied(min_version='9.1')
@pytest.mark.serial
def test_multihead_attention_selfatt():
    """Run the fused-vs-reference self-attention check for every dtype the
    current context supports (float16 only on GPU)."""
    supported = ['float32']
    if default_context().device_type == 'gpu':
        supported.append('float16')
    for dt in supported:
        check_multihead_attention_selfatt(dtype=dt)
def check_multihead_attention_encdec(dtype):
    """Cross-check the fused interleaved_matmul_encdec_{qk,valatt} ops
    (encoder-decoder attention: separate query and key/value inputs)
    against an unfused reference graph built from FullyConnected,
    reshape/transpose and batch_dot.

    Both graphs share one random parameter set; forward outputs, attention
    scores, and every gradient must agree within rtol=1e-2 / atol=1e-3.

    Parameters
    ----------
    dtype : str or numpy dtype
        Compute dtype ('float32', or 'float16' on GPU).
    """
    def convert_weight(F, k_weight, v_weight, num_heads):
        # Interleave the per-head K/V projection weights into the single
        # weight matrix layout the fused kernel expects.
        k_weight = F.reshape(k_weight, shape=(num_heads, -1, 0), reverse=True)
        v_weight = F.reshape(v_weight, shape=(num_heads, -1, 0), reverse=True)
        all_weights = F.concat(k_weight, v_weight, dim=-2)
        all_weights = F.reshape(all_weights, shape=(-1, 0), reverse=True)
        return all_weights

    def convert_bias(F, k_bias, v_bias, num_heads):
        # Matching per-head interleave for the K/V projection biases.
        k_bias = F.reshape(k_bias, shape=(num_heads, -1))
        v_bias = F.reshape(v_bias, shape=(num_heads, -1))
        all_bias = F.stack(k_bias, v_bias, axis=1)
        all_bias = F.reshape(all_bias, shape=(-1,))
        return all_bias

    batch_size = 2
    qkv_length = 7  # length of a sequence
    qkv_dim = 9  # dimension of encoding
    num_heads = 3  # number of attention head
    head_dim = 5  # head size
    out_dim = 13 * num_heads
    qkv_units = num_heads * head_dim

    # One shared random parameter set, copied into both executors below.
    arg_params = {
        'q': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
        'kv': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
        'q_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
        'k_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
        'v_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
        'q_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
        'k_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
        'v_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
        'out_weight': mx.nd.array(np.random.rand(*(out_dim, qkv_units)).astype(dtype) * 0.1, dtype=dtype),
        'out_bias': mx.nd.array(np.random.rand(*(out_dim,)).astype(dtype) * 0.1, dtype=dtype),
    }

    # ---- Optimized (fused) graph ----------------------------------------
    q = mx.sym.Variable('q')
    kv = mx.sym.Variable('kv')
    # 'sonde' is zeroed below and added to att_score so that the attention
    # scores appear as a bindable input/output of both graphs.
    sonde = mx.sym.Variable('sonde')
    q_weight = mx.sym.Variable('q_weight')
    k_weight = mx.sym.Variable('k_weight')
    v_weight = mx.sym.Variable('v_weight')
    q_bias = mx.sym.Variable('q_bias')
    k_bias = mx.sym.Variable('k_bias')
    v_bias = mx.sym.Variable('v_bias')
    out_weight = mx.sym.Variable('out_weight')
    out_bias = mx.sym.Variable('out_bias')
    kv_weight = convert_weight(mx.sym, k_weight, v_weight, num_heads)
    kv_bias = convert_bias(mx.sym, k_bias, v_bias, num_heads)
    kv = mx.sym.transpose(kv, axes=(1, 0, 2))  # to (seq, batch, dim) layout
    kv_proj = mx.sym.FullyConnected(kv, weight=kv_weight, bias=kv_bias, flatten=False,
                                    num_hidden=qkv_units * 2, no_bias=False)
    q = mx.sym.transpose(q, axes=(1, 0, 2))
    q_proj = mx.sym.FullyConnected(q, weight=q_weight, bias=q_bias, flatten=False,
                                   num_hidden=qkv_units, no_bias=False)
    att_score = mx.sym.contrib.interleaved_matmul_encdec_qk(
        q_proj, kv_proj, heads=num_heads)
    att_score = att_score + sonde
    weighted_value = mx.sym.contrib.interleaved_matmul_encdec_valatt(
        kv_proj, att_score, heads=num_heads)
    output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
                                   num_hidden=out_dim, no_bias=False)
    output = mx.sym.transpose(output, axes=(1, 0, 2))  # back to (batch, seq, dim)
    output = mx.sym.Group([output, att_score])
    executor = output._simple_bind(ctx=default_context(),
                                   q=(batch_size, qkv_length, qkv_dim),
                                   kv=(batch_size, qkv_length, qkv_dim),
                                   q_weight=(qkv_units, qkv_dim),
                                   q_bias=(qkv_units,),
                                   k_weight=(qkv_units, qkv_dim),
                                   k_bias=(qkv_units,),
                                   v_weight=(qkv_units, qkv_dim),
                                   v_bias=(qkv_units,),
                                   out_weight=(out_dim, qkv_units),
                                   out_bias=(out_dim,),
                                   type_dict={'q': dtype,
                                              'kv': dtype,
                                              'q_weight': dtype,
                                              'q_bias': dtype,
                                              'k_weight': dtype,
                                              'k_bias': dtype,
                                              'v_weight': dtype,
                                              'v_bias': dtype,
                                              'out_weight': dtype,
                                              'out_bias': dtype,
                                              },
                                   grad_req='write')
    executor.copy_params_from(arg_params, {})
    executor.arg_dict['sonde'][:] = 0.
    executor.arg_dict['sonde'].wait_to_read()
    executor.forward(is_train=True)
    output_shape = executor.outputs[0].shape
    # One set of output gradients, reused for the reference graph below.
    output_grads = np.random.rand(*output_shape).astype(dtype) * 0.1
    output_opti = executor.outputs[0].asnumpy()
    att_score_opti = executor.outputs[1].asnumpy()
    executor.backward([mx.nd.array(output_grads, dtype=dtype), mx.nd.zeros(att_score_opti.shape, dtype=dtype)])
    grads_opti = {k: v.asnumpy() for k, v in executor.grad_dict.items()}

    # ---- Reference (unfused) graph --------------------------------------
    q = mx.sym.Variable('q')
    kv = mx.sym.Variable('kv')
    sonde = mx.sym.Variable('sonde')
    q_weight = mx.sym.Variable('q_weight')
    k_weight = mx.sym.Variable('k_weight')
    v_weight = mx.sym.Variable('v_weight')
    q_bias = mx.sym.Variable('q_bias')
    k_bias = mx.sym.Variable('k_bias')
    v_bias = mx.sym.Variable('v_bias')
    out_weight = mx.sym.Variable('out_weight')
    out_bias = mx.sym.Variable('out_bias')
    q = mx.sym.FullyConnected(q, weight=q_weight, bias=q_bias, flatten=False,
                              num_hidden=qkv_units, no_bias=False)
    k = mx.sym.FullyConnected(kv, weight=k_weight, bias=k_bias, flatten=False,
                              num_hidden=qkv_units, no_bias=False)
    v = mx.sym.FullyConnected(kv, weight=v_weight, bias=v_bias, flatten=False,
                              num_hidden=qkv_units, no_bias=False)
    # Split heads: (B, L, units) -> (B*heads, L, head_dim).
    q = mx.sym.reshape(q, shape=(0, 0, num_heads, -1))
    q = mx.sym.transpose(q, axes=(0, 2, 1, 3))
    q = mx.sym.reshape(q, shape=(-1, 0, 0), reverse=True)
    k = mx.sym.reshape(k, shape=(0, 0, num_heads, -1))
    k = mx.sym.transpose(k, axes=(0, 2, 1, 3))
    k = mx.sym.reshape(k, shape=(-1, 0, 0), reverse=True)
    q = mx.sym.contrib.div_sqrt_dim(q)  # scale queries by 1/sqrt(head_dim)
    att_score = mx.sym.batch_dot(q, k, transpose_b=True)
    att_score = att_score + sonde
    v = mx.sym.reshape(v, shape=(0, 0, num_heads, -1))
    v = mx.sym.transpose(v, axes=(0, 2, 1, 3))
    v = mx.sym.reshape(v, shape=(-1, 0, 0), reverse=True)
    weighted_value = mx.sym.batch_dot(att_score, v)
    # Merge heads back: (B*heads, L, head_dim) -> (B, L, units).
    weighted_value = mx.sym.reshape(weighted_value, shape=(-1, num_heads, 0, 0),
                                    reverse=True)
    weighted_value = mx.sym.transpose(weighted_value, axes=(0, 2, 1, 3))
    weighted_value = mx.sym.reshape(weighted_value, shape=(0, 0, -1))
    output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
                                   num_hidden=out_dim, no_bias=False)
    output = mx.sym.Group([output, att_score])
    executor = output._simple_bind(ctx=default_context(),
                                   q=(batch_size, qkv_length, qkv_dim),
                                   kv=(batch_size, qkv_length, qkv_dim),
                                   type_dict={'q': dtype,
                                              'kv': dtype},
                                   grad_req='write')
    executor.copy_params_from(arg_params, {})
    executor.arg_dict['sonde'][:] = 0.
    executor.arg_dict['sonde'].wait_to_read()
    executor.forward(is_train=True)
    output_orig = executor.outputs[0].asnumpy()
    att_score_orig = executor.outputs[1].asnumpy()
    executor.backward([mx.nd.array(output_grads, dtype=dtype), mx.nd.zeros(att_score_orig.shape, dtype=dtype)])
    grads_orig = {k : v.asnumpy() for k, v in executor.grad_dict.items()}

    # Fused and reference paths must agree in value and in every gradient.
    assert_allclose(att_score_orig, att_score_opti, rtol=1e-2, atol=1e-3)
    assert_allclose(output_orig, output_opti, rtol=1e-2, atol=1e-3)
    for k in grads_opti.keys():
        assert(grads_orig[k].dtype == grads_opti[k].dtype)
        assert(grads_orig[k].shape == grads_opti[k].shape)
        assert_allclose(grads_orig[k], grads_opti[k], rtol=1e-2, atol=1e-3)
@assert_raises_cuda_not_satisfied(min_version='9.1')
@pytest.mark.serial
def test_multihead_attention_encdec():
    """Run the encoder-decoder multi-head attention check for each supported dtype."""
    # float16 is only exercised when running on a GPU context
    on_gpu = default_context().device_type == 'gpu'
    dtypes = ['float32', 'float16'] if on_gpu else ['float32']
    for dt in dtypes:
        check_multihead_attention_encdec(dtype=dt)
@pytest.mark.serial
def test_im2col_col2im():
    """End-to-end checks for the im2col / col2im operator pair.

    Three properties are verified:
      1. im2col followed by a matrix multiply reproduces mx.nd.Convolution.
      2. col2im composed with im2col reconstructs the original image (after
         normalizing by the per-pixel patch multiplicity).
      3. The gradient of im2col is col2im, and vice versa.
    """
    def compute_output_size(spatial, kernel, stride=1, dilate=1, pad=0):
        # Standard convolution output-size formula for one spatial dimension.
        pad_size = spatial + 2 * pad
        dilated_kernel = dilate * (kernel - 1) + 1
        return (pad_size - dilated_kernel) // stride + 1
    def build_kwargs(kernel, stride=1, dilate=1, pad=0):
        # Expand scalar hyper-parameters into the 2-D tuples the operators expect.
        return {'kernel': (kernel, kernel),
                'stride': (stride, stride),
                'dilate': (dilate, dilate),
                'pad': (pad, pad)}
    # use im2col to compute convolution
    def test_conv_compute(input_shape, num_filter, kernel, stride=1, dilate=1, pad=0):
        batch_size = input_shape[0]
        channel = input_shape[1]
        kwargs = build_kwargs(kernel, stride, dilate, pad)
        data = mx.nd.uniform(shape=input_shape)
        col = mx.nd.im2col(data, **kwargs)
        w = mx.nd.uniform(shape=(num_filter, channel, kernel, kernel))
        # convolution expressed as (patch matrix) x (flattened weights)
        c1 = mx.nd.dot(col.transpose((0, 2, 1)), w.reshape(num_filter, -1).T).transpose((0, 2, 1))
        hos = compute_output_size(input_shape[2], kernel, stride, dilate, pad)
        wos = compute_output_size(input_shape[3], kernel, stride, dilate, pad)
        c1 = c1.reshape((batch_size, num_filter, hos, wos))
        c2 = mx.nd.Convolution(data, num_filter=num_filter, weight=w, no_bias=True, **kwargs)
        assert_almost_equal(c1.asnumpy(), c2.asnumpy(), rtol=1e-5, atol=1e-5)
    test_conv_compute(
        input_shape = (5, 3, 30, 20),
        num_filter = 10,
        kernel = 3
    )
    test_conv_compute(
        input_shape = (5, 3, 30, 20),
        num_filter = 10,
        kernel = 3,
        stride = 2
    )
    test_conv_compute(
        input_shape = (5, 3, 30, 20),
        num_filter = 10,
        kernel = 3,
        stride = 2,
        dilate = 2
    )
    test_conv_compute(
        input_shape = (5, 3, 30, 20),
        num_filter = 10,
        kernel = 3,
        stride = 2,
        dilate = 2,
        pad = 1
    )
    # use composite of im2col and col2im to reconstruct image
    def test_reconstruct(input_shape, kernel, stride=1, dilate=1, pad=0):
        batch_size = input_shape[0]
        channel = input_shape[1]
        kwargs = build_kwargs(kernel, stride, dilate, pad)
        data = mx.nd.uniform(shape=input_shape)
        col = mx.nd.im2col(data, **kwargs)
        im1 = mx.nd.col2im(col, input_shape[2:], **kwargs)
        # col2im of all-ones gives, per pixel, the number of patches it
        # appears in — so im1 must equal that multiplicity times the data.
        im2 = mx.nd.col2im(mx.nd.ones_like(col), input_shape[2:], **kwargs) * data
        assert_almost_equal(im1.asnumpy(), im2.asnumpy(), rtol=1e-5, atol=1e-5)
    test_reconstruct(
        input_shape = (5, 3, 30, 20),
        kernel = 3
    )
    test_reconstruct(
        input_shape = (5, 3, 30, 20),
        kernel = 3,
        stride = 2
    )
    test_reconstruct(
        input_shape = (5, 3, 30, 20),
        kernel = 3,
        stride = 2,
        dilate = 2
    )
    test_reconstruct(
        input_shape = (5, 3, 30, 20),
        kernel = 3,
        stride = 2,
        dilate = 2,
        pad = 1
    )
    # test gradient
    # the grad of im2col is col2im, and vice versa
    def test_grad(input_shape, kernel, stride=1, dilate=1, pad=0):
        # im2col
        data = mx.sym.Variable('data')
        kwargs = build_kwargs(kernel, stride, dilate, pad)
        sym = mx.sym.im2col(data, **kwargs)
        im = mx.nd.uniform(shape=input_shape)
        col = mx.nd.im2col(im, **kwargs)
        col_shape = col.shape
        expected = mx.nd.col2im(col, input_shape[2:], **kwargs)
        check_symbolic_backward(sym, [im.asnumpy()], [col.asnumpy()], [expected.asnumpy()])
        # col2im
        data = mx.sym.Variable('data')
        sym = mx.sym.col2im(data, input_shape[2:], **kwargs)
        col = mx.nd.uniform(shape=col_shape)
        im = mx.nd.col2im(col, input_shape[2:], **kwargs)
        expected = mx.nd.im2col(im, **kwargs)
        check_symbolic_backward(sym, [col.asnumpy()], [im.asnumpy()], [expected.asnumpy()])
    test_grad(
        input_shape = (5, 3, 30, 20),
        kernel = 3
    )
    test_grad(
        input_shape = (5, 3, 30, 20),
        kernel = 3,
        stride = 2
    )
    test_grad(
        input_shape = (5, 3, 30, 20),
        kernel = 3,
        stride = 2,
        dilate = 2
    )
    test_grad(
        input_shape = (5, 3, 30, 20),
        kernel = 3,
        stride = 2,
        dilate = 2,
        pad = 1
    )
def test_elemwise_sum_for_gradient_accumulation():
    """Gradient accumulation (grad_req='add') must match overwrite (grad_req='write').

    Repeatedly multiplying by the same variable makes the backward pass hit
    the gradient buffer multiple times, exercising elemwise_sum accumulation.
    """
    for nrepeat in range(1, 10):
        grads = {}
        for req in ('write', 'add'):
            x = mx.nd.array([1])
            y = mx.nd.array([2])
            x.attach_grad(grad_req=req)
            if req == 'add':
                # accumulation mode adds to whatever is in the buffer,
                # so start from a clean zero
                x.grad[:] = 0
            with mx.autograd.record():
                for _ in range(nrepeat):
                    y = y * x
                y.backward()
            grads[req] = x.grad.asscalar()
        assert grads['write'] == grads['add']
        assert grads['write'] == 2 * nrepeat
def test_elementwise_ops_on_misaligned_input():
    """Elementwise kernels must handle views that start mid-buffer.

    Slicing a float16 array produces views whose data pointer is not aligned
    to the vectorized-load width; the op must neither crash nor write past
    the end of the view.
    """
    # Note: testing just elemwise_add since all elemwise_ops
    # share the implementation
    src = mx.nd.array([1, 2, 3, 4], dtype='float16')
    other = mx.nd.array([1, 2, 3, 4], dtype='float16')
    lhs = src[1:3]   # view starting at an odd element -> misaligned pointer
    rhs = other[1:3]
    mx.nd.elemwise_add(lhs, rhs, out=lhs)
    mx.nd.waitall()
    src = mx.nd.array([1, 2, 3, 4], dtype='float16')
    other = mx.nd.array([1, 2, 3, 4], dtype='float16')
    lhs = src[0:3]   # aligned start, but length not a multiple of vector width
    rhs = other[0:3]
    mx.nd.elemwise_add(lhs, rhs, out=lhs)
    mx.nd.waitall()
    # the element just past the written view must be untouched
    assert src[3].asscalar() == 4.0
@pytest.mark.parametrize('dtype', ['float16', 'float32', 'float64'])
@pytest.mark.parametrize('lead_dim', [2, 3, 4, 6, 10])
@pytest.mark.parametrize('both_ways', [False, True])
def test_broadcast_ops_on_misaligned_input(dtype, lead_dim, both_ways):
    """broadcast_add must handle input/output views whose start is offset by
    one element into the underlying buffer (misaligned data pointers).
    """
    # NOTE(review): `dtype` is parametrized but never applied to the arrays
    # below (mx.nd.arange defaults to float32) -- confirm whether the buffers
    # were meant to be cast to `dtype`.
    shape = list(rand_shape_2d()) + [lead_dim]
    small_shape = [shape[0], 1, lead_dim]
    if both_ways:
        # Broadcast in both ways [1, K, L] x [M, 1, L]
        big_shape = [1, shape[1], lead_dim]
    else:
        big_shape = shape
    # np.product was deprecated and removed in NumPy 2.0; np.prod is the
    # supported spelling.
    size = np.prod(shape)
    small_size = np.prod(small_shape)
    big_size = np.prod(big_shape)
    a = mx.nd.arange(5000)
    b = mx.nd.arange(5000)
    e = mx.nd.arange(5000)
    # slice off by one element so every view starts mid-allocation
    c = a[1:big_size + 1].reshape(big_shape)
    d = b[1:small_size + 1].reshape(small_shape)
    f = e[1:size + 1].reshape(shape)
    mx.nd.broadcast_add(c, d, out=f)
    expected = c.asnumpy() + d.asnumpy()
    mx.nd.waitall()
    assert_almost_equal(f, expected)
@pytest.mark.parametrize('dtype', ['float16', 'float32', 'float64'])
@pytest.mark.parametrize('lead_dim', [2, 3, 4, 6, 10])
@pytest.mark.parametrize('both_ways', [False, True])
def test_broadcast_ops_on_misaligned_input_oneside(dtype, lead_dim, both_ways):
    """Same as test_broadcast_ops_on_misaligned_input, but the small operand
    broadcasts along the last (fastest-varying) axis only.
    """
    # NOTE(review): `dtype` is parametrized but never applied to the arrays
    # below (mx.nd.arange defaults to float32) -- confirm whether the buffers
    # were meant to be cast to `dtype`.
    shape = list(rand_shape_2d()) + [lead_dim]
    small_shape = [shape[0], shape[1], 1]
    if both_ways:
        # Broadcast in both ways [1, K, L] x [M, 1, 1]
        big_shape = [1, shape[1], lead_dim]
    else:
        big_shape = shape
    # np.product was deprecated and removed in NumPy 2.0; np.prod is the
    # supported spelling.
    size = np.prod(shape)
    small_size = np.prod(small_shape)
    big_size = np.prod(big_shape)
    a = mx.nd.arange(5000)
    b = mx.nd.arange(5000)
    e = mx.nd.arange(5000)
    # slice off by one element so every view starts mid-allocation
    c = a[1:big_size + 1].reshape(big_shape)
    d = b[1:small_size + 1].reshape(small_shape)
    f = e[1:size + 1].reshape(shape)
    mx.nd.broadcast_add(c, d, out=f)
    expected = c.asnumpy() + d.asnumpy()
    mx.nd.waitall()
    assert_almost_equal(f, expected)
def test_sldwin_selfatten_operators():
    """Check the sliding-window self-attention operators against a dense reference.

    The specialized score / mask / context operators are compared, forward and
    backward, with an equivalent computation built from full batch_dot products
    and an explicitly constructed dense sliding-window mask.
    """
    def gen_sliding_window_mask_full(batch_size, num_heads, seq_length, w, symmetric, d):
        # Dense (seq_length x seq_length) 0/1 mask for a sliding window of
        # half-width w with dilation d: symmetric windows extend w*d positions
        # on both sides of i, otherwise only backwards (plus the diagonal).
        mask_np = np.zeros((batch_size, num_heads, seq_length, seq_length))
        for i in range(seq_length):
            end = (i + 1 + w * d) if symmetric else (i + 1)
            for j in range(i - w * d, end, d):
                if j >= 0 and j < seq_length:
                    mask_np[:, :, i, j] = 1
        return mask_np
    def test_sldwin_atten_op_impl(batch_size, seq_length, num_heads,
                                  num_head_units, w, symmetric, d):
        # Generate the data
        query = np.random.normal(0, 1, (batch_size, seq_length, num_heads, num_head_units))
        key = np.random.normal(0, 1, (batch_size, seq_length, num_heads, num_head_units))
        value = np.random.normal(0, 1, (batch_size, seq_length, num_heads, num_head_units))
        valid_length = np.zeros((batch_size,))
        valid_length[:] = seq_length
        query = mx.np.array(query, dtype=np.float32)
        key = mx.np.array(key, dtype=np.float32)
        value = mx.np.array(value, dtype=np.float32)
        # per-head dilation, all heads use the same value d
        dilation = mx.np.ones((num_heads,), dtype=np.int32)
        dilation[:] = d
        valid_length = mx.np.array(valid_length, dtype=np.int32)
        query.attach_grad()
        key.attach_grad()
        value.attach_grad()
        # --- specialized sliding-window operators ---
        with mx.autograd.record():
            score = mx.npx.sldwin_atten_score(query, key, dilation,
                                              w=w, symmetric=symmetric)
            mask = mx.npx.sldwin_atten_mask_like(score, dilation, valid_length,
                                                 w=w, symmetric=symmetric)
            score = score * mask
            out = mx.npx.sldwin_atten_context(score, value, dilation,
                                              w=w, symmetric=symmetric)
            out.backward()
        out_np = out.asnumpy()
        grad_query = query.grad.asnumpy()
        grad_key = key.grad.asnumpy()
        grad_value = value.grad.asnumpy()
        # reset gradients before running the dense reference
        query.grad[:] = 0
        key.grad[:] = 0
        value.grad[:] = 0
        # --- dense reference: full batch_dot with an explicit mask ---
        mask_np = gen_sliding_window_mask_full(batch_size, num_heads, seq_length,
                                               w, symmetric, d)
        mask = mx.np.array(mask_np, dtype=np.float32)
        with mx.autograd.record():
            score = mx.npx.batch_dot(mx.np.swapaxes(query, 1, 2),
                                     mx.np.swapaxes(key, 1, 2),
                                     transpose_b=True)
            score = score * mask
            out = mx.npx.batch_dot(score,
                                   mx.np.swapaxes(value, 1, 2)).transpose((0, 2, 1, 3))
            out.backward()
        out_np_gt = out.asnumpy()
        grad_query_gt = query.grad.asnumpy()
        grad_key_gt = key.grad.asnumpy()
        grad_value_gt = value.grad.asnumpy()
        assert_allclose(out_np_gt, out_np, 1E-3, 1E-3)
        assert_allclose(grad_query_gt, grad_query, 1E-3, 1E-3)
        assert_allclose(grad_key_gt, grad_key, 1E-3, 1E-3)
        assert_allclose(grad_value_gt, grad_value, 1E-3, 1E-3)
    for symmetric in [True, False]:
        for d in [1, 2, 3]:
            test_sldwin_atten_op_impl(2, 128, 2, 8, 16, symmetric, d)
            test_sldwin_atten_op_impl(1, 8, 2, 4, 2, symmetric, d)
def test_zero_sized_dim():
    """Sequence operators must accept inputs with a zero-sized dimension."""
    # Must be done to prevent zero-sized dimension conversion to 'unknown'
    prev_np_shape = mx.util.set_np_shape(True)
    def seq_last():
        """Test for issue: https://github.com/apache/incubator-mxnet/issues/18938"""
        data = mx.nd.array(np.random.rand(1, 0, 0))
        res = mx.nd.op.SequenceLast(data)
        # SequenceLast drops the (time) axis 0
        assert data.shape[1:] == res.shape
    def seq_mask():
        """Test for issue: https://github.com/apache/incubator-mxnet/issues/18939"""
        data = mx.nd.array(np.random.rand(0, 1, 1))
        res = mx.nd.op.SequenceMask(data)
        assert data.shape == res.shape
    def seq_reverse():
        """Test for issue: https://github.com/apache/incubator-mxnet/issues/18940"""
        data = mx.nd.array(np.random.rand(0, 1, 1))
        res = mx.nd.op.SequenceReverse(data)
        assert data.shape == res.shape
    try:
        seq_last()
        seq_reverse()
        seq_mask()
    finally:
        # always restore the global numpy-shape semantics flag
        mx.util.set_np_shape(prev_np_shape)
@mx.util.use_np
def test_take_grads():
    """Gradients flowing through mx.np.take must match the equivalent npx.slice.

    Test for https://github.com/apache/incubator-mxnet/issues/19817
    """
    from mxnet.gluon.nn import HybridBlock, Conv1D, HybridSequential, HybridLambda, Dense
    from mxnet import autograd, np as mx_np, npx as mx_npx
    from mxnet.gluon.loss import L2Loss
    def get_grads(model, grads, ctx=mx.cpu()):
        # Append the L1, L2, and Linf norms of all parameter gradients to `grads`.
        pd = model.collect_params()
        total_grad_l2 = 0
        total_grad_l1 = 0
        total_grad_linf = 0
        for p in pd:
            try:
                g = pd[p].grad(ctx) / N
                g2 = (g**2).sum().as_in_context(mx.cpu()).asscalar()
                g1 = g.abs().sum().as_in_context(mx.cpu()).asscalar()
                ginf = g.max().as_in_context(mx.cpu()).asscalar()
                total_grad_linf = max(total_grad_linf, ginf)
                total_grad_l2 += g2
                total_grad_l1 += g1
            except Exception:
                # best-effort: parameters without an allocated gradient are skipped
                pass
        grads.append(total_grad_l1)
        grads.append(total_grad_l2)
        grads.append(total_grad_linf)
    def run_model(model, loss, X, Y, num_iters=5):
        # Run several forward/backward passes, collecting gradient norms each time.
        grads = []
        for _ in range(num_iters):
            with autograd.record():
                Y_hat = model(X)
                ll = loss(Y_hat, Y)
                ll = ll.sum()
            ll.backward()
            get_grads(model, grads)
        return grads
    def dense_layer():
        den = HybridSequential()
        den.add(Dense(10, flatten=True, activation='tanh'))
        return den
    class Model(HybridBlock):
        def __init__(self, use_take=False, **kwargs):
            super().__init__()
            self.use_take = use_take
            self.den = dense_layer()
        def forward(self, X, axis=1):
            X1 = self.den(X)
            # (removed leftover debug print of X1.shape -- it ran on every
            # forward pass and polluted test output)
            if self.use_take:
                X2 = mx_np.take(X1, mx_np.array([0]), axis=axis)
            else:
                X2 = mx_npx.slice(X1.T, begin=0, end=1).T
            return X2
    N = 30
    T = 20
    C = 10
    X = np.random.normal(size=(N, T, C))
    Y = np.random.normal(size=(N, 1))
    X, Y = mx_np.array(X), mx_np.array(Y)
    seed = np.random.randint(1000)
    # Using mx_np.take
    mx.random.seed(seed)
    model = Model(use_take=True)
    model.initialize()
    loss = L2Loss()
    grads1 = run_model(model, loss, X, Y)
    # Using mx_npx.slice
    mx.random.seed(seed)
    model2 = Model(use_take=False)
    model2.initialize()
    grads2 = run_model(model2, loss, X, Y)
    # identical seeds + identical math => identical gradient norms
    for i in range(len(grads1)):
        assert_almost_equal(grads1[i], grads2[i])
|
acquisitions.py | import numpy as np
import multiprocessing
import threading
from inspect import signature
import time
from pycromanager.zmq import deserialize_array, Bridge
from pycromanager.data import Dataset
import warnings
import os.path
import queue
from pycromanager.zmq import JavaObjectShadow
from docstring_inheritance import NumpyDocstringInheritanceMeta
### These functions live outside the class to prevent problems with pickling when running them in a different process
def _run_acq_event_source(bridge_port, event_port, event_queue, bridge_timeout=Bridge.DEFAULT_TIMEOUT, debug=False):
    """
    Pull acquisition events off ``event_queue`` and forward them over a push
    socket to the Java acquisition engine. Runs until ``None`` (the poison
    pill) is dequeued, at which point an "acquisition-end" event is sent and
    the socket is closed.

    Parameters
    ----------
    bridge_port :
        port of the Bridge used to create the push socket
    event_port :
        port of the Java-side event socket to connect to
    event_queue :
        queue of acquisition events produced on the Python side
    bridge_timeout :
        timeout (ms) for Bridge operations
        (Default value = Bridge.DEFAULT_TIMEOUT)
    debug :
        print debug messages
        (Default value = False)

    Returns
    -------
    """
    with Bridge(debug=debug, port=bridge_port, timeout=bridge_timeout) as bridge:
        event_socket = bridge._connect_push(event_port)
        while True:
            events = event_queue.get(block=True)
            if debug:
                print("got event(s):", events)
            if events is None:
                # Poison, time to shut down
                event_socket.send({"events": [{"special": "acquisition-end"}]})
                event_socket.close()
                return
            # the Java side always expects a list of events
            event_socket.send({"events": events if type(events) == list else [events]})
            if debug:
                print("sent events")
def _run_acq_hook(bridge_port, pull_port, push_port, hook_connected_evt, event_queue, hook_fn, debug):
    """
    Receive acquisition events from the Java side, run the user-supplied
    ``hook_fn`` on each, and push the (possibly modified) events back.
    Runs until the "acquisition-end" special event arrives.

    Parameters
    ----------
    bridge_port :
        port of the Bridge used to create the push/pull sockets
    pull_port :
        Java-side port this function pushes results to
    push_port :
        Java-side port this function pulls events from
    hook_connected_evt :
        Event set once both sockets are connected, so the caller can proceed
    event_queue :
        queue of upcoming acquisition events (passed to 3-argument hooks)
    hook_fn :
        user hook taking 1 argument (event) or 3 (event, bridge, event_queue)
    debug :
        print debug messages

    Returns
    -------
    """
    with Bridge(debug=debug, port=bridge_port) as bridge:
        push_socket = bridge._connect_push(pull_port)
        pull_socket = bridge._connect_pull(push_port)
        hook_connected_evt.set()
        while True:
            event_msg = pull_socket.receive()
            if "special" in event_msg and event_msg["special"] == "acquisition-end":
                # acknowledge shutdown and release sockets
                push_socket.send({})
                push_socket.close()
                pull_socket.close()
                return
            else:
                if "events" in event_msg.keys():
                    event_msg = event_msg["events"]  # convert from sequence
                params = signature(hook_fn).parameters
                if len(params) == 1 or len(params) == 3:
                    try:
                        if len(params) == 1:
                            new_event_msg = hook_fn(event_msg)
                        elif len(params) == 3:
                            new_event_msg = hook_fn(event_msg, bridge, event_queue)
                    except Exception as e:
                        # a failing hook skips this event but keeps the acquisition alive
                        warnings.warn("exception in acquisition hook: {}".format(e))
                        continue
                else:
                    raise Exception("Incorrect number of arguments for hook function. Must be 1 or 3")
                if isinstance(new_event_msg, list):
                    new_event_msg = {
                        "events": new_event_msg
                    }  # convert back to the expected format for a sequence
                push_socket.send(new_event_msg)
def _run_image_processor(
    bridge_port, pull_port, push_port, sockets_connected_evt, process_fn, event_queue, debug
):
    """
    Pull images from the Java side, run the user-supplied ``process_fn`` on
    each, and push the processed results back. Runs until the "finished"
    special message arrives.

    Parameters
    ----------
    bridge_port :
        port of the Bridge used to create the push/pull sockets
    pull_port :
        Java-side port this function pushes processed images to
    push_port :
        Java-side port this function pulls raw images from
    sockets_connected_evt :
        Event set once both sockets are connected, so the caller can proceed
    process_fn :
        user function taking 2 arguments (image, metadata) or 4
        (image, metadata, bridge, event_queue); may return None, an
        (image, metadata) tuple, or a list of such tuples
    event_queue :
        queue of upcoming acquisition events (passed to 4-argument processors)
    debug :
        print debug messages

    Returns
    -------
    """
    with Bridge(debug=debug, port=bridge_port) as bridge:
        push_socket = bridge._connect_push(pull_port)
        pull_socket = bridge._connect_pull(push_port)
        if debug:
            print("image processing sockets connected")
        sockets_connected_evt.set()
        def process_and_sendoff(image_tags_tuple, original_dtype):
            """
            Validate one (pixels, metadata) pair returned by ``process_fn``
            and push it to the Java side.

            Parameters
            ----------
            image_tags_tuple :
                (pixels, metadata) pair produced by the processing function
            original_dtype :
                dtype of the incoming image, which the output must match

            Returns
            -------
            """
            if len(image_tags_tuple) != 2:
                raise Exception("If image is returned, it must be of the form (pixel, metadata)")
            pixels = image_tags_tuple[0]
            metadata = image_tags_tuple[1]
            # only accepts same pixel type as original
            if not np.issubdtype(pixels.dtype, original_dtype) and not np.issubdtype(
                original_dtype, pixels.dtype
            ):
                # Bug fix: the old message formatted the processed dtype twice
                # (image_tags_tuple[0] and pixels are the same object); report
                # processed vs. original dtype instead.
                raise Exception(
                    "Processed image pixels must have same dtype as input image pixels, "
                    "but instead they were {} and {}".format(pixels.dtype, original_dtype)
                )
            if metadata['PixelType'] == 'RGB32':
                if pixels.shape[-1] == 3:
                    # append 0 for alpha channel because that's what's expected
                    pixels = np.concatenate([pixels, np.zeros_like(pixels[..., 0])[..., None]], axis=2)
            else:
                # maybe pixel type was changed by processing?
                metadata["PixelType"] = "GRAY8" if pixels.dtype.itemsize == 1 else "GRAY16"
            processed_img = {
                "pixels": pixels.tobytes(),
                "metadata": metadata,
            }
            push_socket.send(processed_img)
        while True:
            message = None
            while message is None:
                message = pull_socket.receive(timeout=30)  # check for new message
            if "special" in message and message["special"] == "finished":
                push_socket.send(message)  # Continue propagating the finished signal
                push_socket.close()
                pull_socket.close()
                return
            metadata = message["metadata"]
            pixels = deserialize_array(message["pixels"])
            # RGB images arrive with a 4th (alpha) channel that is stripped here
            if metadata['PixelType'] == 'RGB32':
                image = np.reshape(pixels, [metadata["Height"], metadata["Width"], 4])[..., :3]
            else:
                image = np.reshape(pixels, [metadata["Height"], metadata["Width"]])
            params = signature(process_fn).parameters
            if len(params) == 2 or len(params) == 4:
                processed = None
                try:
                    if len(params) == 2:
                        processed = process_fn(image, metadata)
                    elif len(params) == 4:
                        processed = process_fn(image, metadata, bridge, event_queue)
                except Exception as e:
                    # a failing processor skips this image but keeps the acquisition alive
                    warnings.warn("exception in image processor: {}".format(e))
                    continue
            else:
                raise Exception(
                    "Incorrect number of arguments for image processing function, must be 2 or 4"
                )
            if processed is None:
                # None means the image is diverted (not saved/displayed)
                continue
            if type(processed) == list:
                for image in processed:
                    process_and_sendoff(image, pixels.dtype)
            else:
                process_and_sendoff(processed, pixels.dtype)
class Acquisition(object, metaclass=NumpyDocstringInheritanceMeta):
    """
    Base class for Pycro-Manager acquisitions
    """
    def __init__(
        self,
        directory: str=None,
        name: str=None,
        image_process_fn : callable=None,
        event_generation_hook_fn: callable=None,
        pre_hardware_hook_fn: callable=None,
        post_hardware_hook_fn: callable=None,
        post_camera_hook_fn: callable=None,
        show_display: bool or str=True,
        image_saved_fn: callable=None,
        process: bool=False,
        saving_queue_size: int=20,
        bridge_timeout: int=500,
        port: int=Bridge.DEFAULT_PORT,
        debug: bool=False,
        core_log_debug: bool=False,
    ):
        """
        Parameters
        ----------
        directory : str
            saving directory for this acquisition. Required unless an image process function will be
            implemented that diverts images from saving
        name : str
            Saving name for the acquisition. Required unless an image process function will be
            implemented that diverts images from saving
        image_process_fn : Callable
            image processing function that will be called on each image that gets acquired.
            Can either take two arguments (image, metadata) where image is a numpy array and metadata is a dict
            containing the corresponding image metadata. Or a 4 argument version is accepted, which accepts (image,
            metadata, bridge, queue), where bridge and queue are an instance of the pycromanager.acquire.Bridge
            object for the purposes of interacting with arbitrary code on the Java side (such as the micro-manager
            core), and queue is a Queue object that holds upcoming acquisition events. Both versions must either
            return None (diverting the image from saving), a single (image, metadata) tuple, or a list of such tuples
        event_generation_hook_fn : Callable
            hook function that will run as soon as acquisition events are generated (before hardware sequencing optimization
            in the acquisition engine. This is useful if one wants to modify acquisition events that they didn't generate
            (e.g. those generated by a GUI application). Accepts either one argument (the current acquisition event)
            or three arguments (current event, bridge, event Queue)
        pre_hardware_hook_fn : Callable
            hook function that will be run just before the hardware is updated before acquiring
            a new image. In the case of hardware sequencing, it will be run just before a sequence of instructions are
            dispatched to the hardware. Accepts either one argument (the current acquisition event) or three arguments
            (current event, bridge, event Queue)
        post_hardware_hook_fn : Callable
            hook function that will be run just after the hardware is updated before acquiring
            a new image. In the case of hardware sequencing, it will be run just after a sequence of instructions are
            dispatched to the hardware, but before the camera sequence has been started. Accepts either one argument
            (the current acquisition event) or three arguments (current event, bridge, event Queue)
        post_camera_hook_fn : Callable
            hook function that will be run just after the camera has been triggered to snapImage or
            startSequence. A common use case for this hook is when one wants to send TTL triggers to the camera from an
            external timing device that synchronizes with other hardware. Accepts either one argument (the current
            acquisition event) or three arguments (current event, bridge, event Queue)
        show_display : bool or str
            If True, show the image viewer window. If False, show no viewer. If 'napari', show napari as the viewer
        image_saved_fn : Callable
            function that takes two arguments (the Axes of the image that just finished saving, and the Dataset)
            and gets called whenever a new image is written to disk
        process : bool
            Use multiprocessing instead of multithreading for acquisition hooks and image
            processors. This can be used to speed up CPU-bounded processing by eliminating bottlenecks
            caused by Python's Global Interpreter Lock, but also creates complications on Windows-based
            systems
        saving_queue_size : int
            The number of images to queue (in memory) while waiting to write to disk. Higher values should
            in theory allow sequence acquisitions to go faster, but requires the RAM to hold images while
            they are waiting to save
        bridge_timeout :
            Timeout in ms of all operations going through the Bridge
        port :
            Allows overriding the default port for using Java side servers on a different port
        debug : bool
            whether to print debug messages
        core_log_debug : bool
            Print debug messages on java side in the micro-manager core log
        """
        self._bridge_timeout = bridge_timeout
        self.bridge = Bridge(debug=debug, port=port, timeout=bridge_timeout)
        self._bridge_port = port
        self._debug = debug
        self._dataset = None
        self._finished = False
        # Get a dict of all named argument values (or default values when nothing provided)
        arg_names = [k for k in signature(Acquisition.__init__).parameters.keys() if k != 'self']
        l = locals()
        named_args = {arg_name: (l[arg_name] if arg_name in l else
                                 dict(signature(Acquisition.__init__).parameters.items())[arg_name].default)
                      for arg_name in arg_names }
        if directory is not None:
            # Expand ~ in path
            directory = os.path.expanduser(directory)
            # If path is relative, retain knowledge of the current working directory
            named_args['directory'] = os.path.abspath(directory)
        # Build the Java-side acquisition and wire up processors/hooks before starting
        self._create_event_queue(**named_args)
        self._create_remote_acquisition(**named_args)
        self._initialize_image_processor(**named_args)
        self._initialize_hooks(**named_args)
        self._remote_acq.start()
        self._dataset_disk_location = (
            self._remote_acq.get_storage().get_disk_location()
            if self._remote_acq.get_storage() is not None
            else None
        )
        self._start_events()
        if image_saved_fn is not None:
            # monitor Java-side storage so the callback fires as images hit disk
            self._dataset = Dataset(remote_storage_monitor=self._remote_acq.get_storage_monitor())
            self._storage_monitor_thread = self._dataset._add_storage_monitor_fn(
                callback_fn=image_saved_fn, debug=self._debug
            )
        if show_display == 'napari':
            import napari
            from pycromanager.napari_util import start_napari_signalling
            viewer = napari.Viewer()
            start_napari_signalling(viewer, self.get_dataset())
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Context-manager exit: signal that no more events will come, then block until done
        self.mark_finished()
        # now wait on it to finish
        self.await_completion()
    def _start_events(self, **kwargs):
        """Launch the thread that forwards queued events to the Java acquisition engine"""
        self.event_port = self._remote_acq.get_event_port()
        self._event_thread = threading.Thread(
            target=_run_acq_event_source,
            args=(self._bridge_port, self.event_port, self._event_queue, self._bridge_timeout, self._debug),
            name="Event sending",
        )
        self._event_thread.start()
    def _initialize_image_processor(self, **kwargs):
        """If an image processing function was supplied, create the Java-side processor
        and the Python thread/process that runs the user function"""
        if kwargs['image_process_fn'] is not None:
            java_processor = self.bridge.construct_java_object(
                "org.micromanager.remote.RemoteImageProcessor"
            )
            self._remote_acq.add_image_processor(java_processor)
            self._processor_thread = self._start_processor(
                java_processor, kwargs['image_process_fn'], self._event_queue, process=kwargs['process'])
    def _initialize_hooks(self, **kwargs):
        """Register each supplied hook function at its corresponding stage of the acquisition cycle"""
        self._hook_threads = []
        if kwargs['event_generation_hook_fn'] is not None:
            hook = self.bridge.construct_java_object(
                "org.micromanager.remote.RemoteAcqHook", args=[self._remote_acq]
            )
            self._hook_threads.append(self._start_hook(hook, kwargs['event_generation_hook_fn'],
                                                       self._event_queue, process=kwargs['process']))
            self._remote_acq.add_hook(hook, self._remote_acq.EVENT_GENERATION_HOOK)
        if kwargs['pre_hardware_hook_fn'] is not None:
            hook = self.bridge.construct_java_object(
                "org.micromanager.remote.RemoteAcqHook", args=[self._remote_acq]
            )
            self._hook_threads.append(self._start_hook(hook,
                                                       kwargs['pre_hardware_hook_fn'], self._event_queue,
                                                       process=kwargs['process']))
            self._remote_acq.add_hook(hook, self._remote_acq.BEFORE_HARDWARE_HOOK)
        if kwargs['post_hardware_hook_fn'] is not None:
            hook = self.bridge.construct_java_object(
                "org.micromanager.remote.RemoteAcqHook", args=[self._remote_acq]
            )
            self._hook_threads.append(self._start_hook(hook, kwargs['post_hardware_hook_fn'],
                                                       self._event_queue, process=kwargs['process']))
            self._remote_acq.add_hook(hook, self._remote_acq.AFTER_HARDWARE_HOOK)
        if kwargs['post_camera_hook_fn'] is not None:
            hook = self.bridge.construct_java_object(
                "org.micromanager.remote.RemoteAcqHook", args=[self._remote_acq]
            )
            self._hook_threads.append(self._start_hook(hook, kwargs['post_camera_hook_fn'],
                                                       self._event_queue, process=kwargs['process']))
            self._remote_acq.add_hook(hook, self._remote_acq.AFTER_CAMERA_HOOK)
    def _create_event_queue(self, **kwargs):
        """Create a queue for acquisition events; process-safe when hooks/processors run in a subprocess"""
        # Create thread safe queue for events so they can be passed from multiple processes
        self._event_queue = multiprocessing.Queue() if kwargs['process'] else queue.Queue()
    def _create_remote_acquisition(self, **kwargs):
        """Construct the Java-side acquisition object (overridden by subclasses)"""
        core = self.bridge.get_core()
        acq_factory = self.bridge.construct_java_object(
            "org.micromanager.remote.RemoteAcquisitionFactory", args=[core]
        )
        # note: show_display == 'napari' yields False here -- the napari viewer is handled in __init__
        show_viewer = kwargs['show_display'] == True and (kwargs['directory'] is not None and kwargs['name'] is not None)
        self._remote_acq = acq_factory.create_acquisition(
            kwargs['directory'],
            kwargs['name'],
            show_viewer,
            kwargs['saving_queue_size'],
            kwargs['core_log_debug'],
        )
    def get_dataset(self):
        """
        Get access to the dataset backing this acquisition. If the acquisition is in progress,
        return a Dataset object that wraps the java class containing it. If the acquisition is finished,
        load the dataset from disk on the Python side for better performance
        """
        if self._finished:
            # replace any remote-backed dataset with one loaded directly from disk
            if self._dataset is None or self._dataset._remote_storage_monitor is not None:
                self._dataset = Dataset(self._dataset_disk_location)
        elif self._dataset is None:
            # Load remote storage
            self._dataset = Dataset(remote_storage_monitor=self._remote_acq.get_storage_monitor())
            # Monitor image arrival so they can be loaded on python side, but with no callback function
            self._storage_monitor_thread = self._dataset._add_storage_monitor_fn(callback_fn=None, debug=self._debug)
        return self._dataset
    def mark_finished(self):
        """
        Signal to acquisition that no more events will be added and it is time to initiate shutdown.
        This is only needed if the context manager (i.e. "with Acquisition...") is not used
        """
        if self._event_queue is not None:  # magellan acquisitions dont have this
            # this should shut down storage and viewer as appropriate
            self._event_queue.put(None)
    def await_completion(self):
        """Wait for acquisition to finish and resources to be cleaned up"""
        while not self._remote_acq.is_finished():
            time.sleep(0.01)
        self._remote_acq = None
        # Wait on all the other threads to shut down properly
        if hasattr(self, '_storage_monitor_thread'):
            self._storage_monitor_thread.join()
        for hook_thread in self._hook_threads:
            hook_thread.join()
        if hasattr(self, '_event_thread'):
            self._event_thread.join()
        self.bridge.close()
        self._finished = True
    def acquire(self, events: dict or list, keep_shutter_open=False):
        """Submit an event or a list of events for acquisition. Optimizations (i.e. taking advantage of
        hardware synchronization, where available), will take place across this list of events, but not
        over multiple calls of this method. A single event is a python dictionary with a specific structure

        Parameters
        ----------
        events : list, dict
            A single acquisition event (a dict) or a list of acquisition events
        keep_shutter_open :
            (Default value = False)

        Returns
        -------
        """
        if events is None:
            # manual shutdown
            self._event_queue.put(None)
            return
        if keep_shutter_open and isinstance(events, list):
            for e in events:
                e["keep_shutter_open"] = True
            events.append(
                {"keep_shutter_open": False}
            )  # return to autoshutter, dont acquire an image
        elif keep_shutter_open and isinstance(events, dict):
            events["keep_shutter_open"] = True
            events = [
                events,
                {"keep_shutter_open": False},
            ]  # return to autoshutter, dont acquire an image
        self._event_queue.put(events)
    def _start_hook(self, remote_hook : JavaObjectShadow, remote_hook_fn : callable, event_queue, process):
        """Start the thread/process that runs one user hook function.

        Parameters
        ----------
        remote_hook :
            Java-side RemoteAcqHook providing the push/pull ports
        remote_hook_fn :
            user hook function to run on each event
        event_queue :
            queue of upcoming acquisition events
        process :
            if True run in a subprocess, otherwise in a thread

        Returns
        -------
        the started thread or process
        """
        hook_connected_evt = multiprocessing.Event() if process else threading.Event()
        pull_port = remote_hook.get_pull_port()
        push_port = remote_hook.get_push_port()
        hook_thread = (multiprocessing.Process if process else threading.Thread)(
            target=_run_acq_hook,
            name="AcquisitionHook",
            args=(
                self._bridge_port,
                pull_port,
                push_port,
                hook_connected_evt,
                event_queue,
                remote_hook_fn,
                self._debug,
            ),
        )
        hook_thread.start()
        hook_connected_evt.wait()  # wait for push/pull sockets to connect
        return hook_thread
    def _start_processor(self, processor, process_fn, event_queue, process):
        """Start the thread/process that runs the user image processing function.

        Parameters
        ----------
        processor :
            Java-side RemoteImageProcessor providing the push/pull ports
        process_fn :
            user image processing function
        event_queue :
            queue of upcoming acquisition events
        process :
            if True run in a subprocess, otherwise in a thread

        Returns
        -------
        the started thread or process
        """
        # this must start first
        processor.start_pull()
        sockets_connected_evt = multiprocessing.Event() if process else threading.Event()
        pull_port = processor.get_pull_port()
        push_port = processor.get_push_port()
        processor_thread = (multiprocessing.Process if process else threading.Thread)(
            target=_run_image_processor,
            args=(
                self._bridge_port,
                pull_port,
                push_port,
                sockets_connected_evt,
                process_fn,
                event_queue,
                self._debug,
            ),
            name="ImageProcessor",
        )
        processor_thread.start()
        sockets_connected_evt.wait()  # wait for push/pull sockets to connect
        processor.start_push()
        return processor_thread
class XYTiledAcquisition(Acquisition):
    """
    For making tiled images with an XY stage and multiresolution saving
    (e.g. for making one large contiguous image of a sample larger than the field of view)
    """
    def __init__(
        self,
        tile_overlap : int or tuple,
        directory: str=None,
        name: str=None,
        max_multi_res_index: int=None,
        image_process_fn: callable=None,
        pre_hardware_hook_fn: callable=None,
        post_hardware_hook_fn: callable=None,
        post_camera_hook_fn: callable=None,
        show_display: bool=True,
        image_saved_fn: callable=None,
        process: bool=False,
        saving_queue_size: int=20,
        bridge_timeout: int=500,
        port: int=Bridge.DEFAULT_PORT,
        debug: bool=False,
        core_log_debug: bool=False,
    ):
        """
        Parameters
        ----------
        tile_overlap : int or tuple of int
            If given, XY tiles will be laid out in a grid and multi-resolution saving will be
            activated. Argument can be a two element tuple describing the pixel overlaps between adjacent
            tiles. i.e. (pixel_overlap_x, pixel_overlap_y), or an integer to use the same overlap for both.
            For these features to work, the current hardware configuration must have a valid affine transform
            between camera coordinates and XY stage coordinates
        max_multi_res_index : int
            Maximum index to downsample to in multi-res pyramid mode. 0 is no downsampling,
            1 is downsampled up to 2x, 2 is downsampled up to 4x, etc. If not provided, it will be dynamically
            calculated and updated from data
        """
        self.tile_overlap = tile_overlap
        self.max_multi_res_index = max_multi_res_index
        # Collect all argument values except the ones specific to tiled acquisitions,
        # and forward them to the base class
        arg_names = list(signature(self.__init__).parameters.keys())
        arg_names.remove('tile_overlap')
        arg_names.remove('max_multi_res_index')
        l = locals()
        named_args = {arg_name: l[arg_name] for arg_name in arg_names}
        super().__init__(**named_args)
    def _create_remote_acquisition(self, **kwargs):
        """Override: construct a tiled (multi-resolution) Java-side acquisition"""
        core = self.bridge.get_core()
        acq_factory = self.bridge.construct_java_object(
            "org.micromanager.remote.RemoteAcquisitionFactory", args=[core]
        )
        show_viewer = kwargs['show_display'] and (kwargs['directory'] is not None and kwargs['name'] is not None)
        # accept a scalar overlap or an explicit (x, y) pair
        if type(self.tile_overlap) is tuple:
            x_overlap, y_overlap = self.tile_overlap
        else:
            x_overlap = self.tile_overlap
            y_overlap = self.tile_overlap
        self._remote_acq = acq_factory.create_tiled_acquisition(
            kwargs['directory'],
            kwargs['name'],
            show_viewer,
            True,
            x_overlap,
            y_overlap,
            # -1 tells the Java side to compute the pyramid depth dynamically
            self.max_multi_res_index if self.max_multi_res_index is not None else -1,
            kwargs['saving_queue_size'],
            kwargs['core_log_debug'],
        )
class MagellanAcquisition(Acquisition):
    """
    Class used for launching Micro-Magellan acquisitions. Must pass either magellan_acq_index
    or magellan_explore as an argument
    """
    def __init__(
        self,
        magellan_acq_index: int=None,
        magellan_explore: bool=False,
        image_process_fn: callable=None,
        event_generation_hook_fn: callable=None,
        pre_hardware_hook_fn: callable=None,
        post_hardware_hook_fn: callable=None,
        post_camera_hook_fn: callable=None,
        image_saved_fn: callable=None,
        bridge_timeout: int=500,
        port: int=Bridge.DEFAULT_PORT,
        debug: bool=False,
        core_log_debug: bool=False,
    ):
        """
        Parameters
        ----------
        magellan_acq_index : int
            run this acquisition using the settings specified at this position in the main
            GUI of micro-magellan (micro-manager plugin). This index starts at 0
        magellan_explore : bool
            Run a Micro-magellan explore acquisition

        All other parameters are forwarded unchanged to Acquisition.__init__.
        """
        self.magellan_acq_index = magellan_acq_index
        self.magellan_explore = magellan_explore
        # Collect all argument values except the ones specific to Magellan,
        # then forward them to the base class.
        # NOTE: this relies on every remaining parameter name still being an
        # unshadowed local variable at this point (read back via locals()).
        arg_names = list(signature(self.__init__).parameters.keys())
        arg_names.remove('magellan_acq_index')
        arg_names.remove('magellan_explore')
        l = locals()
        named_args = {arg_name: l[arg_name] for arg_name in arg_names}
        super().__init__(**named_args)
    def _start_events(self, **kwargs):
        pass  # Magellan handles this on Java side
    def _create_event_queue(self, **kwargs):
        pass  # Magellan handles this on Java side
    def _create_remote_acquisition(self, **kwargs):
        # Magellan acquisitions are fully configured on the Java side, so the
        # Python-side event queue is unused (set to None in both branches).
        if self.magellan_acq_index is not None:
            magellan_api = self.bridge.get_magellan()
            self._remote_acq = magellan_api.create_acquisition(self.magellan_acq_index)
            self._event_queue = None
        elif self.magellan_explore:
            magellan_api = self.bridge.get_magellan()
            self._remote_acq = magellan_api.create_explore_acquisition()
            self._event_queue = None
|
test_app.py | import json
import random
import threading
import tornado.websocket
import tornado.gen
from tornado.testing import AsyncHTTPTestCase
from tornado.httpclient import HTTPError
from tornado.options import options
from tests.sshserver import run_ssh_server, banner, Server
from tests.utils import encode_multipart_formdata, read_file, make_tests_data_path # noqa
from wizardwebssh import handler
from wizardwebssh.main import make_app, make_handlers
from wizardwebssh.settings import (
get_app_settings, get_server_settings, max_body_size
)
from wizardwebssh.utils import to_str
from wizardwebssh.worker import clients
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
handler.DELAY = 0.1
swallow_http_errors = handler.swallow_http_errors
server_encodings = {e.strip() for e in Server.encodings}
class TestAppBase(AsyncHTTPTestCase):
    """Shared assertion and request helpers for the webssh app test cases."""

    def get_httpserver_options(self):
        return get_server_settings(options)

    def assert_response(self, bstr, response):
        # When the handler swallows HTTP errors, the app answers 200 with the
        # error text in the body; otherwise it propagates a 400 Bad Request.
        if swallow_http_errors:
            self.assertEqual(response.code, 200)
            self.assertIn(bstr, response.body)
        else:
            self.assertEqual(response.code, 400)
            self.assertIn(b'Bad Request', response.body)

    def assert_status_in(self, status, data):
        self.assertIsNone(data['encoding'])
        self.assertIsNone(data['id'])
        self.assertIn(status, data['status'])

    def assert_status_equal(self, status, data):
        self.assertIsNone(data['encoding'])
        self.assertIsNone(data['id'])
        self.assertEqual(status, data['status'])

    def assert_status_none(self, data):
        # A successful connection: worker id + encoding set, no error status.
        self.assertIsNotNone(data['encoding'])
        self.assertIsNotNone(data['id'])
        self.assertIsNone(data['status'])

    def fetch_request(self, url, method='GET', body='', headers=None, sync=True):
        """Issue a request, merging self.headers into *headers*.

        Fix: the original used a mutable ``{}`` default argument and mutated
        the caller-supplied dict in place via ``headers.update(...)``; we now
        default to None and merge into a fresh dict.
        """
        if not sync and url.startswith('/'):
            url = self.get_url(url)
        if isinstance(body, dict):
            body = urlencode(body)
        if not headers:
            headers = self.headers
        else:
            headers = dict(headers)  # copy: never mutate the caller's dict
            headers.update(self.headers)
        client = self if sync else self.get_http_client()
        return client.fetch(url, method=method, body=body, headers=headers)

    def sync_post(self, url, body, headers=None):
        return self.fetch_request(url, 'POST', body, headers)

    def async_post(self, url, body, headers=None):
        return self.fetch_request(url, 'POST', body, headers, sync=False)
class TestAppBasic(TestAppBase):
running = [True]
sshserver_port = 2200
body = 'hostname=127.0.0.1&port={}&_xsrf=yummy&username=robey&password=foo'.format(sshserver_port) # noqa
headers = {'Cookie': '_xsrf=yummy'}
def get_app(self):
self.body_dict = {
'hostname': '127.0.0.1',
'port': str(self.sshserver_port),
'username': 'robey',
'password': '',
'_xsrf': 'yummy'
}
loop = self.io_loop
options.debug = False
options.policy = random.choice(['warning', 'autoadd'])
options.hostfile = ''
options.syshostfile = ''
options.tdstream = ''
app = make_app(make_handlers(loop, options), get_app_settings(options))
return app
@classmethod
def setUpClass(cls):
print('='*20)
t = threading.Thread(
target=run_ssh_server, args=(cls.sshserver_port, cls.running)
)
t.setDaemon(True)
t.start()
    @classmethod
    def tearDownClass(cls):
        # Popping empties cls.running; the now-falsy list signals the SSH
        # server loop started in setUpClass to stop.
        cls.running.pop()
        print('='*20)
def test_app_with_invalid_form_for_missing_argument(self):
response = self.fetch('/')
self.assertEqual(response.code, 200)
body = 'port=7000&username=admin&password&_xsrf=yummy'
response = self.sync_post('/', body)
self.assert_response(b'Missing argument hostname', response)
body = 'hostname=127.0.0.1&port=7000&password&_xsrf=yummy'
response = self.sync_post('/', body)
self.assert_response(b'Missing argument username', response)
body = 'hostname=&port=&username=&password&_xsrf=yummy'
response = self.sync_post('/', body)
self.assert_response(b'Missing value hostname', response)
body = 'hostname=127.0.0.1&port=7000&username=&password&_xsrf=yummy'
response = self.sync_post('/', body)
self.assert_response(b'Missing value username', response)
def test_app_with_invalid_form_for_invalid_value(self):
body = 'hostname=127.0.0&port=22&username=&password&_xsrf=yummy'
response = self.sync_post('/', body)
self.assert_response(b'Invalid hostname', response)
body = 'hostname=http://www.googe.com&port=22&username=&password&_xsrf=yummy' # noqa
response = self.sync_post('/', body)
self.assert_response(b'Invalid hostname', response)
body = 'hostname=127.0.0.1&port=port&username=&password&_xsrf=yummy'
response = self.sync_post('/', body)
self.assert_response(b'Invalid port', response)
body = 'hostname=127.0.0.1&port=70000&username=&password&_xsrf=yummy'
response = self.sync_post('/', body)
self.assert_response(b'Invalid port', response)
def test_app_with_wrong_hostname_ip(self):
body = 'hostname=127.0.0.2&port=2200&username=admin&_xsrf=yummy'
response = self.sync_post('/', body)
self.assertEqual(response.code, 200)
self.assertIn(b'Unable to connect to', response.body)
def test_app_with_wrong_hostname_domain(self):
body = 'hostname=xxxxxxxxxxxx&port=2200&username=admin&_xsrf=yummy'
response = self.sync_post('/', body)
self.assertEqual(response.code, 200)
self.assertIn(b'Unable to connect to', response.body)
def test_app_with_wrong_port(self):
body = 'hostname=127.0.0.1&port=7000&username=admin&_xsrf=yummy'
response = self.sync_post('/', body)
self.assertEqual(response.code, 200)
self.assertIn(b'Unable to connect to', response.body)
def test_app_with_wrong_credentials(self):
response = self.sync_post('/', self.body + 's')
self.assert_status_in('Authentication failed.', json.loads(to_str(response.body))) # noqa
def test_app_with_correct_credentials(self):
response = self.sync_post('/', self.body)
self.assert_status_none(json.loads(to_str(response.body)))
def test_app_with_correct_credentials_but_with_no_port(self):
default_port = handler.DEFAULT_PORT
handler.DEFAULT_PORT = self.sshserver_port
# with no port value
body = self.body.replace(str(self.sshserver_port), '')
response = self.sync_post('/', body)
self.assert_status_none(json.loads(to_str(response.body)))
# with no port argument
body = body.replace('port=&', '')
response = self.sync_post('/', body)
self.assert_status_none(json.loads(to_str(response.body)))
handler.DEFAULT_PORT = default_port
@tornado.testing.gen_test
def test_app_with_correct_credentials_timeout(self):
url = self.get_url('/')
response = yield self.async_post(url, self.body)
data = json.loads(to_str(response.body))
self.assert_status_none(data)
url = url.replace('http', 'ws')
ws_url = url + 'ws?id=' + data['id']
yield tornado.gen.sleep(handler.DELAY + 0.1)
ws = yield tornado.websocket.websocket_connect(ws_url)
msg = yield ws.read_message()
self.assertIsNone(msg)
self.assertEqual(ws.close_reason, 'Websocket authentication failed.')
@tornado.testing.gen_test
def test_app_with_correct_credentials_but_ip_not_matched(self):
url = self.get_url('/')
response = yield self.async_post(url, self.body)
data = json.loads(to_str(response.body))
self.assert_status_none(data)
clients = handler.clients
handler.clients = {}
url = url.replace('http', 'ws')
ws_url = url + 'ws?id=' + data['id']
ws = yield tornado.websocket.websocket_connect(ws_url)
msg = yield ws.read_message()
self.assertIsNone(msg)
self.assertEqual(ws.close_reason, 'Websocket authentication failed.')
handler.clients = clients
@tornado.testing.gen_test
def test_app_with_correct_credentials_user_robey(self):
url = self.get_url('/')
response = yield self.async_post(url, self.body)
data = json.loads(to_str(response.body))
self.assert_status_none(data)
url = url.replace('http', 'ws')
ws_url = url + 'ws?id=' + data['id']
ws = yield tornado.websocket.websocket_connect(ws_url)
msg = yield ws.read_message()
self.assertEqual(to_str(msg, data['encoding']), banner)
ws.close()
@tornado.testing.gen_test
def test_app_with_correct_credentials_but_without_id_argument(self):
url = self.get_url('/')
response = yield self.async_post(url, self.body)
data = json.loads(to_str(response.body))
self.assert_status_none(data)
url = url.replace('http', 'ws')
ws_url = url + 'ws'
ws = yield tornado.websocket.websocket_connect(ws_url)
msg = yield ws.read_message()
self.assertIsNone(msg)
self.assertIn('Missing argument id', ws.close_reason)
@tornado.testing.gen_test
def test_app_with_correct_credentials_but_empty_id(self):
url = self.get_url('/')
response = yield self.async_post(url, self.body)
data = json.loads(to_str(response.body))
self.assert_status_none(data)
url = url.replace('http', 'ws')
ws_url = url + 'ws?id='
ws = yield tornado.websocket.websocket_connect(ws_url)
msg = yield ws.read_message()
self.assertIsNone(msg)
self.assertIn('Missing value id', ws.close_reason)
@tornado.testing.gen_test
def test_app_with_correct_credentials_but_wrong_id(self):
url = self.get_url('/')
response = yield self.async_post(url, self.body)
data = json.loads(to_str(response.body))
self.assert_status_none(data)
url = url.replace('http', 'ws')
ws_url = url + 'ws?id=1' + data['id']
ws = yield tornado.websocket.websocket_connect(ws_url)
msg = yield ws.read_message()
self.assertIsNone(msg)
self.assertIn('Websocket authentication failed', ws.close_reason)
@tornado.testing.gen_test
def test_app_with_correct_credentials_user_bar(self):
body = self.body.replace('robey', 'bar')
url = self.get_url('/')
response = yield self.async_post(url, body)
data = json.loads(to_str(response.body))
self.assert_status_none(data)
url = url.replace('http', 'ws')
ws_url = url + 'ws?id=' + data['id']
ws = yield tornado.websocket.websocket_connect(ws_url)
msg = yield ws.read_message()
self.assertEqual(to_str(msg, data['encoding']), banner)
# messages below will be ignored silently
yield ws.write_message('hello')
yield ws.write_message('"hello"')
yield ws.write_message('[hello]')
yield ws.write_message(json.dumps({'resize': []}))
yield ws.write_message(json.dumps({'resize': {}}))
yield ws.write_message(json.dumps({'resize': 'ab'}))
yield ws.write_message(json.dumps({'resize': ['a', 'b']}))
yield ws.write_message(json.dumps({'resize': {'a': 1, 'b': 2}}))
yield ws.write_message(json.dumps({'resize': [100]}))
yield ws.write_message(json.dumps({'resize': [100]*10}))
yield ws.write_message(json.dumps({'resize': [-1, -1]}))
yield ws.write_message(json.dumps({'data': [1]}))
yield ws.write_message(json.dumps({'data': (1,)}))
yield ws.write_message(json.dumps({'data': {'a': 2}}))
yield ws.write_message(json.dumps({'data': 1}))
yield ws.write_message(json.dumps({'data': 2.1}))
yield ws.write_message(json.dumps({'key-non-existed': 'hello'}))
# end - those just for testing wizardwebssh websocket stablity
yield ws.write_message(json.dumps({'resize': [79, 23]}))
msg = yield ws.read_message()
self.assertEqual(b'resized', msg)
yield ws.write_message(json.dumps({'data': 'bye'}))
msg = yield ws.read_message()
self.assertEqual(b'bye', msg)
ws.close()
@tornado.testing.gen_test
def test_app_auth_with_valid_pubkey_by_urlencoded_form(self):
url = self.get_url('/')
privatekey = read_file(make_tests_data_path('user_rsa_key'))
self.body_dict.update(privatekey=privatekey)
response = yield self.async_post(url, self.body_dict)
data = json.loads(to_str(response.body))
self.assert_status_none(data)
url = url.replace('http', 'ws')
ws_url = url + 'ws?id=' + data['id']
ws = yield tornado.websocket.websocket_connect(ws_url)
msg = yield ws.read_message()
self.assertEqual(to_str(msg, data['encoding']), banner)
ws.close()
@tornado.testing.gen_test
def test_app_auth_with_valid_pubkey_by_multipart_form(self):
url = self.get_url('/')
privatekey = read_file(make_tests_data_path('user_rsa_key'))
files = [('privatekey', 'user_rsa_key', privatekey)]
content_type, body = encode_multipart_formdata(self.body_dict.items(),
files)
headers = {
'Content-Type': content_type, 'content-length': str(len(body))
}
response = yield self.async_post(url, body, headers=headers)
data = json.loads(to_str(response.body))
self.assert_status_none(data)
url = url.replace('http', 'ws')
ws_url = url + 'ws?id=' + data['id']
ws = yield tornado.websocket.websocket_connect(ws_url)
msg = yield ws.read_message()
self.assertEqual(to_str(msg, data['encoding']), banner)
ws.close()
@tornado.testing.gen_test
def test_app_auth_with_invalid_pubkey_for_user_robey(self):
url = self.get_url('/')
privatekey = 'h' * 1024
files = [('privatekey', 'user_rsa_key', privatekey)]
content_type, body = encode_multipart_formdata(self.body_dict.items(),
files)
headers = {
'Content-Type': content_type, 'content-length': str(len(body))
}
if swallow_http_errors:
response = yield self.async_post(url, body, headers=headers)
self.assertIn(b'Invalid key', response.body)
else:
with self.assertRaises(HTTPError) as ctx:
yield self.async_post(url, body, headers=headers)
self.assertIn('Bad Request', ctx.exception.message)
@tornado.testing.gen_test
def test_app_auth_with_pubkey_exceeds_key_max_size(self):
url = self.get_url('/')
privatekey = 'h' * (handler.PrivateKey.max_length + 1)
files = [('privatekey', 'user_rsa_key', privatekey)]
content_type, body = encode_multipart_formdata(self.body_dict.items(),
files)
headers = {
'Content-Type': content_type, 'content-length': str(len(body))
}
if swallow_http_errors:
response = yield self.async_post(url, body, headers=headers)
self.assertIn(b'Invalid key', response.body)
else:
with self.assertRaises(HTTPError) as ctx:
yield self.async_post(url, body, headers=headers)
self.assertIn('Bad Request', ctx.exception.message)
@tornado.testing.gen_test
def test_app_auth_with_pubkey_cannot_be_decoded_by_multipart_form(self):
url = self.get_url('/')
privatekey = 'h' * 1024
files = [('privatekey', 'user_rsa_key', privatekey)]
content_type, body = encode_multipart_formdata(self.body_dict.items(),
files)
body = body.encode('utf-8')
# added some gbk bytes to the privatekey, make it cannot be decoded
body = body[:-100] + b'\xb4\xed\xce\xf3' + body[-100:]
headers = {
'Content-Type': content_type, 'content-length': str(len(body))
}
if swallow_http_errors:
response = yield self.async_post(url, body, headers=headers)
self.assertIn(b'Invalid unicode', response.body)
else:
with self.assertRaises(HTTPError) as ctx:
yield self.async_post(url, body, headers=headers)
self.assertIn('Bad Request', ctx.exception.message)
def test_app_post_form_with_large_body_size_by_multipart_form(self):
privatekey = 'h' * (2 * max_body_size)
files = [('privatekey', 'user_rsa_key', privatekey)]
content_type, body = encode_multipart_formdata(self.body_dict.items(),
files)
headers = {
'Content-Type': content_type, 'content-length': str(len(body))
}
response = self.sync_post('/', body, headers=headers)
self.assertIn(response.code, [400, 599])
def test_app_post_form_with_large_body_size_by_urlencoded_form(self):
privatekey = 'h' * (2 * max_body_size)
body = self.body + '&privatekey=' + privatekey
response = self.sync_post('/', body)
self.assertIn(response.code, [400, 599])
@tornado.testing.gen_test
def test_app_with_user_keyonly_for_bad_authentication_type(self):
self.body_dict.update(username='keyonly', password='foo')
response = yield self.async_post('/', self.body_dict)
self.assertEqual(response.code, 200)
self.assert_status_in('Bad authentication type', json.loads(to_str(response.body))) # noqa
@tornado.testing.gen_test
def test_app_with_user_pass2fa_with_correct_passwords(self):
self.body_dict.update(username='pass2fa', password='password',
totp='passcode')
response = yield self.async_post('/', self.body_dict)
self.assertEqual(response.code, 200)
data = json.loads(to_str(response.body))
self.assert_status_none(data)
@tornado.testing.gen_test
def test_app_with_user_pass2fa_with_wrong_pkey_correct_passwords(self):
url = self.get_url('/')
privatekey = read_file(make_tests_data_path('user_rsa_key'))
self.body_dict.update(username='pass2fa', password='password',
privatekey=privatekey, totp='passcode')
response = yield self.async_post(url, self.body_dict)
data = json.loads(to_str(response.body))
self.assert_status_none(data)
@tornado.testing.gen_test
def test_app_with_user_pkey2fa_with_correct_passwords(self):
url = self.get_url('/')
privatekey = read_file(make_tests_data_path('user_rsa_key'))
self.body_dict.update(username='pkey2fa', password='password',
privatekey=privatekey, totp='passcode')
response = yield self.async_post(url, self.body_dict)
data = json.loads(to_str(response.body))
self.assert_status_none(data)
@tornado.testing.gen_test
def test_app_with_user_pkey2fa_with_wrong_password(self):
url = self.get_url('/')
privatekey = read_file(make_tests_data_path('user_rsa_key'))
self.body_dict.update(username='pkey2fa', password='wrongpassword',
privatekey=privatekey, totp='passcode')
response = yield self.async_post(url, self.body_dict)
data = json.loads(to_str(response.body))
self.assert_status_in('Authentication failed', data)
@tornado.testing.gen_test
def test_app_with_user_pkey2fa_with_wrong_passcode(self):
url = self.get_url('/')
privatekey = read_file(make_tests_data_path('user_rsa_key'))
self.body_dict.update(username='pkey2fa', password='password',
privatekey=privatekey, totp='wrongpasscode')
response = yield self.async_post(url, self.body_dict)
data = json.loads(to_str(response.body))
self.assert_status_in('Authentication failed', data)
@tornado.testing.gen_test
def test_app_with_user_pkey2fa_with_empty_passcode(self):
url = self.get_url('/')
privatekey = read_file(make_tests_data_path('user_rsa_key'))
self.body_dict.update(username='pkey2fa', password='password',
privatekey=privatekey, totp='')
response = yield self.async_post(url, self.body_dict)
data = json.loads(to_str(response.body))
self.assert_status_in('Need a verification code', data)
class OtherTestBase(TestAppBase):
    """Base for tests that each need a private SSH server on a fresh port.

    Class attributes below are per-subclass configuration knobs copied into
    the global tornado ``options`` in get_app().  NOTE(review): ``body`` is a
    shared mutable class attribute updated in place by get_app() — confirm no
    subclass relies on the pristine value.
    """
    sshserver_port = 3300
    headers = {'Cookie': '_xsrf=yummy'}
    debug = False
    policy = None
    xsrf = True
    hostfile = ''
    syshostfile = ''
    tdstream = ''
    maxconn = 20
    origin = 'same'
    encodings = []
    body = {
        'hostname': '127.0.0.1',
        'port': '',
        'username': 'robey',
        'password': 'foo',
        '_xsrf': 'yummy'
    }

    def get_app(self):
        self.body.update(port=str(self.sshserver_port))
        loop = self.io_loop
        options.debug = self.debug
        options.xsrf = self.xsrf
        options.policy = self.policy if self.policy else random.choice(['warning', 'autoadd'])  # noqa
        options.hostfile = self.hostfile
        options.syshostfile = self.syshostfile
        options.tdstream = self.tdstream
        options.maxconn = self.maxconn
        options.origin = self.origin
        app = make_app(make_handlers(loop, options), get_app_settings(options))
        return app

    def setUp(self):
        print('=' * 20)
        # Fix: use a mutable container as the run flag (as TestAppBasic does).
        # A plain bool is passed by value, so flipping it in tearDown could
        # never be observed by the server thread.
        self.running = [True]
        OtherTestBase.sshserver_port += 1  # fresh port per test
        t = threading.Thread(
            target=run_ssh_server,
            args=(self.sshserver_port, self.running, self.encodings)
        )
        # Thread.setDaemon() is deprecated since Python 3.10.
        t.daemon = True
        t.start()
        super(OtherTestBase, self).setUp()

    def tearDown(self):
        # Emptying the list makes it falsy, signalling the server loop to exit.
        self.running.pop()
        print('=' * 20)
        super(OtherTestBase, self).tearDown()
class TestAppInDebugMode(OtherTestBase):
debug = True
def assert_response(self, bstr, response):
if swallow_http_errors:
self.assertEqual(response.code, 200)
self.assertIn(bstr, response.body)
else:
self.assertEqual(response.code, 500)
self.assertIn(b'Uncaught exception', response.body)
def test_server_error_for_post_method(self):
body = dict(self.body, error='raise')
response = self.sync_post('/', body)
self.assert_response(b'"status": "Internal Server Error"', response)
def test_html(self):
response = self.fetch('/', method='GET')
self.assertIn(b'novalidate>', response.body)
class TestAppWithLargeBuffer(OtherTestBase):
@tornado.testing.gen_test
def test_app_for_sending_message_with_large_size(self):
url = self.get_url('/')
response = yield self.async_post(url, dict(self.body, username='foo'))
data = json.loads(to_str(response.body))
self.assert_status_none(data)
url = url.replace('http', 'ws')
ws_url = url + 'ws?id=' + data['id']
ws = yield tornado.websocket.websocket_connect(ws_url)
msg = yield ws.read_message()
self.assertEqual(to_str(msg, data['encoding']), banner)
send = 'h' * (64 * 1024) + '\r\n\r\n'
yield ws.write_message(json.dumps({'data': send}))
lst = []
while True:
msg = yield ws.read_message()
lst.append(msg)
if msg.endswith(b'\r\n\r\n'):
break
recv = b''.join(lst).decode(data['encoding'])
self.assertEqual(send, recv)
ws.close()
class TestAppWithRejectPolicy(OtherTestBase):
policy = 'reject'
hostfile = make_tests_data_path('known_hosts_example')
@tornado.testing.gen_test
def test_app_with_hostname_not_in_hostkeys(self):
response = yield self.async_post('/', self.body)
data = json.loads(to_str(response.body))
message = 'Connection to {}:{} is not allowed.'.format(self.body['hostname'], self.sshserver_port) # noqa
self.assertEqual(message, data['status'])
class TestAppWithBadHostKey(OtherTestBase):
policy = random.choice(['warning', 'autoadd', 'reject'])
hostfile = make_tests_data_path('test_known_hosts')
def setUp(self):
self.sshserver_port = 2222
super(TestAppWithBadHostKey, self).setUp()
@tornado.testing.gen_test
def test_app_with_bad_host_key(self):
response = yield self.async_post('/', self.body)
data = json.loads(to_str(response.body))
self.assertEqual('Bad host key.', data['status'])
class TestAppWithTrustedStream(OtherTestBase):
tdstream = '127.0.0.2'
def test_with_forbidden_get_request(self):
response = self.fetch('/', method='GET')
self.assertEqual(response.code, 403)
self.assertIn('Forbidden', response.error.message)
def test_with_forbidden_post_request(self):
response = self.sync_post('/', self.body)
self.assertEqual(response.code, 403)
self.assertIn('Forbidden', response.error.message)
def test_with_forbidden_put_request(self):
response = self.fetch_request('/', method='PUT', body=self.body)
self.assertEqual(response.code, 403)
self.assertIn('Forbidden', response.error.message)
class TestAppNotFoundHandler(OtherTestBase):
custom_headers = handler.MixinHandler.custom_headers
def test_with_not_found_get_request(self):
response = self.fetch('/pathnotfound', method='GET')
self.assertEqual(response.code, 404)
self.assertEqual(
response.headers['Server'], self.custom_headers['Server']
)
self.assertIn(b'404: Not Found', response.body)
def test_with_not_found_post_request(self):
response = self.sync_post('/pathnotfound', self.body)
self.assertEqual(response.code, 404)
self.assertEqual(
response.headers['Server'], self.custom_headers['Server']
)
self.assertIn(b'404: Not Found', response.body)
def test_with_not_found_put_request(self):
response = self.fetch_request('/pathnotfound', method='PUT',
body=self.body)
self.assertEqual(response.code, 404)
self.assertEqual(
response.headers['Server'], self.custom_headers['Server']
)
self.assertIn(b'404: Not Found', response.body)
class TestAppWithHeadRequest(OtherTestBase):
    # HEAD requests: the index page supports HEAD (200), the websocket
    # endpoint does not (405), and unknown paths return 404.
    def test_with_index_path(self):
        response = self.fetch('/', method='HEAD')
        self.assertEqual(response.code, 200)
    def test_with_ws_path(self):
        response = self.fetch('/ws', method='HEAD')
        self.assertEqual(response.code, 405)
    def test_with_not_found_path(self):
        response = self.fetch('/notfound', method='HEAD')
        self.assertEqual(response.code, 404)
class TestAppWithPutRequest(OtherTestBase):
    # xsrf checking is disabled so the PUT reaches method dispatch and fails
    # with 405 Method Not Allowed instead of a 403 xsrf error.
    xsrf = False
    @tornado.testing.gen_test
    def test_app_with_method_not_supported(self):
        with self.assertRaises(HTTPError) as ctx:
            yield self.fetch_request('/', 'PUT', self.body, sync=False)
        self.assertIn('Method Not Allowed', ctx.exception.message)
class TestAppWithTooManyConnections(OtherTestBase):
maxconn = 1
def setUp(self):
clients.clear()
super(TestAppWithTooManyConnections, self).setUp()
@tornado.testing.gen_test
def test_app_with_too_many_connections(self):
clients['127.0.0.1'] = {'fake_worker_id': None}
url = self.get_url('/')
response = yield self.async_post(url, self.body)
data = json.loads(to_str(response.body))
self.assertEqual('Too many live connections.', data['status'])
clients['127.0.0.1'].clear()
response = yield self.async_post(url, self.body)
self.assert_status_none(json.loads(to_str(response.body)))
class TestAppWithCrossOriginOperation(OtherTestBase):
origin = 'http://www.example.com'
@tornado.testing.gen_test
def test_app_with_wrong_event_origin(self):
body = dict(self.body, _origin='localhost')
response = yield self.async_post('/', body)
self.assert_status_equal('Cross origin operation is not allowed.', json.loads(to_str(response.body))) # noqa
@tornado.testing.gen_test
def test_app_with_wrong_header_origin(self):
headers = dict(Origin='localhost')
response = yield self.async_post('/', self.body, headers=headers)
self.assert_status_equal('Cross origin operation is not allowed.', json.loads(to_str(response.body)), ) # noqa
@tornado.testing.gen_test
def test_app_with_correct_event_origin(self):
body = dict(self.body, _origin=self.origin)
response = yield self.async_post('/', body)
self.assert_status_none(json.loads(to_str(response.body)))
self.assertIsNone(response.headers.get('Access-Control-Allow-Origin'))
@tornado.testing.gen_test
def test_app_with_correct_header_origin(self):
headers = dict(Origin=self.origin)
response = yield self.async_post('/', self.body, headers=headers)
self.assert_status_none(json.loads(to_str(response.body)))
self.assertEqual(
response.headers.get('Access-Control-Allow-Origin'), self.origin
)
class TestAppWithBadEncoding(OtherTestBase):
encodings = [u'\u7f16\u7801']
@tornado.testing.gen_test
def test_app_with_a_bad_encoding(self):
response = yield self.async_post('/', self.body)
dic = json.loads(to_str(response.body))
self.assert_status_none(dic)
self.assertIn(dic['encoding'], server_encodings)
class TestAppWithUnknownEncoding(OtherTestBase):
    """Server advertises an unknown encoding; the app must fall back to utf-8."""
    encodings = [u'\u7f16\u7801', u'UnknownEncoding']

    @tornado.testing.gen_test
    def test_app_with_a_unknown_encoding(self):
        response = yield self.async_post('/', self.body)
        # Parse the body once (the original decoded and asserted it twice).
        dic = json.loads(to_str(response.body))
        self.assert_status_none(dic)
        self.assertEqual(dic['encoding'], 'utf-8')
|
main.py | import socket
import json
import threading
from gui import *
gui = GUI()
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def main():
    """Connect to the local chat server and run the Tk GUI main loop."""
    client.connect(('localhost', 8888))
    # Reader runs in its own thread so the GUI stays responsive while blocked
    # on the socket.  Fix: mark it daemon, otherwise the process would hang
    # after the GUI window is closed because the non-daemon reader thread is
    # still blocked in recv.
    readIncomingThread = threading.Thread(target=read_incoming, daemon=True)
    readIncomingThread.start()
    gui.pack()
    gui.inputField.bind('<Return>', send_message)
    gui.root.mainloop()
def send_message(e):
    """Send the input field's text to the server as a JSON 'test'-route message.

    Bound to <Return> on the input field; *e* is the Tk event (unused).
    """
    try:
        payload = json.dumps({"route": "test", "body": {"text": gui.inputField.get()}})
        client.send(payload.encode())
        gui.inputField.delete(0, END)
    except OSError as exc:
        # Fix: only catch socket-level failures; the original bare `except:`
        # also hid programming errors (e.g. a renamed widget attribute).
        print("Failed to send...", exc)
def read_incoming():
    """Receive JSON chat messages from the server and append them to the chat window.

    Runs in a background thread; expects each recv to carry one JSON object
    with 'sender.name' and 'content' fields.
    """
    while True:
        # recv (not recvfrom): this is a connected TCP stream socket.
        data = client.recv(1024)
        if not data:
            # Fix: an empty read means the server closed the connection; the
            # original looped forever feeding b'' to json.loads.
            print("Connection closed by server")
            break
        text = data.decode()
        print(text)
        j = json.loads(text)
        gui.chatWindow.insert(END, j["sender"]["name"] + "> " + j["content"] + "\n")
if __name__ == "__main__":
main() |
patl.py | import pyaudio
import numpy
import time
import logging
from threading import Thread
from patutils import LENGTH, BOUND, TONES, ReturnCode
STOP = False
CHUNK = 2048
RATE = 44100
DEBUG = True
threads = []
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paInt16, channels=1, rate=RATE, input=True, frames_per_buffer=CHUNK)
def read_data():
    """Read one CHUNK of microphone samples as a numpy int16 array."""
    # Fix: numpy.fromstring is deprecated (and removed for binary input in
    # recent numpy); frombuffer is the supported zero-copy equivalent.
    return numpy.frombuffer(stream.read(CHUNK), dtype=numpy.int16)
def sound_listener(callback=None, args=(), report_empty=False):
    """Poll the microphone and map the dominant frequency to a tone index.

    Loops until the global STOP flag is set or *callback* returns
    ReturnCode.EXIT.  When the FFT peak frequency lies within BOUND of one
    of TONES, the tone's index is passed to ``callback(*args, value)``; if
    *report_empty* is true the callback is also invoked with None when no
    tone matched.
    """
    while True:
        if STOP:
            return
        data = read_data()
        # https://stackoverflow.com/questions/6908540/pyaudio-how-to-tell-frequency-and-amplitude-while-recording
        # Amplitude spectrum in dB of one chunk of samples.
        amplitudes = 20 * numpy.log10(numpy.abs(numpy.fft.rfft(data[:CHUNK])))
        # Frequency (Hz) corresponding to each FFT bin, up to Nyquist (RATE/2).
        frequencies = numpy.linspace(0, RATE / 2, len(amplitudes))
        peak = frequencies[amplitudes.argmax()]  # frequency of the loudest bin
        # All reference tones within BOUND Hz of the detected peak.
        matches = list(filter(lambda x: abs(x - peak) <= BOUND, TONES))
        if matches:
            if len(matches) > 1:
                logging.warning("** MULTIPLE MATCHES FOUND**")
            # Ambiguity is resolved by taking the first (lowest-index) match.
            corrected = matches[0]
            value = TONES.index(corrected)
            # print(f"freq: {peak}, corrected: {corrected}, value: {value}") # DEBUG
            if callback:
                if callback(*args, value) == ReturnCode.EXIT:
                    return
        else:
            # print("no value") # DEBUG
            if callback and report_empty:
                if callback(*args, None) == ReturnCode.EXIT:
                    return
        # time.sleep(LENGTH / 2) # TODO(Check)
def start_listener(callback=None, args=(), report_empty=False, join=False):
    """Run sound_listener in a new thread; optionally block until it exits.

    The thread is recorded in the module-level ``threads`` list so the main
    program can join it on shutdown.
    """
    worker = Thread(target=sound_listener, args=[callback, args, report_empty])
    threads.append(worker)
    worker.start()
    if join:
        worker.join()
if __name__ == "__main__":
    # Demo mode: start one listener with no callback and wait for Ctrl+C.
    try:
        start_listener()
        for thread in threads:
            thread.join()
    except KeyboardInterrupt:
        print("Goodbye!")
    finally:
        # Ask every listener loop to exit, wait for them, then release the
        # PyAudio stream and terminate the PyAudio session.
        STOP = True
        for thread in threads:
            thread.join()
        stream.stop_stream()
        stream.close()
        p.terminate()
|
websocket_server.py | #coding=utf-8
import sys,pathlib
gsqp=pathlib.Path(__file__).absolute(
).parent.parent.parent.__str__()
#*.py /N /qgb /[gsqp]
if gsqp not in sys.path:sys.path.append(gsqp)#py3 works
from qgb import py
U,T,N,F=py.importUTNF()
# py.pdb()()
#ไธ้ข่ฟๆ ทไธ่ก๏ผไธ็ดๅกๅจ'C:\\QGB\\babun\\cygwin\\bin\\qgb'๏ผๅคๅ .parentไนๆ ๆ
# gsqp=Path(__file__).parent.parent.parent.parent.parent.absolute().__str__()
# print(gsqp,[],sys.path)
# import py
import asyncio,websockets
import signal
# signal.signal(signal.SIGINT,U.exit)
# signal.signal(signal.SIGTERM,U.exit)
async def hello(websocket, path):
    """Handle one message from a websocket client.

    Behavior as implemented:
    - first 'trigger' from a given socket: reply with the cached initial
      scene command list (canvas, lights, box) and record a timestamp for
      the socket;
    - subsequent 'trigger's: only append another timestamp;
    - any other message: executed as Python source via U.exec_return_str and
      the result string sent back.  WARNING(review): executing untrusted
      network input is remote code execution — only safe on a trusted
      localhost tool, which this server appears to be (bound to localhost).
    """
    name = await websocket.recv()
    # py.pdb()()
    r="{ qgb }"
    if not U.get(websocket.transport._sock) and name=='trigger':
        r={"cmds":[{"cmd":"canvas","idx":0},{"lights":"empty_list","idx":0},{"cmd":"distant_light","idx":2,"direction":[0.22,0.44,0.88],"color":[0.8,0.8,0.8],"size":[1.0,1.0,1.0],"canvas":0},{"cmd":"distant_light","idx":3,"direction":[-0.88,-0.22,-0.44],"color":[0.3,0.3,0.3],"size":[1.0,1.0,1.0],"canvas":0},{"cmd":"box","idx":5,"size":[1.0,1.0,1.0],"canvas":0}]}
        r=U.get_set(0,r)
        r=T.json_dumps(r)
        U.set(websocket.transport._sock,[U.stime()])
    elif name=='trigger':
        U.get(websocket.transport._sock).append(U.stime())
    else:
        r=U.exec_return_str(name,globals=globals(),locals=locals() )
    await websocket.send(r)
    return
    # NOTE(review): everything below this `return` is unreachable leftover
    # from the websockets tutorial example.
    print(f"< {name}")
    greeting = f"{path} {name} !"
    await websocket.send(greeting)
    print(f"> {greeting}")
start_server = websockets.serve(hello, 'localhost', 8765)
loop=asyncio.get_event_loop()
# loop.add_signal_handler(signal.SIGINT,U.exit)
# loop.add_signal_handler(signal.SIGTERM, U.exit)
loop.run_until_complete(start_server)
print(U.stime(),'run_until_complete done!')
def wakeup():
	"""Self-rescheduling no-op: wakes the event loop every 0.1 s so that
	signals (e.g. Ctrl-C) get a chance to be processed promptly."""
	interval_seconds = 0.1
	loop.call_later(interval_seconds, wakeup)
if '__main__' in __name__:
	try:
		# Arm the periodic wake-up, then run the loop in a background thread
		# and drop into an interactive IPython shell in the foreground.
		loop.call_later(0.1, wakeup)
		print(U.stime(),'call_later')
		# loop.run_forever()
		t=U.Thread(target=loop.run_forever)
		t.start()
		print(U.stime(),'run_forever')
		U.ipy_embed()()
	except BaseException:
		# U.msgbox(U.stime())
		U.exit('KeyboardInterrupt')
|
__init__.py | import curses, logging.config, threading, click
from time import sleep
from .interfaces import TextAccumulator, OptionWindow
from .textgenerator import TextGenerator
from random import randint
# Module-level default for the selected option slot.
# NOTE(review): input_thread() assigns its own local of the same name, so
# this global is never actually updated by it — confirm whether intended.
highlighted_slot_index = 0
def draw_menu(stdscrr, option_window, global_text_accumulator):
    """Set up the curses UI, spawn the keyboard thread, and keep refreshing
    the option windows until the user quits.

    NOTE(review): the `stdscrr` argument supplied by curses.wrapper() is
    unused — the function re-initialises its own screen via initscr().
    """
    stdscr = curses.initscr()
    height, width = stdscr.getmaxyx()
    curses.resizeterm(50, 200)
    stdscr.refresh()
    # Start colors in curses.
    # Bug fix: colour pairs must be initialised before curses.color_pair()
    # is used; previously init_pair() ran only after the help bar was drawn,
    # so color_pair(1) referenced an uninitialised pair.
    curses.start_color()
    curses.init_pair(1, curses.COLOR_CYAN, curses.COLOR_BLACK)
    curses.init_pair(2, curses.COLOR_RED, curses.COLOR_BLACK)
    curses.init_pair(3, curses.COLOR_BLACK, curses.COLOR_WHITE)
    # Bottom help bar.
    new_box = curses.newwin(1, width, height-2, 0)
    new_box.attron(curses.color_pair(1))
    new_box.attron(curses.A_BOLD)
    new_box.addnstr(0, 0, "UP/DOWN Selects text. RIGHT Adds selected text block to game. LEFT Removes rightmost character from the game. q exits.", width-1)
    new_box.attroff(curses.color_pair(1))
    new_box.attroff(curses.A_BOLD)
    new_box.refresh()
    option_window.set_screen(stdscr)
    global_text_accumulator.set_screen(stdscr)
    t = threading.Thread(name ='daemon', target=input_thread, args=(stdscr, option_window, global_text_accumulator))
    t.daemon = True  # setDaemon() is deprecated since Python 3.10
    t.start()
    # Generate new text options once per second while the input thread runs.
    while t.is_alive():
        option_window.attempt_to_generate_new_option_at_text_ticker_location(global_text_accumulator.generate_add_text_candidate())
        sleep(1)
        option_window.focus_option_at_key(option_window.highlight_ticker)
        for box in option_window.options.values():
            box.refresh()
        stdscr.refresh()
def input_thread(stdscr, option_window, global_text_accumulator):
    """Keyboard loop: arrow keys move the selection / add / remove text;
    'q' exits the loop (and thereby ends draw_menu's refresh loop)."""
    curses.curs_set(0)
    k = 0
    while (k != ord('q')):
        # Re-read the shared ticker each pass so external resets are honoured.
        highlighted_slot_index = option_window.highlight_ticker
        if k == curses.KEY_DOWN:
            option_window.remove_focus()
            # Slots are laid out two per row, 16 total, hence the +/-2 mod 16.
            highlighted_slot_index = (highlighted_slot_index + 2) % 16
            option_window.focus_option_at_key(highlighted_slot_index)
        if k == curses.KEY_UP:
            option_window.remove_focus()
            highlighted_slot_index = (highlighted_slot_index - 2) % 16
            option_window.focus_option_at_key(highlighted_slot_index)
        if k == curses.KEY_RIGHT:
            # Only commit options that have finished generating.
            if option_window.options.get(highlighted_slot_index).text != "LOADING...":
                current_text = option_window.options.get(highlighted_slot_index).text
                global_text_accumulator.add_text(current_text)
                option_window.set_screen(stdscr)
                # NOTE(review): current_slot_index is assigned but never read.
                current_slot_index = 0
                option_window.highlight_ticker = 0
        if k == curses.KEY_LEFT:
            global_text_accumulator.backspace()
            option_window.set_screen(stdscr)
            current_slot_index = 0
            option_window.highlight_ticker = 0
        for box in option_window.options.values():
            box.refresh()
        stdscr.refresh()
        # Wait for next input
        k = stdscr.getch()
@click.command()
@click.option('--path', default='model/', help='Path to a local pytorch model.')
@click.option('--name', default='gyre/200wordrpgmodel', prompt='Enter The Model Name', help='Name of the huggingface model.')
def play(path: str, name: str):
    """CLI entry point: wire the text generator to the curses UI and run it."""
    # Silence pre-existing loggers so log lines don't corrupt the curses screen.
    logging.config.dictConfig({
        'version': 1,
        'disable_existing_loggers': True,
    })
    option_window = OptionWindow()
    text_generator = TextGenerator(model_path = path, model_name = name)
    text_accumulator = TextAccumulator(text_generator)
    curses.wrapper(draw_menu, option_window = option_window, global_text_accumulator = text_accumulator)
def main():
    """Non-click entry point mirroring play() with its default model settings.

    Bug fix: the original referenced `path` and `name`, which were undefined
    in this scope (NameError at runtime); they now default to the same values
    as the `play` CLI options.
    """
    # Silence pre-existing loggers so log lines don't corrupt the curses screen.
    logging.config.dictConfig({
        'version': 1,
        'disable_existing_loggers': True,
    })
    path = 'model/'
    name = 'gyre/200wordrpgmodel'
    option_window = OptionWindow()
    text_generator = TextGenerator(model_path = path, model_name = name)
    global_text_accumulator = TextAccumulator(text_generator)
    curses.wrapper(draw_menu, option_window = option_window, global_text_accumulator = global_text_accumulator)
# Allow running the package module directly (outside the click CLI).
if __name__ == "__main__":
    main()
|
jrpc_py.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import random
import time
from builtins import *
import zmq
try:
import queue
except ImportError:
import queue as queue
import threading
import msgpack
import snappy
import copy
# Alias for queue.Empty (copy.copy of a class returns the class itself).
qEmpty = copy.copy(queue.Empty)
def _unpack_msgpack_snappy(str):
    """Decode a wire payload: b'S' prefix = snappy-compressed msgpack,
    b'\\0' prefix = plain msgpack; anything else is rejected (None)."""
    # NOTE(review): msgpack.loads(encoding=...) was removed in msgpack>=1.0
    # (use raw=False there) — confirm the pinned msgpack version is <1.0.
    if str.startswith(b'S'):
        tmp = snappy.uncompress(str[1:])
        # print "SNAPPY: ", len(str), len(tmp)
        obj = msgpack.loads(tmp, encoding='utf-8')
    elif str.startswith(b'\0'):
        obj = msgpack.loads(str[1:], encoding='utf-8')
    else:
        return None
    return obj
def _pack_msgpack_snappy(obj):
    """Encode *obj* as msgpack, snappy-compressing payloads over 1000 bytes
    (b'S' prefix); small payloads get a b'\\0' prefix uncompressed."""
    # print "pack", obj
    # NOTE(review): encoding= requires msgpack<1.0; see _unpack_msgpack_snappy.
    tmp = msgpack.dumps(obj, encoding='utf-8')
    if len(tmp) > 1000:
        return b'S' + snappy.compress(tmp)
    else:
        return b'\0' + tmp
def _unpack_msgpack(str):
    # Decode a raw (unprefixed) msgpack payload.
    # NOTE(review): encoding= requires msgpack<1.0; see _unpack_msgpack_snappy.
    return msgpack.loads(str, encoding='utf-8')
def _pack_msgpack(obj):
    # Encode *obj* as a raw (unprefixed) msgpack payload.
    # NOTE(review): encoding= requires msgpack<1.0; see _unpack_msgpack_snappy.
    return msgpack.dumps(obj, encoding='utf-8')
def _unpack_json(str):
return json.loads(str, encoding='utf-8')
def _pack_json(obj):
return json.dumps(obj, encoding='utf-8')
class JRpcClient(object):
    """JSON-RPC-style client over ZeroMQ.

    A background receive thread owns the remote DEALER socket; the public
    API talks to it through an inproc PUSH/PULL pair, so all remote socket
    use stays on one thread. Callbacks (`on_connected`, `on_disconnected`,
    `on_rpc_callback`) run on a dedicated callback thread.

    Fixes vs. the original:
    - `on_connected` is now initialised in __init__ (it was referenced in
      _on_data_arrived but never set, so the first heartbeat response raised
      AttributeError unless the user happened to assign it).
    - `Thread.setDaemon()` (deprecated since Python 3.10) replaced by the
      `daemon` attribute.
    """

    def __init__(self, data_format="msgpack_snappy"):
        self._waiter_lock = threading.Lock()
        self._waiter_map = {}
        self._should_close = False
        self._next_callid = 0
        self._send_lock = threading.Lock()
        self._callid_lock = threading.Lock()
        self._last_heartbeat_rsp_time = 0
        self._connected = False
        # User-assignable callbacks; on_connected was missing originally.
        self.on_disconnected = None
        self.on_connected = None
        self.on_rpc_callback = None
        self._callback_queue = queue.Queue()
        self._call_wait_queue = queue.Queue()
        self._ctx = zmq.Context()
        # inproc pair: public methods PUSH commands, the recv thread PULLs.
        self._pull_sock = self._ctx.socket(zmq.PULL)
        self._pull_sock.bind("inproc://pull_sock")
        self._push_sock = self._ctx.socket(zmq.PUSH)
        self._push_sock.connect("inproc://pull_sock")
        self._heartbeat_interval = 1
        self._heartbeat_timeout = 3
        self._addr = None
        if data_format == "msgpack_snappy":
            self._pack = _pack_msgpack_snappy
            self._unpack = _unpack_msgpack_snappy
        elif data_format == "msgpack":
            self._pack = _pack_msgpack
            self._unpack = _unpack_msgpack
        elif data_format == "json":
            self._pack = _pack_json
            self._unpack = _unpack_json
        else:
            assert False, "unknown data_format " + data_format
        t = threading.Thread(target=self._recv_run)
        t.daemon = True
        t.start()
        self._recv_thread = t
        t = threading.Thread(target=self._callback_run)
        t.daemon = True
        t.start()
        self._callback_thread = t

    def __del__(self):
        self.close()

    def next_callid(self):
        """Return a process-wide unique, monotonically increasing call id."""
        self._callid_lock.acquire()
        self._next_callid += 1
        callid = self._next_callid
        self._callid_lock.release()
        return callid

    def set_heartbeat_options(self, interval, timeout):
        self._heartbeat_interval = interval
        self._heartbeat_timeout = timeout

    def _recv_run(self):
        """Receive-thread main loop: owns the remote socket, sends
        heartbeats, executes CONNECT/SEND commands from the inproc pipe,
        and dispatches inbound frames to _on_data_arrived."""
        heartbeat_time = 0
        poller = zmq.Poller()
        poller.register(self._pull_sock, zmq.POLLIN)
        remote_sock = None
        while not self._should_close:
            try:
                # Declare the connection dead if heartbeats stop answering.
                if self._connected and time.time() - self._last_heartbeat_rsp_time > self._heartbeat_timeout:
                    self._connected = False
                    if self.on_disconnected: self._async_call(self.on_disconnected)
                if remote_sock and time.time() - heartbeat_time > self._heartbeat_interval:
                    self._send_hearbeat()
                    heartbeat_time = time.time()
                socks = dict(poller.poll(500))
                if self._pull_sock in socks and socks[self._pull_sock] == zmq.POLLIN:
                    cmd = self._pull_sock.recv()
                    if cmd == b"CONNECT":
                        # Reconnect requests drop any existing socket first.
                        if remote_sock:
                            poller.unregister(remote_sock)
                            remote_sock.close()
                            remote_sock = None
                        remote_sock = self._do_connect()
                        if remote_sock:
                            poller.register(remote_sock, zmq.POLLIN)
                    elif cmd.startswith(b"SEND:") and remote_sock:
                        remote_sock.send(cmd[5:])
                if remote_sock and remote_sock in socks and socks[remote_sock] == zmq.POLLIN:
                    data = remote_sock.recv()
                    if data:
                        self._on_data_arrived(data)
            except zmq.error.Again as e:
                # Non-fatal socket timeout; keep polling.
                pass
            except Exception as e:
                print("_recv_run:", e)

    def _callback_run(self):
        """Callback-thread main loop: drains queued user callbacks so they
        never run on (and block) the receive thread."""
        while not self._should_close:
            try:
                r = self._callback_queue.get(timeout=1)
                if r:
                    r()
            except qEmpty as e:
                pass
            except TypeError as e:
                # A queued entry that isn't callable is ignored silently.
                if str(e) == "'NoneType' object is not callable":
                    pass
                else:
                    print("_callback_run {}".format(r), type(e), e)
            except Exception as e:
                print("_callback_run {}".format(r), type(e), e)

    def _async_call(self, func):
        """Queue *func* for execution on the callback thread."""
        self._callback_queue.put(func)

    def _send_request(self, json):
        """Forward an already-packed request to the receive thread."""
        try:
            self._send_lock.acquire()
            self._push_sock.send(b"SEND:" + json)
        finally:
            self._send_lock.release()

    def connect(self, addr):
        """Ask the receive thread to (re)connect to *addr*."""
        self._addr = addr
        self._push_sock.send_string('CONNECT', encoding='utf-8')

    def _do_connect(self):
        """Create the DEALER socket with a fresh random identity.
        Runs on the receive thread only."""
        client_id = str(random.randint(1000000, 100000000))
        socket = self._ctx.socket(zmq.DEALER)
        identity = (client_id) + '$' + str(random.randint(1000000, 1000000000))
        identity = identity.encode('utf-8')
        socket.setsockopt(zmq.IDENTITY, identity)
        socket.setsockopt(zmq.RCVTIMEO, 500)
        socket.setsockopt(zmq.SNDTIMEO, 500)
        socket.setsockopt(zmq.LINGER, 0)
        socket.connect(self._addr)
        return socket

    def close(self):
        """Stop both worker threads (blocks until they exit)."""
        self._should_close = True
        self._callback_thread.join()
        self._recv_thread.join()

    def _on_data_arrived(self, str):
        """Decode an inbound frame and route it: heartbeat reply, call
        result (wakes the waiter), or server-push notification."""
        try:
            msg = self._unpack(str)
            if not msg:
                print("wrong message format")
                return
            if 'method' in msg and msg['method'] == '.sys.heartbeat':
                self._last_heartbeat_rsp_time = time.time()
                if not self._connected:
                    self._connected = True
                    if self.on_connected:
                        self._async_call(self.on_connected)
                # Let user has a chance to check message in .sys.heartbeat
                if 'result' in msg and self.on_rpc_callback:
                    self._async_call(lambda: self.on_rpc_callback(msg['method'], msg['result']))
            elif 'id' in msg and msg['id']:
                # Call result
                id = int(msg['id'])
                if self._waiter_lock.acquire():
                    if id in self._waiter_map:
                        q = self._waiter_map[id]
                        if q: q.put(msg)
                    self._waiter_lock.release()
            else:
                # Notification message
                if 'method' in msg and 'result' in msg and self.on_rpc_callback:
                    self._async_call(lambda: self.on_rpc_callback(msg['method'], msg['result']))
        except Exception as e:
            print("_on_data_arrived:", e)
            pass

    def _send_hearbeat(self):
        # NOTE(review): method name keeps the historical 'hearbeat' spelling
        # because _recv_run calls it by this name.
        msg = {'jsonrpc' : '2.0',
               'method'  : '.sys.heartbeat',
               'params'  : { 'time': time.time()},
               'id'      : str(self.next_callid())}
        json_str = self._pack(msg)
        self._send_request(json_str)

    def _alloc_wait_queue(self):
        """Hand out the cached single-slot wait queue, or a fresh one if it
        is already in use by a concurrent call()."""
        self._waiter_lock.acquire()
        if self._call_wait_queue:
            q = self._call_wait_queue
            self._call_wait_queue = None
        else:
            q = queue.Queue()
        self._waiter_lock.release()
        return q

    def _free_wait_queue(self, q):
        """Return a wait queue to the one-element cache."""
        self._waiter_lock.acquire()
        if not self._call_wait_queue:
            self._call_wait_queue = q
        else:
            del q
        self._waiter_lock.release()

    def call(self, method, params, timeout=6):
        """Invoke *method* with *params* on the server.

        With a truthy *timeout*, blocks up to that many seconds and returns
        {'result': ...} / {'error': ...} (a timeout yields an error dict).
        With timeout falsy, fire-and-forget: returns {'result': True}.
        """
        callid = self.next_callid()
        if timeout:
            q = self._alloc_wait_queue()
            self._waiter_lock.acquire()
            self._waiter_map[callid] = q
            self._waiter_lock.release()
        msg = {'jsonrpc' : '2.0',
               'method'  : method,
               'params'  : params,
               'id'      : str(callid) }
        json_str = self._pack(msg)
        self._send_request(json_str)
        if timeout:
            ret = {}
            try:
                r = q.get(timeout=timeout)
                q.task_done()
            except qEmpty:
                r = None
            self._waiter_lock.acquire()
            self._waiter_map[callid] = None
            self._waiter_lock.release()
            self._free_wait_queue(q)
            if r:
                if 'result' in r:
                    ret['result'] = r['result']
                if 'error' in r:
                    ret['error'] = r['error']
            return ret if ret else {'error': {'error': -1, 'message': "timeout"}}
        else:
            return {'result': True}
|
automating_all_users_csv.py | """
'Automated ALL' CSV Test Program v2.0, Copyright 2017 Sam Suri
CSV Test program retrieves live data but does not update ANYTHING in VAN. It shows the user what would happen via
use of a CSV file. Program should be run prior to running full program.
"""
import hmac, hashlib, time, json, requests
import threading
import urllib.request
from queue import Queue
from pandas import DataFrame
from json import dumps, loads
from xmljson import yahoo as yh
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import fromstring
#BSD Call and Declarations:
#BSD Call and Declarations:
# NOTE(review): api_secret/api_id are blank here and the call goes over plain
# http — credentials must not be committed and https should be confirmed.
api_secret = '' # API secret provided by BSD for user Sam Suri
api_ts = int(time.time()) # API call uses HMAC authentication that incorporates time
api_id = '' # API ID provided by BSD for user Sam Suri
api_baseCall = '/page/api/signup/get_signups_by_form_id' # API Call to get list of sign ups based on form ID
signup_form_id = str(input('Please enter the signup form ID: ')) # prompts the user for input of form ID
# Creates parameters for API call, incorporates user ID, time created, and form ID
api_param = 'api_ver=2&api_id=' + api_id + '&api_ts=' + (str(api_ts)) + '&signup_form_id=' + str(signup_form_id)
# Creates string to pass into with HMAC authentication
signing_string = api_id + '\n' + str(api_ts) + '\n' + api_baseCall + '\n' + api_param
# Creates HMAC authentication, uses API secret, 'signing_string'
api_mac = hmac.new(api_secret.encode(), signing_string.encode(), hashlib.sha1).hexdigest()
# Creates full address of API call, inserts API Id, time created, HMAC authentication code, and form ID
api_url = 'http://battletx.bsd.net/page/api/signup/get_signups_by_form_id?api_ver=2&api_id=' + api_id + '&api_ts=' + \
          str(api_ts) + '&api_mac=' + api_mac + '&signup_form_id=' + str(signup_form_id)
#Reformating BSD XML:
api_xml_data = urllib.request.urlopen(api_url).read() # Uses urllib library to read XML data from BSD API URL
doc = dumps(yh.data(fromstring(api_xml_data))) # Parses XML data using xmljson library, parses using yahoo standard
loaded_doc = json.loads(doc) # Deserializes data
name_of_list_in_use = 'cmi_list' # will be used in title of CSV file
# Function iterates over dictionary and checks keys, if keys match strings, count is altered
# The five fields VAN can use to match an individual.
_MATCHABLE_FIELDS = frozenset({'firstname', 'lastname', 'email', 'zip', 'phone'})


def indiv_dict_length(items):
    """Count how many of the five matchable signup fields carry a value.

    *items* is an iterable of (key, value) pairs (e.g. dict.items()); a value
    equal to the empty dict {} means "unanswered" and is not counted.

    Improvements: the parameter no longer shadows the builtin `tuple`, and
    the five copy-pasted `if` blocks collapse into one membership test.
    """
    return sum(1 for k, v in items if v != {} and k in _MATCHABLE_FIELDS)
# Function creates initial data frame using PANDAS library and creates columns
def create_data_frame():
    """Return an empty pandas DataFrame with the contact-report columns."""
    report_columns = ['First Name', 'Last Name', 'Phone Number', 'Zip Code', 'Email']
    return DataFrame(columns=report_columns)
# Function appends to existing dataframe
def append_csv_row(dictionary):
    """Append one contact's matchable fields as a new row of the shared
    module-level DataFrame. Guarded by df_append_lock so concurrent worker
    threads cannot interleave row writes."""
    row = [
        dictionary['firstName'],
        dictionary['lastName'],
        dictionary['phones'][0]['phoneNumber'],
        dictionary['addresses'][0]['zipOrPostalCode'],
        dictionary['emails'][0]['email'],
    ]
    with df_append_lock:
        df.loc[len(df)] = row
# Function prints data frame to csv file whose title dynamically includes current date
def print_data_frame_to_csv():
    """Write the accumulated DataFrame to a dated CSV in the working directory."""
    csv_name = 'All Contacts Affected on ' + str(time.strftime('%d-%m-%Y') + ' for ' + name_of_list_in_use) + '.csv'
    df.to_csv(csv_name, index = False) # index is set to false as programs like Excel make this redundant
# Function finds or creates individual in VAN - My Campaign
def update_indiv(temp_dict, signup_date):
    """Record the individual in the CSV preview if their signup date falls in
    the user-supplied [start_date, end_date] window (inclusive).

    Dates are ISO 'YYYY-MM-DD' strings, so lexicographic comparison matches
    chronological order. Idiom fix: chained comparison instead of bitwise
    `&` between boolean expressions (same result, clearer and short-circuits).
    """
    if start_date <= signup_date <= end_date:
        append_csv_row(temp_dict)  # appends information on user to CSV file
#Queue and threading variables and declarations
#Queue and threading variables and declarations
q = Queue(maxsize = 2000) # declares Queue of maxsize 2000, max in Queue is realistically around 1000
number_of_threads = 4 # threads are limited to 4 due to processing constraints of current computer, could go up to 10
df_append_lock = threading.Lock() # lock ensures that all records are appended to DataFrame
# Function passes in each object in queue, each object is a dictionary
def execute_queue(q):
    """Worker loop: pull raw BSD signup dicts off the queue, normalise them
    to VAN-style field names, and hand complete records to update_indiv().

    Bug fixes vs. the original:
    - a stray `break` made each worker thread exit after its first valid
      queue item, so at most a handful of records were ever processed;
    - `q.task_done()` was skipped for those items, so the main script's
      q.join() would block forever. task_done() now always runs (finally).
    """
    while True:
        i = q.get()
        try:
            temp_dict = {}
            signup_date_check = 0
            signup_date = ''
            # Number of matchable fields this person filled in; used below to
            # verify every filled field made it into temp_dict.
            temp_dict_length = indiv_dict_length(i.items())
            if temp_dict_length >= 5:  # fewer fields and VAN can't match the user
                for k3, v3 in i.items():
                    if v3 != {}:  # only answered fields are included
                        if k3 == 'firstname':
                            temp_dict['firstName'] = v3
                        if k3 == 'create_dt':
                            signup_date = v3[0:10]  # strip out the time portion
                            signup_date_check = 1
                        if k3 == 'lastname':
                            temp_dict['lastName'] = v3
                        if k3 == 'email':
                            temp_dict['emails'] = [{'email': v3}]  # VAN JSON shape
                        if k3 == 'zip':
                            temp_dict['addresses'] = [{'zipOrPostalCode': v3}]
                        if k3 == 'phone':
                            # Format to VAN style, prepending country code 1 if absent.
                            if v3[0] == '1':
                                v3 = v3[0] + '-' + v3[1:4] + '-' + v3[4:7] + '-' + v3[7:]
                            else:
                                v3 = '1-' + v3[0:3] + '-' + v3[3:6] + '-' + v3[6:]
                            temp_dict['phones'] = [{'phoneNumber': v3}]
                # Only proceed when every filled field was captured and the
                # signup date is known.
                if len(temp_dict) == temp_dict_length and signup_date_check == 1:
                    update_indiv(temp_dict, signup_date)
        finally:
            q.task_done()
# iterates over number of threads declared
# Spawn the worker pool; daemon threads die with the main program.
for i in range(number_of_threads):
    worker = threading.Thread(target = execute_queue, args=(q,)) # executes function in Queue, passes in dictionary
    worker.daemon = True
    worker.start()
# Creates data frame
df = create_data_frame()
# start_date and end_date variables are created from user input.
# ISO YYYY-MM-DD strings compare lexicographically in chronological order.
print('\nSurvey responses will only be updated if signups fall within a specific time period')
start_date = input('Please enter the start date (YYYY-MM-DD): ')
end_date = input('Please enter the end date (YYYY-MM-DD): ')
# Waits for input so that the function does not run completely just by being clicked
input("The program will now run and match all individuals \nin My Campaign, press 'enter' to proceed: ")
# Walk the parsed BSD response down to the list of signup dicts and enqueue each.
for k, v in loaded_doc.items():
    for k1, v1 in v.items():
        for k2, v2 in v1.items():
            for i in v2: # When lists of dictionaries is reached, outputs each one to Queue
                q.put(i)
# Block until every queued record has been task_done()'d by a worker.
q.join()
# If Queue is empty, completed data frame is printed to CSV file
print_data_frame_to_csv()
input("\nThe program has completed, press 'enter' to quit: ")
application.py | import os
import time
import multiprocessing as mp
import logging
import configparser as configparser
import json
import coloredlogs
from beaker.cache import CacheManager
from beaker.util import parse_cache_config_options
from lockfile import LockFile
from augur import logger
import argparse
def updater_process(name, delay):
    """Child-process entry point: build the named data source on a fresh
    Application and call its update() every *delay* seconds forever.

    Cleanup: the original trailing `except: raise` (a bare except that only
    re-raised) was dead code and has been removed — unexpected exceptions
    still propagate unchanged.
    """
    logger.info('Spawned {} updater process with PID {}'.format(name, os.getpid()))
    app = Application()
    # `name` must match one of Application's factory methods (e.g. 'git').
    datasource = getattr(app, name)()
    try:
        while True:
            logger.info('Updating {}...'.format(name))
            datasource.update()
            time.sleep(delay)
    except KeyboardInterrupt:
        # Hard-exit so the child dies immediately on Ctrl-C without unwinding.
        os._exit(0)
class Application(object):
    """Initalizes all classes form Augur using a config file or environment variables.

    Fixes vs. the original:
    - read_config's "already exported" check used hasattr() on a dict, which
      is always False for ordinary keys, so every env var was re-written to
      the export file on each call; it now uses a membership test.
    - the bare `except:` around opening the config file is narrowed to
      OSError (what open() actually raises).
    - read_config's broad `except Exception` is narrowed to the lookup
      failures that can occur (KeyError/TypeError).
    """

    def __init__(self, config_file='augur.config.json', no_config_file=0, description='Augur application'):
        # Command line arguments
        # TODO: make this useful
        self.arg_parser = argparse.ArgumentParser(description=description)
        self.arg_parser.parse_known_args()
        # Open the config file
        self.__already_exported = {}  # env vars already written to the export file
        self.__default_config = { 'Plugins': [] }
        self.__using_config_file = True
        self.__config_bad = False  # set when the file must be (re)generated
        self.__config_file_path = os.path.abspath(os.getenv('AUGUR_CONFIG_FILE', config_file))
        self.__config_location = os.path.dirname(self.__config_file_path)
        self.__export_env = os.getenv('AUGUR_ENV_EXPORT', '0') == '1'
        if os.getenv('AUGUR_ENV_ONLY', '0') != '1' and no_config_file == 0:
            try:
                self.__config_file = open(self.__config_file_path, 'r+')
            except OSError:
                logger.info('Couldn\'t open {}, attempting to create. If you have a augur.cfg, you can convert it to a json file using "make to-json"'.format(config_file))
                if not os.path.exists(self.__config_location):
                    os.makedirs(self.__config_location)
                self.__config_file = open(self.__config_file_path, 'w+')
                self.__config_bad = True
            # Options to export the loaded configuration as environment variables for Docker
            if self.__export_env:
                export_filename = os.getenv('AUGUR_ENV_EXPORT_FILE', 'augur.cfg.sh')
                self.__export_file = open(export_filename, 'w+')
                logger.info('Exporting {} to environment variable export statements in {}'.format(config_file, export_filename))
                self.__export_file.write('#!/bin/bash\n')
            # Load the config file
            try:
                config_text = self.__config_file.read()
                config_text = config_text.replace('$(AUGUR)', self.__config_location)
                self.__config = json.loads(config_text)
            except json.decoder.JSONDecodeError as e:
                if not self.__config_bad:
                    self.__using_config_file = False
                    logger.error('%s could not be parsed, using defaults. Fix that file, or delete it and run this again to regenerate it. Error: %s', self.__config_file_path, str(e))
                self.__config = self.__default_config
        else:
            self.__using_config_file = False
            self.__config = self.__default_config
        # List of data sources that can do periodic updates
        self.__updatable = []
        self.__processes = []
        # Create cache
        cache_config = self.read_config('Cache', 'config', None, {
            'cache.type': 'file',
            'cache.data_dir': 'runtime/cache/',
            'cache.lock_dir': 'runtime/cache/'
        })
        if not os.path.exists(cache_config['cache.data_dir']):
            os.makedirs(cache_config['cache.data_dir'])
        if not os.path.exists(cache_config['cache.lock_dir']):
            os.makedirs(cache_config['cache.lock_dir'])
        cache_parsed = parse_cache_config_options(cache_config)
        self.cache = CacheManager(**cache_parsed)
        # Initalize all objects to None (lazily created by the factory methods)
        self.__ghtorrent = None
        self.__ghtorrentplus = None
        self.__githubapi = None
        self.__git = None
        self.__librariesio = None
        self.__downloads = None
        self.__publicwww = None
        self.__localCSV = None

    def __updater(self, updates=None):
        """Spawn a daemon updater process for each not-yet-started entry."""
        if updates is None:
            updates = self.__updatable
        for update in updates:
            if 'started' not in update:
                up = mp.Process(target=updater_process, args=(update['name'], update['delay']), daemon=True)
                up.start()
                self.__processes.append(up)
                update['started'] = True

    def init_all(self):
        """Eagerly construct every data source (normally they are lazy)."""
        self.ghtorrent()
        self.ghtorrentplus()
        self.githubapi()
        self.git()
        self.librariesio()
        self.downloads()
        self.publicwww()
        self.localcsv()

    def read_config(self, section, name, environment_variable=None, default=None):
        """Resolve a setting: environment variable first, then the config
        file, then *default* (which is also written back into the config so
        a regenerated file contains it). Optionally appends an `export`
        line to the env-export file, once per variable."""
        value = None
        if environment_variable is not None:
            value = os.getenv(environment_variable)
        if value is None:
            try:
                value = self.__config[section][name]
            except (KeyError, TypeError):
                value = default
                if section not in self.__config:
                    self.__config[section] = {}
                if self.__using_config_file:
                    self.__config_bad = True
                self.__config[section][name] = default
        if (environment_variable is not None
                and value is not None
                and self.__export_env
                and environment_variable not in self.__already_exported):
            # Bug fix: the original used hasattr() on the dict, which is
            # always False for keys, so variables were exported repeatedly.
            self.__export_file.write('export ' + environment_variable + '="' + value + '"\n')
            self.__already_exported[environment_variable] = True
        if os.getenv('AUGUR_DEBUG_LOG_ENV', '0') == '1':
            logger.debug('{}:{} = {}'.format(section, name, value))
        return value

    def set_config(self, section, name, value):
        """Set a config value in memory (not persisted until finalize_config)."""
        if section not in self.__config:
            self.__config[section] = {}
        self.__config[section][name] = value

    def finalize_config(self):
        """Re-parse CLI args and persist the config file if defaults were
        filled in; closes the env-export file when enabled."""
        # Parse args with help
        self.arg_parser.parse_known_args()
        # Close files and save config
        if self.__config_bad:
            logger.info('Regenerating config with missing values...')
            self.__config_file.close()
            self.__config_file = open(self.__config_file_path, 'w')
            config_text = json.dumps(self.__config, sort_keys=True, indent=4)
            config_text = config_text.replace(self.__config_location, '$(AUGUR)')
            self.__config_file.write(config_text)
        self.__config_file.close()
        if self.__export_env:
            self.__export_file.close()

    def path_relative_to_config(self, path):
        """Resolve *path* relative to the config file's directory unless it
        is already absolute."""
        if not os.path.isabs(path):
            return os.path.join(self.__config_location, path)
        else:
            return path

    def update_all(self):
        """Run every registered data source's update() synchronously."""
        print(self.__updatable)
        for updatable in self.__updatable:
            logger.info('Updating {}...'.format(updatable['name']))
            updatable['update']()

    def schedule_updates(self):
        # don't use this,
        logger.debug('Scheduling updates...')
        self.__updater()

    def join_updates(self):
        """
        Join to the update processes
        """
        for process in self.__processes:
            process.join()

    def shutdown_updates(self):
        """Terminate all spawned updater processes."""
        for process in self.__processes:
            process.terminate()

    def ghtorrent(self):
        """Lazily construct and return the GHTorrent data source."""
        from augur.ghtorrent import GHTorrent
        if self.__ghtorrent is None:
            logger.debug('Initializing GHTorrent')
            self.__ghtorrent = GHTorrent(
                user=self.read_config('Database', 'user', 'AUGUR_DB_USER', 'root'),
                password=self.read_config('Database', 'pass', 'AUGUR_DB_PASS', 'password'),
                host=self.read_config('Database', 'host', 'AUGUR_DB_HOST', '127.0.0.1'),
                port=self.read_config('Database', 'port', 'AUGUR_DB_PORT', '3306'),
                dbname=self.read_config('Database', 'name', 'AUGUR_DB_NAME', 'msr14')
            )
        return self.__ghtorrent

    def ghtorrentplus(self):
        """Lazily construct and return the GHTorrentPlus data source."""
        from augur.ghtorrentplus import GHTorrentPlus
        if self.__ghtorrentplus is None:
            logger.debug('Initializing GHTorrentPlus')
            self.__ghtorrentplus = GHTorrentPlus(
                user=self.read_config('GHTorrentPlus', 'user', 'AUGUR_GHTORRENT_PLUS_USER', 'root'),
                password=self.read_config('GHTorrentPlus', 'pass', 'AUGUR_GHTORRENT_PLUS_PASS', 'password'),
                host=self.read_config('GHTorrentPlus', 'host', 'AUGUR_GHTORRENT_PLUS_HOST', '127.0.0.1'),
                port=self.read_config('GHTorrentPlus', 'port', 'AUGUR_GHTORRENT_PLUS_PORT', '3306'),
                dbname=self.read_config('GHTorrentPlus', 'name', 'AUGUR_GHTORRENT_PLUS_NAME', 'ghtorrentplus')
            , ghtorrent=self.ghtorrent())
        return self.__ghtorrentplus

    def git(self, update=False):
        """Lazily construct (and optionally update) the Git data source;
        also registers it for periodic updates."""
        from augur.git import Git
        storage = self.path_relative_to_config(
            self.read_config('Git', 'storage', 'AUGUR_GIT_STORAGE', 'runtime/git_repos/')
        )
        repolist = self.read_config('Git', 'repositories', None, [])
        if self.__git is None:
            logger.debug('Initializing Git')
            self.__git = Git(
                list_of_repositories=repolist,
                storage_folder=storage,
                csv=self.localcsv(),
                cache=self.cache
            )
            self.__updatable.append({
                'name': 'git',
                'delay': int(self.read_config('Git', 'refresh', 'AUGUR_GIT_REFRESH', '3600')),
                'update': self.__git.update
            })
        if update:
            self.__git.update()
        return self.__git

    def githubapi(self):
        """Lazily construct and return the GitHub API data source."""
        from augur.githubapi import GitHubAPI
        if self.__githubapi is None:
            logger.debug('Initializing GitHub API')
            api_key=self.read_config('GitHub', 'apikey', 'AUGUR_GITHUB_API_KEY', 'None')
            self.__githubapi = GitHubAPI(api_key=api_key)
        return self.__githubapi

    def librariesio(self):
        """Lazily construct and return the LibrariesIO data source."""
        from augur.librariesio import LibrariesIO
        if self.__librariesio is None:
            logger.debug('Initializing LibrariesIO')
            self.__librariesio = LibrariesIO(
                api_key=self.read_config('LibrariesIO', 'apikey', 'AUGUR_LIBRARIESIO_API_KEY', 'None'),
                githubapi=self.githubapi()
            )
        return self.__librariesio

    def downloads(self):
        """Lazily construct and return the Downloads data source."""
        from augur.downloads import Downloads
        if self.__downloads is None:
            logger.debug('Initializing Downloads')
            self.__downloads = Downloads(self.githubapi())
        return self.__downloads

    def publicwww(self):
        """Lazily construct and return the PublicWWW data source."""
        from augur.publicwww import PublicWWW
        if self.__publicwww is None:
            logger.debug('Initializing PublicWWW')
            self.__publicwww = PublicWWW(api_key=self.read_config('PublicWWW', 'apikey', 'AUGUR_PUBLIC_WWW_API_KEY', 'None'))
        return self.__publicwww

    def localcsv(self):
        """Lazily construct and return the LocalCSV data source."""
        from augur.localcsv import LocalCSV
        if self.__localCSV is None:
            logger.debug('Initializing LocalCSV')
            self.__localCSV = LocalCSV()
        return self.__localCSV
|
zap.py | # ZAP - Zurich Atmosphere Purge
#
# Copyright (c) 2016 Kurt Soto
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the
# following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import absolute_import, division, print_function
import astropy.units as u
import logging
import numpy as np
import os
import sys
from astropy.io import fits
from astropy.wcs import WCS
from functools import wraps
from multiprocessing import cpu_count, Manager, Process
from scipy import ndimage as ndi
from scipy.stats import sigmaclip
from time import time
from .version import __version__
# Limits of the segments in Angstroms
SKYSEG = [0, 5400, 5850, 6440, 6750, 7200, 7700, 8265, 8602, 8731, 9275, 10000]
# Number of available CPUs
NCPU = cpu_count()
# Python 2/3 compatibility shims for string types.
PY2 = sys.version_info[0] == 2
if not PY2:
    text_type = str
    string_types = (str,)
else:
    text_type = unicode
    string_types = (str, unicode)
# Module-wide logging to stdout.
logging.basicConfig(format='[%(levelname)s] %(message)s', level=logging.INFO,
                    stream=sys.stdout)
logger = logging.getLogger(__name__)
###############################################################################
################################### Top Level Functions #######################
###############################################################################
def process(musecubefits, outcubefits='DATACUBE_ZAP.fits', clean=True,
            zlevel='median', cftype='weight', cfwidthSVD=100, cfwidthSP=50,
            pevals=[], nevals=[], optimizeType='normal', extSVD=None,
            skycubefits=None, svdoutputfits='ZAP_SVD.fits', mask=None,
            interactive=False, ncpu=None):
    """ Performs the entire ZAP sky subtraction algorithm.
    Work on an input FITS file and optionally writes the product to an output
    FITS file.
    Parameters
    ----------
    musecubefits : str
        Input FITS file, containing a cube with data in the first extension.
    outcubefits : str
        Output FITS file, based on the input one to propagate all header
        information and other extensions. Default to `DATACUBE_ZAP.fits`.
    clean : bool
        If True (default value), the NaN values are cleaned. Spaxels with more
        then 25% of NaN values are removed, the others are replaced with an
        interpolation from the neighbors. The NaN values are reinserted into
        the final datacube. If set to False, any spaxel with a NaN value will
        be ignored.
    zlevel : str
        Method for the zeroth order sky removal: `none`, `sigclip` or `median`
        (default).
    cftype : str
        Method for the continuum filter: `median` or `weight` (default). For
        the `weight` method, a zeroth order sky is required (see `zlevel`).
    cfwidthSVD : int or float
        Window size for the continuum filter, for the SVD computation.
        Default to 100.
    cfwidthSP : int or float
        Window size for the continuum filter used to remove the continuum
        features for calculating the eigenvalues per spectrum. Smaller values
        better trace the sources. An optimal range of is typically
        20 - 50 pixels. Default to 50.
    optimizeType : str
        Optimization method to compute the number of eigenspectra used for each
        segment: `none`, `normal` (default), `enhanced`. If `none`, the number
        of eigenspectra must be specified with `nevals` or `pevals`, otherwise
        `normal` is used.
    pevals : list
        Allow to specify the percentage of eigenspectra used for each segment.
        If this is used, the nevals is ignored. Provide either a single value
        that will be used for all of the segments, or a list of 11 values that
        will be used for each of the segments.
    nevals : list
        Allow to specify the number of eigenspectra used for each segment.
        If this is used, the pevals is ignored. Provide either a single value
        that will be used for all of the segments, or a list of 11 values that
        will be used for each of the segments.
    extSVD : str
        Path of an input FITS file containing a SVD computed by the
        :func:`~zap.SVDoutput` function. Otherwise the SVD is computed.
    skycubefits : str
        Path for the optional output of the sky that is subtracted from the
        cube. This is simply the input cube minus the output cube.
    svdoutputfits : str
        Output FITS file containing the eigenbasis. Default to `ZAP_SVD.fits`.
    mask : str
        A 2D fits image to exclude regions that may contaminate the zlevel or
        eigenspectra. This image should be constructed from the datacube itself
        to match the dimensionality. Sky regions should be marked as 0, and
        astronomical sources should be identified with an integer greater than
        or equal to 1. Default to None.
    interactive : bool
        If True, a :class:`~zap.zclass` object containing all information on
        the ZAP process is returned, and can be used to explore the
        eigenspectra and recompute the output (with the
        :meth:`~zap.zclass.reprocess` method). In this case, the output files
        are not saved (`outcubefits` and `skycubefits` are ignored). Default to
        False.
    ncpu : int
        Number of worker processes used for the parallel steps. Default to
        None, which uses all available CPUs (module-level ``NCPU``).
    """
    if not isinstance(musecubefits, string_types):
        raise TypeError('The process method only accepts a single datacube '
                        'filename.')
    # make sure it has the right extension
    outcubefits = outcubefits.split('.fits')[0] + '.fits'
    # check if outcubefits/skycubefits exists before beginning
    check_file_exists(outcubefits)
    check_file_exists(skycubefits)
    if extSVD is None:
        check_file_exists(svdoutputfits)
    if ncpu is not None:
        # override the module-wide CPU count used by parallel_map
        global NCPU
        NCPU = ncpu
    # Check for consistency between weighted median and zlevel keywords
    if cftype == 'weight' and zlevel == 'none':
        raise ValueError('Weighted median requires a zlevel calculation')
    if optimizeType not in ('none', 'normal', 'enhanced'):
        raise ValueError('Invalid value for optimizeType')
    if extSVD is not None and mask is not None:
        raise ValueError('extSVD and mask parameters are incompatible: if mask'
                         ' must be used, then the SVD has to be recomputed')
    if mask is not None or (extSVD is None and cfwidthSVD != cfwidthSP):
        # Compute the SVD separately, only if a mask is given, or if the
        # cfwidth values differ and extSVD is not given. Otherwise, the SVD
        # will be computed in the _run method, which allows to avoid running
        # twice the zlevel and continuumfilter steps.
        SVDoutput(musecubefits, svdoutputfits=svdoutputfits,
                  clean=clean, zlevel=zlevel, cftype=cftype,
                  cfwidth=cfwidthSVD, mask=mask)
        extSVD = svdoutputfits
    zobj = zclass(musecubefits)
    zobj._run(clean=clean, zlevel=zlevel, cfwidth=cfwidthSP, cftype=cftype,
              pevals=pevals, nevals=nevals, optimizeType=optimizeType,
              extSVD=extSVD)
    if interactive:
        # Return the zobj object without saving files
        return zobj
    if zobj.run_zlevel != 'extSVD' and svdoutputfits is not None:
        # Save SVD only if it was computed in _run, i.e. if an external SVD
        # was not given
        zobj.writeSVD(svdoutputfits=svdoutputfits)
    if skycubefits is not None:
        zobj.writeskycube(skycubefits=skycubefits)
    zobj.mergefits(outcubefits)
def SVDoutput(musecubefits, svdoutputfits='ZAP_SVD.fits', clean=True,
              zlevel='median', cftype='weight', cfwidth=100, mask=None,
              ncpu=None):
    """ Performs the SVD decomposition of a datacube.
    This allows to use the SVD for a different datacube.
    Parameters
    ----------
    musecubefits : str
        Input FITS file, containing a cube with data in the first extension.
    svdoutputfits : str
        Output FITS file. Default to ZAP_SVD.fits
    clean : bool
        If True (default value), the NaN values are cleaned. Spaxels with more
        then 25% of NaN values are removed, the others are replaced with an
        interpolation from the neighbors.
    zlevel : str
        Method for the zeroth order sky removal: `none`, `sigclip` or `median`
        (default).
    cftype : str
        Method for the continuum filter: `median` or `weight` (default). For
        the `weight` method, a zeroth order sky is required (see `zlevel`).
    cfwidth : int or float
        Window size for the continuum filter. Default to 100.
    mask : str
        Path of a FITS file containing a mask (1 for objects, 0 for sky).
    ncpu : int
        Number of worker processes used for the parallel steps. Default to
        None, which uses all available CPUs (module-level ``NCPU``).
    """
    logger.info('Running ZAP %s !', __version__)
    logger.info('Processing %s to compute the SVD', musecubefits)
    check_file_exists(svdoutputfits)
    if ncpu is not None:
        # override the module-wide CPU count used by parallel_map
        global NCPU
        NCPU = ncpu
    # Check for consistency between weighted median and zlevel keywords
    if cftype == 'weight' and zlevel == 'none':
        raise ValueError('Weighted median requires a zlevel calculation')
    zobj = zclass(musecubefits)
    # clean up the nan values
    if clean:
        zobj._nanclean()
    # if mask is supplied, apply it
    if mask is not None:
        zobj._applymask(mask)
    # Extract the spectra that we will be working with
    zobj._extract()
    # remove the median along the spectral axis
    if zlevel.lower() != 'none':
        zobj._zlevel(calctype=zlevel)
    # remove the continuum level - this is multiprocessed to speed it up
    zobj._continuumfilter(cftype=cftype, cfwidth=cfwidth)
    # do the multiprocessed SVD calculation
    zobj._msvd()
    # write to file
    zobj.writeSVD(svdoutputfits=svdoutputfits)
def contsubfits(musecubefits, contsubfn='CONTSUB_CUBE.fits', cfwidth=100):
    """ A multiprocessed implementation of the continuum removal.
    This process distributes the data to many processes that then reassemble
    the data. Uses two filters, a small scale (less than the line spread
    function) uniform filter, and a large scale median filter to capture the
    structure of a variety of continuum shapes.
    Parameters
    ----------
    musecubefits : str
        Input FITS file, containing a cube with data in the first extension.
    contsubfn : str
        Output FITS file. Default to CONTSUB_CUBE.fits
    cfwidth : int or float
        Window size for the continuum filter. Default to 100.
    """
    check_file_exists(contsubfn)
    # use a context manager so the input file is closed even if the
    # filtering or the write raises (the original leaked the handle)
    with fits.open(musecubefits) as hdu:
        data = hdu[1].data
        # flatten the spatial axes into one so the filter works on 2D
        stack = data.reshape(data.shape[0], (data.shape[1] * data.shape[2]))
        contarray = _continuumfilter(stack, 'median', cfwidth=cfwidth)
        # remove continuum features
        stack -= contarray
        hdu[1].data = stack.reshape(data.shape[0], data.shape[1],
                                    data.shape[2])
        hdu.writeto(contsubfn)
def nancleanfits(musecubefits, outfn='NANCLEAN_CUBE.fits', rejectratio=0.25,
                 boxsz=1):
    """
    Detects NaN values in cube and removes them by replacing them with an
    interpolation of the nearest neighbors in the data cube. The positions in
    the cube are retained in nancube for later remasking.
    Parameters
    ----------
    musecubefits : str
        Input FITS file, containing a cube with data in the first extension.
    outfn : str
        Output FITS file. Default to NANCLEAN_CUBE.fits
    rejectratio : float
        Defines a cutoff for the ratio of NAN to total pixels in a spaxel
        before the spaxel is avoided completely. Default to 0.25
    boxsz : int
        Defines the number of pixels around the offending NaN pixel.
        Default to 1, which looks for the 26 nearest neighbors which
        is a 3x3x3 cube.
    """
    check_file_exists(outfn)
    # use a context manager so the input file is closed even if the
    # cleaning or the write raises (the original leaked the handle)
    with fits.open(musecubefits) as hdu:
        # _nanclean returns (cleaned cube, boolean nan mask); only the
        # cleaned cube is written out
        cleancube = _nanclean(hdu[1].data, rejectratio=rejectratio,
                              boxsz=boxsz)
        hdu[1].data = cleancube[0]
        hdu.writeto(outfn)
def check_file_exists(filename):
    """Raise an IOError if *filename* is set and already exists on disk.

    A ``None`` filename means the corresponding output is disabled, so it
    is silently accepted.
    """
    if filename is None:
        return
    if os.path.exists(filename):
        raise IOError('Output file "{0}" exists'.format(filename))
def timeit(func):
    """Decorator that logs the wall-clock duration of each call to *func*."""
    @wraps(func)
    def timed(*args, **kwargs):
        started = time()
        result = func(*args, **kwargs)
        logger.info('%s - Time: %.2f sec.', func.__name__, time() - started)
        return result
    return timed
###############################################################################
##################################### Process Steps ###########################
###############################################################################
class zclass(object):
    """ Main class to run each of the steps of ZAP.
    Attributes
    ----------
    cleancube : numpy.ndarray
        The final datacube after removing all of the residual features.
    contarray : numpy.ndarray
        A 2D array containing the subtracted continuum per spaxel.
    cube : numpy.ndarray
        The original cube with the zlevel subtraction performed per spaxel.
    especeval : list of (eigenspectra, eval)
        A list containing the full set of eigenspectra and eigenvalues
        generated by the SVD calculation that is used to reconstruct the
        entire datacube.
    laxis : numpy.ndarray
        A 1d array containing the wavelength solution generated from the header
        parameters.
    wcs : astropy.wcs.WCS
        WCS object with the wavelength solution.
    lranges : list
        A list of the wavelength bin limits used in segmenting the spectrum
        for SVD.
    nancube : numpy.ndarray
        A 3d boolean datacube containing True in voxels where a NaN value was
        replaced with an interpolation.
    nevals : numpy.ndarray
        A 1d array containing the number of eigenvalues used per segment to
        reconstruct the residuals.
    normstack : numpy.ndarray
        A normalized version of the datacube deconstructed into a 2d array.
    varlist : numpy.ndarray
        An array for each segment with the variance curve, calculated for the
        optimize method.
    pranges : numpy.ndarray
        The pixel indices of the bounding regions for each spectral segment.
    recon : numpy.ndarray
        A 2d array containing the reconstructed emission line residuals.
    run_clean : bool
        Boolean that indicates that the NaN cleaning method was used.
    run_zlevel : bool or str
        False before the zero level step; afterwards the method name that
        was used ('median', 'sigclip', 'none' or 'extSVD').
    stack : numpy.ndarray
        The datacube deconstructed into a 2d array for use in the SVD.
    subespeceval : list of (eigenspectra, eval)
        The subset of eigenvalues and eigenspectra used to reconstruct the sky
        residuals.
    variancearray : numpy.ndarray
        A list of length nsegments containing variances calculated per spaxel
        used for normalization
    y,x : numpy.ndarray
        The position in the cube of the spaxels that are in the 2d
        deconstructed stack
    zlsky : numpy.ndarray
        A 1d array containing the result of the zero level subtraction
    """
    def __init__(self, musecubefits):
        """ Initialization of the zclass.
        Pulls the datacube into the class and trims it based on the known
        optimal spectral range of MUSE.
        """
        hdu = fits.open(musecubefits)
        self.cube = hdu[1].data
        self.header = hdu[1].header
        self.musecubefits = musecubefits
        hdu.close()
        # Workaround for floating points errors in wcs computation: if cunit is
        # specified, wcslib will convert in meters instead of angstroms, so we
        # remove cunit before creating the wcs object
        header = self.header.copy()
        unit = u.Unit(header.pop('CUNIT3'))
        self.wcs = WCS(header).sub([3])
        # Create Lambda axis
        wlaxis = np.arange(self.cube.shape[0])
        self.laxis = self.wcs.all_pix2world(wlaxis, 0)[0]
        if unit != u.angstrom:
            # Make sure lambda is in angstroms
            self.laxis = (self.laxis * unit).to(u.angstrom).value
        # NaN Cleaning
        self.run_clean = False
        self.nancube = None
        self._boxsz = 1
        self._rejectratio = 0.25
        # Mask file
        self.maskfile = None
        # zlevel parameters
        self.run_zlevel = False
        self.zlsky = np.zeros_like(self.laxis)
        # Extraction results
        self.stack = None
        self.y = None
        self.x = None
        # Normalization Maps
        self.contarray = None
        self.variancearray = None
        self.normstack = None
        # identify the spectral range of the dataset
        laxmin = min(self.laxis)
        laxmax = max(self.laxis)
        # List of segmentation limits in the optical
        skyseg = np.array(SKYSEG)
        skyseg = skyseg[(skyseg > laxmin) & (skyseg < laxmax)]
        # segment limit in angstroms (pad the ends by 10 A so every
        # wavelength falls in a segment)
        self.lranges = (np.vstack([np.append(laxmin - 10, skyseg),
                                   np.append(skyseg, laxmax + 10)])).T
        # segment limit in pixels
        laxis = self.laxis
        lranges = self.lranges
        pranges = []
        for i in range(len(lranges)):
            paxis = wlaxis[(laxis > lranges[i, 0]) & (laxis <= lranges[i, 1])]
            pranges.append((np.min(paxis), np.max(paxis) + 1))
        self.pranges = np.array(pranges)
        # eigenspace Subset
        self.especeval = []
        self.subespeceval = []
        # Reconstruction of sky features
        self.recon = None
        self.cleancube = None
        self.varlist = None  # container for variance curves
    @timeit
    def _run(self, clean=True, zlevel='median', cftype='weight',
             cfwidth=100, pevals=[], nevals=[], optimizeType='normal',
             extSVD=None):
        """ Perform all zclass to ZAP a datacube:
        - NaN re/masking,
        - deconstruction into "stacks",
        - zerolevel subtraction,
        - continuum removal,
        - normalization,
        - singular value decomposition,
        - eigenvector selection,
        - residual reconstruction and subtraction,
        - data cube reconstruction.
        """
        logger.info('Running ZAP %s !', __version__)
        self.optimizeType = optimizeType
        # clean up the nan values
        if clean:
            self._nanclean()
        # Extract the spectra that we will be working with
        self._extract()
        # remove the median along the spectral axis
        if extSVD is None:
            if zlevel.lower() != 'none':
                self._zlevel(calctype=zlevel)
        else:
            self._externalzlevel(extSVD)
        # remove the continuum level - this is multiprocessed to speed it up
        self._continuumfilter(cfwidth=cfwidth, cftype=cftype)
        # do the multiprocessed SVD calculation
        if extSVD is None:
            self._msvd()
        else:
            self._externalSVD(extSVD)
        # choose some fraction of eigenspectra or some finite number of
        # eigenspectra
        if optimizeType != 'none' or (nevals == [] and pevals == []):
            self.optimize()
            self.chooseevals(nevals=self.nevals)
        else:
            self.chooseevals(pevals=pevals, nevals=nevals)
        # reconstruct the sky residuals using the subset of eigenspace
        self.reconstruct()
        # stuff the new spectra back into the cube
        self.remold()
    # Clean up the nan value spaxels
    def _nanclean(self):
        """
        Detects NaN values in cube and removes them by replacing them with an
        interpolation of the nearest neighbors in the data cube. The positions
        in the cube are retained in nancube for later remasking.
        """
        self.cube, self.nancube = _nanclean(
            self.cube, rejectratio=self._rejectratio, boxsz=self._boxsz)
        self.run_clean = True
    @timeit
    def _extract(self):
        """
        Deconstruct the datacube into a 2d array, since spatial information is
        not required, and the linear algebra routines require 2d arrays.
        The operation rejects any spaxel with even a single NaN value, since
        this would cause the linear algebra routines to crash.
        Adds the x and y data of these positions into the zclass
        """
        logger.debug('Extracting to 2D')
        # make a map of spaxels with NaNs
        badmap = (np.logical_not(np.isfinite(self.cube))).sum(axis=0)
        # get positions of those with no NaNs
        self.y, self.x = np.where(badmap == 0)
        # extract those positions into a 2d array
        self.stack = self.cube[:, self.y, self.x]
        logger.debug('%d valid spaxels', len(self.x))
    def _externalzlevel(self, extSVD):
        """Remove the zero level from the extSVD file."""
        logger.info('Using external zlevel')
        # the zero-level spectrum is stored in the primary HDU of the SVD file
        self.zlsky = fits.getdata(extSVD, 0)
        self.stack -= self.zlsky[:, np.newaxis]
        self.run_zlevel = 'extSVD'
    @timeit
    def _zlevel(self, calctype='median'):
        """
        Removes a 'zero' level from each spectral plane. Spatial information is
        not required, so it operates on the extracted stack.
        Operates on stack, leaving it with this level removed and adds the data
        'zlsky' to the class. zlsky is a spectrum of the zero levels.
        This zero level is currently calculated with a median.
        Experimental operations -
        - exclude top quartile
        - run in an iterative sigma clipped mode
        NOTE(review): calctype must be 'median', 'sigclip' or 'none' — any
        other value leaves ``func`` unbound and raises NameError below.
        """
        self.run_zlevel = calctype
        if calctype != 'none':
            logger.info('Subtracting Zero Level')
            zlstack = self.stack
            if calctype == 'median':
                logger.info('Median zlevel calculation')
                func = _imedian
            elif calctype == 'sigclip':
                logger.info('Iterative Sigma Clipping zlevel calculation')
                func = _isigclip
            self.zlsky = np.hstack(parallel_map(func, zlstack, NCPU, axis=0))
            self.stack -= self.zlsky[:, np.newaxis]
        else:
            logger.info('Skipping zlevel subtraction')
    def _continuumfilter(self, cfwidth=100, cftype='weight'):
        """ A multiprocessed implementation of the continuum removal.
        This process distributes the data to many processes that then
        reassemble the data. Uses two filters, a small scale (less than the
        line spread function) uniform filter, and a large scale median filter
        to capture the structure of a variety of continuum shapes.
        added to class
        contarray - the removed continuua
        normstack - "normalized" version of the stack with the continuua
        removed
        """
        logger.info('Applying Continuum Filter, cfwidth=%d', cfwidth)
        if cftype not in ('weight', 'median', 'none'):
            raise ValueError("cftype must be 'weight' or 'median', got {}"
                             .format(cftype))
        self._cftype = cftype
        self._cfwidth = cfwidth
        if cftype == 'median':
            weight = None
        elif cftype == 'weight':
            # weights are the inverted sky spectrum, so sky-line channels
            # contribute least to the weighted median
            weight = np.abs(self.zlsky - (np.max(self.zlsky) + 1))
        # remove continuum features
        if cftype == 'none':
            self.contarray = np.zeros_like(self.stack)
            self.normstack = self.stack.copy()
        else:
            self.contarray = _continuumfilter(self.stack, cftype,
                                              weight=weight, cfwidth=cfwidth)
            self.normstack = self.stack - self.contarray
    @timeit
    def _msvd(self):
        """ Multiprocessed singular value decomposition.
        First the normstack is normalized per segment per spaxel by the
        variance. Takes the normalized, spectral segments and distributes them
        to the individual svd methods.
        """
        logger.info('Calculating SVD')
        # normalize the variance in the segments
        nseg = len(self.pranges)
        self.variancearray = var = np.zeros((nseg, self.stack.shape[1]))
        for i in range(nseg):
            pmin, pmax = self.pranges[i]
            var[i, :] = np.var(self.normstack[pmin:pmax, :], axis=0)
            self.normstack[pmin:pmax, :] /= var[i, :]
        logger.debug('Beginning SVD on %d segments', nseg)
        # split points for np.array_split inside parallel_map
        indices = [x[0] for x in self.pranges[1:]]
        self.especeval = parallel_map(_isvd, self.normstack, indices, axis=0)
    def chooseevals(self, nevals=[], pevals=[]):
        """ Choose the number of eigenspectra/evals to use for reconstruction.
        User supplies the number of eigen spectra to be used (neval) or the
        percentage of the eigenspectra that were calculated (peval) from each
        spectral segment to be used.
        The user can either provide a single value to be used for all segments,
        or provide an array that defines neval or peval per segment.
        """
        nranges = len(self.especeval)
        nevals = np.atleast_1d(nevals)
        pevals = np.atleast_1d(pevals)
        nespec = np.array([self.especeval[i][0].shape[1]
                           for i in range(nranges)])
        # deal with no selection
        if len(nevals) == 0 and len(pevals) == 0:
            logger.info('Number of modes not selected')
            nevals = np.array([1])
        # deal with an input list
        if len(nevals) > 1:
            if len(nevals) != nranges:
                nevals = np.array([nevals[0]])
                logger.info('Chosen eigenspectra array does not correspond to '
                            'number of segments')
            else:
                logger.info('Choosing %s eigenspectra for segments', nevals)
        if len(pevals) > 1:
            if len(pevals) != nranges:
                pevals = np.array([pevals[0]])
                logger.info('Chosen eigenspectra array does not correspond to '
                            'number of segments')
            else:
                logger.info('Choosing %s%% of eigenspectra for segments',
                            pevals)
                nevals = (pevals * nespec / 100.).round().astype(int)
        # deal with single value entries
        if len(pevals) == 1:
            logger.info('Choosing %s%% of eigenspectra for all segments',
                        pevals)
            nevals = (pevals * nespec / 100.).round().astype(int)
        elif len(nevals) == 1:
            logger.info('Choosing %s eigenspectra for all segments', nevals)
            nevals = np.zeros(nranges, dtype=int) + nevals
        # take subset of the eigenspectra and put them in a list
        subespeceval = []
        for i in range(nranges):
            eigenspectra, evals = self.especeval[i]
            tevals = (evals[0:nevals[i], :]).copy()
            teigenspectra = (eigenspectra[:, 0:nevals[i]]).copy()
            subespeceval.append((teigenspectra, tevals))
        self.subespeceval = subespeceval
        self.nevals = nevals
    @timeit
    def reconstruct(self):
        """Reconstruct the residuals from a given set of eigenspectra and
        eigenvalues
        """
        logger.info('Reconstructing Sky Residuals')
        nseg = len(self.especeval)
        rec = [(eig[:, :, np.newaxis] * ev[np.newaxis, :, :]).sum(axis=1)
               for eig, ev in self.subespeceval]
        # rescale to correct variance
        for i in range(nseg):
            rec[i] *= self.variancearray[i, :]
        self.recon = np.concatenate(rec)
    # stuff the stack back into a cube
    def remold(self):
        """ Subtracts the reconstructed residuals and places the cleaned
        spectra into the duplicated datacube.
        """
        logger.info('Applying correction and reshaping data product')
        self.cleancube = self.cube.copy()
        self.cleancube[:, self.y, self.x] = self.stack - self.recon
        if self.run_clean:
            # restore the NaNs that were interpolated over during cleaning
            self.cleancube[self.nancube] = np.nan
    # redo the residual reconstruction with a different set of parameters
    def reprocess(self, pevals=[], nevals=[]):
        """
        A method that redoes the eigenvalue selection, reconstruction, and
        remolding of the data.
        """
        self.chooseevals(pevals=pevals, nevals=nevals)
        self.reconstruct()
        self.remold()
    @timeit
    def optimize(self):
        """ Function to optimize the number of components used to characterize
        the residuals.
        This function calculates the variance per segment with an increasing
        number of eigenspectra/eigenvalues. It then deterimines the point at
        which the second derivative of this variance curve reaches zero. When
        this occurs, the linear reduction in variance is attributable to the
        removal of astronomical features rather than emission line residuals.
        """
        logger.info('Optimizing')
        normstack = self.stack - self.contarray
        nseg = len(self.especeval)
        self.nevals = np.zeros(nseg, dtype=int)
        indices = [x[0] for x in self.pranges[1:]]
        self.varlist = parallel_map(_ivarcurve, normstack, indices, axis=0,
                                    especeval=self.especeval,
                                    variancearray=self.variancearray)
        if self.optimizeType == 'enhanced':
            logger.info('Enhanced Optimization')
        else:
            logger.info('Normal Optimization')
        for i in range(nseg):
            # optimize
            varlist = self.varlist[i]
            # first and second finite differences of the variance curve
            deriv = varlist[1:] - varlist[:-1]
            deriv2 = deriv[1:] - deriv[:-1]
            noptpix = varlist.size
            if self.optimizeType != 'enhanced':
                # statistics on the derivatives
                ind = int(.5 * (noptpix - 2))
                mn1 = deriv[ind:].mean()
                std1 = deriv[ind:].std() * 2
                mn2 = deriv2[ind:].mean()
                std2 = deriv2[ind:].std() * 2
                # look for crossing points. When they get within 1 sigma of
                # mean in settled region.
                # pad by 1 for 1st deriv
                cross1 = np.append([False], deriv >= (mn1 - std1))
                # pad by 2 for 2nd
                cross2 = np.append([False, False],
                                   np.abs(deriv2) <= (mn2 + std2))
                cross = np.logical_or(cross1, cross2)
            else:
                # statistics on the derivatives
                ind = int(.75 * (noptpix - 2))
                mn1 = deriv[ind:].mean()
                std1 = deriv[ind:].std()
                mn2 = deriv2[ind:].mean()
                std2 = deriv2[ind:].std()
                # pad by 1 for 1st deriv
                cross = np.append([False], deriv >= (mn1 - std1))
            self.nevals[i] = np.where(cross)[0][0]
    # #########################################################################
    # #################################### Extra Functions ####################
    # #########################################################################
    def make_contcube(self):
        """ Remold the continuum array so it can be investigated.
        Takes the continuum stack and returns it into a familiar cube form.
        """
        contcube = self.cube.copy() * np.nan
        contcube[:, self.y, self.x] = self.contarray
        return contcube
    def _externalSVD(self, extSVD):
        """Load eigenspectra from *extSVD* and project the normalized stack
        onto them to obtain the eigenvalues for this cube."""
        logger.info('Calculating eigenvalues for input eigenspectra')
        hdu = fits.open(extSVD)
        nseg = len(self.pranges)
        # normalize the variance in the segments
        self.variancearray = np.zeros((nseg, self.stack.shape[1]))
        for i in range(nseg):
            pmin, pmax = self.pranges[i]
            self.variancearray[i, :] = np.var(self.normstack[pmin:pmax, :],
                                              axis=0)
            self.normstack[pmin:pmax, :] /= self.variancearray[i, :]
        especeval = []
        for i in range(nseg):
            # eigenspectra for segment i are stored in extension i+1
            eigenspectra = hdu[i + 1].data
            ns = self.normstack[self.pranges[i][0]:self.pranges[i][1]]
            evals = np.transpose(np.transpose(ns).dot(eigenspectra))
            especeval.append([eigenspectra, evals])
        self.especeval = especeval
        hdu.close()
    def _applymask(self, mask):
        """Apply a mask to the input data to provide a cleaner basis set.
        mask is >1 for objects, 0 for sky so that people can use sextractor.
        The file is read with ``astropy.io.fits.getdata`` which first tries to
        read the primary extension, then the first extension is no data was
        found before.
        """
        logger.info('Applying Mask for SVD Calculation from %s', mask)
        self.maskfile = mask
        mask = fits.getdata(mask).astype(bool)
        nmasked = np.count_nonzero(mask)
        logger.info('Masking %d pixels (%d%%)', nmasked,
                    nmasked / np.prod(mask.shape) * 100)
        # masked spaxels become NaN and are rejected later by _extract
        self.cube[:, mask] = np.nan
    ###########################################################################
    ##################################### Output Functions ####################
    ###########################################################################
    def writecube(self, outcubefits='DATACUBE_ZAP.fits'):
        """Write the processed datacube to an individual fits file."""
        check_file_exists(outcubefits)
        # fix up for writing
        outhead = _newheader(self)
        # create hdu and write
        outhdu = fits.PrimaryHDU(data=self.cleancube, header=outhead)
        outhdu.writeto(outcubefits)
        logger.info('Cube file saved to %s', outcubefits)
    def writeskycube(self, skycubefits='SKYCUBE_ZAP.fits'):
        """Write the processed datacube to an individual fits file."""
        check_file_exists(skycubefits)
        # fix up for writing
        outcube = self.cube - self.cleancube
        outhead = _newheader(self)
        # create hdu and write
        outhdu = fits.PrimaryHDU(data=outcube, header=outhead)
        outhdu.writeto(skycubefits)
        logger.info('Sky cube file saved to %s', skycubefits)
    def mergefits(self, outcubefits):
        """Merge the ZAP cube into the full muse datacube and write."""
        # make sure it has the right extension
        outcubefits = outcubefits.split('.fits')[0] + '.fits'
        check_file_exists(outcubefits)
        hdu = fits.open(self.musecubefits)
        hdu[1].header = _newheader(self)
        hdu[1].data = self.cleancube
        hdu.writeto(outcubefits)
        hdu.close()
        logger.info('Cube file saved to %s', outcubefits)
    def writeSVD(self, svdoutputfits='ZAP_SVD.fits'):
        """Write the SVD to an individual fits file.
        NOTE(review): the ZAP* header built below is never attached to any
        HDU, so these keywords do not end up in the output file — looks like
        the PrimaryHDU should be created with ``header=header``; confirm.
        """
        check_file_exists(svdoutputfits)
        header = fits.Header()
        header['ZAPvers'] = (__version__, 'ZAP version')
        header['ZAPzlvl'] = (self.run_zlevel, 'ZAP zero level correction')
        header['ZAPclean'] = (self.run_clean,
                              'ZAP NaN cleaning performed for calculation')
        header['ZAPcftyp'] = (self._cftype, 'ZAP continuum filter type')
        header['ZAPcfwid'] = (self._cfwidth, 'ZAP continuum filter size')
        header['ZAPmask'] = (self.maskfile, 'ZAP mask used to remove sources')
        nseg = len(self.pranges)
        header['ZAPnseg'] = (nseg, 'Number of segments used for ZAP SVD')
        hdu = fits.HDUList([fits.PrimaryHDU(self.zlsky)])
        for i in range(len(self.pranges)):
            hdu.append(fits.ImageHDU(self.especeval[i][0]))
        # write for later use
        hdu.writeto(svdoutputfits)
        logger.info('SVD file saved to %s', svdoutputfits)
    def plotvarcurve(self, i=0, ax=None):
        """Plot the variance curve and its derivatives for segment *i*.
        NOTE(review): the float slice indices below (``deriv[.5 * ...:]``)
        raise TypeError on Python 3 / modern NumPy; ``optimize`` uses
        ``int(...)`` for the same computation — this method needs the same
        fix.
        """
        if len(self.varlist) == 0:
            logger.warning('No varlist found. The optimize method must be '
                           'run first.')
            return
        # optimize
        deriv = (np.roll(self.varlist[i], -1) - self.varlist[i])[:-1]
        deriv2 = (np.roll(deriv, -1) - deriv)[:-1]
        noptpix = self.varlist[i].size
        if self.optimizeType == 'normal':
            # statistics on the derivatives
            mn1 = deriv[.5 * (noptpix - 2):].mean()
            std1 = deriv[.5 * (noptpix - 2):].std() * 2
            mn2 = deriv2[.5 * (noptpix - 2):].mean()
            std2 = deriv2[.5 * (noptpix - 2):].std() * 2
        else:
            # statistics on the derivatives
            mn1 = deriv[.75 * (noptpix - 2):].mean()
            std1 = deriv[.75 * (noptpix - 2):].std()
            mn2 = deriv2[.75 * (noptpix - 2):].mean()
            std2 = deriv2[.75 * (noptpix - 2):].std()
        if ax is None:
            import matplotlib.pyplot as plt
            fig, ax = plt.subplots(3, 1, figsize=[10, 15])
        ax1, ax2, ax3 = ax
        ax1.plot(self.varlist[i], linewidth=3)
        ax1.plot([self.nevals[i], self.nevals[i]],
                 [min(self.varlist[i]), max(self.varlist[i])])
        ax1.set_ylabel('Variance')
        ax2.plot(np.arange(deriv.size), deriv)
        ax2.plot([0, len(deriv)], [mn1, mn1], 'k')
        ax2.plot([0, len(deriv)], [mn1 - std1, mn1 - std1], '0.5')
        ax2.plot([self.nevals[i] - 1, self.nevals[i] - 1],
                 [min(deriv), max(deriv)])
        ax2.set_ylabel('d/dn Var')
        ax3.plot(np.arange(deriv2.size), np.abs(deriv2))
        ax3.plot([0, len(deriv2)], [mn2, mn2], 'k')
        ax3.plot([0, len(deriv2)], [mn2 + std2, mn2 + std2], '0.5')
        ax3.plot([self.nevals[i] - 2, self.nevals[i] - 2],
                 [min(deriv2), max(deriv2)])
        ax3.set_ylabel('(d^2/dn^2) Var')
        # ax3.set_xlabel('Number of Components')
        ax1.set_title('Segment {0}, {1} - {2} Angstroms'.format(
            i, self.lranges[i][0], self.lranges[i][1]))
###############################################################################
##################################### Helper Functions ########################
###############################################################################
def worker(f, i, chunk, out_q, err_q, kwargs):
    """Execute ``f(i, chunk, **kwargs)`` in a child process.

    On success the pair ``(i, result)`` is put on *out_q* so the caller can
    reassemble results in task order; on failure the exception is put on
    *err_q* and nothing is written to *out_q*.
    """
    try:
        res = f(i, chunk, **kwargs)
    except Exception as exc:
        err_q.put(exc)
    else:
        # output the result and task ID to output queue
        out_q.put((i, res))
def parallel_map(func, arr, indices, **kwargs):
    """Split *arr* at *indices* (along ``axis``) and run *func* on each chunk
    in a separate process, returning the per-chunk results in chunk order.

    ``func`` is called as ``func(i, chunk, **kwargs)`` where ``i`` is the
    chunk index. Any exception raised in a worker is re-raised here.
    """
    logger.debug('Running function %s with indices: %s',
                 func.__name__, indices)
    manager = Manager()
    out_q = manager.Queue()
    err_q = manager.Queue()
    jobs = []
    # axis is consumed here so it is not forwarded to func via kwargs
    axis = kwargs.pop('axis', None)
    chunks = np.array_split(arr, indices, axis=axis)
    for i, chunk in enumerate(chunks):
        p = Process(target=worker, args=(func, i, chunk, out_q, err_q, kwargs))
        jobs.append(p)
        p.start()
    # gather the results
    for proc in jobs:
        proc.join()
    if not err_q.empty():
        # kill all on any exception from any one slave
        raise err_q.get()
    # Processes finish in arbitrary order. Process IDs double
    # as index in the resultant array.
    results = [None] * len(jobs)
    while not out_q.empty():
        idx, result = out_q.get()
        results[idx] = result
    return results
##### Continuum Filtering #####
@timeit
def _continuumfilter(stack, cftype, weight=None, cfwidth=300):
    """Estimate the continuum of every spaxel in *stack* in parallel.

    Dispatches to the weighted-median or plain-median per-chunk filter and
    reassembles the chunks along the spaxel axis.
    NOTE(review): a cftype other than 'median' or 'weight' leaves ``func``
    unbound (NameError); callers validate cftype upstream.
    """
    if cftype == 'median':
        func = _icfmedian
        weight = None
    elif cftype == 'weight':
        func = _icfweight
    c = parallel_map(func, stack, NCPU, axis=1, weight=weight, cfwidth=cfwidth)
    return np.concatenate(c, axis=1)
def _icfweight(i, stack, weight=None, cfwidth=None):
    """Weighted-median continuum for one chunk: filter each spaxel spectrum
    (columns of *stack*) with ``wmedian`` and return the same shape."""
    filtered = [wmedian(spectrum, weight, cfwidth=cfwidth)
                for spectrum in stack.T]
    return np.array(filtered).T
def _icfmedian(i, stack, weight=None, cfwidth=None):
ufilt = 3 # set this to help with extreme over/under corrections
return ndi.median_filter(
ndi.uniform_filter(stack, (ufilt, 1)), (cfwidth, 1))
def rolling_window(a, window):
    """Return a zero-copy strided view of *a* with a sliding window of size
    *window* over the last axis (used to vectorize the weighted median)."""
    n_windows = a.shape[-1] - window + 1
    view_shape = a.shape[:-1] + (n_windows, window)
    view_strides = a.strides + (a.strides[-1],)
    return np.lib.stride_tricks.as_strided(a, shape=view_shape,
                                           strides=view_strides)
def wmedian(spec, wt, cfwidth=100):
    """Performs a weighted median filtering of a 1d spectrum.

    Operates using a cumulative sum curve.

    Parameters
    ----------
    spec : numpy.ndarray
        Input 1d spectrum to be filtered
    wt : numpy.ndarray
        A spectrum of equal length as the input array to provide the weights.
    cfwidth : int or float
        Window size for the continuum filter, for the SVD computation.
        Default to 100.

    Returns
    -------
    numpy.ndarray
        The weighted-median filtered spectrum, same length as ``spec``.
    """
    # Divide-by-zero can occur legitimately in the normalization below
    # (feature not a bug); silence it and restore the previous state on exit.
    old_settings = np.seterr(divide='ignore')
    # Pad so the window is full-sized at the edges; weight padding is tiny
    # but non-zero so padded samples never dominate a window's median.
    spec = np.pad(spec, (cfwidth, cfwidth), 'constant', constant_values=0)
    wt = np.abs(wt)
    wt = np.pad(wt, (cfwidth, cfwidth), 'constant',
                constant_values=(np.min(wt) / 1000., np.min(wt) / 1000.))
    # do some striding for speed
    swin = rolling_window(spec, cfwidth)  # create window container array
    wwin = rolling_window(wt, cfwidth)  # create window container array
    # sort each window by data value and carry the weights along
    srt = np.argsort(swin, axis=-1)
    ind = np.ogrid[0:swin.shape[0], 0:swin.shape[1]]
    sdata = swin[ind[0], srt]
    swt = wwin[ind[0], srt]
    # calculate accumulated weights
    awt = np.cumsum(swt, axis=-1)
    # new weightsort for normalization and consideration of data
    nw = (awt - 0.5 * swt) / awt[:, -1][:, np.newaxis]
    # find the midpoint in the new weight sort
    s = np.argmin(np.abs(nw - 0.5), axis=-1)
    sl = np.arange(len(s))
    nws = nw[sl, s]
    nws1 = nw[sl, s - 1]
    # linear interpolation between the two samples bracketing weight 0.5
    f1 = (nws - 0.5) / (nws - nws1)
    f2 = (0.5 - nws1) / (nws - nws1)
    wmed = sdata[sl, s - 1] * f1 + sdata[sl, s] * f2
    # trim the padding back off
    width = cfwidth // 2
    wmed = wmed[width:-width - 1]
    # BUG FIX: restore the *full* previous error state.  The old call
    # ``np.seterr(old_settings['divide'])`` passed the saved 'divide' mode
    # as the positional ``all`` argument, silently clobbering every error
    # category (e.g. switching 'under' from 'ignore' to 'warn').
    np.seterr(**old_settings)
    return wmed
# ### SVD #####
def _isvd(i, normstack):
    """
    Perform single value decomposition and Calculate PC amplitudes (projection)
    outputs are eigenspectra operates on a 2D array.

    eigenspectra = [nbins, naxes]
    evals = [naxes, nobj]
    data = [nbins, nobj]
    """
    transposed = normstack.T
    # Only the right singular vectors are needed; U and s are discarded.
    _, _, vt = np.linalg.svd(transposed, full_matrices=0)
    eigenspectra = vt.T
    # Project the data onto the eigenbasis to obtain the PC amplitudes.
    evals = transposed.dot(eigenspectra)
    logger.info('Finished SVD Segment %d', i)
    return [eigenspectra, evals.T]
# ### OPTIMIZE #####
def _ivarcurve(i, istack, especeval=None, variancearray=None):
    """
    Reconstruct the residuals from a given set of eigenspectra and eigenvalues.

    this is a special version for caculating the variance curve. It adds the
    contribution of a single mode to an existing reconstruction.

    ``i`` selects the segment; ``especeval[i]`` holds its
    ``(eigenspectra, evals)`` pair and ``variancearray[i]`` its variance
    normalization.  Returns the per-mode variance curve as a 1d array.
    """
    # Running reconstruction: one eigenmode is added per loop iteration, so
    # the variance for mode k reuses the accumulated work of modes 0..k-1.
    iprecon = np.zeros_like(istack)
    eigenspectra, evals = especeval[i]
    variance = variancearray[i]
    ivarlist = []
    # Only the first 25% of the modes are examined for the variance curve.
    totalnevals = int(np.round(evals.shape[0] * 0.25))
    progress_step = int(totalnevals * .2)
    to_percent = 100. / (totalnevals - 1.)
    info = logger.info  # hoisted lookup for the hot loop
    for nevals in range(totalnevals):
        if nevals and (nevals % progress_step) == 0:
            info('Seg %d: %d%% complete ', i, int(nevals * to_percent))
        eig = eigenspectra[:, nevals]
        ev = evals[nevals, :]
        # broadcast evals on evects and sum
        iprecon += (eig[:, np.newaxis] * ev[np.newaxis, :])
        icleanstack = istack - (iprecon * variance)
        # calculate the variance on the cleaned segment
        ivarlist.append(np.var(icleanstack))
    return np.array(ivarlist)
def _newheader(zobj):
    """Put the pertinent zap parameters into the header"""
    header = zobj.header.copy()
    header['COMMENT'] = 'These data have been ZAPped!'
    header.append(('ZAPvers', __version__, 'ZAP version'), end=True)
    # zlevel removal performed
    header.append(('ZAPzlvl', zobj.run_zlevel, 'ZAP zero level correction'))
    # Nanclean performed
    header['ZAPclean'] = (zobj.run_clean,
                          'ZAP NaN cleaning performed for calculation')
    # Continuum Filtering
    header['ZAPcftyp'] = (zobj._cftype, 'ZAP continuum filter type')
    header['ZAPcfwid'] = (zobj._cfwidth, 'ZAP continuum filter size')
    # number of segments
    header['ZAPnseg'] = (len(zobj.pranges),
                         'Number of segments used for ZAP SVD')
    # per segment variables: pixel range and eigenvalue count
    for idx, prange in enumerate(zobj.pranges):
        header['ZAPseg{0}'.format(idx)] = (
            '{0}:{1}'.format(prange[0], prange[1] - 1),
            'spectrum segment (pixels)')
        header['ZAPnev{0}'.format(idx)] = (zobj.nevals[idx],
                                           'number of eigenvals/spectra used')
    return header
def _isigclip(i, istack):
mn = []
for col in istack:
clipped, bot, top = sigmaclip(col, low=3, high=3)
mn.append(clipped.mean())
return np.array(mn)
def _imedian(i, istack):
return np.median(istack, axis=1)
@timeit
def _nanclean(cube, rejectratio=0.25, boxsz=1):
    """
    Detects NaN values in cube and removes them by replacing them with an
    interpolation of the nearest neighbors in the data cube. The positions in
    the cube are retained in nancube for later remasking.

    ``cube`` is a 3D array indexed (z, y, x); spaxels whose NaN fraction
    exceeds ``rejectratio`` are rejected outright instead of interpolated;
    ``boxsz`` is the half-width of the neighbor box.  Returns
    ``(cleancube, badcube)``: the interpolated cube and the boolean mask of
    pixels that were replaced.
    """
    logger.info('Cleaning NaN values in the cube')
    cleancube = cube.copy()
    badcube = np.logical_not(np.isfinite(cleancube))  # find NaNs
    badmap = badcube.sum(axis=0)  # map of total nans in a spaxel
    # choose some maximum number of bad pixels in the spaxel and extract
    # positions
    badmask = badmap > (rejectratio * cleancube.shape[0])
    logger.info('Rejected %d spaxels with more than %.1f%% NaN pixels',
                np.count_nonzero(badmask), rejectratio * 100)
    # make cube mask of bad spaxels
    badcube &= (~badmask[np.newaxis, :, :])
    z, y, x = np.where(badcube)
    # One row per bad pixel; columns hold the values of its (2*boxsz+1)^3
    # neighbors, filled in one offset at a time by the loops below.
    neighbor = np.zeros((z.size, (2 * boxsz + 1)**3))
    icounter = 0
    logger.info("Fixing %d remaining NaN pixels", len(z))
    # loop over samplecubes
    nz, ny, nx = cleancube.shape
    for j in range(-boxsz, boxsz + 1, 1):
        for k in range(-boxsz, boxsz + 1, 1):
            for l in range(-boxsz, boxsz + 1, 1):
                iz, iy, ix = z + l, y + k, x + j
                # neighbors falling on/outside the cube border are recorded
                # as NaN so the masked mean below ignores them
                outsider = ((ix <= 0) | (ix >= nx - 1) |
                            (iy <= 0) | (iy >= ny - 1) |
                            (iz <= 0) | (iz >= nz - 1))
                ins = ~outsider
                neighbor[ins, icounter] = cleancube[iz[ins], iy[ins], ix[ins]]
                neighbor[outsider, icounter] = np.nan
                icounter = icounter + 1
    # Replace each bad pixel with the mean of its finite neighbors; pixels
    # with no finite neighbor at all remain NaN.
    mn = np.ma.masked_invalid(neighbor)
    cleancube[z, y, x] = mn.mean(axis=1).filled(np.nan)
    return cleancube, badcube
|
application.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import logging
import subprocess
import tempfile
import textwrap
import threading
from pathlib import Path
from typing import IO, List
from flask import Flask, request, jsonify
from flask_cors import CORS
logging.basicConfig(
format="%(asctime)s %(levelname)s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.DEBUG,
)
LOG: logging.Logger = logging.getLogger(__name__)
def _consume(stream: IO[str]) -> str:
    """Drain ``stream`` to exhaustion, logging each line as it arrives.

    Returns the stripped lines joined with newlines.  The previous
    implementation spawned a thread and immediately ``join``ed it, which
    blocked the caller exactly as a plain loop does — the thread added no
    concurrency, so it has been removed.
    """
    buffer: List[str] = []
    # readline() returns "" only at EOF, so iterate until then.
    for line in iter(stream.readline, ""):
        decoded = line.strip()
        LOG.debug(decoded)
        buffer.append(decoded)
    return "\n".join(buffer)
class Pyre:
    """Owns a pyre server primed inside a throwaway project directory.

    Construction writes a minimal `.pyre_configuration` plus a watchman
    config, starts watchman on the directory, and runs a first `pyre check`
    so later requests hit a warm server.
    """

    def __init__(self) -> None:
        self._directory: Path = Path(tempfile.mkdtemp())
        LOG.debug(f"Starting server in `{self._directory}`...")

        # BUG FIX: the braces were doubled (`{{`/`}}`) — leftovers from a
        # format string — which wrote invalid JSON into `.pyre_configuration`.
        pyre_configuration = textwrap.dedent(
            """
            {
                "source_directories": ["."]
            }
            """
        )
        LOG.debug(f"Writing configuration:\n{pyre_configuration}")
        pyre_configuration_path = self._directory / ".pyre_configuration"
        pyre_configuration_path.write_text(pyre_configuration)

        LOG.debug("Writing watchman configuration")
        watchman_configuration_path = self._directory / ".watchmanconfig"
        watchman_configuration_path.write_text("{}\n")

        LOG.debug("Starting watchman")
        subprocess.check_call(["watchman", "watch", str(self._directory)])

        LOG.debug("Priming the server")
        # TODO(T82114844): incremental is borked on Ubuntu 20.04.
        subprocess.check_call(
            ["pyre", "--noninteractive", "check"], cwd=self._directory
        )

    def check(self, input: str) -> str:
        """Type-check ``input`` and return a flask JSON response object."""
        LOG.debug(f"Writing code:\n{input}")
        code_path = self._directory / "input.py"
        code_path.write_text(input)

        # TODO(T82114844): incremental is borked on Ubuntu 20.04.
        with subprocess.Popen(
            ["pyre", "--output=json", "--noninteractive", "check"],
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE,
            cwd=self._directory,
            text=True,
        ) as process:
            # NOTE(review): stderr is drained fully before stdout; if the
            # stdout pipe fills first this can deadlock — consider
            # process.communicate().  Left as-is to keep behavior identical.
            # pyre-fixme[6]: Expected `IO[bytes]` for 1st param but got
            #  `Optional[IO[typing.Any]]`.
            stderr = _consume(process.stderr)
            # pyre-fixme[6]: Expected `IO[bytes]` for 1st param but got
            #  `Optional[IO[typing.Any]]`.
            stdout = _consume(process.stdout)
            return_code = process.wait()

        # Return codes above 1 are treated as infrastructure failures;
        # otherwise stdout carries the JSON list of type errors.
        if return_code > 1:
            LOG.error(f"Returning error: {stderr}")
            result = jsonify(errors=[stderr])
        else:
            errors = json.loads(stdout)
            result = jsonify(data={"errors": errors, "stderr": stderr})
        return result
# Module-level singletons: one shared pyre server per web process, plus the
# Flask app.  CORS is enabled so the playground frontend can call us from
# another origin.
pyre = Pyre()
application = Flask(__name__)
CORS(application)
@application.route("/check", methods=["GET", "POST"])
def check() -> str:
    """Type-check the code passed as `input` (query arg, form field, or
    JSON body) and return the pyre result as JSON."""
    # BUG FIX: `request.json` raises / is None for requests without a JSON
    # body (e.g. a plain GET with ?input=...), so the old chain could blow
    # up before falling back.  get_json(silent=True) degrades gracefully.
    json_body = request.get_json(silent=True) or {}
    input = (
        request.args.get("input")
        or request.form.get("input")
        or json_body.get("input")
    )
    if input is None:
        return jsonify(errors=["Input not provided"])
    LOG.info(f"Checking `{input}`...")
    return pyre.check(input)
@application.route("/")
def index() -> str:
    """Trivial landing page, usable as a liveness check."""
    return "index"
if __name__ == "__main__":
    # Local entry point: `python application.py [--debug]` runs the Flask
    # development server (production deployments import `application`).
    parser = argparse.ArgumentParser()
    parser.add_argument("--debug", action="store_true")
    arguments: argparse.Namespace = parser.parse_args()
    application.debug = arguments.debug
    application.run()
|
__init__.py | # __init__.py โ Debexpo application test package
#
# This file is part of debexpo -
# https://salsa.debian.org/mentors.debian.net-team/debexpo
#
# Copyright ยฉ 2008 Jonny Lamb <jonny@debian.org>
# Copyright ยฉ 2010 Jan Dittberner <jandd@debian.org>
# Copyright ยฉ 2019 Baptiste Beauplat <lyknode@cilg.org>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
Debexpo application test package.
When the test runner finds and executes tests within this directory,
this file will be loaded to setup the test environment.
"""
from os import walk
from os.path import join
from logging import getLogger
from socketserver import TCPServer
from http.server import SimpleHTTPRequestHandler, BaseHTTPRequestHandler
from threading import Thread
from django.test import TransactionTestCase, TestCase
# import tempfile
# from datetime import datetime
# from unittest import TestCase
#
from debexpo.accounts.models import User, Profile, UserStatus
from debexpo.keyring.models import GPGAlgo, Key
from debexpo.packages.models import Package, PackageUpload, SourcePackage, \
Priority, Section, Distribution, Component, \
BinaryPackage
__all__ = ['environ', 'url', 'TestController']
environ = {}
log = getLogger(__name__)
class DefaultTestController():
    """
    Base class for testing controllers.

    Provides fixture helpers for an example user (optionally with a GPG
    key) and example packages, plus small filesystem assertions.  Derived
    test classes call the ``_setup_*`` helpers in ``setUp`` and the
    matching ``_remove_*`` helpers in ``tearDown``.
    """
    # Form payload that authenticates the account created by
    # _setup_example_user().
    _AUTHDATA = {'username': 'email@example.com',
                 'password': 'password',
                 'commit': 'submit'}
    # Test OpenPGP key with two uids; the fingerprint/type/size/uids
    # constants below describe this same key and must stay in sync with it.
    _GPG_KEY = """-----BEGIN PGP PUBLIC KEY BLOCK-----
mDMEW/GBqhYJKwYBBAHaRw8BAQdA+6hBA4PcdcPwgMsKGQXrqwbJemLBgS1PkKZg
RFlKdKi0IHByaW1hcnkgaWQgPHByaW1hcnlAZXhhbXBsZS5vcmc+iJMEExYIADsC
GwMFCwkIBwIGFQoJCAsCBBYCAwECHgECF4AWIQRVkwbu4cjBst0cc7HENHgc6HHz
3wUCW/GB7AIZAQAKCRDENHgc6HHz35EOAP9lXBb8lm72xPeMdjRL+TU83PimD0NZ
urQfnnLVZOu4tAEAqdrz/2q41mScnKJFAnQ5pan5FYlUnDR2WVp1kiFoPwu0HVRl
c3QgdXNlciA8ZW1haWxAZXhhbXBsZS5jb20+iJAEExYIADgWIQRVkwbu4cjBst0c
c7HENHgc6HHz3wUCW/GB6AIbAwULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAKCRDE
NHgc6HHz3yr6AP9MyMaz+dsOC3R/WnjE8EdM42mpf3VkKY0icS60K/Aj3QD/XkIA
qs+ItQAUoeqZM3jh0HvLwUESxm6FtCltwyGlqwW4OARb8YGqEgorBgEEAZdVAQUB
AQdANrk3qq/eP1TEWfFZqhR0vcz7YB9c5+OnvMV+xO4W3nQDAQgHiHgEGBYIACAW
IQRVkwbu4cjBst0cc7HENHgc6HHz3wUCW/GBqgIbDAAKCRDENHgc6HHz3/CHAP0c
hxes4Ebtg7N8B/BoMYwmUVvmMVmoV+ef/vqYvfm6sgEA6fKzMSXllw57UJ90Unyn
xOwJ1heEnfmgPkuiz7jFCAq4MwReCQ2QFgkrBgEEAdpHDwEBB0A+v2Y8n88j+WwI
Q3hChPR7xa49prRSyKRnGBD/AXhJfYjvBBgWCgAgFiEEVZMG7uHIwbLdHHOxxDR4
HOhx898FAl4JDZACGwIAgQkQxDR4HOhx8992IAQZFgoAHRYhBLPPezP4B2M420+o
DoeRkoMRdTvXBQJeCQ2QAAoJEIeRkoMRdTvX0AcA/i8tjP8ihM2nJHRXwBnrh/iH
v0eSEi3sH+j0fwy9OBLJAP9ne01k9LkCXplS8ys+0u0e4545IIbiw8D4ToupD25q
CiIIAP4hwNooM6bAlg2HDYTUxJl4LA0qlJS66qnXv94Q8p4VngD/Y5O0AJw06BCw
Xcgnuh6Rlywt6uiaFIGYnGefYPGXRAA=
=26Kz
-----END PGP PUBLIC KEY BLOCK-----"""
    _GPG_FINGERPRINT = '559306EEE1C8C1B2DD1C73B1C434781CE871F3DF'
    # OpenPGP algorithm id as a string (algo 22 — presumably EdDSA; confirm
    # against the GPGAlgo fixtures) and key size in bits.
    _GPG_TYPE = '22'
    _GPG_SIZE = 256
    # (name, email) pairs for the two uids carried by _GPG_KEY.
    _GPG_UIDS = [('primary id', 'primary@example.org'),
                 ('Test user', 'email@example.com')]

    def _add_gpg_key(self, user, data, fingerprint, algo, size):
        """Attach an OpenPGP key (and its subkeys) to ``user``."""
        key = Key()
        key.key = data
        key.fingerprint = fingerprint
        key.user = user
        key.algorithm = GPGAlgo.objects.get(gpg_algorithm_id=algo)
        key.size = size
        key.save()
        key.update_subkeys()

    def _setup_example_user(self, gpg=False, email='email@example.com'):
        """Add an example user.

        The example user with name ``Test user``, email address
        ``email@example.com`` and password ``password`` is added to
        the database.

        This method may be used in the setUp method of derived test
        classes.
        """
        # Create a test user and save it.
        user = User.objects.create_user(email, 'Test user', 'password')
        user.save()
        profile = Profile(user=user, status=UserStatus.contributor.value)
        profile.save()
        if gpg:
            self._add_gpg_key(user, self._GPG_KEY, self._GPG_FINGERPRINT,
                              self._GPG_TYPE, self._GPG_SIZE)

    def _remove_example_user(self):
        """Remove the example user.

        This method removes the example user created in
        _setup_example_user.

        This method must be used in the tearDown method of derived
        test classes that use _setup_example_user.
        """
        user = User.objects.filter(email='email@example.com')
        user.delete()

    def _setup_example_package(self):
        """Add an example package.

        The example package with name ``testpackage`` is added to
        the database.

        This method may be used in the setUp method of derived test
        classes.
        """
        user = User.objects.get(email='email@example.com')
        # First package: a local-only upload attributed to the example user.
        package = Package.objects.get_or_create(name='testpackage')[0]
        package_upload = PackageUpload(
            uploader=user,
            package=package,
            version='1.0-1',
            distribution=Distribution.objects.get_or_create(name='unstable')[0],
            component=Component.objects.get_or_create(name='main')[0],
            closes='943216')
        package_upload.save()
        source = SourcePackage(
            upload=package_upload,
            maintainer='Test User <email@example.com>',
            section=Section.objects.get_or_create(name='admin')[0],
            priority=Priority.objects.get_or_create(name='optional')[0]
        )
        source.save()
        binary = BinaryPackage(
            upload=package_upload,
            name='testpackage',
            description='A short description here',
        )
        binary.save()
        # Second package: flagged as already in Debian, different maintainer.
        package = Package.objects.get_or_create(name='anotherpackage',
                                                in_debian=True)[0]
        package_upload = PackageUpload(
            uploader=user,
            package=package,
            version='1.0-1',
            distribution=Distribution.objects.get_or_create(name='buster')[0],
            component=Component.objects.get_or_create(name='non-free')[0],
            closes='')
        package_upload.save()
        source = SourcePackage(
            upload=package_upload,
            maintainer='Another maintainer <another@example.com>',
            section=Section.objects.get_or_create(name='utils')[0],
            priority=Priority.objects.get_or_create(name='standard')[0]
        )
        source.save()
        binary = BinaryPackage(
            upload=package_upload,
            name='libanotherpackage',
            description='Another short description here',
        )
        binary.save()

    def _remove_example_package(self):
        """Remove the example package.

        This method removes the example package created in
        _setup_example_package.

        This method must be used in the tearDown method of derived
        test classes that use _setup_example_package.
        """
        for name in ('testpackage', 'anotherpackage'):
            try:
                package = Package.objects.get(name=name)
            except Package.DoesNotExist:
                pass
            else:
                package.delete()

    def _assert_no_leftover(self, path):
        """Fail the test if any file remains under ``path``, logging each."""
        matches = self._find_all('', path)
        for match in matches:
            log.error('leftover: {}'.format(match))
        self.assertFalse(matches)

    def _find_all(self, name, path):
        """Find a file in a path"""
        result = []
        for root, dirs, files in walk(path):
            for filename in files:
                if name in filename:
                    result.append(join(root, filename))
        return result
class InfinityHTTPHandler(BaseHTTPRequestHandler):
    """HTTP handler that streams an endless body of '0' characters.

    Simulates a never-ending download: it keeps writing 4 MiB blocks until
    any write error (typically the client disconnecting) stops the loop.
    """

    def do_GET(self):
        self.send_response(200, 'OK')
        self.end_headers()
        block = bytes(''.zfill(4 * 1024 * 1024), 'UTF-8')
        streaming = True
        while streaming:
            try:
                self.wfile.write(block)
            except Exception:
                streaming = False
class TestingTCPServer(TCPServer):
    # Allow immediate rebinding of the test port between runs, avoiding
    # "Address already in use" from sockets lingering in TIME_WAIT.
    allow_reuse_address = True
class TestingHTTPServer():
    """Context manager running a throwaway HTTP server on a background
    thread.

    ``port=0`` lets the OS pick a free port; the chosen port is exposed as
    ``self.port``.  ``handler`` defaults to SimpleHTTPRequestHandler.
    """

    def __init__(self, handler=None, port=0):
        if not handler:
            handler = SimpleHTTPRequestHandler
        self.httpd = TestingTCPServer(("localhost", port), handler)
        _, self.port = self.httpd.server_address
        self.thread = Thread(target=self.httpd.serve_forever)

    def __enter__(self):
        self.thread.start()
        return self

    def __exit__(self, type, value, traceback):
        self.httpd.shutdown()
        # BUG FIX: the listening socket was never closed and the serving
        # thread never joined, leaking one fd and one thread per server.
        self.httpd.server_close()
        self.thread.join()
class TransactionTestController(DefaultTestController,
                                TransactionTestCase):
    # Django TransactionTestCase option: restore serialized database
    # content after each test (needed because TransactionTestCase resets
    # tables rather than rolling back a transaction).
    serialized_rollback = True
class TestController(DefaultTestController, TestCase):
    """Standard controller-test base: DefaultTestController fixture helpers
    on top of Django's transaction-wrapped TestCase."""
    pass
|
local_job_service.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
from __future__ import absolute_import
import logging
import os
import queue
import shutil
import subprocess
import tempfile
import threading
import time
import traceback
from builtins import object
from typing import TYPE_CHECKING
from typing import List
from typing import Optional
import grpc
from google.protobuf import text_format # type: ignore # not in typeshed
from apache_beam.metrics import monitoring_infos
from apache_beam.portability.api import beam_artifact_api_pb2
from apache_beam.portability.api import beam_artifact_api_pb2_grpc
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.portability.api import beam_job_api_pb2
from apache_beam.portability.api import beam_job_api_pb2_grpc
from apache_beam.portability.api import beam_provision_api_pb2
from apache_beam.portability.api import endpoints_pb2
from apache_beam.runners.portability import abstract_job_service
from apache_beam.runners.portability import artifact_service
from apache_beam.runners.portability import fn_api_runner
from apache_beam.utils.thread_pool_executor import UnboundedThreadPoolExecutor
if TYPE_CHECKING:
from google.protobuf import struct_pb2 # pylint: disable=ungrouped-imports
from apache_beam.portability.api import beam_runner_api_pb2
_LOGGER = logging.getLogger(__name__)
def _iter_queue(q):
while True:
yield q.get(block=True)
class LocalJobServicer(abstract_job_service.AbstractJobServiceServicer):
  """Manages one or more pipelines, possibly concurrently.

  Experimental: No backward compatibility guaranteed.
  Servicer for the Beam Job API.

  This JobService uses a basic local implementation of runner to run the job.
  This JobService is not capable of managing job on remote clusters.

  By default, this JobService executes the job in process but still uses GRPC
  to communicate pipeline and worker state. It can also be configured to use
  inline calls rather than GRPC (for speed) or launch completely separate
  subprocesses for the runner and worker(s).
  """

  def __init__(self, staging_dir=None):
    super(LocalJobServicer, self).__init__()
    # When no staging dir was supplied we own the temp dir and must delete
    # it again in stop().
    self._cleanup_staging_dir = staging_dir is None
    self._staging_dir = staging_dir or tempfile.mkdtemp()
    self._artifact_service = artifact_service.BeamFilesystemArtifactService(
        self._staging_dir)
    # Populated by start_grpc_server(); handed to each BeamJob so workers
    # know where to fetch artifacts.
    self._artifact_staging_endpoint = None  # type: Optional[endpoints_pb2.ApiServiceDescriptor]

  def create_beam_job(self,
                      preparation_id,  # type: str
                      job_name,  # type: str
                      pipeline,  # type: beam_runner_api_pb2.Pipeline
                      options  # type: struct_pb2.Struct
                     ):
    # type: (...) -> BeamJob
    # TODO(angoenka): Pass an appropriate staging_session_token. The token can
    # be obtained in PutArtifactResponse from JobService
    if not self._artifact_staging_endpoint:
      # The front-end didn't try to stage anything, but the worker may
      # request what's here so we should at least store an empty manifest.
      self._artifact_service.CommitManifest(
          beam_artifact_api_pb2.CommitManifestRequest(
              staging_session_token=preparation_id,
              manifest=beam_artifact_api_pb2.Manifest()))
    provision_info = fn_api_runner.ExtendedProvisionInfo(
        beam_provision_api_pb2.ProvisionInfo(
            job_id=preparation_id,
            job_name=job_name,
            pipeline_options=options,
            retrieval_token=self._artifact_service.retrieval_token(
                preparation_id)),
        self._staging_dir)
    return BeamJob(
        preparation_id,
        pipeline,
        options,
        provision_info,
        self._artifact_staging_endpoint)

  def get_bind_address(self):
    """Return the address used to open the port on the gRPC server.

    This is often, but not always the same as the service address. For
    example, to make the service accessible to external machines, override this
    to return '[::]' and override `get_service_address()` to return a publicly
    accessible host name.
    """
    return self.get_service_address()

  def get_service_address(self):
    """Return the host name at which this server will be accessible.

    In particular, this is provided to the client upon connection as the
    artifact staging endpoint.
    """
    return 'localhost'

  def start_grpc_server(self, port=0):
    """Start the job + artifact gRPC services; returns the bound port."""
    self._server = grpc.server(UnboundedThreadPoolExecutor())
    port = self._server.add_insecure_port(
        '%s:%d' % (self.get_bind_address(), port))
    beam_job_api_pb2_grpc.add_JobServiceServicer_to_server(self, self._server)
    beam_artifact_api_pb2_grpc.add_ArtifactStagingServiceServicer_to_server(
        self._artifact_service, self._server)
    hostname = self.get_service_address()
    self._artifact_staging_endpoint = endpoints_pb2.ApiServiceDescriptor(
        url='%s:%d' % (hostname, port))
    self._server.start()
    _LOGGER.info('Grpc server started at %s on port %d' % (hostname, port))
    return port

  def stop(self, timeout=1):
    """Stop the gRPC server and clean up any temp staging directory."""
    self._server.stop(timeout)
    if os.path.exists(self._staging_dir) and self._cleanup_staging_dir:
      shutil.rmtree(self._staging_dir, ignore_errors=True)

  def GetJobMetrics(self, request, context=None):
    """Return the user-defined metrics collected for a finished job."""
    if request.job_id not in self._jobs:
      raise LookupError("Job {} does not exist".format(request.job_id))
    result = self._jobs[request.job_id].result
    monitoring_info_list = []
    for mi in result._monitoring_infos_by_stage.values():
      monitoring_info_list.extend(mi)
    # Filter out system metrics
    user_monitoring_info_list = [
        x for x in monitoring_info_list
        if monitoring_infos._is_user_monitoring_info(x) or
        monitoring_infos._is_user_distribution_monitoring_info(x)
    ]
    return beam_job_api_pb2.GetJobMetricsResponse(
        metrics=beam_job_api_pb2.MetricResults(
            committed=user_monitoring_info_list))
class SubprocessSdkWorker(object):
  """Manages a SDK worker implemented as a subprocess communicating over grpc.
  """

  def __init__(self,
               worker_command_line,  # type: bytes
               control_address,
               worker_id=None
              ):
    self._worker_command_line = worker_command_line
    self._control_address = control_address
    self._worker_id = worker_id

  def run(self):
    """Launch the worker subprocess and serve its logging channel until it
    exits; raises RuntimeError on a non-zero return code."""
    logging_server = grpc.server(UnboundedThreadPoolExecutor())
    logging_port = logging_server.add_insecure_port('[::]:0')
    logging_servicer = BeamFnLoggingServicer()
    beam_fn_api_pb2_grpc.add_BeamFnLoggingServicer_to_server(
        logging_servicer, logging_server)
    # BUG FIX: the logging servicer used to be registered *after*
    # logging_server.start(); gRPC requires handlers to be added before the
    # server starts, so early worker log messages could be dropped/rejected.
    logging_server.start()

    logging_descriptor = text_format.MessageToString(
        endpoints_pb2.ApiServiceDescriptor(url='localhost:%s' % logging_port))
    control_descriptor = text_format.MessageToString(
        endpoints_pb2.ApiServiceDescriptor(url=self._control_address))

    env_dict = dict(
        os.environ,
        CONTROL_API_SERVICE_DESCRIPTOR=control_descriptor,
        LOGGING_API_SERVICE_DESCRIPTOR=logging_descriptor
    )
    # only add worker_id when it is set.
    if self._worker_id:
      env_dict['WORKER_ID'] = self._worker_id

    with fn_api_runner.SUBPROCESS_LOCK:
      p = subprocess.Popen(
          self._worker_command_line,
          shell=True,
          env=env_dict)
    try:
      p.wait()
      if p.returncode:
        raise RuntimeError(
            'Worker subprocess exited with return code %s' % p.returncode)
    finally:
      # Ensure the child never outlives us, then release the gRPC port.
      if p.poll() is None:
        p.kill()
      logging_server.stop(0)
class BeamJob(abstract_job_service.AbstractBeamJob):
  """This class handles running and managing a single pipeline.

  The current state of the pipeline is available as self.state.
  """

  def __init__(self,
               job_id,  # type: str
               pipeline,
               options,
               provision_info,  # type: fn_api_runner.ExtendedProvisionInfo
               artifact_staging_endpoint  # type: Optional[endpoints_pb2.ApiServiceDescriptor]
              ):
    super(BeamJob, self).__init__(
        job_id, provision_info.provision_info.job_name, pipeline, options)
    self._provision_info = provision_info
    self._artifact_staging_endpoint = artifact_staging_endpoint
    # Consumers of state changes / log messages register queues here.
    self._state_queues = []  # type: List[queue.Queue]
    self._log_queues = []  # type: List[queue.Queue]
    self.daemon = True
    self.result = None

  def set_state(self, new_state):
    """Set the latest state as an int enum and notify consumers"""
    timestamp = super(BeamJob, self).set_state(new_state)
    if timestamp is not None:
      # Inform consumers of the new state.  (Loop variable renamed so it no
      # longer shadows the `queue` module used elsewhere in this class.)
      for state_queue in self._state_queues:
        state_queue.put((new_state, timestamp))

  def prepare(self):
    # Nothing to prepare locally; artifacts are staged separately.
    pass

  def artifact_staging_endpoint(self):
    return self._artifact_staging_endpoint

  def run(self):
    """Start executing the pipeline on a background thread."""
    self.set_state(beam_job_api_pb2.JobState.STARTING)
    self._run_thread = threading.Thread(target=self._run_job)
    self._run_thread.start()

  def _run_job(self):
    self.set_state(beam_job_api_pb2.JobState.RUNNING)
    with JobLogHandler(self._log_queues):
      try:
        result = fn_api_runner.FnApiRunner(
            provision_info=self._provision_info).run_via_runner_api(
                self._pipeline_proto)
        _LOGGER.info('Successfully completed job.')
        self.set_state(beam_job_api_pb2.JobState.DONE)
        self.result = result
      except:  # pylint: disable=bare-except
        # Logger.exception already appends the current traceback; the old
        # second call `_LOGGER.exception(traceback)` only logged the repr of
        # the traceback *module* and has been dropped.
        _LOGGER.exception('Error running pipeline.')
        self.set_state(beam_job_api_pb2.JobState.FAILED)
        raise

  def cancel(self):
    if not self.is_terminal_state(self.state):
      self.set_state(beam_job_api_pb2.JobState.CANCELLING)
      # TODO(robertwb): Actually cancel...
      self.set_state(beam_job_api_pb2.JobState.CANCELLED)

  def get_state_stream(self):
    """Yield (state, timestamp) pairs until a terminal state is reached."""
    # Register for any new state changes.
    state_queue = queue.Queue()
    self._state_queues.append(state_queue)

    for state, timestamp in self.with_state_history(_iter_queue(state_queue)):
      yield state, timestamp
      if self.is_terminal_state(state):
        break

  def get_message_stream(self):
    """Yield log messages interleaved with state changes until terminal."""
    # Register for any new messages.
    log_queue = queue.Queue()
    self._log_queues.append(log_queue)
    self._state_queues.append(log_queue)

    for msg in self.with_state_history(_iter_queue(log_queue)):
      if isinstance(msg, tuple):
        # A (state, timestamp) pair rather than a log message.
        assert len(msg) == 2 and isinstance(msg[0], int)
        current_state = msg[0]
        yield msg
        if self.is_terminal_state(current_state):
          break
      else:
        yield msg
class BeamFnLoggingServicer(beam_fn_api_pb2_grpc.BeamFnLoggingServicer):
  """Receives log bundles from SDK workers and forwards each entry to the
  local logger, flattened onto one line."""

  def Logging(self, log_bundles, context=None):
    for log_bundle in log_bundles:
      for log_entry in log_bundle.log_entries:
        flattened = str(log_entry).replace('\n', ' ')
        _LOGGER.info('Worker: %s', flattened)
    return iter([])
class JobLogHandler(logging.Handler):
  """Captures logs to be returned via the Beam Job API.

  Enabled via the with statement."""

  # Mapping from logging levels to LogEntry levels.
  LOG_LEVEL_MAP = {
      logging.FATAL: beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR,
      logging.CRITICAL: beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR,
      logging.ERROR: beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR,
      logging.WARNING: beam_job_api_pb2.JobMessage.JOB_MESSAGE_WARNING,
      logging.INFO: beam_job_api_pb2.JobMessage.JOB_MESSAGE_BASIC,
      logging.DEBUG: beam_job_api_pb2.JobMessage.JOB_MESSAGE_DEBUG,
  }

  def __init__(self, log_queues):
    super(JobLogHandler, self).__init__()
    self._last_id = 0
    self._logged_thread = None
    self._log_queues = log_queues

  def __enter__(self):
    # Remember the current thread to demultiplex the logs of concurrently
    # running pipelines (as Python log handlers are global).
    self._logged_thread = threading.current_thread()
    logging.getLogger().addHandler(self)

  def __exit__(self, *args):
    self._logged_thread = None
    # BUG FIX: detach from the root logger.  The handler was added in
    # __enter__ but never removed, so every finished job left a dead
    # handler attached to the root logger for the life of the process.
    logging.getLogger().removeHandler(self)
    self.close()

  def _next_id(self):
    # Monotonically increasing string ids for JobMessages.
    self._last_id += 1
    return str(self._last_id)

  def emit(self, record):
    # Only forward records emitted by the pipeline's own thread.
    if self._logged_thread is threading.current_thread():
      msg = beam_job_api_pb2.JobMessage(
          message_id=self._next_id(),
          time=time.strftime('%Y-%m-%d %H:%M:%S.',
                             time.localtime(record.created)),
          importance=self.LOG_LEVEL_MAP[record.levelno],
          message_text=self.format(record))

      # Inform all message consumers.  (Loop variable renamed so it no
      # longer shadows the `queue` module.)
      for log_queue in self._log_queues:
        log_queue.put(msg)
|
Sync.py | from bin.service import ConfluenceAPI
from bin.service import Gitlab
from bin.service import JiraAPI
from bin.service import Logger
from bin.service import Environment
from bin.service import SciKitLearn
import threading
import yaml
import os
import time
import numpy
# Cross-thread completion flags for the three sync workers; set True by the
# corresponding background thread when its sync finishes.
confluence_done = False
jira_done = False
git_done = False
class Sync:
    """Orchestrates a full data sync (Confluence, Jira, optionally Gitlab)
    followed by model training, guarding against concurrent runs via a
    small YAML state file on disk."""

    def __init__(self):
        self.logger = Logger.Logger()
        self.environment = Environment.Environment()

    def run(self, override=False):
        """Run a complete sync unless one is already in progress.

        Pass ``override=True`` to force a run even when the state file says
        a sync is running (or ran within the last 12 hours).
        """
        if self.is_running() and override is False:
            return
        self.set_running(True)
        self.set_last(time.time())
        start = time.time()
        global confluence_done, jira_done, git_done
        # BUG FIX: reset the module-level flags.  They stayed True after a
        # previous run in the same process, defeating the completion check.
        confluence_done = False
        jira_done = False
        git_done = False
        try:
            def confluence_thread():
                global confluence_done
                print('--- confluence thread started ---')
                confluence = ConfluenceAPI.ConfluenceAPI()
                confluence.sync_entries(0)
                confluence_done = True

            def jira_thread():
                global jira_done
                print('--- jira thread started ---')
                jira = JiraAPI.JiraAPI()
                jira.sync_entries(0)
                jira_done = True

            def git_thread():
                global git_done
                print('--- git thread started ---')
                gitlab = Gitlab.Gitlab()
                gitlab.sync_commits(0)
                git_done = True

            confluence_process = threading.Thread(target=confluence_thread)
            confluence_process.start()
            jira_process = threading.Thread(target=jira_thread)
            jira_process.start()
            if self.environment.get_service_enable_git():
                git_process = threading.Thread(target=git_thread)
                git_process.start()
                git_process.join()
            # BUG FIX: the old `while False in [...]: pass` busy-wait burned
            # a full CPU core for the whole sync; join() already blocks
            # until each worker thread has finished.
            confluence_process.join()
            jira_process.join()
            print('--- training started ---')
            scikit = SciKitLearn.SciKitLearn()
            scikit.train()
            end = time.time()
            self.add_runtime((end - start))
            self.set_running(False)
        except Exception as e:
            print(e)
            self.logger.add_entry('PhoenixSync', e)
            self.set_running(False)

    def set_running(self, running=True):
        """Persist the `running` flag into the sync state file."""
        state = self.load_yaml()
        if state is not None:
            state['running'] = running
            self.store_yaml(state)

    def set_last(self, last):
        """Persist the timestamp of the most recent sync start."""
        state = self.load_yaml()
        if state is not None:
            state['last'] = last
            self.store_yaml(state)

    def add_runtime(self, runtime):
        """Append a runtime sample (seconds) and refresh the average."""
        state = self.load_yaml()
        if state is not None:
            state['runtimes'].append(runtime)
            state['average'] = float(numpy.average(state['runtimes']))
            self.store_yaml(state)

    def is_running(self):
        """Report whether a sync is flagged running or started < 12 h ago."""
        state = self.load_yaml()
        runs = True
        if state is not None:
            is_running = state['running']
            half_a_day = 60 * 60 * 12
            got_time = state['last'] != 0
            is_in_range = time.time() - state['last'] <= half_a_day
            runs = is_running or (got_time and is_in_range)
        return runs

    def load_yaml(self):
        """Load the sync state file, supplying defaults when missing/empty."""
        sync_path = self.environment.get_path_sync_state()
        data = None
        if os.path.exists(sync_path):
            # BUG FIX: use a context manager; the old code never closed the
            # file handle.
            with open(sync_path, "r", encoding='utf8') as file:
                data = yaml.load(file, Loader=yaml.FullLoader)
        if data is None:
            data = {'last': time.time(), 'average': 0, 'runtimes': [],
                    'running': False}
        return data

    def store_yaml(self, data):
        """Write the sync state file, substituting defaults for None."""
        if data is None:
            data = {'last': time.time(), 'average': 0, 'runtimes': [],
                    'running': False}
        sync_path = self.environment.get_path_sync_state()
        # BUG FIX: use a context manager; the old code never closed the file.
        with open(sync_path, "w", encoding='utf8') as file:
            yaml.dump(data, file, Dumper=yaml.Dumper)
|
ipscan.py | # -*- coding: utf-8 -*-
'''
Author=yanyun
Email=yanyuneternal@163.com
'''
import platform
import os
import threading
import time
'''
call for example
print "start time %s"%time.ctime()
ipscan.ipscan('192.168.1',2,255)
print "start time %s"%time.ctime()
'''
def get_os():
    """Return the ping count-flag letter for the current platform.

    Windows ping takes ``-n <count>`` while Unix-like systems take
    ``-c <count>``; the caller builds ``-{flag} 1`` from this value.
    """
    # Fix: the original bound the result of platform.system() to a local
    # named `os`, shadowing the imported os module inside this function.
    system_name = platform.system()
    if system_name == "Windows":
        return "n"
    return "c"
def ping_ip(ip_str):
    """Ping *ip_str* once and print a message if the host replies.

    A reply is detected by the presence of "TTL" in the ping output,
    which appears in both Windows and Unix ping formats. Dead hosts
    produce no output at all.
    """
    cmd = ["ping", "-{op}".format(op=get_os()), "1", ip_str]
    # NOTE(review): os.popen builds a shell command line; fine for the
    # intended trusted "a.b.c.d" inputs, but do not pass untrusted strings.
    output = os.popen(" ".join(cmd)).readlines()
    # Fix: converted Python-2 `print` statements to print() calls so the
    # module is importable under Python 3; also dropped the redundant
    # list() copy of the readlines() result.
    for line in output:
        if not line:
            continue
        if "TTL" in str(line).upper():
            print("ip: %s is alive" % ip_str)
            return
def ipscan(ip_prefix, ip_b, ip_e):
    """Ping every host ``ip_prefix.i`` for i in [ip_b, ip_e) concurrently.

    Each ping runs in its own daemon thread, staggered by 0.1 s to avoid
    a burst of subprocesses; all threads are joined before returning so
    every result gets printed.
    """
    threads = []
    for i in range(ip_b, ip_e):
        ip = '%s.%s' % (ip_prefix, i)
        t = threading.Thread(target=ping_ip, args=(ip,))
        # setDaemon() is deprecated; assign the attribute instead.
        t.daemon = True
        threads.append(t)
    for t in threads:
        t.start()
        time.sleep(0.1)
    # Fix: the original had this join outside the loop, so only the
    # last-started thread was joined and the remaining daemon threads
    # could be killed before their pings finished.
    for t in threads:
        t.join()
|
test_run.py | import contextvars
import functools
import platform
import sys
import threading
import time
import types
import warnings
from contextlib import contextmanager, ExitStack
from math import inf
from textwrap import dedent
import attr
import outcome
import sniffio
import pytest
from .tutil import slow, check_sequence_matches, gc_collect_harder
from ... import _core
from ..._threads import to_thread_run_sync
from ..._timeouts import sleep, fail_after
from ...testing import (
wait_all_tasks_blocked,
Sequencer,
assert_checkpoints,
)
# slightly different from _timeouts.sleep_forever because it returns the value
# its rescheduled with, which is really only useful for tests of
# rescheduling...
async def sleep_forever():
    """Sleep until rescheduled by hand; return the value rescheduled with."""
    return await _core.wait_task_rescheduled(lambda _: _core.Abort.SUCCEEDED)
# Some of our tests need to leak coroutines, and thus trigger the
# "RuntimeWarning: coroutine '...' was never awaited" message. This context
# manager should be used anywhere this happens to hide those messages, because
# when expected they're clutter.
@contextmanager
def ignore_coroutine_never_awaited_warnings():
    """Suppress "coroutine '...' was never awaited" RuntimeWarnings.

    Some tests deliberately leak coroutines; this hides the resulting
    warning clutter and forces the leaked coroutines to be finalized
    (via gc_collect_harder) while the warning filter is still active.
    """
    with warnings.catch_warnings():
        warnings.filterwarnings(
            "ignore", message="coroutine '.*' was never awaited"
        )
        try:
            yield
        finally:
            # Make sure to trigger any coroutine __del__ methods now, before
            # we leave the context manager.
            gc_collect_harder()
def test_basic():
    """_core.run returns the coroutine's result and validates its arguments."""
    async def identity(value):
        return value

    assert _core.run(identity, 8) == 8

    # Missing an argument
    with pytest.raises(TypeError):
        _core.run(identity)

    # Not an async function
    with pytest.raises(TypeError):
        _core.run(lambda: None)

    async def identity_after_checkpoint(value):
        await _core.checkpoint()
        return value

    assert _core.run(identity_after_checkpoint, 1) == 1
def test_initial_task_error():
    """An exception in the initial task propagates out of _core.run."""
    async def failing_main(arg):
        raise ValueError(arg)

    with pytest.raises(ValueError) as excinfo:
        _core.run(failing_main, 17)
    assert excinfo.value.args == (17,)
def test_run_nesting():
    """Calling _core.run from inside a running loop raises RuntimeError."""
    async def nested_run():
        async def inner():  # pragma: no cover
            pass
        return _core.run(inner)

    with pytest.raises(RuntimeError) as excinfo:
        _core.run(nested_run)
    assert "from inside" in str(excinfo.value)
async def test_nursery_warn_use_async_with():
with pytest.raises(RuntimeError) as excinfo:
on = _core.open_nursery()
with on:
pass # pragma: no cover
excinfo.match(
r"use 'async with open_nursery\(...\)', not 'with open_nursery\(...\)'"
)
# avoid unawaited coro.
async with on:
pass
async def test_nursery_main_block_error_basic():
exc = ValueError("whoops")
with pytest.raises(ValueError) as excinfo:
async with _core.open_nursery():
raise exc
assert excinfo.value is exc
async def test_child_crash_basic():
exc = ValueError("uh oh")
async def erroring():
raise exc
try:
# nursery.__aexit__ propagates exception from child back to parent
async with _core.open_nursery() as nursery:
nursery.start_soon(erroring)
except ValueError as e:
assert e is exc
async def test_basic_interleave():
async def looper(whoami, record):
for i in range(3):
record.append((whoami, i))
await _core.checkpoint()
record = []
async with _core.open_nursery() as nursery:
nursery.start_soon(looper, "a", record)
nursery.start_soon(looper, "b", record)
check_sequence_matches(
record,
[{("a", 0), ("b", 0)}, {("a", 1), ("b", 1)}, {("a", 2), ("b", 2)}]
)
def test_task_crash_propagation():
looper_record = []
async def looper():
try:
while True:
await _core.checkpoint()
except _core.Cancelled:
print("looper cancelled")
looper_record.append("cancelled")
async def crasher():
raise ValueError("argh")
async def main():
async with _core.open_nursery() as nursery:
nursery.start_soon(looper)
nursery.start_soon(crasher)
with pytest.raises(ValueError) as excinfo:
_core.run(main)
assert looper_record == ["cancelled"]
assert excinfo.value.args == ("argh",)
def test_main_and_task_both_crash():
# If main crashes and there's also a task crash, then we get both in a
# MultiError
async def crasher():
raise ValueError
async def main():
async with _core.open_nursery() as nursery:
nursery.start_soon(crasher)
raise KeyError
with pytest.raises(_core.MultiError) as excinfo:
_core.run(main)
print(excinfo.value)
assert {type(exc)
for exc in excinfo.value.exceptions} == {ValueError, KeyError}
def test_two_child_crashes():
async def crasher(etype):
raise etype
async def main():
async with _core.open_nursery() as nursery:
nursery.start_soon(crasher, KeyError)
nursery.start_soon(crasher, ValueError)
with pytest.raises(_core.MultiError) as excinfo:
_core.run(main)
assert {type(exc)
for exc in excinfo.value.exceptions} == {ValueError, KeyError}
async def test_child_crash_wakes_parent():
async def crasher():
raise ValueError
with pytest.raises(ValueError):
async with _core.open_nursery() as nursery:
nursery.start_soon(crasher)
await sleep_forever()
async def test_reschedule():
t1 = None
t2 = None
async def child1():
nonlocal t1, t2
t1 = _core.current_task()
print("child1 start")
x = await sleep_forever()
print("child1 woke")
assert x == 0
print("child1 rescheduling t2")
_core.reschedule(t2, outcome.Error(ValueError()))
print("child1 exit")
async def child2():
nonlocal t1, t2
print("child2 start")
t2 = _core.current_task()
_core.reschedule(t1, outcome.Value(0))
print("child2 sleep")
with pytest.raises(ValueError):
await sleep_forever()
print("child2 successful exit")
async with _core.open_nursery() as nursery:
nursery.start_soon(child1)
# let t1 run and fall asleep
await _core.checkpoint()
nursery.start_soon(child2)
async def test_current_time():
t1 = _core.current_time()
# Windows clock is pretty low-resolution -- appveyor tests fail unless we
# sleep for a bit here.
time.sleep(time.get_clock_info("perf_counter").resolution)
t2 = _core.current_time()
assert t1 < t2
async def test_current_time_with_mock_clock(mock_clock):
start = mock_clock.current_time()
assert mock_clock.current_time() == _core.current_time()
assert mock_clock.current_time() == _core.current_time()
mock_clock.jump(3.14)
assert start + 3.14 == mock_clock.current_time() == _core.current_time()
async def test_current_clock(mock_clock):
assert mock_clock is _core.current_clock()
async def test_current_task():
parent_task = _core.current_task()
async def child():
assert _core.current_task().parent_nursery.parent_task is parent_task
async with _core.open_nursery() as nursery:
nursery.start_soon(child)
async def test_root_task():
root = _core.current_root_task()
assert root.parent_nursery is None
def test_out_of_context():
with pytest.raises(RuntimeError):
_core.current_task()
with pytest.raises(RuntimeError):
_core.current_time()
async def test_current_statistics(mock_clock):
# Make sure all the early startup stuff has settled down
await wait_all_tasks_blocked()
# A child that sticks around to make some interesting stats:
async def child():
try:
await sleep_forever()
except _core.Cancelled:
pass
stats = _core.current_statistics()
print(stats)
# 2 system tasks + us
assert stats.tasks_living == 3
assert stats.run_sync_soon_queue_size == 0
async with _core.open_nursery() as nursery:
nursery.start_soon(child)
await wait_all_tasks_blocked()
token = _core.current_trio_token()
token.run_sync_soon(lambda: None)
token.run_sync_soon(lambda: None, idempotent=True)
stats = _core.current_statistics()
print(stats)
# 2 system tasks + us + child
assert stats.tasks_living == 4
# the exact value here might shift if we change how we do accounting
# (currently it only counts tasks that we already know will be
# runnable on the next pass), but still useful to at least test the
# difference between now and after we wake up the child:
assert stats.tasks_runnable == 0
assert stats.run_sync_soon_queue_size == 2
nursery.cancel_scope.cancel()
stats = _core.current_statistics()
print(stats)
assert stats.tasks_runnable == 1
# Give the child a chance to die and the run_sync_soon a chance to clear
await _core.checkpoint()
await _core.checkpoint()
with _core.CancelScope(deadline=_core.current_time() + 5):
stats = _core.current_statistics()
print(stats)
assert stats.seconds_to_next_deadline == 5
stats = _core.current_statistics()
print(stats)
assert stats.seconds_to_next_deadline == inf
@attr.s(eq=False, hash=False)
class TaskRecorder:
    """Trio instrument that logs every hook call it receives.

    Each event is appended to ``record`` as a tuple whose first element
    names the hook ("before_run", "schedule", "before", "after",
    "after_run"); task-level events also carry the task object.
    """
    # Chronological list of (event_name, [task]) tuples.
    record = attr.ib(factory=list)
    def before_run(self):
        self.record.append(("before_run",))
    def task_scheduled(self, task):
        self.record.append(("schedule", task))
    def before_task_step(self, task):
        # These two hooks run in the context of the stepped task itself.
        assert task is _core.current_task()
        self.record.append(("before", task))
    def after_task_step(self, task):
        assert task is _core.current_task()
        self.record.append(("after", task))
    def after_run(self):
        self.record.append(("after_run",))
    def filter_tasks(self, tasks):
        """Yield run-level events plus events for tasks in *tasks* only."""
        for item in self.record:
            if item[0] in ("schedule", "before", "after") and item[1] in tasks:
                yield item
            if item[0] in ("before_run", "after_run"):
                yield item
def test_instruments(recwarn):
r1 = TaskRecorder()
r2 = TaskRecorder()
r3 = TaskRecorder()
task = None
# We use a child task for this, because the main task does some extra
# bookkeeping stuff that can leak into the instrument results, and we
# don't want to deal with it.
async def task_fn():
nonlocal task
task = _core.current_task()
for _ in range(4):
await _core.checkpoint()
# replace r2 with r3, to test that we can manipulate them as we go
_core.remove_instrument(r2)
with pytest.raises(KeyError):
_core.remove_instrument(r2)
# add is idempotent
_core.add_instrument(r3)
_core.add_instrument(r3)
for _ in range(1):
await _core.checkpoint()
async def main():
async with _core.open_nursery() as nursery:
nursery.start_soon(task_fn)
_core.run(main, instruments=[r1, r2])
# It sleeps 5 times, so it runs 6 times. Note that checkpoint()
# reschedules the task immediately upon yielding, before the
# after_task_step event fires.
expected = (
[("before_run",), ("schedule", task)] +
[("before", task), ("schedule", task), ("after", task)] * 5 +
[("before", task), ("after", task), ("after_run",)]
)
assert len(r1.record) > len(r2.record) > len(r3.record)
assert r1.record == r2.record + r3.record
assert list(r1.filter_tasks([task])) == expected
def test_instruments_interleave():
tasks = {}
async def two_step1():
tasks["t1"] = _core.current_task()
await _core.checkpoint()
async def two_step2():
tasks["t2"] = _core.current_task()
await _core.checkpoint()
async def main():
async with _core.open_nursery() as nursery:
nursery.start_soon(two_step1)
nursery.start_soon(two_step2)
r = TaskRecorder()
_core.run(main, instruments=[r])
expected = [
("before_run",),
("schedule", tasks["t1"]),
("schedule", tasks["t2"]),
{
("before", tasks["t1"]),
("schedule", tasks["t1"]),
("after", tasks["t1"]),
("before", tasks["t2"]),
("schedule", tasks["t2"]),
("after", tasks["t2"])
},
{
("before", tasks["t1"]),
("after", tasks["t1"]),
("before", tasks["t2"]),
("after", tasks["t2"])
},
("after_run",),
] # yapf: disable
print(list(r.filter_tasks(tasks.values())))
check_sequence_matches(list(r.filter_tasks(tasks.values())), expected)
def test_null_instrument():
    """An instrument that defines no hook methods is simply skipped."""
    class EmptyInstrument:
        pass

    async def main():
        await _core.checkpoint()

    _core.run(main, instruments=[EmptyInstrument()])
def test_instrument_before_after_run():
    """The before_run/after_run hooks each fire exactly once, in order."""
    events = []

    class RunBoundaryRecorder:
        def before_run(self):
            events.append("before_run")

        def after_run(self):
            events.append("after_run")

    async def main():
        pass

    _core.run(main, instruments=[RunBoundaryRecorder()])
    assert events == ["before_run", "after_run"]
def test_instrument_task_spawn_exit():
record = []
class SpawnExitRecorder:
def task_spawned(self, task):
record.append(("spawned", task))
def task_exited(self, task):
record.append(("exited", task))
async def main():
return _core.current_task()
main_task = _core.run(main, instruments=[SpawnExitRecorder()])
assert ("spawned", main_task) in record
assert ("exited", main_task) in record
# This test also tests having a crash before the initial task is even spawned,
# which is very difficult to handle.
def test_instruments_crash(caplog):
record = []
class BrokenInstrument:
def task_scheduled(self, task):
record.append("scheduled")
raise ValueError("oops")
def close(self):
# Shouldn't be called -- tests that the instrument disabling logic
# works right.
record.append("closed") # pragma: no cover
async def main():
record.append("main ran")
return _core.current_task()
r = TaskRecorder()
main_task = _core.run(main, instruments=[r, BrokenInstrument()])
assert record == ["scheduled", "main ran"]
# the TaskRecorder kept going throughout, even though the BrokenInstrument
# was disabled
assert ("after", main_task) in r.record
assert ("after_run",) in r.record
# And we got a log message
exc_type, exc_value, exc_traceback = caplog.records[0].exc_info
assert exc_type is ValueError
assert str(exc_value) == "oops"
assert "Instrument has been disabled" in caplog.records[0].message
async def test_cancel_scope_repr(mock_clock):
scope = _core.CancelScope()
assert "unbound" in repr(scope)
with scope:
assert "active" in repr(scope)
scope.deadline = _core.current_time() - 1
assert "deadline is 1.00 seconds ago" in repr(scope)
scope.deadline = _core.current_time() + 10
assert "deadline is 10.00 seconds from now" in repr(scope)
# when not in async context, can't get the current time
assert "deadline" not in await to_thread_run_sync(repr, scope)
scope.cancel()
assert "cancelled" in repr(scope)
assert "exited" in repr(scope)
def test_cancel_points():
async def main1():
with _core.CancelScope() as scope:
await _core.checkpoint_if_cancelled()
scope.cancel()
with pytest.raises(_core.Cancelled):
await _core.checkpoint_if_cancelled()
_core.run(main1)
async def main2():
with _core.CancelScope() as scope:
await _core.checkpoint()
scope.cancel()
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
_core.run(main2)
async def main3():
with _core.CancelScope() as scope:
scope.cancel()
with pytest.raises(_core.Cancelled):
await sleep_forever()
_core.run(main3)
async def main4():
with _core.CancelScope() as scope:
scope.cancel()
await _core.cancel_shielded_checkpoint()
await _core.cancel_shielded_checkpoint()
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
_core.run(main4)
async def test_cancel_edge_cases():
with _core.CancelScope() as scope:
# Two cancels in a row -- idempotent
scope.cancel()
scope.cancel()
await _core.checkpoint()
assert scope.cancel_called
assert scope.cancelled_caught
with _core.CancelScope() as scope:
# Check level-triggering
scope.cancel()
with pytest.raises(_core.Cancelled):
await sleep_forever()
with pytest.raises(_core.Cancelled):
await sleep_forever()
async def test_cancel_scope_multierror_filtering():
async def crasher():
raise KeyError
try:
with _core.CancelScope() as outer:
try:
async with _core.open_nursery() as nursery:
# Two children that get cancelled by the nursery scope
nursery.start_soon(sleep_forever) # t1
nursery.start_soon(sleep_forever) # t2
nursery.cancel_scope.cancel()
with _core.CancelScope(shield=True):
await wait_all_tasks_blocked()
# One child that gets cancelled by the outer scope
nursery.start_soon(sleep_forever) # t3
outer.cancel()
# And one that raises a different error
nursery.start_soon(crasher) # t4
# and then our __aexit__ also receives an outer Cancelled
except _core.MultiError as multi_exc:
# Since the outer scope became cancelled before the
# nursery block exited, all cancellations inside the
# nursery block continue propagating to reach the
# outer scope.
assert len(multi_exc.exceptions) == 5
summary = {}
for exc in multi_exc.exceptions:
summary.setdefault(type(exc), 0)
summary[type(exc)] += 1
assert summary == {_core.Cancelled: 4, KeyError: 1}
raise
except AssertionError: # pragma: no cover
raise
except BaseException as exc:
# This is ouside the outer scope, so all the Cancelled
# exceptions should have been absorbed, leaving just a regular
# KeyError from crasher()
assert type(exc) is KeyError
else: # pragma: no cover
assert False
async def test_precancelled_task():
# a task that gets spawned into an already-cancelled nursery should begin
# execution (https://github.com/python-trio/trio/issues/41), but get a
# cancelled error at its first blocking call.
record = []
async def blocker():
record.append("started")
await sleep_forever()
async with _core.open_nursery() as nursery:
nursery.cancel_scope.cancel()
nursery.start_soon(blocker)
assert record == ["started"]
async def test_cancel_shielding():
with _core.CancelScope() as outer:
with _core.CancelScope() as inner:
await _core.checkpoint()
outer.cancel()
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
assert inner.shield is False
with pytest.raises(TypeError):
inner.shield = "hello"
assert inner.shield is False
inner.shield = True
assert inner.shield is True
# shield protects us from 'outer'
await _core.checkpoint()
with _core.CancelScope() as innerest:
innerest.cancel()
# but it doesn't protect us from scope inside inner
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
await _core.checkpoint()
inner.shield = False
# can disable shield again
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
# re-enable shield
inner.shield = True
await _core.checkpoint()
# shield doesn't protect us from inner itself
inner.cancel()
# This should now raise, but be absorbed by the inner scope
await _core.checkpoint()
assert inner.cancelled_caught
# make sure that cancellation propagates immediately to all children
async def test_cancel_inheritance():
record = set()
async def leaf(ident):
try:
await sleep_forever()
except _core.Cancelled:
record.add(ident)
async def worker(ident):
async with _core.open_nursery() as nursery:
nursery.start_soon(leaf, ident + "-l1")
nursery.start_soon(leaf, ident + "-l2")
async with _core.open_nursery() as nursery:
nursery.start_soon(worker, "w1")
nursery.start_soon(worker, "w2")
nursery.cancel_scope.cancel()
assert record == {"w1-l1", "w1-l2", "w2-l1", "w2-l2"}
async def test_cancel_shield_abort():
with _core.CancelScope() as outer:
async with _core.open_nursery() as nursery:
outer.cancel()
nursery.cancel_scope.shield = True
# The outer scope is cancelled, but this task is protected by the
# shield, so it manages to get to sleep
record = []
async def sleeper():
record.append("sleeping")
try:
await sleep_forever()
except _core.Cancelled:
record.append("cancelled")
nursery.start_soon(sleeper)
await wait_all_tasks_blocked()
assert record == ["sleeping"]
# now when we unshield, it should abort the sleep.
nursery.cancel_scope.shield = False
# wait for the task to finish before entering the nursery
# __aexit__, because __aexit__ could make it spuriously look like
# this worked by cancelling the nursery scope. (When originally
# written, without these last few lines, the test spuriously
# passed, even though shield assignment was buggy.)
with _core.CancelScope(shield=True):
await wait_all_tasks_blocked()
assert record == ["sleeping", "cancelled"]
async def test_basic_timeout(mock_clock):
start = _core.current_time()
with _core.CancelScope() as scope:
assert scope.deadline == inf
scope.deadline = start + 1
assert scope.deadline == start + 1
assert not scope.cancel_called
mock_clock.jump(2)
await _core.checkpoint()
await _core.checkpoint()
await _core.checkpoint()
assert not scope.cancel_called
start = _core.current_time()
with _core.CancelScope(deadline=start + 1) as scope:
mock_clock.jump(2)
await sleep_forever()
# But then the scope swallowed the exception... but we can still see it
# here:
assert scope.cancel_called
assert scope.cancelled_caught
# changing deadline
start = _core.current_time()
with _core.CancelScope() as scope:
await _core.checkpoint()
scope.deadline = start + 10
await _core.checkpoint()
mock_clock.jump(5)
await _core.checkpoint()
scope.deadline = start + 1
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
async def test_cancel_scope_nesting():
# Nested scopes: if two triggering at once, the outer one wins
with _core.CancelScope() as scope1:
with _core.CancelScope() as scope2:
with _core.CancelScope() as scope3:
scope3.cancel()
scope2.cancel()
await sleep_forever()
assert scope3.cancel_called
assert not scope3.cancelled_caught
assert scope2.cancel_called
assert scope2.cancelled_caught
assert not scope1.cancel_called
assert not scope1.cancelled_caught
# shielding
with _core.CancelScope() as scope1:
with _core.CancelScope() as scope2:
scope1.cancel()
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
scope2.shield = True
await _core.checkpoint()
scope2.cancel()
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
# if a scope is pending, but then gets popped off the stack, then it
# isn't delivered
with _core.CancelScope() as scope:
scope.cancel()
await _core.cancel_shielded_checkpoint()
await _core.checkpoint()
assert not scope.cancelled_caught
# Regression test for https://github.com/python-trio/trio/issues/1175
async def test_unshield_while_cancel_propagating():
with _core.CancelScope() as outer:
with _core.CancelScope() as inner:
outer.cancel()
try:
await _core.checkpoint()
finally:
inner.shield = True
assert outer.cancelled_caught and not inner.cancelled_caught
async def test_cancel_unbound():
async def sleep_until_cancelled(scope):
with scope, fail_after(1):
await sleep_forever()
# Cancel before entry
scope = _core.CancelScope()
scope.cancel()
async with _core.open_nursery() as nursery:
nursery.start_soon(sleep_until_cancelled, scope)
# Cancel after entry
scope = _core.CancelScope()
async with _core.open_nursery() as nursery:
nursery.start_soon(sleep_until_cancelled, scope)
await wait_all_tasks_blocked()
scope.cancel()
# Shield before entry
scope = _core.CancelScope()
scope.shield = True
with _core.CancelScope() as outer, scope:
outer.cancel()
await _core.checkpoint()
scope.shield = False
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
# Can't reuse
with _core.CancelScope() as scope:
await _core.checkpoint()
scope.cancel()
await _core.checkpoint()
assert scope.cancel_called
assert not scope.cancelled_caught
with pytest.raises(RuntimeError) as exc_info:
with scope:
pass # pragma: no cover
assert "single 'with' block" in str(exc_info.value)
# Can't reenter
with _core.CancelScope() as scope:
with pytest.raises(RuntimeError) as exc_info:
with scope:
pass # pragma: no cover
assert "single 'with' block" in str(exc_info.value)
# Can't enter from multiple tasks simultaneously
scope = _core.CancelScope()
async def enter_scope():
with scope:
await sleep_forever()
async with _core.open_nursery() as nursery:
nursery.start_soon(enter_scope, name="this one")
await wait_all_tasks_blocked()
with pytest.raises(RuntimeError) as exc_info:
with scope:
pass # pragma: no cover
assert "single 'with' block" in str(exc_info.value)
nursery.cancel_scope.cancel()
# If not yet entered, cancel_called is true when the deadline has passed
# even if cancel() hasn't been called yet
scope = _core.CancelScope(deadline=_core.current_time() + 1)
assert not scope.cancel_called
scope.deadline -= 1
assert scope.cancel_called
scope.deadline += 1
assert scope.cancel_called # never become un-cancelled
async def test_cancel_scope_misnesting():
outer = _core.CancelScope()
inner = _core.CancelScope()
with ExitStack() as stack:
stack.enter_context(outer)
with inner:
with pytest.raises(RuntimeError, match="still within its child"):
stack.close()
# No further error is raised when exiting the inner context
# If there are other tasks inside the abandoned part of the cancel tree,
# they get cancelled when the misnesting is detected
async def task1():
with pytest.raises(_core.Cancelled):
await sleep_forever()
# Even if inside another cancel scope
async def task2():
with _core.CancelScope():
with pytest.raises(_core.Cancelled):
await sleep_forever()
with ExitStack() as stack:
stack.enter_context(_core.CancelScope())
async with _core.open_nursery() as nursery:
nursery.start_soon(task1)
nursery.start_soon(task2)
await wait_all_tasks_blocked()
with pytest.raises(RuntimeError, match="still within its child"):
stack.close()
# Variant that makes the child tasks direct children of the scope
# that noticed the misnesting:
nursery_mgr = _core.open_nursery()
nursery = await nursery_mgr.__aenter__()
try:
nursery.start_soon(task1)
nursery.start_soon(task2)
nursery.start_soon(sleep_forever)
await wait_all_tasks_blocked()
nursery.cancel_scope.__exit__(None, None, None)
finally:
with pytest.raises(RuntimeError) as exc_info:
await nursery_mgr.__aexit__(*sys.exc_info())
assert "which had already been exited" in str(exc_info.value)
assert type(exc_info.value.__context__) is _core.MultiError
assert len(exc_info.value.__context__.exceptions) == 3
cancelled_in_context = False
for exc in exc_info.value.__context__.exceptions:
assert isinstance(exc, RuntimeError)
assert "closed before the task exited" in str(exc)
cancelled_in_context |= isinstance(
exc.__context__, _core.Cancelled
)
assert cancelled_in_context # for the sleep_forever
# Trying to exit a cancel scope from an unrelated task raises an error
# without affecting any state
async def task3(task_status):
with _core.CancelScope() as scope:
task_status.started(scope)
await sleep_forever()
async with _core.open_nursery() as nursery:
scope = await nursery.start(task3)
with pytest.raises(RuntimeError, match="from unrelated"):
scope.__exit__(None, None, None)
scope.cancel()
@slow
async def test_timekeeping():
# probably a good idea to use a real clock for *one* test anyway...
TARGET = 1.0
# give it a few tries in case of random CI server flakiness
for _ in range(4):
real_start = time.perf_counter()
with _core.CancelScope() as scope:
scope.deadline = _core.current_time() + TARGET
await sleep_forever()
real_duration = time.perf_counter() - real_start
accuracy = real_duration / TARGET
print(accuracy)
# Actual time elapsed should always be >= target time
# (== is possible depending on system behavior for time.perf_counter resolution
if 1.0 <= accuracy < 2: # pragma: no branch
break
else: # pragma: no cover
assert False
async def test_failed_abort():
stubborn_task = [None]
stubborn_scope = [None]
record = []
async def stubborn_sleeper():
stubborn_task[0] = _core.current_task()
with _core.CancelScope() as scope:
stubborn_scope[0] = scope
record.append("sleep")
x = await _core.wait_task_rescheduled(lambda _: _core.Abort.FAILED)
assert x == 1
record.append("woke")
try:
await _core.checkpoint_if_cancelled()
except _core.Cancelled:
record.append("cancelled")
async with _core.open_nursery() as nursery:
nursery.start_soon(stubborn_sleeper)
await wait_all_tasks_blocked()
assert record == ["sleep"]
stubborn_scope[0].cancel()
await wait_all_tasks_blocked()
# cancel didn't wake it up
assert record == ["sleep"]
# wake it up again by hand
_core.reschedule(stubborn_task[0], outcome.Value(1))
assert record == ["sleep", "woke", "cancelled"]
def test_broken_abort():
async def main():
# These yields are here to work around an annoying warning -- we're
# going to crash the main loop, and if we (by chance) do this before
# the run_sync_soon task runs for the first time, then Python gives us
# a spurious warning about it not being awaited. (I mean, the warning
# is correct, but here we're testing our ability to deliver a
# semi-meaningful error after things have gone totally pear-shaped, so
# it's not relevant.) By letting the run_sync_soon_task run first, we
# avoid the warning.
await _core.checkpoint()
await _core.checkpoint()
with _core.CancelScope() as scope:
scope.cancel()
# None is not a legal return value here
await _core.wait_task_rescheduled(lambda _: None)
with pytest.raises(_core.TrioInternalError):
_core.run(main)
# Because this crashes, various __del__ methods print complaints on
# stderr. Make sure that they get run now, so the output is attached to
# this test.
gc_collect_harder()
def test_error_in_run_loop():
# Blow stuff up real good to check we at least get a TrioInternalError
async def main():
task = _core.current_task()
task._schedule_points = "hello!"
await _core.checkpoint()
with ignore_coroutine_never_awaited_warnings():
with pytest.raises(_core.TrioInternalError):
_core.run(main)
async def test_spawn_system_task():
record = []
async def system_task(x):
record.append(("x", x))
record.append(("ki", _core.currently_ki_protected()))
await _core.checkpoint()
_core.spawn_system_task(system_task, 1)
await wait_all_tasks_blocked()
assert record == [("x", 1), ("ki", True)]
# intentionally make a system task crash
def test_system_task_crash():
async def crasher():
raise KeyError
async def main():
_core.spawn_system_task(crasher)
await sleep_forever()
with pytest.raises(_core.TrioInternalError):
_core.run(main)
def test_system_task_crash_MultiError():
async def crasher1():
raise KeyError
async def crasher2():
raise ValueError
async def system_task():
async with _core.open_nursery() as nursery:
nursery.start_soon(crasher1)
nursery.start_soon(crasher2)
async def main():
_core.spawn_system_task(system_task)
await sleep_forever()
with pytest.raises(_core.TrioInternalError) as excinfo:
_core.run(main)
me = excinfo.value.__cause__
assert isinstance(me, _core.MultiError)
assert len(me.exceptions) == 2
for exc in me.exceptions:
assert isinstance(exc, (KeyError, ValueError))
def test_system_task_crash_plus_Cancelled():
# Set up a situation where a system task crashes with a
# MultiError([Cancelled, ValueError])
async def crasher():
try:
await sleep_forever()
except _core.Cancelled:
raise ValueError
async def cancelme():
await sleep_forever()
async def system_task():
async with _core.open_nursery() as nursery:
nursery.start_soon(crasher)
nursery.start_soon(cancelme)
async def main():
_core.spawn_system_task(system_task)
# then we exit, triggering a cancellation
with pytest.raises(_core.TrioInternalError) as excinfo:
_core.run(main)
assert type(excinfo.value.__cause__) is ValueError
def test_system_task_crash_KeyboardInterrupt():
async def ki():
raise KeyboardInterrupt
async def main():
_core.spawn_system_task(ki)
await sleep_forever()
with pytest.raises(_core.TrioInternalError) as excinfo:
_core.run(main)
assert isinstance(excinfo.value.__cause__, KeyboardInterrupt)
# This used to fail because checkpoint was a yield followed by an immediate
# reschedule. So we had:
# 1) this task yields
# 2) this task is rescheduled
# ...
# 3) next iteration of event loop starts, runs timeouts
# 4) this task has timed out
# 5) ...but it's on the run queue, so the timeout is queued to be delivered
# the next time that it's blocked.
async def test_yield_briefly_checks_for_timeout(mock_clock):
with _core.CancelScope(deadline=_core.current_time() + 5):
await _core.checkpoint()
with pytest.raises(_core.Cancelled):
mock_clock.jump(10)
await _core.checkpoint()
# This tests that sys.exc_info is properly saved/restored as we swap between
# tasks. It turns out that the interpreter automagically handles this for us
# so there's no special code in Trio required to pass this test, but it's
# still nice to know that it works :-).
#
# Update: it turns out I was right to be nervous! see the next test...
async def test_exc_info():
    """sys.exc_info() must be saved/restored correctly when the scheduler
    switches between two tasks that are each inside an `except` block.
    Ordering between the children is forced with a Sequencer."""
    record = []
    seq = Sequencer()

    async def child1():
        with pytest.raises(ValueError) as excinfo:
            try:
                async with seq(0):
                    pass  # we don't yield until seq(2) below
                record.append("child1 raise")
                raise ValueError("child1")
            except ValueError:
                record.append("child1 sleep")
                async with seq(2):
                    pass
                assert "child2 wake" in record
                record.append("child1 re-raise")
                # Bare re-raise relies on exc_info surviving the task switch.
                raise
        assert excinfo.value.__context__ is None
        record.append("child1 success")

    async def child2():
        with pytest.raises(KeyError) as excinfo:
            async with seq(1):
                pass  # we don't yield until seq(3) below
            assert "child1 sleep" in record
            record.append("child2 wake")
            # child1's in-flight ValueError must not leak into this task.
            assert sys.exc_info() == (None, None, None)
            try:
                raise KeyError("child2")
            except KeyError:
                record.append("child2 sleep again")
                async with seq(3):
                    pass
                assert "child1 re-raise" in record
                record.append("child2 re-raise")
                raise
        assert excinfo.value.__context__ is None
        record.append("child2 success")

    async with _core.open_nursery() as nursery:
        nursery.start_soon(child1)
        nursery.start_soon(child2)
    assert record == [
        "child1 raise", "child1 sleep", "child2 wake", "child2 sleep again",
        "child1 re-raise", "child1 success", "child2 re-raise",
        "child2 success"
    ]
# At least as of CPython 3.6, using .throw() to raise an exception inside a
# coroutine/generator causes the original exc_info state to be lost, so things
# like re-raising and exception chaining are broken.
#
# https://bugs.python.org/issue29587
async def test_exc_info_after_yield_error():
    """Rescheduling a task with an Error outcome must not clobber its saved
    exception state (see bpo-29587 referenced above: .throw() used to lose
    exc_info, breaking the bare `raise` at the end of child())."""
    child_task = None

    async def child():
        nonlocal child_task
        child_task = _core.current_task()
        try:
            raise KeyError
        except Exception:
            try:
                await sleep_forever()
            except Exception:
                pass
            # Bare re-raise must still find the original KeyError.
            raise

    with pytest.raises(KeyError):
        async with _core.open_nursery() as nursery:
            nursery.start_soon(child)
            await wait_all_tasks_blocked()
            _core.reschedule(child_task, outcome.Error(ValueError()))
# Similar to previous test -- if the ValueError() gets sent in via 'throw',
# then Python's normal implicit chaining stuff is broken.
async def test_exception_chaining_after_yield_error():
    """An exception injected at a yield point must chain (__context__) onto
    the exception the task was already handling."""
    child_task = None

    async def child():
        nonlocal child_task
        child_task = _core.current_task()
        try:
            raise KeyError
        except Exception:
            await sleep_forever()

    with pytest.raises(ValueError) as excinfo:
        async with _core.open_nursery() as nursery:
            nursery.start_soon(child)
            await wait_all_tasks_blocked()
            _core.reschedule(child_task, outcome.Error(ValueError()))
    # The injected ValueError implicitly chains onto the in-flight KeyError.
    assert isinstance(excinfo.value.__context__, KeyError)
async def test_nursery_exception_chaining_doesnt_make_context_loops():
    """Combining a child exception and a body exception into a MultiError
    must not leave either of them set as the MultiError's __context__."""
    async def crasher():
        raise KeyError

    with pytest.raises(_core.MultiError) as excinfo:
        async with _core.open_nursery() as nursery:
            nursery.start_soon(crasher)
            raise ValueError
    # the MultiError should not have the KeyError or ValueError as context
    assert excinfo.value.__context__ is None
def test_TrioToken_identity():
    """Within one call to _core.run() the TrioToken is a stable singleton;
    separate runs get tokens that differ by identity, equality, and hash."""
    async def fetch_token():
        tok = _core.current_trio_token()
        # Asking again inside the same run returns the very same object.
        assert tok is _core.current_trio_token()
        return tok

    first = _core.run(fetch_token)
    second = _core.run(fetch_token)
    # Tokens from distinct runs are fully distinct.
    assert first is not second
    assert first != second
    assert hash(first) != hash(second)
async def test_TrioToken_run_sync_soon_basic():
    """run_sync_soon defers the callback: it does not run synchronously at
    submission time, but does run by the time all tasks have blocked."""
    record = []

    def cb(x):
        record.append(("cb", x))

    token = _core.current_trio_token()
    token.run_sync_soon(cb, 1)
    # Not called yet -- submission only queues it.
    assert not record
    await wait_all_tasks_blocked()
    assert record == [("cb", 1)]
def test_TrioToken_run_sync_soon_too_late():
    """Using a token after its run() has finished raises RunFinishedError."""
    captured = None

    async def main():
        nonlocal captured
        captured = _core.current_trio_token()

    _core.run(main)
    assert captured is not None
    # The run is over, so the token can no longer deliver callbacks.
    with pytest.raises(_core.RunFinishedError):
        captured.run_sync_soon(lambda: None)  # pragma: no branch
async def test_TrioToken_run_sync_soon_idempotent():
    """With idempotent=True, duplicate (fn, args) submissions collapse into
    one pending call, and FIFO order is still preserved."""
    record = []

    def cb(x):
        record.append(x)

    token = _core.current_trio_token()
    token.run_sync_soon(cb, 1)
    token.run_sync_soon(cb, 1, idempotent=True)
    token.run_sync_soon(cb, 1, idempotent=True)
    token.run_sync_soon(cb, 1, idempotent=True)
    token.run_sync_soon(cb, 2, idempotent=True)
    token.run_sync_soon(cb, 2, idempotent=True)
    await wait_all_tasks_blocked()
    # non-idempotent (cb,1) + deduped idempotent (cb,1) + (cb,2) = 3 calls
    assert len(record) == 3
    assert sorted(record) == [1, 1, 2]
    # ordering test
    record = []
    for _ in range(3):
        for i in range(100):
            token.run_sync_soon(cb, i, idempotent=True)
    await wait_all_tasks_blocked()
    # We guarantee FIFO
    assert record == list(range(100))
def test_TrioToken_run_sync_soon_idempotent_requeue():
    """Re-queueing an idempotent callback after it has *started* must run it
    again -- dedup only applies to entries that are still pending."""
    # We guarantee that if a call has finished, queueing it again will call it
    # again. Due to the lack of synchronization, this effectively means that
    # we have to guarantee that once a call has *started*, queueing it again
    # will call it again. Also this is much easier to test :-)
    record = []

    def redo(token):
        record.append(None)
        try:
            # Requeue ourselves from inside the callback.
            token.run_sync_soon(redo, token, idempotent=True)
        except _core.RunFinishedError:
            pass

    async def main():
        token = _core.current_trio_token()
        token.run_sync_soon(redo, token, idempotent=True)
        await _core.checkpoint()
        await _core.checkpoint()
        await _core.checkpoint()

    _core.run(main)
    # At least the initial call plus one requeued call must have happened.
    assert len(record) >= 2
def test_TrioToken_run_sync_soon_after_main_crash():
    """A callback queued just before the main task's exception propagates
    still runs during the run's final cleanup."""
    record = []

    async def main():
        token = _core.current_trio_token()
        # After main exits but before finally cleaning up, callback processed
        # normally
        token.run_sync_soon(lambda: record.append("sync-cb"))
        raise ValueError

    with pytest.raises(ValueError):
        _core.run(main)
    assert record == ["sync-cb"]
def test_TrioToken_run_sync_soon_crashes():
    """A crashing run_sync_soon callback aborts the run (TrioInternalError
    with the callback's exception as cause), but later callbacks still run
    and in-flight tasks are cancelled cleanly."""
    record = set()

    async def main():
        token = _core.current_trio_token()
        # KeyError from an empty dict lookup inside the callback.
        token.run_sync_soon(lambda: dict()["nope"])
        # check that a crashing run_sync_soon callback doesn't stop further
        # calls to run_sync_soon
        token.run_sync_soon(lambda: record.add("2nd run_sync_soon ran"))
        try:
            await sleep_forever()
        except _core.Cancelled:
            record.add("cancelled!")

    with pytest.raises(_core.TrioInternalError) as excinfo:
        _core.run(main)
    assert type(excinfo.value.__cause__) is KeyError
    assert record == {"2nd run_sync_soon ran", "cancelled!"}
async def test_TrioToken_run_sync_soon_FIFO():
    """Callbacks queued via run_sync_soon execute in strict FIFO order."""
    total = 100
    seen = []
    token = _core.current_trio_token()
    for value in range(total):
        # Bound method passed directly; appends `value` when it fires.
        token.run_sync_soon(seen.append, value)
    await wait_all_tasks_blocked()
    assert seen == list(range(total))
def test_TrioToken_run_sync_soon_starvation_resistance():
    """A callback that perpetually re-queues itself must not starve ordinary
    tasks out of running."""
    # Even if we push callbacks in from callbacks, so that the callback queue
    # never empties out, then we still can't starve out other tasks from
    # running.
    token = None
    record = []

    def naughty_cb(i):
        nonlocal token
        try:
            token.run_sync_soon(naughty_cb, i + 1)
        except _core.RunFinishedError:
            record.append(("run finished", i))

    async def main():
        nonlocal token
        token = _core.current_trio_token()
        token.run_sync_soon(naughty_cb, 0)
        record.append("starting")
        for _ in range(20):
            await _core.checkpoint()

    _core.run(main)
    # main's 20 checkpoints completed, and the callback kept being serviced
    # (at least 19 generations) until the run finished underneath it.
    assert len(record) == 2
    assert record[0] == "starting"
    assert record[1][0] == "run finished"
    assert record[1][1] >= 19
def test_TrioToken_run_sync_soon_threaded_stress_test():
    """Hammer run_sync_soon from a foreign thread; the run loop must keep
    making progress (counter advances 10 times) without deadlock or crash."""
    cb_counter = 0

    def cb():
        nonlocal cb_counter
        cb_counter += 1

    def stress_thread(token):
        # Spin submitting callbacks until the run finishes out from under us.
        try:
            while True:
                token.run_sync_soon(cb)
                time.sleep(0)
        except _core.RunFinishedError:
            pass

    async def main():
        token = _core.current_trio_token()
        thread = threading.Thread(target=stress_thread, args=(token,))
        thread.start()
        for _ in range(10):
            start_value = cb_counter
            # Wait until at least one more callback has been processed.
            while cb_counter == start_value:
                await sleep(0.01)

    _core.run(main)
    print(cb_counter)
async def test_TrioToken_run_sync_soon_massive_queue():
    """Queue 1000 callbacks at once to stress the wakeup-fd overflow paths;
    all must run, in FIFO order."""
    # There are edge cases in the wakeup fd code when the wakeup fd overflows,
    # so let's try to make that happen. This is also just a good stress test
    # in general. (With the current-as-of-2017-02-14 code using a socketpair
    # with minimal buffer, Linux takes 6 wakeups to fill the buffer and macOS
    # takes 1 wakeup. So 1000 is overkill if anything. Windows OTOH takes
    # ~600,000 wakeups, but has the same code paths...)
    COUNT = 1000
    token = _core.current_trio_token()
    counter = [0]

    def cb(i):
        # This also tests FIFO ordering of callbacks
        assert counter[0] == i
        counter[0] += 1

    for i in range(COUNT):
        token.run_sync_soon(cb, i)
    await wait_all_tasks_blocked()
    assert counter[0] == COUNT
async def test_slow_abort_basic():
    """An abort callback may return Abort.FAILED and deliver the
    cancellation later by rescheduling the task itself."""
    with _core.CancelScope() as scope:
        scope.cancel()
        with pytest.raises(_core.Cancelled):
            task = _core.current_task()
            token = _core.current_trio_token()

            def slow_abort(raise_cancel):
                # Capture the Cancelled now, hand it back asynchronously.
                result = outcome.capture(raise_cancel)
                token.run_sync_soon(_core.reschedule, task, result)
                return _core.Abort.FAILED

            await _core.wait_task_rescheduled(slow_abort)
async def test_slow_abort_edge_cases():
    """A slow (Abort.FAILED) abort callback must not be invoked twice by
    repeated cancels, and shielding the task mid-abort does not undo the
    already-started cancellation delivery."""
    record = []

    async def slow_aborter():
        task = _core.current_task()
        token = _core.current_trio_token()

        def slow_abort(raise_cancel):
            record.append("abort-called")
            result = outcome.capture(raise_cancel)
            token.run_sync_soon(_core.reschedule, task, result)
            return _core.Abort.FAILED

        with pytest.raises(_core.Cancelled):
            record.append("sleeping")
            await _core.wait_task_rescheduled(slow_abort)
        record.append("cancelled")
        # blocking again, this time it's okay, because we're shielded
        await _core.checkpoint()
        record.append("done")

    with _core.CancelScope() as outer1:
        with _core.CancelScope() as outer2:
            async with _core.open_nursery() as nursery:
                # So we have a task blocked on an operation that can't be
                # aborted immediately
                nursery.start_soon(slow_aborter)
                await wait_all_tasks_blocked()
                assert record == ["sleeping"]
                # And then we cancel it, so the abort callback gets run
                outer1.cancel()
                assert record == ["sleeping", "abort-called"]
                # In fact that happens twice! (This used to cause the abort
                # callback to be run twice)
                outer2.cancel()
                assert record == ["sleeping", "abort-called"]
                # But then before the abort finishes, the task gets shielded!
                nursery.cancel_scope.shield = True
                # Now we wait for the task to finish...
    # The cancellation was delivered, even though it was shielded
    assert record == ["sleeping", "abort-called", "cancelled", "done"]
async def test_task_tree_introspection():
    """parent_nursery / child_nurseries / child_tasks links are correct while
    tasks run, and the upward links survive after children have exited."""
    tasks = {}
    nurseries = {}

    async def parent():
        tasks["parent"] = _core.current_task()
        assert tasks["parent"].child_nurseries == []
        async with _core.open_nursery() as nursery1:
            async with _core.open_nursery() as nursery2:
                assert tasks["parent"].child_nurseries == [nursery1, nursery2]
        assert tasks["parent"].child_nurseries == []
        async with _core.open_nursery() as nursery:
            nurseries["parent"] = nursery
            nursery.start_soon(child1)
        # Upward links survive after tasks/nurseries exit
        assert nurseries["parent"].parent_task is tasks["parent"]
        assert tasks["child1"].parent_nursery is nurseries["parent"]
        assert nurseries["child1"].parent_task is tasks["child1"]
        assert tasks["child2"].parent_nursery is nurseries["child1"]
        nursery = _core.current_task().parent_nursery
        # Make sure that chaining eventually gives a nursery of None (and not,
        # for example, an error)
        while nursery is not None:
            t = nursery.parent_task
            nursery = t.parent_nursery

    async def child2():
        # Runs deepest in the tree; checks the full downward link chain.
        tasks["child2"] = _core.current_task()
        assert tasks["parent"].child_nurseries == [nurseries["parent"]]
        assert nurseries["parent"].child_tasks == frozenset({tasks["child1"]})
        assert tasks["child1"].child_nurseries == [nurseries["child1"]]
        assert nurseries["child1"].child_tasks == frozenset({tasks["child2"]})
        assert tasks["child2"].child_nurseries == []

    async def child1():
        tasks["child1"] = _core.current_task()
        async with _core.open_nursery() as nursery:
            nurseries["child1"] = nursery
            nursery.start_soon(child2)

    async with _core.open_nursery() as nursery:
        nursery.start_soon(parent)
async def test_nursery_closure():
    """A nursery keeps accepting start_soon() while any of its tasks are
    still running (even inside __aexit__), but rejects it with RuntimeError
    once the async-with block has fully exited."""
    async def noop():
        pass

    async def spawner(nursery):
        # Legal: the nursery is still draining its running tasks.
        nursery.start_soon(noop)

    async with _core.open_nursery() as nursery:
        nursery.start_soon(spawner, nursery)
    # Fully exited -> closed for new tasks.
    with pytest.raises(RuntimeError):
        nursery.start_soon(noop)
async def test_spawn_name():
    """Task names default to something containing the function's name, and
    can be overridden via the name= argument (function, string, or object),
    for both start_soon and spawn_system_task."""
    async def func1(expected):
        task = _core.current_task()
        assert expected in task.name

    async def func2():  # pragma: no cover
        pass

    async with _core.open_nursery() as nursery:
        for spawn_fn in [nursery.start_soon, _core.spawn_system_task]:
            spawn_fn(func1, "func1")
            spawn_fn(func1, "func2", name=func2)
            spawn_fn(func1, "func3", name="func3")
            # functools.partial wrappers are unwrapped for naming purposes.
            spawn_fn(functools.partial(func1, "func1"))
            spawn_fn(func1, "object", name=object())
async def test_current_effective_deadline(mock_clock):
    """current_effective_deadline() reflects the nearest unshielded deadline,
    becomes -inf once a governing scope is cancelled, and ignores outer
    scopes while an inner scope is shielded."""
    assert _core.current_effective_deadline() == inf
    with _core.CancelScope(deadline=5) as scope1:
        with _core.CancelScope(deadline=10) as scope2:
            assert _core.current_effective_deadline() == 5
            scope2.deadline = 3
            assert _core.current_effective_deadline() == 3
            scope2.deadline = 10
            assert _core.current_effective_deadline() == 5
            scope2.shield = True
            # Shield hides scope1's earlier deadline.
            assert _core.current_effective_deadline() == 10
            scope2.shield = False
            assert _core.current_effective_deadline() == 5
            scope1.cancel()
            assert _core.current_effective_deadline() == -inf
            scope2.shield = True
            assert _core.current_effective_deadline() == 10
        assert _core.current_effective_deadline() == -inf
    assert _core.current_effective_deadline() == inf
# @coroutine is deprecated since python 3.8, which is fine with us.
@pytest.mark.filterwarnings("ignore:.*@coroutine.*:DeprecationWarning")
def test_nice_error_on_bad_calls_to_run_or_spawn():
    """Passing coroutine objects, asyncio/twisted awaitables, sync
    functions, or async generators to run()/start_soon() raises a TypeError
    whose message names the mistake."""
    def bad_call_run(*args):
        _core.run(*args)

    def bad_call_spawn(*args):
        async def main():
            async with _core.open_nursery() as nursery:
                nursery.start_soon(*args)
        _core.run(main)

    class Deferred:
        "Just kidding"

    with ignore_coroutine_never_awaited_warnings():
        for bad_call in bad_call_run, bad_call_spawn:
            async def f():  # pragma: no cover
                pass

            # Pre-called coroutine object instead of the async function.
            with pytest.raises(TypeError) as excinfo:
                bad_call(f())
            assert "expecting an async function" in str(excinfo.value)

            import asyncio

            @asyncio.coroutine
            def generator_based_coro():  # pragma: no cover
                yield from asyncio.sleep(1)

            with pytest.raises(TypeError) as excinfo:
                bad_call(generator_based_coro())
            assert "asyncio" in str(excinfo.value)

            with pytest.raises(TypeError) as excinfo:
                bad_call(asyncio.Future())
            assert "asyncio" in str(excinfo.value)

            with pytest.raises(TypeError) as excinfo:
                bad_call(lambda: asyncio.Future())
            assert "asyncio" in str(excinfo.value)

            # Deferred is detected by class name, hence the stub above.
            with pytest.raises(TypeError) as excinfo:
                bad_call(Deferred())
            assert "twisted" in str(excinfo.value)

            with pytest.raises(TypeError) as excinfo:
                bad_call(lambda: Deferred())
            assert "twisted" in str(excinfo.value)

            with pytest.raises(TypeError) as excinfo:
                bad_call(len, [1, 2, 3])
            assert "appears to be synchronous" in str(excinfo.value)

            async def async_gen(arg):  # pragma: no cover
                yield

            with pytest.raises(TypeError) as excinfo:
                bad_call(async_gen, 0)
            msg = "expected an async function but got an async generator"
            assert msg in str(excinfo.value)

            # Make sure no references are kept around to keep anything alive
            del excinfo
def test_calling_asyncio_function_gives_nice_error():
    """Awaiting an asyncio Future (even indirectly) under Trio raises a
    TypeError mentioning asyncio, and the traceback points at the frame
    containing the foreign await."""
    async def child_xyzzy():
        import asyncio
        await asyncio.Future()

    async def misguided():
        await child_xyzzy()

    with pytest.raises(TypeError) as excinfo:
        _core.run(misguided)
    assert "asyncio" in str(excinfo.value)
    # The traceback should point to the location of the foreign await
    assert any(  # pragma: no branch
        entry.name == "child_xyzzy" for entry in excinfo.traceback
    )
async def test_asyncio_function_inside_nursery_does_not_explode():
    """Awaiting an asyncio Future inside an open nursery raises a clean
    asyncio-mentioning TypeError instead of crashing the run."""
    # Regression test for https://github.com/python-trio/trio/issues/552
    with pytest.raises(TypeError) as excinfo:
        async with _core.open_nursery() as nursery:
            import asyncio
            nursery.start_soon(sleep_forever)
            await asyncio.Future()
    assert "asyncio" in str(excinfo.value)
async def test_trivial_yields():
    """The "trivial" primitives each execute a full checkpoint, and an
    empty nursery checkpoints on exit (so it picks up pending cancellation,
    yielding a MultiError of {KeyError, Cancelled} below)."""
    with assert_checkpoints():
        await _core.checkpoint()
    with assert_checkpoints():
        # Together these two halves make up one full checkpoint.
        await _core.checkpoint_if_cancelled()
        await _core.cancel_shielded_checkpoint()
    with assert_checkpoints():
        async with _core.open_nursery():
            pass
    with _core.CancelScope() as cancel_scope:
        cancel_scope.cancel()
        with pytest.raises(_core.MultiError) as excinfo:
            async with _core.open_nursery():
                raise KeyError
        assert len(excinfo.value.exceptions) == 2
        assert {type(e)
                for e in excinfo.value.exceptions} == {
                    KeyError, _core.Cancelled
                }
async def test_nursery_start(autojump_clock):
    """Exercise nursery.start()/task_status.started(): happy path with a
    return value, calling started() twice, crash before started(), clean
    exit without started(), cancellation of start(), crash after a no-op
    started(), and starting into a closed nursery."""
    async def no_args():  # pragma: no cover
        pass

    # Errors in calling convention get raised immediately from start
    async with _core.open_nursery() as nursery:
        with pytest.raises(TypeError):
            await nursery.start(no_args)

    async def sleep_then_start(
        seconds, *, task_status=_core.TASK_STATUS_IGNORED
    ):
        repr(task_status)  # smoke test
        await sleep(seconds)
        task_status.started(seconds)
        await sleep(seconds)

    # Basic happy-path check: start waits for the task to call started(), then
    # returns, passes back the value, and the given nursery then waits for it
    # to exit.
    for seconds in [1, 2]:
        async with _core.open_nursery() as nursery:
            assert len(nursery.child_tasks) == 0
            t0 = _core.current_time()
            assert await nursery.start(sleep_then_start, seconds) == seconds
            assert _core.current_time() - t0 == seconds
            assert len(nursery.child_tasks) == 1
        assert _core.current_time() - t0 == 2 * seconds

    # Make sure TASK_STATUS_IGNORED works so task function can be called
    # directly
    t0 = _core.current_time()
    await sleep_then_start(3)
    assert _core.current_time() - t0 == 2 * 3

    # calling started twice
    async def double_started(task_status=_core.TASK_STATUS_IGNORED):
        task_status.started()
        with pytest.raises(RuntimeError):
            task_status.started()

    async with _core.open_nursery() as nursery:
        await nursery.start(double_started)

    # child crashes before calling started -> error comes out of .start()
    async def raise_keyerror(task_status=_core.TASK_STATUS_IGNORED):
        raise KeyError("oops")

    async with _core.open_nursery() as nursery:
        with pytest.raises(KeyError):
            await nursery.start(raise_keyerror)

    # child exiting cleanly before calling started -> triggers a RuntimeError
    async def nothing(task_status=_core.TASK_STATUS_IGNORED):
        return

    async with _core.open_nursery() as nursery:
        with pytest.raises(RuntimeError) as excinfo:
            await nursery.start(nothing)
        assert "exited without calling" in str(excinfo.value)

    # if the call to start() is cancelled, then the call to started() does
    # nothing -- the child keeps executing under start(). The value it passed
    # is ignored; start() raises Cancelled.
    async def just_started(task_status=_core.TASK_STATUS_IGNORED):
        task_status.started("hi")

    async with _core.open_nursery() as nursery:
        with _core.CancelScope() as cs:
            cs.cancel()
            with pytest.raises(_core.Cancelled):
                await nursery.start(just_started)

    # and if after the no-op started(), the child crashes, the error comes out
    # of start()
    async def raise_keyerror_after_started(
        task_status=_core.TASK_STATUS_IGNORED
    ):
        task_status.started()
        raise KeyError("whoopsiedaisy")

    async with _core.open_nursery() as nursery:
        with _core.CancelScope() as cs:
            cs.cancel()
            with pytest.raises(_core.MultiError) as excinfo:
                await nursery.start(raise_keyerror_after_started)
    assert {type(e)
            for e in excinfo.value.exceptions} == {_core.Cancelled, KeyError}

    # trying to start in a closed nursery raises an error immediately
    async with _core.open_nursery() as closed_nursery:
        pass
    t0 = _core.current_time()
    with pytest.raises(RuntimeError):
        await closed_nursery.start(sleep_then_start, 7)
    # The failure is synchronous -- no time elapsed on the autojump clock.
    assert _core.current_time() == t0
async def test_task_nursery_stack():
    """Task._child_nurseries behaves as a stack, popping correctly even when
    the inner nursery exits via an exception."""
    task = _core.current_task()
    assert task._child_nurseries == []
    async with _core.open_nursery() as nursery1:
        assert task._child_nurseries == [nursery1]
        with pytest.raises(KeyError):
            async with _core.open_nursery() as nursery2:
                assert task._child_nurseries == [nursery1, nursery2]
                raise KeyError
        # Exceptional exit still popped nursery2 off the stack.
        assert task._child_nurseries == [nursery1]
    assert task._child_nurseries == []
async def test_nursery_start_with_cancelled_nursery():
    """started() may move a task (with sleeping children) into a nursery
    that is cancelled just before or just after the handoff; the
    cancellation machinery must notice and abort the sleepers."""
    # This function isn't testing task_status, it's using task_status as a
    # convenient way to get a nursery that we can test spawning stuff into.
    async def setup_nursery(task_status=_core.TASK_STATUS_IGNORED):
        async with _core.open_nursery() as nursery:
            task_status.started(nursery)
            await sleep_forever()

    # Calls started() while children are asleep, so we can make sure
    # that the cancellation machinery notices and aborts when a sleeping task
    # is moved into a cancelled scope.
    async def sleeping_children(fn, *, task_status=_core.TASK_STATUS_IGNORED):
        async with _core.open_nursery() as nursery:
            nursery.start_soon(sleep_forever)
            nursery.start_soon(sleep_forever)
            await wait_all_tasks_blocked()
            fn()
            task_status.started()

    # Cancelling the setup_nursery just *before* calling started()
    async with _core.open_nursery() as nursery:
        target_nursery = await nursery.start(setup_nursery)
        await target_nursery.start(
            sleeping_children, target_nursery.cancel_scope.cancel
        )

    # Cancelling the setup_nursery just *after* calling started()
    async with _core.open_nursery() as nursery:
        target_nursery = await nursery.start(setup_nursery)
        await target_nursery.start(sleeping_children, lambda: None)
        target_nursery.cancel_scope.cancel()
async def test_nursery_start_keeps_nursery_open(autojump_clock):
    """A pending start() keeps its target nursery open even when the nursery
    momentarily has no tasks, and even when the starting task crashes before
    ever entering the nursery."""
    async def sleep_a_bit(task_status=_core.TASK_STATUS_IGNORED):
        await sleep(2)
        task_status.started()
        await sleep(3)

    async with _core.open_nursery() as nursery1:
        t0 = _core.current_time()
        async with _core.open_nursery() as nursery2:
            # Start the 'start' call running in the background
            nursery1.start_soon(nursery2.start, sleep_a_bit)
            # Sleep a bit
            await sleep(1)
            # Start another one.
            nursery1.start_soon(nursery2.start, sleep_a_bit)
            # Then exit this nursery. At this point, there are no tasks
            # present in this nursery -- the only thing keeping it open is
            # that the tasks will be placed into it soon, when they call
            # started().
        # 1s stagger + 2s pre-started + 3s post-started = 6s total.
        assert _core.current_time() - t0 == 6

    # Check that it still works even if the task that the nursery is waiting
    # for ends up crashing, and never actually enters the nursery.
    async def sleep_then_crash(task_status=_core.TASK_STATUS_IGNORED):
        await sleep(7)
        raise KeyError

    async def start_sleep_then_crash(nursery):
        with pytest.raises(KeyError):
            await nursery.start(sleep_then_crash)

    async with _core.open_nursery() as nursery1:
        t0 = _core.current_time()
        async with _core.open_nursery() as nursery2:
            nursery1.start_soon(start_sleep_then_crash, nursery2)
            await wait_all_tasks_blocked()
        assert _core.current_time() - t0 == 7
async def test_nursery_explicit_exception():
    """An exception raised in the nursery body propagates out of the
    async-with block unchanged."""
    with pytest.raises(KeyError):
        async with _core.open_nursery():
            raise KeyError()
async def test_nursery_stop_iteration():
    """StopIteration raised in the nursery body is combined with child
    errors into a MultiError rather than being swallowed or mangled."""
    async def fail():
        raise ValueError

    try:
        async with _core.open_nursery() as nursery:
            nursery.start_soon(fail)
            raise StopIteration
    except _core.MultiError as e:
        assert tuple(map(type, e.exceptions)) == (StopIteration, ValueError)
async def test_nursery_stop_async_iteration():
    """StopAsyncIteration flowing through a nursery (via MultiError.catch)
    works: an async_zip over unequal-length async iterators terminates at
    the shorter one."""
    class it:
        # Minimal async iterator yielding 0..count-1.
        def __init__(self, count):
            self.count = count
            self.val = 0

        async def __anext__(self):
            await sleep(0)
            val = self.val
            if val >= self.count:
                raise StopAsyncIteration
            self.val += 1
            return val

    class async_zip:
        # Drives several async iterators concurrently, one nursery per step.
        def __init__(self, *largs):
            self.nexts = [obj.__anext__ for obj in largs]

        async def _accumulate(self, f, items, i):
            items[i] = await f()

        def __aiter__(self):
            return self

        async def __anext__(self):
            nexts = self.nexts
            items = [
                None,
            ] * len(nexts)
            got_stop = False

            def handle(exc):
                # Absorb StopAsyncIteration from any child; note it was seen.
                nonlocal got_stop
                if isinstance(exc, StopAsyncIteration):
                    got_stop = True
                    return None
                else:  # pragma: no cover
                    return exc

            with _core.MultiError.catch(handle):
                async with _core.open_nursery() as nursery:
                    for i, f in enumerate(nexts):
                        nursery.start_soon(self._accumulate, f, items, i)
            if got_stop:
                raise StopAsyncIteration
            return items

    result = []
    async for vals in async_zip(it(4), it(2)):
        result.append(vals)
    # Stops after 2 rounds, when the shorter iterator is exhausted.
    assert result == [[0, 0], [1, 1]]
async def test_traceback_frame_removal():
    """A child exception's traceback starts in the child's own frame, with
    runtime-internal frames stripped -- and only that one frame."""
    async def my_child_task():
        raise KeyError()

    try:
        # Trick: For now cancel/nursery scopes still leave a bunch of tb gunk
        # behind. But if there's a MultiError, they leave it on the MultiError,
        # which lets us get a clean look at the KeyError itself. Someday I
        # guess this will always be a MultiError (#611), but for now we can
        # force it by raising two exceptions.
        async with _core.open_nursery() as nursery:
            nursery.start_soon(my_child_task)
            nursery.start_soon(my_child_task)
    except _core.MultiError as exc:
        first_exc = exc.exceptions[0]
        assert isinstance(first_exc, KeyError)
        # The top frame in the exception traceback should be inside the child
        # task, not trio/contextvars internals. And there's only one frame
        # inside the child task, so this will also detect if our frame-removal
        # is too eager.
        frame = first_exc.__traceback__.tb_frame
        assert frame.f_code is my_child_task.__code__
def test_contextvar_support():
    """A contextvar value set inside _core.run() stays inside that run's
    copied context; the caller's value is untouched afterwards."""
    var = contextvars.ContextVar("test")
    var.set("before")
    assert var.get() == "before"

    async def inner():
        task = _core.current_task()
        # The task's context starts as a snapshot of the caller's.
        assert task.context.get(var) == "before"
        assert var.get() == "before"
        var.set("after")
        assert var.get() == "after"
        assert var in task.context
        assert task.context.get(var) == "after"

    _core.run(inner)
    # The mutation inside the run did not escape to the caller's context.
    assert var.get() == "before"
async def test_contextvar_multitask():
    """Each child task gets its own context snapshot taken at spawn time:
    t1's mutation is invisible to the parent, and t2 (spawned after the
    parent's own set) sees the parent's new value."""
    var = contextvars.ContextVar("test", default="hmmm")

    async def t1():
        assert var.get() == "hmmm"
        var.set("hmmmm")
        assert var.get() == "hmmmm"

    async def t2():
        assert var.get() == "hmmmm"

    async with _core.open_nursery() as n:
        n.start_soon(t1)
        await wait_all_tasks_blocked()
        # t1 ran and set the var, but only in its own context copy.
        assert var.get() == "hmmm"
        var.set("hmmmm")
        n.start_soon(t2)
        await wait_all_tasks_blocked()
def test_system_task_contexts():
    """System tasks see the run's original context value, while regular
    tasks inherit the spawning task's (modified) context."""
    cvar = contextvars.ContextVar('qwilfish')
    cvar.set("water")

    async def system_task():
        assert cvar.get() == "water"

    async def regular_task():
        assert cvar.get() == "poison"

    async def inner():
        async with _core.open_nursery() as nursery:
            cvar.set("poison")
            nursery.start_soon(regular_task)
            _core.spawn_system_task(system_task)
            await wait_all_tasks_blocked()

    _core.run(inner)
def test_Nursery_init():
    """Nursery cannot be instantiated directly by user code; only
    open_nursery() creates one."""
    with pytest.raises(TypeError):
        _core._run.Nursery(None, None)
async def test_Nursery_private_init():
    """A nursery created through open_nursery() starts out not closed."""
    # context manager creation should not raise
    async with _core.open_nursery() as nursery:
        # Idiomatic truthiness check instead of `False == ...` (PEP 8 E712).
        assert not nursery._closed
def test_Nursery_subclass():
    """Nursery is final: attempting to subclass it raises TypeError."""
    with pytest.raises(TypeError):
        class Subclass(_core._run.Nursery):
            pass
def test_Cancelled_init():
    """Cancelled can neither be raised nor instantiated by user code; only
    the private _create() classmethod constructs one."""
    with pytest.raises(TypeError):
        raise _core.Cancelled
    with pytest.raises(TypeError):
        _core.Cancelled()
    # private constructor should not raise
    _core.Cancelled._create()
def test_Cancelled_str():
    """str() of a Cancelled instance is the plain word 'Cancelled'."""
    cancelled = _core.Cancelled._create()
    assert str(cancelled) == 'Cancelled'
def test_Cancelled_subclass():
    """Cancelled is final: attempting to subclass it raises TypeError."""
    with pytest.raises(TypeError):
        class Subclass(_core.Cancelled):
            pass
def test_CancelScope_subclass():
    """CancelScope is final: attempting to subclass it raises TypeError."""
    with pytest.raises(TypeError):
        class Subclass(_core.CancelScope):
            pass
def test_sniffio_integration():
    """sniffio reports "trio" while inside _core.run() and raises
    AsyncLibraryNotFoundError both before and after the run."""
    with pytest.raises(sniffio.AsyncLibraryNotFoundError):
        sniffio.current_async_library()

    async def check_inside_trio():
        assert sniffio.current_async_library() == "trio"

    _core.run(check_inside_trio)
    with pytest.raises(sniffio.AsyncLibraryNotFoundError):
        sniffio.current_async_library()
async def test_Task_custom_sleep_data():
    """custom_sleep_data can be set while running and is reset to None after
    the next checkpoint."""
    task = _core.current_task()
    assert task.custom_sleep_data is None
    task.custom_sleep_data = 1
    assert task.custom_sleep_data == 1
    await _core.checkpoint()
    assert task.custom_sleep_data is None
@types.coroutine
def async_yield(value):
    """Awaitable helper: yields *value* straight out to whatever is driving
    the coroutine (used by the detach/reattach tests below)."""
    yield value
async def test_permanently_detach_coroutine_object():
    """After permanently_detach_coroutine_object(), Trio considers the task
    exited but the raw coroutine can still be driven by hand via
    coro.send()/coro.throw(); detaching with children alive is an error."""
    task = None
    pdco_outcome = None

    async def detachable_coroutine(task_outcome, yield_value):
        await sleep(0)
        nonlocal task, pdco_outcome
        task = _core.current_task()
        # Captures the value/error sent in by the external coroutine runner.
        pdco_outcome = await outcome.acapture(
            _core.permanently_detach_coroutine_object, task_outcome
        )
        await async_yield(yield_value)

    async with _core.open_nursery() as nursery:
        nursery.start_soon(
            detachable_coroutine, outcome.Value(None), "I'm free!"
        )

    # If we get here then Trio thinks the task has exited... but the coroutine
    # is still iterable
    assert pdco_outcome is None
    assert task.coro.send("be free!") == "I'm free!"
    assert pdco_outcome == outcome.Value("be free!")
    with pytest.raises(StopIteration):
        task.coro.send(None)

    # Check the exception paths too
    task = None
    pdco_outcome = None
    with pytest.raises(KeyError):
        async with _core.open_nursery() as nursery:
            nursery.start_soon(
                detachable_coroutine, outcome.Error(KeyError()), "uh oh"
            )
    throw_in = ValueError()
    assert task.coro.throw(throw_in) == "uh oh"
    assert pdco_outcome == outcome.Error(throw_in)
    with pytest.raises(StopIteration):
        task.coro.send(None)

    async def bad_detach():
        # Detaching while this task has an open nursery must be refused.
        async with _core.open_nursery():
            with pytest.raises(RuntimeError) as excinfo:
                await _core.permanently_detach_coroutine_object(
                    outcome.Value(None)
                )
            assert "open nurser" in str(excinfo.value)

    async with _core.open_nursery() as nursery:
        nursery.start_soon(bad_detach)
async def test_detach_and_reattach_coroutine_object():
    """temporarily_detach_coroutine_object() hands the coroutine to an
    external runner; reattaching validates the task identity and then
    resumes execution under Trio."""
    unrelated_task = None
    task = None

    async def unrelated_coroutine():
        nonlocal unrelated_task
        unrelated_task = _core.current_task()

    async def reattachable_coroutine():
        await sleep(0)
        nonlocal task
        task = _core.current_task()

        def abort_fn(_):  # pragma: no cover
            return _core.Abort.FAILED

        got = await _core.temporarily_detach_coroutine_object(abort_fn)
        # First value the external runner sends in after detaching.
        assert got == "not trio!"
        await async_yield(1)
        await async_yield(2)
        # Reattaching against the wrong task must be rejected.
        with pytest.raises(RuntimeError) as excinfo:
            await _core.reattach_detached_coroutine_object(
                unrelated_task, None
            )
        assert "does not match" in str(excinfo.value)
        await _core.reattach_detached_coroutine_object(task, "byebye")
        await sleep(0)

    async with _core.open_nursery() as nursery:
        nursery.start_soon(unrelated_coroutine)
        nursery.start_soon(reattachable_coroutine)
        await wait_all_tasks_blocked()
        assert unrelated_task is not None
        assert task is not None
        # Okay, it's detached. Here's our coroutine runner:
        assert task.coro.send("not trio!") == 1
        assert task.coro.send(None) == 2
        assert task.coro.send(None) == "byebye"
        # Now it's been reattached, and we can leave the nursery
async def test_detached_coroutine_cancellation():
    """Cancelling a temporarily-detached task invokes its abort_fn; the
    Cancelled is then delivered after the coroutine is reattached and hits
    its next checkpoint."""
    abort_fn_called = False
    task = None

    async def reattachable_coroutine():
        await sleep(0)
        nonlocal task
        task = _core.current_task()

        def abort_fn(_):
            nonlocal abort_fn_called
            abort_fn_called = True
            return _core.Abort.FAILED

        await _core.temporarily_detach_coroutine_object(abort_fn)
        await _core.reattach_detached_coroutine_object(task, None)
        with pytest.raises(_core.Cancelled):
            await sleep(0)

    async with _core.open_nursery() as nursery:
        nursery.start_soon(reattachable_coroutine)
        await wait_all_tasks_blocked()
        assert task is not None
        nursery.cancel_scope.cancel()
        # Drive the detached coroutine one step so it reattaches.
        task.coro.send(None)
    assert abort_fn_called
def test_async_function_implemented_in_C():
    """run()/start_soon() on a C-implemented awaitable (an async generator's
    __anext__) must work despite the missing Python frame."""
    # These used to crash because we'd try to mutate the coroutine object's
    # cr_frame, but C functions don't have Python frames.
    async def agen_fn(record):
        assert not _core.currently_ki_protected()
        record.append("the generator ran")
        yield

    run_record = []
    agen = agen_fn(run_record)
    _core.run(agen.__anext__)
    assert run_record == ["the generator ran"]

    async def main():
        start_soon_record = []
        agen = agen_fn(start_soon_record)
        async with _core.open_nursery() as nursery:
            nursery.start_soon(agen.__anext__)
        assert start_soon_record == ["the generator ran"]

    _core.run(main)
async def test_very_deep_cancel_scope_nesting():
    """Cancelling the outermost of 5000 nested cancel scopes must not raise
    RecursionError."""
    # This used to crash with a RecursionError in CancelStatus.recalculate
    with ExitStack() as exit_stack:
        outermost_scope = _core.CancelScope()
        exit_stack.enter_context(outermost_scope)
        for _ in range(5000):
            exit_stack.enter_context(_core.CancelScope())
        outermost_scope.cancel()
|
lisp-etr.py | # -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp-etr.py
#
# This file performs LISP Egress Tunnel Router (ETR) functionality.
#
# -----------------------------------------------------------------------------
import lisp
import lispconfig
import socket
import select
import threading
import time
import pcappy
import struct
import commands
import os
try:
import pytun
except:
pytun = None
#endtry
# Human-readable names for IGMP message types, keyed by the 8-bit IGMP
# type field; used by lisp_process_igmp_packet() when logging types that
# the ETR does not process.
igmp_types = { 17 : "IGMP-query", 18 : "IGMPv1-report", 19 : "DVMRP",
               20 : "PIMv1", 22 : "IGMPv2-report", 23 : "IGMPv2-leave",
               30 : "mtrace-response", 31 : "mtrace-request", 34 : "IGMPv3-report" }
#------------------------------------------------------------------------------
#
# Global data structures relative to the lisp-etr process.
#
lisp_register_timer = None          # Periodic Map-Register timer
lisp_trigger_register_timer = None  # One-shot triggered Map-Register timer
lisp_etr_info_timer = None          # Periodic NAT-traversal Info-Request timer
lisp_ephem_socket = None            # UDP control socket bound to lisp_ephem_port
lisp_ephem_port = lisp.lisp_get_ephemeral_port()
lisp_ipc_listen_socket = None       # IPC socket toward the lisp-core process
lisp_send_sockets = [None, None, None]  # presumably [IPv4, IPv6, IPC] -- verify
lisp_raw_socket = None              # Raw IP socket -- presumably for decap forwarding
lisp_l2_socket = None               # Layer-2 socket for L2-overlay use -- verify
lisp_mac_header = None              # NOTE(review): prebuilt MAC header? confirm use
LISP_MAP_REGISTER_INTERVAL = 60 # In units of seconds
#------------------------------------------------------------------------------
#
# lisp_etr_database_mapping_command
#
# This function supports adding additional RLOCs to a database-mapping entry
# that already exists.
#
def lisp_etr_database_mapping_command(kv_pair):
    """Process a "lisp database-mapping" command clause.

    Stores the mapping via lispconfig and, unless NAT-traversal is
    active, arms a one-shot 5-second timer that triggers a Map-Register
    once the burst of database-mapping clauses has been processed.
    """
    global lisp_trigger_register_timer
    global lisp_send_sockets
    lispconfig.lisp_database_mapping_command(kv_pair, lisp_ephem_port)
    #
    # Trigger Map-Register when all database-mappings are configured.
    #
    # Do not trigger Map-Register if NAT-traversal is configured. We may not
    # have the global RLOC yet from Info-Replies. When the Info-Reply comes
    # in we do trigger Map-Registers to all map-servers.
    #
    if (lisp.lisp_nat_traversal): return
    # Avoid re-arming while a previously scheduled trigger is still pending.
    if (lisp_trigger_register_timer != None and
        lisp_trigger_register_timer.is_alive()): return
    if (len(lisp.lisp_map_servers_list) > 0):
        lisp_trigger_register_timer = threading.Timer(5,
            lisp_process_register_timer, [lisp_send_sockets])
        lisp_trigger_register_timer.start()
    #endif
#enddef
#
# lisp_etr_show_command
#
# Show ETR configured map-servers and database-mappings.
#
def lisp_etr_show_command(clause):
    """Build and return the HTML "show" page for the ETR process.

    Concatenates: local RLOCs, decapsulation stats, the configured
    map-server table, database-mappings, and — when configured — the ELP,
    RLE, JSON, and group-mapping tables. 'clause' is the command clause
    (not used by this handler).
    """
    #
    # Show local found RLOCs.
    #
    output = lispconfig.lisp_show_myrlocs("")
    #
    # Show decapsulation stats.
    #
    output = lispconfig.lisp_show_decap_stats(output, "ETR")
    #
    # Show configured map-servers.
    #
    dns_suffix = lisp.lisp_decent_dns_suffix
    if (dns_suffix == None):
        dns_suffix = ":"
    else:
        dns_suffix = " (dns-suffix '{}'):".format(dns_suffix)
    #endif
    hover = "{} configured map-servers".format(len(lisp.lisp_map_servers_list))
    title = "LISP-ETR Configured Map-Servers{}".format(dns_suffix)
    title = lisp.lisp_span(title, hover)
    hover = ("P = proxy-reply requested, M = merge-registrations " + \
        "requested, N = Map-Notify requested")
    reg_title = lisp.lisp_span("Registration<br>flags", hover)
    output += lispconfig.lisp_table_header(title, "Address", "Auth-Type",
        "xTR-ID", "Site-ID", reg_title, "Map-Registers<br>Sent",
        "Map-Notifies<br>Received")
    for ms in lisp.lisp_map_servers_list.values():
        ms.resolve_dns_name()
        # "all" is the default ms-name; suppress it in the display.
        ms_name = "" if ms.ms_name == "all" else ms.ms_name + "<br>"
        addr_str = ms_name + ms.map_server.print_address_no_iid()
        if (ms.dns_name): addr_str += "<br>" + ms.dns_name
        xtr_id = "0x" + lisp.lisp_hex_string(ms.xtr_id)
        # Upper-case letter means the flag is set, lower-case means clear.
        flags = "{}-{}-{}-{}".format("P" if ms.proxy_reply else "p",
            "M" if ms.merge_registrations else "m",
            "N" if ms.want_map_notify else "n",
            "R" if ms.refresh_registrations else "r")
        registers_sent = ms.map_registers_sent + \
            ms.map_registers_multicast_sent
        output += lispconfig.lisp_table_row(addr_str,
            "sha1" if (ms.alg_id == lisp.LISP_SHA_1_96_ALG_ID) else "sha2",
            xtr_id, ms.site_id, flags, registers_sent,
            ms.map_notifies_received)
    #endfor
    output += lispconfig.lisp_table_footer()
    #
    # Show database-mappings configured.
    #
    output = lispconfig.lisp_show_db_list("ETR", output)
    #
    # Show ELP configuration, if it exists.
    #
    if (len(lisp.lisp_elp_list) != 0):
        output = lispconfig.lisp_show_elp_list(output)
    #endif
    #
    # Show RLE configuration, if it exists.
    #
    if (len(lisp.lisp_rle_list) != 0):
        output = lispconfig.lisp_show_rle_list(output)
    #endif
    #
    # Show JSON configuration, if it exists.
    #
    if (len(lisp.lisp_json_list) != 0):
        output = lispconfig.lisp_show_json_list(output)
    #endif
    #
    # Show group-mappings, if they exist.
    #
    if (len(lisp.lisp_group_mapping_list) != 0):
        title = "Configured Group Mappings:"
        output += lispconfig.lisp_table_header(title, "Name", "Group Prefix",
            "Sources", "Use MS")
        for gm in lisp.lisp_group_mapping_list.values():
            sources = ""
            for s in gm.sources: sources += s + ", "
            if (sources == ""):
                sources = "*"
            else:
                # Strip the trailing ", " from the source list.
                sources = sources[0:-2]
            #endif
            output += lispconfig.lisp_table_row(gm.group_name,
                gm.group_prefix.print_prefix(), sources, gm.use_ms_name)
        #endfor
        output += lispconfig.lisp_table_footer()
    #endif
    return(output)
#enddef
#
# lisp_etr_show_keys_command
#
# Call lispconfig.lisp_show_crypto_list().
#
def lisp_etr_show_keys_command(parameter):
    """Return the "show keys" output for the ETR process.

    'parameter' is accepted for command-dispatch uniformity and unused.
    """
    output = lispconfig.lisp_show_crypto_list("ETR")
    return(output)
#enddef
#
# lisp_map_server_command
#
# Store configured map-servers.
#
def lisp_map_server_command(kv_pairs):
    """Process a "lisp map-server" command clause.

    Parses the keyword/value pairs into one lisp_ms() entry per address
    and per dns-name, then either triggers a NAT-traversal Info-Request
    (first map-server) or a Map-Register toward the newly added one.
    """
    global lisp_trigger_register_timer
    global lisp_etr_info_timer
    addresses = []
    dns_names = []
    key_id = 0
    alg_id = 0
    password = ""
    proxy_reply = False
    merge = False
    refresh = False
    want = False
    site_id = 0
    ms_name = None
    ekey_id = 0
    ekey = None
    for kw in kv_pairs.keys():
        value = kv_pairs[kw]
        if (kw == "ms-name"):
            ms_name = value[0]
        #endif
        if (kw == "address"):
            for i in range(len(value)):
                addresses.append(value[i])
            #endfor
        #endif
        if (kw == "dns-name"):
            for i in range(len(value)):
                dns_names.append(value[i])
            #endfor
        #endif
        if (kw == "authentication-type"):
            alg_id = lisp.LISP_SHA_1_96_ALG_ID if (value == "sha1") else \
                lisp.LISP_SHA_256_128_ALG_ID if (value == "sha2") else ""
        #endif
        if (kw == "authentication-key"):
            # Default to sha2 when no authentication-type was supplied.
            if (alg_id == 0): alg_id = lisp.LISP_SHA_256_128_ALG_ID
            auth_key = lisp.lisp_parse_auth_key(value)
            # NOTE(review): .keys()[0] is Python 2 only; Python 3 dict
            # views are not indexable.
            key_id = auth_key.keys()[0]
            password = auth_key[key_id]
        #endif
        if (kw == "proxy-reply"):
            proxy_reply = True if value == "yes" else False
        #endif
        if (kw == "merge-registrations"):
            merge = True if value == "yes" else False
        #endif
        if (kw == "refresh-registrations"):
            refresh = True if value == "yes" else False
        #endif
        if (kw == "want-map-notify"):
            want = True if value == "yes" else False
        #endif
        if (kw == "site-id"):
            site_id = int(value)
        #endif
        if (kw == "encryption-key"):
            ekey = lisp.lisp_parse_auth_key(value)
            ekey_id = ekey.keys()[0]
            ekey = ekey[ekey_id]
        #endif
    #endfor
    #
    # Store internal data structure.
    #
    ms = None
    for addr_str in addresses:
        if (addr_str == ""): continue
        ms = lisp.lisp_ms(addr_str, None, ms_name, alg_id, key_id, password,
            proxy_reply, merge, refresh, want, site_id, ekey_id, ekey)
    #endfor
    for name in dns_names:
        if (name == ""): continue
        ms = lisp.lisp_ms(None, name, ms_name, alg_id, key_id, password,
            proxy_reply, merge, refresh, want, site_id, ekey_id, ekey)
    #endfor
    #
    # Trigger an Info-Request if we are doing NAT-traversal and this is the
    # first Map-Server.
    #
    first_ms = (len(lisp.lisp_map_servers_list) == 1)
    if (first_ms):
        # NOTE(review): .values()[0] is Python 2 only (see above).
        ms = lisp.lisp_map_servers_list.values()[0]
        lisp_etr_info_timer = threading.Timer(2, lisp_etr_process_info_timer,
            [ms.map_server])
        lisp_etr_info_timer.start()
    else:
        #
        # Trigger Map-Register to newly configured Map-Server.
        #
        # Do not trigger Map-Register if NAT-traversal is configured. We may
        # not have the global RLOC yet from Info-Replies. When the Info-Reply
        # comes in we do trigger Map-Registers to all map-servers.
        #
        if (lisp.lisp_nat_traversal): return
        if (ms and len(lisp.lisp_db_list) > 0):
            lisp_build_map_register(lisp_send_sockets, None, None, ms, False)
        #endif
    #endif
    #
    # Handle case where "lisp database-mapping" comes before "lisp map-server"
    # in configuration file. We have to start periodic timer.
    #
    if (len(lisp.lisp_db_list) > 0):
        if (lisp_trigger_register_timer != None and
            lisp_trigger_register_timer.is_alive()): return
        lisp_trigger_register_timer = threading.Timer(5,
            lisp_process_register_timer, [lisp_send_sockets])
        lisp_trigger_register_timer.start()
    #endif
    return
#enddef
#
# lisp_group_mapping_command
#
# Process the "lisp group-mapping" command clause.
#
def lisp_group_mapping_command(kv_pairs):
    """Process a "lisp group-mapping" command clause.

    Parses the clause into a lisp_group_mapping entry (group name, group
    prefix plus instance-id, allowed sources, map-server name, optional
    RLE address) and adds it to the global group-mapping list.
    """
    sources = []
    group_prefix = None
    rle_address = None
    ms_name = "all"
    #
    # Fix: initialize group_name so a malformed clause that omits the
    # "group-name" keyword cannot raise NameError at the
    # lisp_group_mapping() call below.
    #
    group_name = ""
    for kw in kv_pairs.keys():
        value = kv_pairs[kw]
        if (kw == "group-name"):
            group_name = value
        #endif
        if (kw == "group-prefix"):
            if (group_prefix == None):
                group_prefix = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
            #endif
            group_prefix.store_prefix(value)
        #endif
        if (kw == "instance-id"):
            # "instance-id" may arrive before "group-prefix"; allocate the
            # prefix holder in either order.
            if (group_prefix == None):
                group_prefix = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
            #endif
            group_prefix.instance_id = int(value)
        #endif
        if (kw == "ms-name"):
            ms_name = value[0]
        #endif
        if (kw == "address"):
            for source in value:
                if (source != ""): sources.append(source)
            #endfor
        #endif
        if (kw == "rle-address"):
            if (rle_address == None):
                rle_address = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
            #endif
            rle_address.store_address(value)
        #endif
    #endfor
    gm = lisp.lisp_group_mapping(group_name, ms_name, group_prefix, sources,
        rle_address)
    gm.add_group()
    return
#enddef
#
# lisp_build_map_register_records
#
# Build EID and RLOC records to be inserted in a Map-Register message.
#
def lisp_build_map_register_records(quiet, db, eid, group, ttl):
    """Build EID and RLOC records for one database-mapping entry.

    One EID-record is produced per instance-id in (eid.instance_id +
    eid.iid_list), each followed by the db's RLOC-records plus one
    priority-254 RLOC-record per reachable RTR (NAT-traversal case).
    Returns (encoded_records, eid_record_count).
    """
    #
    # Don't include RTR-list if there is no NAT in the path but nat-traversal
    # is configured and NAT in path is tested. When there is a NAT, include
    # all RTRs if lisp_register_all_rtrs is configured. Otherwise, if the
    # array element is None, then the RTR is down and should be excluded in
    # the list to register.
    #
    rtr_list = {}
    for rloc_entry in db.rloc_set:
        if (rloc_entry.translated_rloc.is_null()): continue
        for rtr_str in lisp.lisp_rtr_list:
            rtr = lisp.lisp_rtr_list[rtr_str]
            if (lisp.lisp_register_all_rtrs == False and rtr == None):
                lisp.lprint(" Exclude unreachable RTR {}".format( \
                    lisp.red(rtr_str, False)))
                continue
            #endif
            if (rtr == None): continue
            rtr_list[rtr_str] = rtr
        #endfor
        # Only the first RLOC entry with a translated RLOC is examined --
        # one pass over the RTR list is sufficient.
        break
    #endfor
    count = 0
    eid_records = ""
    for iid in [eid.instance_id] + eid.iid_list:
        eid_record = lisp.lisp_eid_record()
        eid_record.rloc_count = len(db.rloc_set) + len(rtr_list)
        eid_record.authoritative = True
        eid_record.record_ttl = ttl
        eid_record.eid.copy_address(eid)
        eid_record.eid.instance_id = iid
        eid_record.eid.iid_list = []
        eid_record.group.copy_address(group)
        eid_records += eid_record.encode()
        if (not quiet):
            prefix_str = lisp.lisp_print_eid_tuple(eid, group)
            decent_index = ""
            if (lisp.lisp_decent_pull_xtr_configured()):
                decent_index = lisp.lisp_get_decent_index(eid)
                decent_index = lisp.bold(str(decent_index), False)
                decent_index = ", decent-index {}".format(decent_index)
            #endif
            lisp.lprint(" EID-prefix {} for ms-name '{}'{}".format( \
                lisp.green(prefix_str, False), db.use_ms_name, decent_index))
            eid_record.print_record("  ", False)
        #endif
        for rloc_entry in db.rloc_set:
            rloc_record = lisp.lisp_rloc_record()
            rloc_record.store_rloc_entry(rloc_entry)
            rloc_record.local_bit = rloc_entry.rloc.is_local()
            rloc_record.reach_bit = True
            eid_records += rloc_record.encode()
            if (not quiet): rloc_record.print_record("    ")
        #endfor
        #
        # If we are doing NAT-traversal, include a set of RTR RLOCs with
        # priority 254 so RTRs are used as the encap target.
        #
        for rtr in rtr_list.values():
            rloc_record = lisp.lisp_rloc_record()
            rloc_record.rloc.copy_address(rtr)
            rloc_record.priority = 254
            rloc_record.rloc_name = "RTR"
            rloc_record.weight = 0
            rloc_record.mpriority = 255
            rloc_record.mweight = 0
            rloc_record.local_bit = False
            rloc_record.reach_bit = True
            eid_records += rloc_record.encode()
            if (not quiet): rloc_record.print_record("    RTR ")
        #endfor
        #
        # Count EID records written to the returned buffer.
        #
        count += 1
    #endfor
    return(eid_records, count)
#enddef
#
# lisp_build_map_register
#
# From each configured "database-mapping" command, register mappings to
# configured map-servers.
#
def lisp_build_map_register(lisp_sockets, ttl, eid_only, ms_only, refresh):
    """Build and send Map-Registers for configured database-mappings.

    'ttl' defaults to LISP_REGISTER_TTL when None. 'eid_only' restricts
    registration to one (dynamic) EID; 'ms_only' restricts it to one
    map-server; 'refresh' sets the register-refresh flag when the
    map-server supports refresh-registrations. EID-records are grouped
    per map-server name and batched 20 per Map-Register message.
    """
    #
    # No database-mapping entries.
    #
    if (eid_only != None):
        db_list_len = 1
    else:
        db_list_len = lisp.lisp_db_list_length()
        if (db_list_len == 0): return
    #endif
    lisp.lprint("Build Map-Register for {} database-mapping entries". \
        format(db_list_len))
    #
    # Set boolean if "decentralized-pull-xtr-[modulus,dns-suffix]" configured.
    #
    decent = lisp.lisp_decent_pull_xtr_configured()
    #
    # Go quiet with debug output when there are a lot of EID-records.
    #
    quiet = (db_list_len > 12)
    ms_list = {}
    if (decent):
        #
        # If "decentralized-pull-xtr-[modulus,dns-suffix]" is configured,
        # decide which map-server this EID belongs to (and is registered
        # with).
        #
        for db in lisp.lisp_db_list:
            eid = db.eid if db.group.is_null() else db.group
            dns_name = lisp.lisp_get_decent_dns_name(eid)
            ms_list[dns_name] = []
        #endfor
    else:
        #
        # Set up each map-server name so we can decide which EID-prefixes go
        # to which map-servers. [0] is eid_records and [1] is count.
        #
        for ms in lisp.lisp_map_servers_list.values():
            if (ms_only != None and ms != ms_only): continue
            ms_list[ms.ms_name] = []
        #endfor
    #endif
    #
    # Create data structure instances to build Map-Register message.
    #
    map_register = lisp.lisp_map_register()
    map_register.nonce = 0xaabbccdddfdfdf00
    map_register.xtr_id_present = True
    if (ttl == None): ttl = lisp.LISP_REGISTER_TTL
    #
    # Traverse the database-mapping associative array.
    #
    for db in lisp.lisp_db_list:
        if (decent):
            ms_dns_name = lisp.lisp_get_decent_dns_name(db.eid)
        else:
            ms_dns_name = db.use_ms_name
        #endif
        #
        # Is db entry associated with a map-server name that is not
        # configured?  (has_key is Python 2 dict API.)
        #
        if (ms_list.has_key(ms_dns_name) == False): continue
        # msl is a two-element batch [encoded_records, record_count];
        # a new batch is appended once the current one reaches 20 records.
        msl = ms_list[ms_dns_name]
        if (msl == []):
            msl = ["", 0]
            ms_list[ms_dns_name].append(msl)
        else:
            msl = ms_list[ms_dns_name][-1]
        #endif
        #
        # If dynamic-EIDs are discovered, add each of them to EID-records,
        # unless, we are doing a trigger in which case a single dynamic-EID
        # is built into an EID-record.
        #
        # Otherwise, add static EID-prefixes into EID-records, unless a single
        # one is triggered.
        #
        eid_records = ""
        if (db.dynamic_eid_configured()):
            for dyn_eid in db.dynamic_eids.values():
                eid = dyn_eid.dynamic_eid
                if (eid_only == None or eid_only.is_exact_match(eid)):
                    records, count = lisp_build_map_register_records(quiet, db,
                        eid, db.group, ttl)
                    eid_records += records
                    msl[1] += count
                #endif
            #endfor
        else:
            if (eid_only == None):
                eid_records, count = lisp_build_map_register_records(quiet, db,
                    db.eid, db.group, ttl)
                msl[1] += count
            #endif
        #endif
        #
        # Add EID-records to correct map-server name set.
        #
        msl[0] += eid_records
        if (msl[1] == 20):
            msl = ["", 0]
            ms_list[ms_dns_name].append(msl)
        #endif
    #endfor
    #
    # Send Map-Register to each configured map-server.
    #
    for ms in lisp.lisp_map_servers_list.values():
        if (ms_only != None and ms != ms_only): continue
        ms_dns_name = ms.dns_name if decent else ms.ms_name
        if (ms_list.has_key(ms_dns_name) == False): continue
        for msl in ms_list[ms_dns_name]:
            #
            # Build map-server specific fields.
            #
            map_register.record_count = msl[1]
            if (map_register.record_count == 0): continue
            map_register.nonce += 1
            map_register.alg_id = ms.alg_id
            map_register.key_id = ms.key_id
            map_register.proxy_reply_requested = ms.proxy_reply
            map_register.merge_register_requested = ms.merge_registrations
            map_register.map_notify_requested = ms.want_map_notify
            map_register.xtr_id = ms.xtr_id
            map_register.site_id = ms.site_id
            map_register.encrypt_bit = (ms.ekey != None)
            if (ms.refresh_registrations):
                map_register.map_register_refresh = refresh
            #endif
            if (ms.ekey != None): map_register.encryption_key_id = ms.ekey_id
            packet = map_register.encode()
            map_register.print_map_register()
            #
            # Append EID-records and encode xtr-ID and site-ID at end of
            # Map-Register.
            #
            trailer = map_register.encode_xtr_id("")
            eid_records = msl[0]
            packet = packet + eid_records + trailer
            ms.map_registers_sent += 1
            lisp.lisp_send_map_register(lisp_sockets, packet, map_register, ms)
            # Brief pause to avoid bursting packets at the map-server.
            time.sleep(.001)
        #endfor
        #
        # Do DNS lookup for Map-Server if "dns-name" configured.
        #
        ms.resolve_dns_name()
        #
        # Exit loop if we are triggering a Map-Register to a single
        # Map-Server.
        #
        if (ms_only != None and ms == ms_only): break
    #endfor
    return
#enddef
#
# lisp_etr_process_info_timer
#
# Time to send a periodic Info-Request message. This must be done less often
# than sending periodic Map-Registers as well as less than the NAT timeout
# value which is usually one minute.
#
def lisp_etr_process_info_timer(ms):
    """Periodic NAT-traversal Info-Request timer handler.

    Sends Info-Requests toward map-server 'ms' (or all when None) and
    toward each known RTR (to open NAT state so RTRs can encapsulate to
    us), then re-arms itself every lisp.LISP_INFO_INTERVAL seconds.
    """
    global lisp_etr_info_timer
    global lisp_ephem_socket
    lisp.lisp_set_exception()
    #
    # Build Info-Request messages if we have any private RLOCs in database-
    # mappings.
    #
    sockets = [lisp_ephem_socket, lisp_ephem_socket, lisp_ipc_listen_socket]
    lisp.lisp_build_info_requests(sockets, ms, lisp.LISP_CTRL_PORT)
    #
    # Build Info-Request for RTRs so we can open up NAT state so RTRs
    # can encapsulate to us when ETR is behind NAT. When environment
    # variable LISP_RTR_BEHIND_NAT is set, private RTR addresses are
    # skipped.
    #
    allow_private = (os.getenv("LISP_RTR_BEHIND_NAT") == None)
    for rtr in lisp.lisp_rtr_list.values():
        if (rtr == None): continue
        if (rtr.is_private_address() and allow_private == False):
            r = lisp.red(rtr.print_address_no_iid(), False)
            lisp.lprint("Skip over RTR private address {}".format(r))
            continue
        #endif
        lisp.lisp_build_info_requests(sockets, rtr, lisp.LISP_DATA_PORT)
    #endfor
    #
    # Restart periodic timer. For some reason only this timer has to be
    # canceled. Found while testing NAT-traversal on rasp-pi in Jul 2015.
    #
    lisp_etr_info_timer.cancel()
    lisp_etr_info_timer = threading.Timer(lisp.LISP_INFO_INTERVAL,
        lisp_etr_process_info_timer, [None])
    lisp_etr_info_timer.start()
    return
#enddef
#
# lisp_process_register_timer
#
# Time to send a periodic Map-Register.
#
def lisp_process_register_timer(lisp_sockets):
    """Periodic Map-Register timer handler.

    Sends Map-Registers for all database-mappings (plus the broadcast
    MAC EID when L2-overlays are enabled), then re-arms itself every
    LISP_MAP_REGISTER_INTERVAL seconds.
    """
    global lisp_register_timer
    global lisp_ephem_socket
    lisp.lisp_set_exception()
    #
    # Build and send Map-Register.
    #
    lisp_build_map_register(lisp_sockets, None, None, None, True)
    #
    # If we are doing L2-overlays, then register as a join of the
    # broadcast MAC address.
    #
    if (lisp.lisp_l2_overlay):
        entry = [ None, "ffff-ffff-ffff", True ]
        lisp_send_multicast_map_register(lisp_sockets, [entry])
    #endif
    #
    # Restart periodic timer.
    #
    if (lisp_register_timer): lisp_register_timer.cancel()
    lisp_register_timer = threading.Timer(LISP_MAP_REGISTER_INTERVAL,
        lisp_process_register_timer, [lisp_send_sockets])
    lisp_register_timer.start()
    return
#enddef
#
# lisp_is_group_more_specific
#
# Take group address in string format and see if it is more specific than
# the group-prefix in class lisp_group_mapping(). If more specific, return
# mask-length, otherwise return -1.
#
def lisp_is_group_more_specific(group_str, group_mapping):
    """Test group address (string form) against a group-mapping's prefix.

    Returns the group-mapping prefix's mask length when the address falls
    inside the prefix, -1 otherwise.
    """
    prefix = group_mapping.group_prefix
    candidate = lisp.lisp_address(lisp.LISP_AFI_IPV4, group_str, 32,
        prefix.instance_id)
    return(prefix.mask_len if candidate.is_more_specific(prefix) else -1)
#enddef
#
# lisp_send_multicast_map_register
#
# Build a Map-Register message with a Multicast Info Type LCAF as an EID-record
# for each entry in the 'entries' array. And build an RLOC-record as an RLE
# describing this ETR as the RLOC to be used for replication.
#
# The entries is an array of (source, group, joinleave) tuples.
#
def lisp_send_multicast_map_register(lisp_sockets, entries):
    """Build and send multicast Map-Registers for a set of join/leave events.

    'entries' is an array of [source, group, joinleave] tuples (source is
    None for (*,G)). Each (*,G) is expanded via the configured group-
    mappings into (S,G)/(0.0.0.0,G) EID-records, registered with an RLE
    RLOC-record naming this ETR as the replication target
    (draft-farinacci-lisp-signal-free-multicast). Leaves are registered
    with TTL 0.
    """
    length = len(entries)
    if (length == 0): return
    # Deduce the address-family from the first group's string syntax.
    afi = None
    if (entries[0][1].find(":") != -1): afi = lisp.LISP_AFI_IPV6
    if (entries[0][1].find(".") != -1): afi = lisp.LISP_AFI_IPV4
    if (entries[0][1].find("-") != -1): afi = lisp.LISP_AFI_MAC
    if (afi == None):
        lisp.lprint("lisp_send_multicast_map_register() invalid group address")
        return
    #endif
    #
    # Find all (*,G) entries in entries array and replace with (S,G) entries
    # from lisp_group_mapping_list.
    #
    g_entries = []
    for source, group, joinleave in entries:
        if (source != None): continue
        g_entries.append([group, joinleave])
    #endfor
    decent = lisp.lisp_decent_pull_xtr_configured()
    ms_list = {}
    entries = []
    for group, joinleave in g_entries:
        # Pick the longest-match group-mapping for this group.
        ms_gm = None
        for gm in lisp.lisp_group_mapping_list.values():
            mask_len = lisp_is_group_more_specific(group, gm)
            if (mask_len == -1): continue
            if (ms_gm == None or mask_len > ms_gm.group_prefix.mask_len):
                ms_gm = gm
            #endif
        #endfor
        if (ms_gm == None):
            lisp.lprint("No group-mapping for {}, could be underlay group". \
                format(group))
            continue
        #endif
        lisp.lprint("Use group-mapping '{}' {} for group {}".format( \
            ms_gm.group_name, ms_gm.group_prefix.print_prefix(), group))
        iid = ms_gm.group_prefix.instance_id
        ms_name = ms_gm.use_ms_name
        rle = ms_gm.rle_address
        #
        # To obtain decent-index for a group address, just use group address
        # and no source as part of hash. Because an ITR does not know if (*,G)
        # or (S,G) is registered with the mapping system
        #
        key = ms_name
        if (decent):
            key = lisp.lisp_get_decent_dns_name_from_str(iid, group)
            ms_list[key] = ["", 0]
        #endif
        if (len(ms_gm.sources) == 0):
            entries.append(["0.0.0.0", group, iid, key, rle, joinleave])
            continue
        #endif
        for s in ms_gm.sources:
            ms_list[key] = ["", 0]
            entries.append([s, group, iid, key, rle, joinleave])
        #endfor
    #endfor
    length = len(entries)
    if (length == 0): return
    lisp.lprint("Build Map-Register for {} multicast entries".format(length))
    #
    # Build RLE node for RLOC-record encoding. If behind a NAT, we need to
    # insert a global address as the RLE node address. We will do that in
    # the entries for loop.
    #
    rle_node = lisp.lisp_rle_node()
    rle_node.level = 128
    translated_rloc = lisp.lisp_get_any_translated_rloc()
    rle = lisp.lisp_rle("")
    rle.rle_nodes.append(rle_node)
    #
    # Set up each map-server name so we can decide which EID-prefixes go
    # to which map-servers. [0] is eid_records and [1] is count. The ms_list
    # is already setup for when pull-based decent is used.
    #
    if (decent == False):
        for ms in lisp.lisp_map_servers_list.values():
            ms_list[ms.ms_name] = ["", 0]
        #endfor
    #endif
    rloc_name = None
    if (lisp.lisp_nat_traversal): rloc_name = lisp.lisp_hostname
    #
    # Count number of RTRs reachable so we know allocation count.
    #
    rtr_count = 0
    for rtr in lisp.lisp_rtr_list.values():
        if (rtr == None): continue
        rtr_count += 1
    #endfor
    #
    # Run through multicast entry array.
    #
    eid_records = ""
    for source, group, iid, ms_dns_name, rle_addr, joinleave in entries:
        #
        # Is db entry associated with a map-server name that is not configured?
        #
        if (ms_list.has_key(ms_dns_name) == False): continue
        eid_record = lisp.lisp_eid_record()
        eid_record.rloc_count = 1 + rtr_count
        eid_record.authoritative = True
        # A leave is registered with TTL 0, a join with the standard TTL.
        eid_record.record_ttl = lisp.LISP_REGISTER_TTL if joinleave else 0
        eid_record.eid = lisp.lisp_address(afi, source, 0, iid)
        if (eid_record.eid.address == 0): eid_record.eid.mask_len = 0
        eid_record.group = lisp.lisp_address(afi, group, 0, iid)
        if (eid_record.group.is_mac_broadcast() and \
            eid_record.eid.address == 0): eid_record.eid.mask_len = 0
        decent_index = ""
        ms_name = ""
        if (lisp.lisp_decent_pull_xtr_configured()):
            decent_index = lisp.lisp_get_decent_index(eid_record.group)
            decent_index = lisp.bold(str(decent_index), False)
            decent_index = "with decent-index {}".format(decent_index)
        else:
            decent_index = "for ms-name '{}'".format(ms_dns_name)
        #endif
        eid_str = lisp.green(eid_record.print_eid_tuple(), False)
        lisp.lprint(" EID-prefix {} {}{}".format(eid_str, ms_name,
            decent_index))
        eid_records += eid_record.encode()
        eid_record.print_record("  ", False)
        ms_list[ms_dns_name][1] += 1
        #
        # Build our RLOC entry.
        #
        rloc_record = lisp.lisp_rloc_record()
        rloc_record.rloc_name = rloc_name
        #
        # Decide on RLE address. Have NAT-traversal take precedent, otherwise
        # use configured RLE in group-mapping. If one wasn't configured use
        # lisp_myrlocs IPv4 address.
        #
        if (translated_rloc != None):
            rle_node.address = translated_rloc
        elif (rle_addr != None):
            rle_node.address = rle_addr
        else:
            rle_node.address = rle_addr = lisp.lisp_myrlocs[0]
        #endif
        rloc_record.rle = rle
        rloc_record.local_bit = True
        rloc_record.reach_bit = True
        rloc_record.priority = 255
        rloc_record.weight = 0
        rloc_record.mpriority = 1
        rloc_record.mweight = 100
        eid_records += rloc_record.encode()
        rloc_record.print_record("    ")
        #
        # If we are doing NAT-traversal, include a set of RTR RLOCs with
        # priority 254.
        #
        for rtr in lisp.lisp_rtr_list.values():
            if (rtr == None): continue
            rloc_record = lisp.lisp_rloc_record()
            rloc_record.rloc.copy_address(rtr)
            rloc_record.priority = 254
            rloc_record.rloc_name = "RTR"
            rloc_record.weight = 0
            rloc_record.mpriority = 255
            rloc_record.mweight = 0
            rloc_record.local_bit = False
            rloc_record.reach_bit = True
            eid_records += rloc_record.encode()
            rloc_record.print_record("    RTR ")
        #endfor
        #
        # Add EID-records to correct map-server name set.
        #
        # NOTE(review): 'eid_records' accumulates across iterations, so each
        # ms-name set receives every record built so far, and the send loop
        # below appends the full 'eid_records' buffer regardless of the
        # map-server's own set. This is only consistent when a single
        # ms-name is in play; behavior kept as-is — verify before relying
        # on multiple ms-names here.
        #
        ms_list[ms_dns_name][0] += eid_records
    #endfor
    #
    # Build map-server independent fields.
    #
    map_register = lisp.lisp_map_register()
    map_register.nonce = 0xaabbccdddfdfdf00
    map_register.xtr_id_present = True
    map_register.proxy_reply_requested = True
    map_register.map_notify_requested = False
    map_register.merge_register_requested = True
    #
    # Send Map-Register to each configured map-server.
    #
    for ms in lisp.lisp_map_servers_list.values():
        key = ms.dns_name if decent else ms.ms_name
        #
        # Get EID-records from correct map-server name set.
        #
        if (ms_list.has_key(key) == False): continue
        #
        # Build map-server specific fields.
        #
        map_register.record_count = ms_list[key][1]
        if (map_register.record_count == 0): continue
        map_register.nonce += 1
        map_register.alg_id = ms.alg_id
        #
        # Bug fix: this used to read "map_register.alg_id = ms.key_id",
        # clobbering the algorithm id set just above and leaving key_id
        # unset. Assign key_id, matching lisp_build_map_register().
        #
        map_register.key_id = ms.key_id
        map_register.xtr_id = ms.xtr_id
        map_register.site_id = ms.site_id
        map_register.encrypt_bit = (ms.ekey != None)
        packet = map_register.encode()
        map_register.print_map_register()
        #
        # Append EID-records and encode xtr-ID and site-ID at end of
        # Map-Register.
        #
        trailer = map_register.encode_xtr_id("")
        packet = packet + eid_records + trailer
        ms.map_registers_multicast_sent += 1
        lisp.lisp_send_map_register(lisp_sockets, packet, map_register, ms)
        #
        # Do DNS lookup for Map-Server if "dns-name" configured.
        #
        ms.resolve_dns_name()
        #
        # Go build more EID-records.
        #
        time.sleep(.001)
    #endfor
    return
#enddef
#
# IGMP record types.
#
# Human-readable names for IGMPv3 group-record types (RFC 3376 section
# 4.2.12), keyed by the 8-bit Record Type field; used for validation and
# logging in lisp_process_igmp_packet().
lisp_igmp_record_types = { 1 : "include-mode", 2 : "exclude-mode",
    3 : "change-to-include", 4 : "change-to-exclude", 5 : "allow-new-source",
    6 : "block-old-sources" }
#
# lisp_process_igmp_packet
#
# Process IGMP packets.
#
# Basically odd types are Joins and even types are Leaves.
#
#
# An IGMPv1 and IGMPv2 report format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Version| Type | Unused | Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Group Address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# An IGMPv3 report format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 0x22 | Reserved | Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reserved | Number of Group Records (M) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# . .
# . Group Record [1] .
# . .
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# . .
# . Group Record [2] .
# . .
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . |
# . . .
# | . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# . .
# . Group Record [M] .
# . .
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# An IGMPv3 group record format is:
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Record Type | Aux Data Len | Number of Sources (N) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Multicast Address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Source Address [1] |
# +- -+
# | Source Address [2] |
# +- -+
# . . .
# . . .
# . . .
# +- -+
# | Source Address [N] |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# . .
# . Auxiliary Data .
# . .
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
def lisp_process_igmp_packet(packet):
    """Parse an IGMP packet (IP header included) and register the joins/
    leaves it reports via lisp_send_multicast_map_register().

    Handles IGMPv1/v2 reports and leaves and IGMPv3 reports; all other
    types are logged and dropped. Link-local groups (224.0.0.x) are never
    registered. NOTE(review): byte indexing like packet[0] into
    struct.unpack assumes Python 2 str packets; under Python 3 bytes
    indexing yields an int and this would break.
    """
    global lisp_send_sockets
    r = lisp.bold("Receive", False)
    lisp.lprint("{} {}-byte IGMP packet: {}".format(r, len(packet),
        lisp.lisp_format_packet(packet)))
    #
    # Jump over IP header. IHL (low nibble of the first byte) is in
    # 32-bit words.
    #
    header_offset = (struct.unpack("B", packet[0])[0] & 0x0f) * 4
    #
    # Check for IGMPv3 type value 0x22. Or process an IGMPv2 report.
    #
    igmp = packet[header_offset::]
    igmp_type = struct.unpack("B", igmp[0])[0]
    group = lisp.lisp_address(lisp.LISP_AFI_IPV4, "", 32, 0)
    reports_and_leaves_only = (igmp_type in (0x12, 0x16, 0x17, 0x22))
    if (reports_and_leaves_only == False):
        igmp_str = "{} ({})".format(igmp_type, igmp_types[igmp_type]) if \
            igmp_types.has_key(igmp_type) else igmp_type
        lisp.lprint("IGMP type {} not supported".format(igmp_str))
        return
    #endif
    if (len(igmp) < 8):
        lisp.lprint("IGMP message too small")
        return
    #endif
    #
    # Maybe this is an IGMPv1 or IGMPv2 message so get group address. If
    # IGMPv3, we will fix up group address in loop (for each group record).
    # For IGMPv3 this second 32-bit word is reserved(16) + record-count(16)
    # and is reused as record_count below.
    #
    group.address = socket.ntohl(struct.unpack("II", igmp[:8])[1])
    group_str = group.print_address_no_iid()
    #
    # Process either IGMPv1 or IGMPv2 and exit.
    #
    if (igmp_type == 0x17):
        lisp.lprint("IGMPv2 leave (*, {})".format(lisp.bold(group_str, False)))
        lisp_send_multicast_map_register(lisp_send_sockets,
            [[None, group_str, False]])
        return
    #endif
    if (igmp_type in (0x12, 0x16)):
        lisp.lprint("IGMPv{} join (*, {})".format( \
            1 if (igmp_type == 0x12) else 2, lisp.bold(group_str, False)))
        #
        # Suppress for link-local groups.
        #
        if (group_str.find("224.0.0.") != -1):
            lisp.lprint("Suppress registration for link-local groups")
        else:
            lisp_send_multicast_map_register(lisp_send_sockets,
                [[None, group_str, True]])
        #endif
        #
        # Finished with IGMPv1 or IGMPv2 processing.
        #
        return
    #endif
    #
    # Parse each record for IGMPv3 (igmp_type == 0x22). group.address
    # currently holds the reserved+record-count word parsed above.
    #
    record_count = group.address
    igmp = igmp[8::]
    group_format = "BBHI"
    group_size = struct.calcsize(group_format)
    source_format = "I"
    source_size = struct.calcsize(source_format)
    source = lisp.lisp_address(lisp.LISP_AFI_IPV4, "", 32, 0)
    #
    # Traverse each group record.
    #
    register_entries = []
    for i in range(record_count):
        # Truncated record: stop parsing entirely (drops any entries
        # collected so far -- behavior kept as-is).
        if (len(igmp) < group_size): return
        record_type, x, source_count, address = struct.unpack(group_format,
            igmp[:group_size])
        igmp = igmp[group_size::]
        if (lisp_igmp_record_types.has_key(record_type) == False):
            lisp.lprint("Invalid record type {}".format(record_type))
            continue
        #endif
        record_type_str = lisp_igmp_record_types[record_type]
        source_count = socket.ntohs(source_count)
        group.address = socket.ntohl(address)
        group_str = group.print_address_no_iid()
        lisp.lprint("Record type: {}, group: {}, source-count: {}".format( \
            record_type_str, group_str, source_count))
        #
        # Determine if this is a join or leave. MODE_IS_INCLUDE (1) is a join.
        # MODE_TO_EXCLUDE (4) with no sources is a join. CHANGE_TO_INCLUDE (5)
        # is a join. Everything else is a leave.
        #
        joinleave = False
        if (record_type in (1, 5)): joinleave = True
        if (record_type == 4 and source_count == 0): joinleave = True
        j_or_l = "join" if (joinleave) else "leave"
        #
        # Suppress registration for link-local groups.
        #
        if (group_str.find("224.0.0.") != -1):
            lisp.lprint("Suppress registration for link-local groups")
            continue
        #endif
        #
        # (*,G) Join or Leave has been received if source count is 0.
        #
        # If this is IGMPv2 or just IGMPv3 reporting a group address, encode
        # a (*,G) for the element in the register_entries array.
        #
        if (source_count == 0):
            register_entries.append([None, group_str, joinleave])
            lisp.lprint("IGMPv3 {} (*, {})".format(lisp.bold(j_or_l, False),
                lisp.bold(group_str, False)))
        #endif
        #
        # Process (S,G)s (source records).
        #
        for j in range(source_count):
            if (len(igmp) < source_size): return
            address = struct.unpack(source_format, igmp[:source_size])[0]
            source.address = socket.ntohl(address)
            source_str = source.print_address_no_iid()
            register_entries.append([source_str, group_str, joinleave])
            lisp.lprint("{} ({}, {})".format(j_or_l,
                lisp.green(source_str, False), lisp.bold(group_str, False)))
            igmp = igmp[source_size::]
        #endfor
    #endfor
    #
    # Build Map-Register for (S,G) entries. Put them in a Multicast Info LCAF
    # Type and put ourselves as an RLE. This is draft-farinacci-lisp-signal-
    # free-multicast
    #
    if (len(register_entries) != 0):
        lisp_send_multicast_map_register(lisp_send_sockets, register_entries)
    #endif
    return
#enddef
#
# lisp_etr_data_plane
#
# Capture a LISP encapsulated packet, decap it, process inner header, and
# re-encapsulated it.
#
def lisp_etr_data_plane(parms, not_used, packet):
    global lisp_ipc_listen_socket
    #
    # pcap callback invoked from lisp_etr_process(). 'parms' is the
    # [<capture-device-name>, <raw-socket>] array passed to pcap.loop() and
    # 'packet' is the raw captured frame as a Python2 byte string.
    #
    device = parms[0]
    lisp_raw_socket = parms[1]
    #
    # Jump over MAC header if packet received on interface. There is a 4-byte
    # internal header in any case (loopback interfaces will have a 4 byte
    # header)..
    #
    if (lisp.lisp_is_macos() == False):
        offset = 4 if device == "lo0" else 16
        packet = packet[offset::]
    #endif
    #
    # Check IGMP packet. packet[9] is the IPv4 header protocol field and
    # protocol number 2 is IGMP.
    #
    protocol = struct.unpack("B", packet[9])[0]
    if (protocol == 2):
        lisp_process_igmp_packet(packet)
        return
    #endif
    #
    # Check RLOC-probe Map-Request. We need to grab the TTL from IP header.
    # lisp_is_rloc_probe() returns a modified packet when it was a probe.
    #
    orig_packet = packet
    packet, source, port, ttl = lisp.lisp_is_rloc_probe(packet, 0)
    if (orig_packet != packet):
        if (source == None): return
        lisp.lisp_parse_packet(lisp_send_sockets, packet, source, port, ttl)
        return
    #endif
    #
    # First check if we are assembling IPv4 fragments. Do this only when
    # not doing NAT-traversal. Otherwise, the kernel will do it when we
    # receive the same packet on a raw socket (in lisp_etr_nat_data_plane()).
    # packet[20:22] is the UDP source port (assumes a 20-byte IPv4 header).
    #
    sport = socket.ntohs(struct.unpack("H", packet[20:22])[0])
    if (sport == lisp.LISP_DATA_PORT): return
    packet = lisp.lisp_reassemble(packet)
    if (packet == None): return
    packet = lisp.lisp_packet(packet)
    status = packet.decode(True, lisp_ipc_listen_socket, lisp.lisp_decap_stats)
    if (status == None): return
    #
    # Print some useful header fields.
    #
    packet.print_packet("Receive", True)
    #
    # If we are looping back Map-Registers via encapsulation, overwrite
    # multicast address with source address. That means we are sending a
    # Map-Register message to the lisp-core process from our local RLOC
    # address to our local RLOC address. Also, zero out the UDP checksum
    # since the destination address changes that affects the pseudo-header.
    #
    if (lisp.lisp_decent_push_configured and
        packet.inner_dest.is_multicast_address() and \
        packet.lisp_header.get_instance_id() == 0xffffff):
        source = packet.inner_source.print_address_no_iid()
        packet.strip_outer_headers()
        packet = packet.packet[28::]
        packet = lisp.lisp_packet_ipc(packet, source, sport)
        lisp.lisp_ipc(packet, lisp_ipc_listen_socket, "lisp-ms")
        return
    #endif
    #
    # Packets are arriving on pcap interface. Need to check if another data-
    # plane is running. If so, don't deliver duplicates.
    #
    if (lisp.lisp_ipc_data_plane):
        lisp.dprint("Drop packet, external data-plane active")
        return
    #endif
    #
    # Increment global stats.
    #
    lisp.lisp_decap_stats["good-packets"].increment(len(packet.packet))
    #
    # Strip outer headers and start inner header forwarding logic.
    #
    packet.strip_outer_headers()
    f_or_b = lisp.bold("Forward", False)
    #
    # Process inner header (checksum and decrement ttl).
    # NOTE(review): the IPv6 branch passes the lisp_packet object to
    # lisp_ipv6_input() while the IPv4 branch passes the raw bytes --
    # confirm this asymmetry is intended by the lisp module API.
    #
    L2 = packet.inner_dest.is_mac()
    if (L2):
        packet.packet = lisp.lisp_mac_input(packet.packet)
        if (packet.packet == None): return
        f_or_b = lisp.bold("Bridge", False)
    elif (packet.inner_version == 4):
        packet.packet = lisp.lisp_ipv4_input(packet.packet)
        if (packet.packet == None): return
        packet.inner_ttl = packet.outer_ttl
    elif (packet.inner_version == 6):
        packet.packet = lisp.lisp_ipv6_input(packet)
        if (packet.packet == None): return
        packet.inner_ttl = packet.outer_ttl
    else:
        lisp.dprint("Cannot parse inner packet header")
        return
    #endif
    #
    # Check if database-mapping exists for our local destination.
    # NOTE(review): 'db' is only assigned on this non-multicast path; if a
    # bridged (L2) destination is classified as multicast, the
    # bridge_l2_packet() call below would reference an unbound 'db' --
    # confirm that case cannot occur.
    #
    if (packet.inner_dest.is_multicast_address() == False):
        db = lisp.lisp_db_for_lookups.lookup_cache(packet.inner_dest, False)
        if (db):
            db.increment_decap_stats(packet)
        else:
            lisp.dprint("No database-mapping found for EID {}".format( \
                lisp.green(packet.inner_dest.print_address(), False)))
            return
        #endif
    #endif
    #
    # We are going to forward or bridge the decapsulated packet.
    #
    addr_str = "{} -> {}".format(packet.inner_source.print_address(),
        packet.inner_dest.print_address())
    lisp.dprint("{} packet for EIDs {}: {} ...".format(f_or_b, \
        lisp.green(addr_str, False),
        lisp.lisp_format_packet(packet.packet[0:60])))
    #
    # If we are decapsulating a MAC frame, then use the L2 socket where
    # the MAC header is already in packet.
    #
    if (L2):
        packet.bridge_l2_packet(packet.inner_dest, db)
        return
    #endif
    #
    # Send on L2 socket since IPv6 raw sockets do not allow us to send an
    # entire IPv6 header in payload. Prepend prebuilt MAC header.
    #
    if (packet.inner_version == 6):
        packet.send_l2_packet(lisp_l2_socket, lisp_mac_header)
        return
    #endif
    #
    # Default to global raw socket otherwise get socket based on instance-ID.
    #
    raw_socket = packet.get_raw_socket()
    if (raw_socket == None): raw_socket = lisp_raw_socket
    #
    # Send out.
    #
    packet.send_packet(raw_socket, packet.inner_dest)
    return
#enddef
#
# lisp_etr_nat_data_plane
#
# Packet came in on a destination ephemeral port from a source port of 4341.
# That is a RTR encapsulated this packet that is coming through a NAT device.
#
# The packet has the outer IP and UDP headers stripped so the first byte of
# this supplied data packet has the LISP data header on it.
#
def lisp_etr_nat_data_plane(lisp_raw_socket, packet, source):
    global lisp_ipc_listen_socket, lisp_send_sockets
    #
    # Decode LISP header. The kernel already stripped the outer IP/UDP
    # headers, so byte 0 of 'packet' is the 8-byte LISP data header.
    #
    lisp_header = packet
    packet = lisp.lisp_packet(packet[8::])
    if (packet.lisp_header.decode(lisp_header) == False): return
    #
    # Store outer source RLOC address so if we are doing lisp-crypto across
    # NAT-traversal, we can find the decryption key.
    #
    packet.outer_source = lisp.lisp_address(lisp.LISP_AFI_IPV4, source,
        lisp.LISP_IPV4_HOST_MASK_LEN, 0)
    status = packet.decode(False, lisp_ipc_listen_socket,
        lisp.lisp_decap_stats)
    if (status == None): return
    #
    # Special case to log packets with no outer header but are considered
    # decapsulated when coming through NATs. Since packets are sent from
    # source port 4341, the kernel will strip outer header, so we don't have
    # outer header context in lisp_packet().
    #
    if (lisp.lisp_flow_logging): packet.log_flow(False)
    packet.print_packet("Kernel-decap", False)
    lisp.dprint(packet.lisp_header.print_header(" "))
    #
    # If we are looping back Map-Registers via encapsulation, overwrite
    # multicast address with source address. That means we are sending a
    # Map-Register message to the lisp-core process from our local RLOC
    # address to our local RLOC address. Also, zero out the UDP checksum
    # since the destination address changes that affects the pseudo-header.
    #
    if (lisp.lisp_decent_push_configured and
        packet.inner_dest.is_multicast_address() and \
        packet.lisp_header.get_instance_id() == 0xffffff):
        sport = packet.udp_sport
        packet = packet.packet[28::]
        packet = lisp.lisp_packet_ipc(packet, source, sport)
        lisp.lisp_ipc(packet, lisp_ipc_listen_socket, "lisp-ms")
        return
    #endif
    #
    # Check if inner packet is a LISP control-packet. Typically RLOC-probes
    # from RTRs can come through NATs. We want to reply to the global address
    # of the RTR which is the outer source RLOC. We don't care about the
    # inner source port since the RTR will decapsulate a data encapsulated
    # RLOC-probe Map-Reply.
    #
    if (packet.lisp_header.get_instance_id() == 0xffffff):
        packet = packet.packet
        ttl = -1
        # packet[28] is the first UDP payload byte (assumes a 20-byte inner
        # IPv4 header + 8-byte UDP header); packet[8] is the inner IP TTL.
        if (lisp.lisp_is_rloc_probe_request(packet[28])):
            ttl = struct.unpack("B", packet[8])[0] - 1
        #endif
        packet = packet[28::]
        lisp.lisp_parse_packet(lisp_send_sockets, packet, source, 0, ttl)
        return
    #endif
    #
    # Packets are arriving on ephemeral socket. Need to check if another data-
    # plane is running. If so, don't deliver duplicates.
    #
    if (lisp.lisp_ipc_data_plane):
        lisp.dprint("Drop packet, external data-plane active")
        return
    #endif
    #
    # Increment global stats.
    #
    lisp.lisp_decap_stats["good-packets"].increment(len(packet.packet))
    #
    # Check if database-mapping exists for our local destination. Unlike
    # lisp_etr_data_plane(), a lookup miss here is logged but the packet is
    # still forwarded.
    #
    if (packet.inner_dest.is_multicast_address() == False):
        db = lisp.lisp_db_for_lookups.lookup_cache(packet.inner_dest, False)
        if (db):
            db.increment_decap_stats(packet)
        else:
            lisp.dprint("No database-mapping found for EID {}".format( \
                lisp.green(packet.inner_dest.print_address(), False)))
        #endif
    #endif
    addr_str = "{} -> {}".format(packet.inner_source.print_address(),
        packet.inner_dest.print_address())
    lisp.dprint("{} packet for EIDs {}: {} ...".format( \
        lisp.bold("NAT-Forward", False), lisp.green(addr_str, False),
        lisp.lisp_format_packet(packet.packet[0:60])))
    #
    # Send on L2 socket since IPv6 raw sockets do not allow us to send an
    # entire IPv6 header in payload. Prepend prebuilt MAC header.
    #
    if (packet.inner_version == 6):
        packet.send_l2_packet(lisp_l2_socket, lisp_mac_header)
        return
    #endif
    #
    # Default to global raw socket otherwise get socket based on instance-ID.
    #
    raw_socket = packet.get_raw_socket()
    if (raw_socket == None): raw_socket = lisp_raw_socket
    #
    # Send out on raw socket.
    #
    packet.send_packet(raw_socket, packet.inner_dest)
    return
#enddef
#
# lisp_register_ipv6_group_entries
#
# Find an IPv6 group-mapping and send a Map-Register for each configured IPv6
# source for the IPv6 group-prefix found.
#
def lisp_register_ipv6_group_entries(group, joinleave):
    #
    # Walk all configured group-mappings and remember the most specific
    # one that matches 'group'.
    #
    best = None
    for mapping in lisp.lisp_group_mapping_list.values():
        match_len = lisp_is_group_more_specific(group, mapping)
        if (match_len == -1): continue
        if (best == None or match_len > best.mask_len): best = mapping
    #endfor
    if (best == None): return
    #
    # Build one (S,G) register entry per configured source and send a
    # single Map-Register for the batch.
    #
    entries = [[source, group, joinleave] for source in best.sources]
    lisp_send_multicast_map_register(lisp_send_sockets, entries)
    return
#enddef
#
# lisp_etr_join_leave_process
#
# Look at file-system to see if there is a join or leave to be done. This
# function will send joins in the form of building an IP/IGMPv2 packet to
# be passed to lisp_process_igmp_packet(). The groups that are joined are
# ones found as filenames in the current directory as "join-<group>". The
# IGMP Reports will be sent to lisp_process_igmp_packet() every 10 seconds.
#
# For right now, if the group address is IPv6, send a Map-Register directly.
# We will get to MLD support later.
#
# This is used for testing and not meant for production deployment.
#
def lisp_etr_join_leave_process():
    global lisp_send_sockets
    lisp.lisp_set_exception()
    #
    # Prebuild a 24-byte IPv4 header in network byte order (0x46 = version 4,
    # IHL of 6 words including the 0x94040000 Router-Alert option); the IGMP
    # type word and group address are appended per group below.
    # NOTE(review): destination 0xe00000fb is 224.0.0.251 -- confirm this is
    # the intended group for self-delivered reports.
    #
    swap = socket.htonl
    ipigmp = [swap(0x46000020), swap(0x9fe60000), swap(0x0102d7cc),
        swap(0x0acfc15a), swap(0xe00000fb), swap(0x94040000)]
    packet = ""
    for l in ipigmp: packet += struct.pack("I", l)
    #
    # Look for files in current directory for "join-<group>" and then send
    # an IGMPv2 report to ourselves. The 'commands' module is Python2-only.
    #
    while (True):
        groups = commands.getoutput("ls join-*").replace("join-", "")
        groups = groups.split("\n")
        for group in groups:
            if (lisp.lisp_valid_address_format("address", group) == False):
                continue
            #endif
            ipv6 = (group.find(":") != -1)
            #
            # Check if we are leaving group.
            #
            leavejoin = os.path.exists("leave-{}".format(group))
            lisp.lprint("Internal {} group {}".format( \
                "leaving" if leavejoin else "joining", group))
            #
            # Set IGMP message to Report or Leave. Then add group.
            # IGMP type 0x17 is Leave-Group, 0x16 is an IGMPv2 Report.
            #
            if (ipv6):
                if (group.lower().find("ff02:") != -1):
                    lisp.lprint("Suppress registration for link-local groups")
                    continue
                #endif
                lisp_register_ipv6_group_entries(group, (leavejoin == False))
            else:
                send_packet = packet
                if (leavejoin):
                    send_packet += struct.pack("I", swap(0x17000000))
                else:
                    send_packet += struct.pack("I", swap(0x16000000))
                #endif
                # Append the dotted-quad group address as one 32-bit word
                # in network byte order.
                octet = group.split(".")
                value = int(octet[0]) << 24
                value += int(octet[1]) << 16
                value += int(octet[2]) << 8
                value += int(octet[3])
                send_packet += struct.pack("I", swap(value))
                lisp_process_igmp_packet(send_packet)
                # Pace per-group registrations by 0.1 second.
                time.sleep(.100)
            #endif
        #endfor
        time.sleep(10)
    #endwhile
    return
#enddef
#
# lisp_etr_process
#
# This thread is for receiving encapsulated LISP packets address to destination
# port 4341. As well as IGMP reports. The IGMP reports can be captured on
# Ubuntu and Fedora but not on MacOS. The former supports IGMPv3 and the
# latter supports IGMPv2 if we listen on "en0".
#
def lisp_etr_process():
    lisp.lisp_set_exception()
    # Need at least one local RLOC to source encapsulated packets from.
    if (lisp.lisp_myrlocs[0] == None): return
    #
    # Find all multicast RLEs so we can receive packets on underlay multicast
    # groups.
    #
    rles = lisp.lisp_get_all_multicast_rles()
    #
    # We need to listen on en0 when doing IGMP testing on MacOS.
    #
    device = "any"
    # device = "en0" if lisp.lisp_is_macos() else "any"
    # device = "lo0" if lisp.lisp_is_macos() else "any"
    pcap = pcappy.open_live(device, 1600, 0, 100)
    #
    # Build the BPF filter: IGMP (protocol 2), or packets to any local
    # address/multicast RLE that are LISP/VXLAN data (UDP 4341/8472/4789),
    # data-encapsulated RLOC-probes (UDP 4342 with first payload byte 0x12),
    # or IPv4 fragments (ip[6]&0xe0 tests the MF flag, ip[7] the low byte
    # of the fragment offset).
    #
    pfilter = "(proto 2) or "
    pfilter += "((dst host "
    for addr in lisp.lisp_get_all_addresses() + rles:
        pfilter += "{} or ".format(addr)
    #endfor
    pfilter = pfilter[0:-4]
    pfilter += ") and ((udp dst port 4341 or 8472 or 4789) or "
    pfilter += "(udp dst port 4342 and ip[28] == 0x12) or "
    pfilter += "(proto 17 and (ip[6]&0xe0 == 0x20 or " + \
        "(ip[6]&0xe0 == 0 and ip[7] != 0)))))"
    lisp.lprint("Capturing packets for: '{}' on device {}".format(pfilter,
        device))
    pcap.filter = pfilter
    #
    # Enter receive loop. Each captured frame is handed to
    # lisp_etr_data_plane() along with the device name and raw socket.
    #
    pcap.loop(-1, lisp_etr_data_plane, [device, lisp_raw_socket])
    return
#enddef
#
# lisp_etr_startup
#
# Initialize this LISP ETR process. Returns True on success, False otherwise.
#
def lisp_etr_startup():
    global lisp_ipc_listen_socket
    global lisp_ephem_socket
    global lisp_send_sockets
    global lisp_raw_socket
    global lisp_l2_socket
    global lisp_mac_header
    lisp.lisp_i_am("etr")
    lisp.lisp_set_exception()
    lisp.lisp_print_banner("ETR starting up")
    #
    # Get local address for source RLOC for encapsulation.
    #
    lisp.lisp_get_local_interfaces()
    lisp.lisp_get_local_macs()
    if (lisp.lisp_get_local_addresses() == False): return(False)
    #
    # Prebuild MAC header for lisp_l2_socket sending. Disabled code in favor
    # of using pytun. See below.
    #
    # m = lisp.lisp_mymacs.keys()[0]
    # mac = ""
    # for i in range(0, 12, 2): mac += chr(int(m[i:i+2], 16))
    # lisp_mac_header = mac + mac + "\x86\xdd"
    # lisp.dprint("Built MAC header for L2 socket:",
    # lisp.lisp_format_packet(lisp_mac_header))
    #
    # Used for listening for Info-Replies for NAT-traversal support.
    #
    s = lisp.lisp_open_listen_socket("0.0.0.0", str(lisp_ephem_port))
    s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 32)
    lisp_ephem_socket = s
    #
    # Open network send socket and internal listen socket.
    #
    lisp_ipc_listen_socket = lisp.lisp_open_listen_socket("", "lisp-etr")
    lisp_send_sockets[0] = lisp_ephem_socket
    lisp_send_sockets[1] = lisp.lisp_open_send_socket("", lisp.LISP_AFI_IPV6)
    lisp_send_sockets[2] = lisp_ipc_listen_socket
    #
    # Open up raw socket so we can send with IP headers after decapsulation.
    # There is a special case where the RTR's lisp_send_sockets array is of
    # size 4 since we need to pass the raw socket through the lisp.py module
    # to send a data encapsulated RLOC-probe to an ETR that sits behind a NAT.
    # The test is in lisp_send_map_request() for this. This is the case in
    # ETRs as well. All other components use an array size of 3 modulo.
    #
    lisp_raw_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW,
        socket.IPPROTO_RAW)
    lisp_raw_socket.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
    lisp_send_sockets.append(lisp_raw_socket)
    #
    # Open a L2 socket so when we decapsulate and have to route an IPv6
    # packet, we have the kernel receive a MAC frame on the loopback interface.
    # We do this because there is no IP_HDRINCL for IPv6 raw sockets.
    #
    # Disabling this code in favor of using a tuntap tun interface via the
    # pytun module. See code right below.
    #
    # if ("PF_PACKET" in dir(socket)):
    # interface = "lo" if ("lo" in lisp.lisp_myinterfaces.keys()) else \
    # "lo0" if ("lo0" in lisp.lisp_myinterfaces.keys()) else None
    # if (interface != None):
    # lisp_l2_socket = socket.socket(socket.PF_PACKET, socket.SOCK_RAW)
    # lisp_l2_socket.bind(("lo", 0x86dd))
    # #endif
    # #endif
    #
    # Setup tuntap tunnel interface so when we decap IPv6 packets, we can
    # send to kernel to route them. The 4-byte tun prefix plus EtherType
    # 0x86dd (IPv6) is prepended by send_l2_packet().
    #
    if (pytun != None):
        lisp_mac_header = '\x00\x00\x86\xdd'
        device = "lispers.net"
        try:
            lisp_l2_socket = pytun.TunTapDevice(flags=pytun.IFF_TUN,
                name=device)
            os.system("ip link set dev {} up".format(device))
        except:
            lisp.lprint("Cannot create tuntap interface")
        #endtry
    #endif
    #
    # Start thread to listen on data socket.
    #
    threading.Thread(target=lisp_etr_process, args=[]).start()
    #
    # Test code to force IGMPv2 joins and leaves on an airplane. ;-)
    #
    threading.Thread(target=lisp_etr_join_leave_process, args=[]).start()
    return(True)
#enddef
#
# lisp_etr_shutdown
#
# Shut down this process.
#
def lisp_etr_shutdown():
    global lisp_register_timer
    global lisp_etr_info_timer
    #
    # Stop the periodic Map-Register and Info timer threads if they exist.
    #
    for timer in [lisp_register_timer, lisp_etr_info_timer]:
        if (timer): timer.cancel()
    #endfor
    #
    # Release the network send sockets and the IPC listen socket.
    #
    lisp.lisp_close_socket(lisp_send_sockets[0], "")
    lisp.lisp_close_socket(lisp_send_sockets[1], "")
    lisp.lisp_close_socket(lisp_ipc_listen_socket, "lisp-etr")
    return
#enddef
#
# lisp_etr_discover_eid
#
# Process IPC message from the lisp-itr process. It will be in the form of:
#
# "learn%<eid-string>%<interface-name>"
#
def lisp_etr_discover_eid(ipc):
    #
    # IPC format: "learn%<eid-string>%<interface-name>". An interface string
    # of "None" means the ITR is reporting that the dynamic-EID went away.
    #
    ipc = ipc.split("%")
    eid_str = ipc[1]
    interface = ipc[2]
    if (interface == "None"): interface = None
    eid = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
    eid.store_address(eid_str)
    #
    # Do database-mapping lookup.
    #
    db = lisp.lisp_db_for_lookups.lookup_cache(eid, False)
    if (db == None or db.dynamic_eid_configured() == False):
        lisp.lprint("ITR/ETR dynamic-EID configuration out of sync for {}". \
            format(lisp.green(eid_str, False)))
        return
    #endif
    #
    # Do logic checks. That is do not remove an entry if it is not there and
    # don't try to add an entry if it is already cached.
    #
    dyn_eid = None
    if (db.dynamic_eids.has_key(eid_str)): dyn_eid = db.dynamic_eids[eid_str]
    if (dyn_eid == None and interface == None):
        lisp.lprint("ITR/ETR state mismatch for {}".format( \
            lisp.green(eid_str, False)))
        return
    #endif
    #
    # Check if ITR is changing the interface to the same interface, meaning
    # it is confused. Otherwise, the IPC is an interface change. Don't register
    # in this case.
    #
    if (dyn_eid and interface):
        if (dyn_eid.interface == interface):
            lisp.lprint("ITR sent redundant IPC for {}".format( \
                lisp.green(eid_str, False)))
        else:
            lisp.lprint("Dynamic-EID {} interface change, {} -> {}".format( \
                lisp.green(eid_str, False), dyn_eid.interface, interface))
            dyn_eid.interface = interface
        #endif
        return
    #endif
    #
    # Add new entry and register it.
    #
    if (interface):
        dyn_eid = lisp.lisp_dynamic_eid()
        dyn_eid.dynamic_eid.copy_address(eid)
        dyn_eid.interface = interface
        dyn_eid.get_timeout(interface)
        db.dynamic_eids[eid_str] = dyn_eid
        reg = lisp.bold("Registering", False)
        eid_str = lisp.bold(eid_str, False)
        lisp.lprint("{} dynamic-EID {} on interface {}, timeout {}".format(reg,
            lisp.green(eid_str, False), interface, dyn_eid.timeout))
        lisp_build_map_register(lisp_send_sockets, None, eid, None, False)
        #
        # Add /32 to routing table.
        #
        if (lisp.lisp_is_macos() == False):
            eid_str = eid.print_prefix_no_iid()
            cmd = "ip route add {} dev {}".format(eid_str, interface)
            os.system(cmd)
        #endif
        return
    #endif
    #
    # Remove existing entry and deregister it.
    #
    if (db.dynamic_eids.has_key(eid_str)):
        interface = db.dynamic_eids[eid_str].interface
        dereg = lisp.bold("Deregistering", False)
        lisp.lprint("{} dynamic-EID {}".format(dereg,
            lisp.green(eid_str, False)))
        lisp_build_map_register(lisp_send_sockets, 0, eid, None, False)
        db.dynamic_eids.pop(eid_str)
        #
        # Delete /32 from routing table.
        #
        if (lisp.lisp_is_macos() == False):
            eid_str = eid.print_prefix_no_iid()
            cmd = "ip route delete {} dev {}".format(eid_str, interface)
            os.system(cmd)
        #endif
    #endif
    return
#enddef
#
# lisp_etr_process_rtr_updown
#
# Process IPC message from lisp-itr. It is telling the lisp-etr process if
# RLOC-probing has determined if the RTR has gone up or down. And therefore
# if it should be registered to the mapping system.
#
def lisp_etr_process_rtr_updown(ipc):
    #
    # IPC format: "rtr%<rtr-address>%<up-or-down>". Ignore the message when
    # we register all RTRs regardless of reachability, or when the RTR is
    # not in our list.
    #
    if (lisp.lisp_register_all_rtrs): return
    opcode, rtr_str, status = ipc.split("%")
    if (lisp.lisp_rtr_list.has_key(rtr_str) == False): return
    lisp.lprint("Process ITR IPC message, RTR {} has gone {}".format(
        lisp.red(rtr_str, False), lisp.bold(status, False)))
    #
    # A down RTR stays in the list with value None so it is excluded from
    # registration; an up RTR gets a fresh address entry. (Removed a dead
    # read of lisp_rtr_list[rtr_str] whose value was never used.)
    #
    if (status == "down"):
        lisp.lisp_rtr_list[rtr_str] = None
        return
    #endif
    rtr = lisp.lisp_address(lisp.LISP_AFI_IPV4, rtr_str, 32, 0)
    lisp.lisp_rtr_list[rtr_str] = rtr
    return
#enddef
#
# lisp_etr_process_nonce_ipc
#
# Process an nonce IPC message from the ITR. It wants to know when a nonce
# is echoed from a remote ITR.
#
def lisp_etr_process_nonce_ipc(ipc):
    #
    # IPC format: "nonce%<R-or-E>%<rloc-string>%<hex-nonce>".
    #
    unused, op, rloc_name, nonce_value = ipc.split("%")
    nonce_value = int(nonce_value, 16)
    #
    # Find existing echo-nonce state for this RLOC, creating it on demand.
    #
    state = lisp.lisp_get_echo_nonce(None, rloc_name)
    if (state == None): state = lisp.lisp_echo_nonce(rloc_name)
    #
    # "R" records a request-nonce we sent; "E" records an echoed nonce.
    #
    if (op == "R"):
        state.request_nonce_sent = nonce_value
        lisp.lprint("Waiting for echo-nonce 0x{} from {}".format( \
            lisp.lisp_hex_string(nonce_value),
            lisp.red(state.rloc_str, False)))
    elif (op == "E"):
        state.echo_nonce_sent = nonce_value
        lisp.lprint("Sent echo-nonce 0x{} to {}".format( \
            lisp.lisp_hex_string(nonce_value),
            lisp.red(state.rloc_str, False)))
    #endif
    return
#enddef
#
# ETR commands processed by this process.
#
#
# Each entry maps a CLI command clause to [<handler-function>, <parameter
# spec dict>]. NOTE(review): the leading boolean and trailing values in each
# parameter array appear to encode whether a value is expected and its
# allowed values/range -- confirm against lispconfig's command parsing.
#
lisp_etr_commands = {
    "lisp xtr-parameters" : [lispconfig.lisp_xtr_command, {
        "rloc-probing" : [True, "yes", "no"],
        "nonce-echoing" : [True, "yes", "no"],
        "data-plane-security" : [True, "yes", "no"],
        "data-plane-logging" : [True, "yes", "no"],
        "frame-logging" : [True, "yes", "no"],
        "flow-logging" : [True, "yes", "no"],
        "nat-traversal" : [True, "yes", "no"],
        "checkpoint-map-cache" : [True, "yes", "no"],
        "ipc-data-plane" : [True, "yes", "no"],
        "decentralized-push-xtr" : [True, "yes", "no"],
        "decentralized-pull-xtr-modulus" : [True, 1, 0xff],
        "decentralized-pull-xtr-dns-suffix" : [True],
        "register-reachable-rtrs" : [True, "yes", "no"],
        "program-hardware" : [True, "yes", "no"] }],
    "lisp interface" : [lispconfig.lisp_interface_command, {
        "interface-name" : [True],
        "device" : [True],
        "instance-id" : [True, 0, 0xffffffff],
        "dynamic-eid" : [True],
        "dynamic-eid-device" : [True],
        "lisp-nat" : [True, "yes", "no"],
        "dynamic-eid-timeout" : [True, 0, 0xff] }],
    "lisp map-server" : [lisp_map_server_command, {
        "ms-name" : [True],
        "address" : [True],
        "dns-name" : [True],
        "authentication-type" : [False, "sha1", "sha2"],
        "authentication-key" : [False],
        "encryption-key" : [False],
        "proxy-reply" : [False, "yes", "no"],
        "want-map-notify" : [False, "yes", "no"],
        "merge-registrations" : [False, "yes", "no"],
        "refresh-registrations" : [False, "yes", "no"],
        "site-id" : [False, 1, 0xffffffffffffffff] }],
    "lisp database-mapping" : [lisp_etr_database_mapping_command, {
        "prefix" : [],
        "mr-name" : [True],
        "ms-name" : [True],
        "instance-id" : [True, 0, 0xffffffff],
        "secondary-instance-id" : [True, 0, 0xffffffff],
        "eid-prefix" : [True],
        "group-prefix" : [True],
        "dynamic-eid" : [True, "yes", "no"],
        "signature-eid" : [True, "yes", "no"],
        "rloc" : [],
        "rloc-record-name" : [True],
        "elp-name" : [True],
        "geo-name" : [True],
        "rle-name" : [True],
        "json-name" : [True],
        "address" : [True],
        "interface" : [True],
        "priority" : [True, 0, 255],
        "weight" : [True, 0, 100] }],
    "lisp explicit-locator-path" : [lispconfig.lisp_elp_command, {
        "elp-name" : [False],
        "elp-node" : [],
        "address" : [True],
        "probe" : [True, "yes", "no"],
        "strict" : [True, "yes", "no"],
        "eid" : [True, "yes", "no"] }],
    "lisp replication-list-entry" : [lispconfig.lisp_rle_command, {
        "rle-name" : [False],
        "rle-node" : [],
        "address" : [True],
        "level" : [True, 0, 255] }],
    "lisp geo-coordinates" : [lispconfig.lisp_geo_command, {
        "geo-name" : [False],
        "geo-tag" : [False] }],
    "lisp json" : [lispconfig.lisp_json_command, {
        "json-name" : [False],
        "json-string" : [False] }],
    "lisp group-mapping" : [lisp_group_mapping_command, {
        "group-name" : [False],
        "ms-name" : [True],
        "group-prefix" : [False],
        "instance-id" : [True, 0, 0xffffffff],
        "rle-address" : [False],
        "sources" : [],
        "address" : [True] }],
    "show database-mapping" : [lisp_etr_show_command, { }],
    "show etr-keys" : [lisp_etr_show_keys_command, {}],
    "show etr-dynamic-eid" : [lispconfig.lisp_show_dynamic_eid_command, { }]
}
#------------------------------------------------------------------------------
#
# Main entry point for process.
#
if (lisp_etr_startup() == False):
    lisp.lprint("lisp_etr_startup() failed")
    lisp.lisp_print_banner("ETR abnormal exit")
    exit(1)
#endif
#
# Multiplex between the NAT-traversal ephemeral socket and the internal IPC
# listen socket until either returns an empty source (shutdown signal).
#
socket_list = [lisp_ephem_socket, lisp_ipc_listen_socket]
while (True):
    try: ready_list, w, x = select.select(socket_list, [], [])
    except: break
    #
    # Process Info-Reply messages received on ephemeral port.
    #
    if (lisp_ephem_socket in ready_list):
        opcode, source, port, packet = \
            lisp.lisp_receive(lisp_ephem_socket, False)
        if (source == ""): break
        if (port == lisp.LISP_DATA_PORT):
            lisp_etr_nat_data_plane(lisp_raw_socket, packet, source)
        else:
            if (lisp.lisp_is_rloc_probe_request(packet[0])):
                lisp.lprint("ETR ignoring RLOC-probe request, using pcap")
                continue
            #endif
            send_register = lisp.lisp_parse_packet(lisp_send_sockets, packet,
                source, port)
            #
            # Info-Reply from map-server has new RTR-list, trigger a
            # Map-Register and a Info-Request to the RTR. These timers are
            # module globals cancelled in lisp_etr_shutdown().
            #
            if (send_register):
                lisp_etr_info_timer = threading.Timer(0,
                    lisp_etr_process_info_timer, [None])
                lisp_etr_info_timer.start()
                lisp_register_timer = threading.Timer(0,
                    lisp_process_register_timer, [lisp_send_sockets])
                lisp_register_timer.start()
            #endif
        #endif
    #endif
    #
    # Process either commands, an IPC data-packet (for testing), or any
    # protocol message on the IPC listen socket.
    #
    if (lisp_ipc_listen_socket in ready_list):
        opcode, source, port, packet = \
            lisp.lisp_receive(lisp_ipc_listen_socket, True)
        if (source == ""): break
        if (opcode == "command"):
            if (packet.find("learn%") != -1):
                lisp_etr_discover_eid(packet)
            elif (packet.find("nonce%") != -1):
                lisp_etr_process_nonce_ipc(packet)
            elif (packet.find("clear%") != -1):
                lispconfig.lisp_clear_decap_stats(packet)
            elif (packet.find("rtr%") != -1):
                lisp_etr_process_rtr_updown(packet)
            elif (packet.find("stats%") != -1):
                packet = packet.split("%")[-1]
                lisp.lisp_process_data_plane_decap_stats(packet, None)
            else:
                lispconfig.lisp_process_command(lisp_ipc_listen_socket,
                    opcode, packet, "lisp-etr", [lisp_etr_commands])
            #endif
        elif (opcode == "api"):
            lisp.lisp_process_api("lisp-etr", lisp_ipc_listen_socket, packet)
        else:
            if (lisp.lisp_is_rloc_probe_request(packet[0])):
                lisp.lprint("ETR ignoring RLOC-probe request, using pcap")
                continue
            #endif
            lisp.lisp_parse_packet(lisp_send_sockets, packet, source, port)
        #endif
    #endif
#endwhile
lisp_etr_shutdown()
lisp.lisp_print_banner("ETR normal exit")
exit(0)
#------------------------------------------------------------------------------
|
test_integration.py | import itertools
import logging
import os
import subprocess
import sys
import mock
import pytest
import six
import ddtrace
from ddtrace import Tracer
from ddtrace.internal import agent
from ddtrace.internal.runtime import container
from ddtrace.internal.writer import AgentWriter
from tests.utils import AnyFloat
from tests.utils import AnyInt
from tests.utils import AnyStr
from tests.utils import override_global_config
AGENT_VERSION = os.environ.get("AGENT_VERSION")
def test_configure_keeps_api_hostname_and_port():
    """
    Ensures that when calling configure without specifying hostname and port,
    previous overrides have been kept.
    """
    tracer = Tracer()
    expected_default = (
        "http://localhost:9126" if AGENT_VERSION == "testagent" else "http://localhost:8126"
    )
    assert tracer.writer.agent_url == expected_default
    # An explicit hostname/port override takes effect immediately...
    tracer.configure(hostname="127.0.0.1", port=8127)
    assert tracer.writer.agent_url == "http://127.0.0.1:8127"
    # ...and survives a later configure() call that touches other settings.
    tracer.configure(priority_sampling=True)
    assert tracer.writer.agent_url == "http://127.0.0.1:8127"
def test_debug_mode():
    """Debug logs appear on stderr only when DD_TRACE_DEBUG is enabled."""

    def import_ddtrace(env):
        # Run "import ddtrace" in a fresh interpreter and capture its output.
        proc = subprocess.Popen(
            [sys.executable, "-c", "import ddtrace"],
            env=env,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        proc.wait()
        return proc.stdout.read(), proc.stderr.read()

    # Without the env var: silent import, no debug logging.
    out, err = import_ddtrace(dict())
    assert out == b""
    assert b"DEBUG:ddtrace" not in err

    # With DD_TRACE_DEBUG=true: stderr should have some debug lines.
    out, err = import_ddtrace(dict(DD_TRACE_DEBUG="true"))
    assert out == b""
    assert b"DEBUG:ddtrace" in err
def test_output(tmpdir):
    """ddtrace-run on a trivial script exits 0 with no output at all."""
    script = tmpdir.join("test.py")
    script.write("import ddtrace\n")
    proc = subprocess.Popen(
        ["ddtrace-run", sys.executable, "test.py"],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        cwd=str(tmpdir),
    )
    proc.wait()
    assert proc.stderr.read() == six.b("")
    assert proc.stdout.read() == six.b("")
    assert proc.returncode == 0
def test_start_in_thread(tmpdir):
    """Importing ddtrace from a worker thread under ddtrace-run stays silent."""
    script = tmpdir.join("test.py")
    script.write(
        """
import threading
def target():
    import ddtrace
t = threading.Thread(target=target)
t.start()
t.join()
""".lstrip()
    )
    proc = subprocess.Popen(
        ["ddtrace-run", sys.executable, "test.py"],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        cwd=str(tmpdir),
    )
    proc.wait()
    assert proc.stderr.read() == six.b("")
    assert proc.stdout.read() == six.b("")
    assert proc.returncode == 0
@pytest.mark.skipif(AGENT_VERSION != "latest", reason="Agent v5 doesn't support UDS")
def test_single_trace_uds():
    """A trace flushed over a UDS agent socket logs no warnings or errors."""
    tracer = Tracer()
    tracer.configure(uds_path="/tmp/ddagent/trace.sock")
    with mock.patch("ddtrace.internal.writer.log") as log:
        span = tracer.trace("client.testing")
        span.finish()
        tracer.shutdown()
    log.warning.assert_not_called()
    log.error.assert_not_called()
def test_uds_wrong_socket_path():
    """A nonexistent UDS path surfaces as a send-failure error log."""
    tracer = Tracer()
    tracer.configure(uds_path="/tmp/ddagent/nosockethere")
    with mock.patch("ddtrace.internal.writer.log") as log:
        tracer.trace("client.testing").finish()
        tracer.shutdown()
    expected = mock.call(
        "failed to send traces to Datadog Agent at %s",
        "unix:///tmp/ddagent/nosockethere",
        exc_info=True,
    )
    log.error.assert_has_calls([expected])
@pytest.mark.skipif(AGENT_VERSION == "testagent", reason="FIXME: Test agent doesn't support this for some reason.")
def test_payload_too_large():
    """Overfilling the trace buffer drops traces with a warning, never an error."""
    t = Tracer()
    # Make sure a flush doesn't happen partway through.
    t.configure(writer=AgentWriter(agent.get_trace_url(), processing_interval=1000))
    with mock.patch("ddtrace.internal.writer.log") as log:
        # Generate enough tagged spans to exceed the buffer capacity.
        for i in range(100000):
            with t.trace("operation") as s:
                s.set_tag(str(i), "b" * 190)
                s.set_tag(str(i), "a" * 190)
        t.shutdown()
        calls = [
            mock.call(
                "trace buffer (%s traces %db/%db) cannot fit trace of size %db, dropping",
                AnyInt(),
                AnyInt(),
                AnyInt(),
                AnyInt(),
            )
        ]
        log.warning.assert_has_calls(calls)
        log.error.assert_not_called()
def test_large_payload():
    """Roughly 3MB of traces flushes cleanly without warnings or errors."""
    tracer = Tracer()
    # Traces are approx. 275 bytes each; 10,000 * 275 ~ 3MB total.
    with mock.patch("ddtrace.internal.writer.log") as log:
        for _ in range(10000):
            with tracer.trace("operation"):
                pass
        tracer.shutdown()
    log.warning.assert_not_called()
    log.error.assert_not_called()
def test_child_spans():
    """One trace of 10,000 nested spans flushes without warnings or errors."""
    tracer = Tracer()
    with mock.patch("ddtrace.internal.writer.log") as log:
        open_spans = [tracer.trace("op") for _ in range(10000)]
        for span in open_spans:
            span.finish()
        tracer.shutdown()
    log.warning.assert_not_called()
    log.error.assert_not_called()
def test_metrics():
    """Health metrics are reported via dogstatsd when the feature is enabled."""
    with override_global_config(dict(health_metrics_enabled=True)):
        t = Tracer()
        statsd_mock = mock.Mock()
        t.writer.dogstatsd = statsd_mock
        assert t.writer._report_metrics
        with mock.patch("ddtrace.internal.writer.log") as log:
            # 5 batches of 3000 spans -> 5 traces / 15000 spans accepted.
            for _ in range(5):
                spans = []
                for i in range(3000):
                    spans.append(t.trace("op"))
                for s in spans:
                    s.finish()
            t.shutdown()
            log.warning.assert_not_called()
            log.error.assert_not_called()
        statsd_mock.distribution.assert_has_calls(
            [
                mock.call("datadog.tracer.buffer.accepted.traces", 5, tags=[]),
                mock.call("datadog.tracer.buffer.accepted.spans", 15000, tags=[]),
                mock.call("datadog.tracer.http.requests", 1, tags=[]),
                mock.call("datadog.tracer.http.sent.bytes", AnyInt()),
            ],
            any_order=True,
        )
def test_single_trace_too_large():
    """A single trace exceeding the payload limit is dropped with a warning."""
    tracer = Tracer()
    with mock.patch("ddtrace.internal.writer.log") as log:
        with tracer.trace("huge"):
            for i in range(100000):
                with tracer.trace("operation") as span:
                    span.set_tag("a" * 10, "b" * 10)
        tracer.shutdown()
        expected = mock.call(
            "trace (%db) larger than payload buffer limit (%db), dropping",
            AnyInt(),
            AnyInt(),
        )
        log.warning.assert_has_calls([expected])
        log.error.assert_not_called()
def test_trace_bad_url():
    """An unreachable agent host surfaces as a send-failure error log on shutdown."""
    t = Tracer()
    t.configure(hostname="bad", port=1111)
    with mock.patch("ddtrace.internal.writer.log") as log:
        with t.trace("op"):
            pass
        t.shutdown()
        calls = [mock.call("failed to send traces to Datadog Agent at %s", "http://bad:1111", exc_info=True)]
        log.error.assert_has_calls(calls)
def test_writer_headers():
    """The writer sends correct metadata headers and an accurate trace count."""
    t = Tracer()
    t.writer._put = mock.Mock(wraps=t.writer._put)
    with t.trace("op"):
        pass
    t.shutdown()
    assert t.writer._put.call_count == 1
    _, headers = t.writer._put.call_args[0]
    assert headers.get("Datadog-Meta-Tracer-Version") == ddtrace.__version__
    assert headers.get("Datadog-Meta-Lang") == "python"
    assert headers.get("Content-Type") == "application/msgpack"
    assert headers.get("X-Datadog-Trace-Count") == "1"
    if container.get_container_info():
        assert "Datadog-Container-Id" in headers
    # 100 independent traces -> trace count of 100.
    t = Tracer()
    t.writer._put = mock.Mock(wraps=t.writer._put)
    for _ in range(100):
        with t.trace("op"):
            pass
    t.shutdown()
    assert t.writer._put.call_count == 1
    _, headers = t.writer._put.call_args[0]
    assert headers.get("X-Datadog-Trace-Count") == "100"
    # Child spans do not inflate the trace count: 10 traces, 5 children each.
    t = Tracer()
    t.writer._put = mock.Mock(wraps=t.writer._put)
    for _ in range(10):
        with t.trace("op"):
            for _ in range(5):
                t.trace("child").finish()
    t.shutdown()
    assert t.writer._put.call_count == 1
    _, headers = t.writer._put.call_args[0]
    assert headers.get("X-Datadog-Trace-Count") == "10"
@pytest.mark.skipif(AGENT_VERSION == "testagent", reason="Test agent doesn't support priority sampling responses.")
def test_priority_sampling_response():
    """After a flush, the agent's sampling-rate response populates the by-service samplers."""
    # Send the data once because the agent doesn't respond with them on the
    # first payload.
    t = Tracer()
    s = t.trace("operation", service="my-svc")
    s.set_tag("env", "my-env")
    s.finish()
    assert "service:my-svc,env:my-env" not in t.writer._priority_sampler._by_service_samplers
    t.shutdown()
    # For some reason the agent doesn't start returning the service information
    # immediately
    import time
    time.sleep(5)
    t = Tracer()
    s = t.trace("operation", service="my-svc")
    s.set_tag("env", "my-env")
    s.finish()
    assert "service:my-svc,env:my-env" not in t.writer._priority_sampler._by_service_samplers
    t.shutdown()
    # Only after this second flush does the agent's response land.
    assert "service:my-svc,env:my-env" in t.writer._priority_sampler._by_service_samplers
def test_bad_endpoint():
    """An unknown agent endpoint produces the 'unsupported endpoint' 404 error log."""
    t = Tracer()
    t.writer._endpoint = "/bad"
    with mock.patch("ddtrace.internal.writer.log") as log:
        s = t.trace("operation", service="my-svc")
        s.set_tag("env", "my-env")
        s.finish()
        t.shutdown()
        calls = [
            mock.call(
                "unsupported endpoint '%s': received response %s from Datadog Agent (%s)",
                "/bad",
                404,
                t.writer.agent_url,
            )
        ]
        log.error.assert_has_calls(calls)
@pytest.mark.skipif(AGENT_VERSION == "testagent", reason="FIXME: Test agent response is different.")
def test_bad_payload():
    """An encoder emitting a malformed payload triggers the HTTP 400 error log."""
    t = Tracer()
    class BadEncoder:
        # Encoder stub whose encode() returns an empty (invalid) msgpack payload.
        def __len__(self):
            return 0
        def put(self, trace):
            pass
        def encode(self):
            return ""
        def encode_traces(self, traces):
            return ""
    t.writer._encoder = BadEncoder()
    with mock.patch("ddtrace.internal.writer.log") as log:
        t.trace("asdf").finish()
        t.shutdown()
        calls = [
            mock.call(
                "failed to send traces to Datadog Agent at %s: HTTP error status %s, reason %s",
                "http://localhost:8126",
                400,
                "Bad Request",
            )
        ]
        log.error.assert_has_calls(calls)
def test_bad_encoder():
    """An encoder that raises during encode() is reported via the encode-failure error log."""
    t = Tracer()
    class BadEncoder:
        # Encoder stub that always fails to encode.
        def __len__(self):
            return 0
        def put(self, trace):
            pass
        def encode(self):
            raise Exception()
        def encode_traces(self, traces):
            raise Exception()
    t.writer._encoder = BadEncoder()
    with mock.patch("ddtrace.internal.writer.log") as log:
        t.trace("asdf").finish()
        t.shutdown()
        calls = [mock.call("failed to encode trace with encoder %r", t.writer._encoder, exc_info=True)]
        log.error.assert_has_calls(calls)
@pytest.mark.skipif(AGENT_VERSION == "testagent", reason="Test agent doesn't support v0.3")
def test_downgrade():
    """After downgrading to the v0.3 endpoint, traces still flush cleanly."""
    t = Tracer()
    t.writer._downgrade(None, None)
    assert t.writer._endpoint == "v0.3/traces"
    with mock.patch("ddtrace.internal.writer.log") as log:
        s = t.trace("operation", service="my-svc")
        s.finish()
        t.shutdown()
        log.warning.assert_not_called()
        log.error.assert_not_called()
def test_span_tags():
    """Mixed metric types (int, float, numeric string) are accepted without log noise."""
    t = Tracer()
    with mock.patch("ddtrace.internal.writer.log") as log:
        s = t.trace("operation", service="my-svc")
        s.set_tag("env", "my-env")
        s.set_metric("number", 123)
        s.set_metric("number", 12.0)
        s.set_metric("number", "1")
        s.finish()
        t.shutdown()
        log.warning.assert_not_called()
        log.error.assert_not_called()
def test_synchronous_writer_shutdown():
    """Shutting down a tracer configured with a sync-mode writer must not raise."""
    tracer = Tracer()
    tracer.configure(writer=AgentWriter(tracer.writer.agent_url, sync_mode=True))
    # Ensure this doesn't raise.
    tracer.shutdown()
@pytest.mark.skipif(AGENT_VERSION == "testagent", reason="Test agent doesn't support empty trace payloads.")
def test_flush_log(caplog):
    """Flushing the queue logs the 'sent ... in ...s' debug line."""
    caplog.set_level(logging.INFO)
    writer = AgentWriter(agent.get_trace_url())
    with mock.patch("ddtrace.internal.writer.log") as log:
        writer.write([])
        writer.flush_queue(raise_exc=True)
        calls = [
            mock.call(
                logging.DEBUG,
                "sent %s in %.5fs to %s",
                AnyStr(),
                AnyFloat(),
                writer.agent_url,
            )
        ]
        log.log.assert_has_calls(calls)
@pytest.mark.parametrize("logs_injection,debug_mode,patch_logging", itertools.product([True, False], repeat=3))
def test_regression_logging_in_context(tmpdir, logs_injection, debug_mode, patch_logging):
    """
    When logs injection is enabled and the logger is patched
        When a parent span closes before a child
            The application does not deadlock due to context lock acquisition
    """
    # Run the scenario in a subprocess so a deadlock shows up as a timeout
    # rather than hanging the test suite.
    f = tmpdir.join("test.py")
    f.write(
        """
import ddtrace
ddtrace.patch(logging=%s)
s1 = ddtrace.tracer.trace("1")
s2 = ddtrace.tracer.trace("2")
s1.finish()
s2.finish()
""".lstrip()
        % str(patch_logging)
    )
    p = subprocess.Popen(
        [sys.executable, "test.py"],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        cwd=str(tmpdir),
        env=dict(
            DD_TRACE_LOGS_INJECTION=str(logs_injection).lower(),
            DD_TRACE_DEBUG=str(debug_mode).lower(),
        ),
    )
    try:
        p.wait(timeout=2)
    except TypeError:
        # timeout argument added in Python 3.3
        p.wait()
    assert p.returncode == 0
@pytest.mark.parametrize(
    "call_basic_config,debug_mode",
    itertools.permutations((True, False, None), 2),
)
def test_call_basic_config(ddtrace_run_python_code_in_subprocess, call_basic_config, debug_mode):
    """
    When setting DD_CALL_BASIC_CONFIG env variable
        When true
            We call logging.basicConfig()
        When false
            We do not call logging.basicConfig()
        When not set
            We call logging.basicConfig()
    """
    env = os.environ.copy()
    if debug_mode is not None:
        env["DD_TRACE_DEBUG"] = str(debug_mode).lower()
    if call_basic_config is not None:
        env["DD_CALL_BASIC_CONFIG"] = str(call_basic_config).lower()
        has_root_handlers = call_basic_config
    else:
        has_root_handlers = True
    # basicConfig() installs exactly one handler on the root logger; count it.
    out, err, status, pid = ddtrace_run_python_code_in_subprocess(
        """
import logging
root = logging.getLogger()
print(len(root.handlers))
""",
        env=env,
    )
    assert status == 0
    if has_root_handlers:
        assert out == six.b("1\n")
    else:
        assert out == six.b("0\n")
def test_writer_env_configuration(run_python_code_in_subprocess):
    """DD_TRACE_WRITER_* env vars configure the writer's buffer sizes and interval."""
    env = os.environ.copy()
    env["DD_TRACE_WRITER_BUFFER_SIZE_BYTES"] = "1000"
    env["DD_TRACE_WRITER_MAX_PAYLOAD_SIZE_BYTES"] = "5000"
    env["DD_TRACE_WRITER_INTERVAL_SECONDS"] = "5.0"
    out, err, status, pid = run_python_code_in_subprocess(
        """
import ddtrace
assert ddtrace.tracer.writer._encoder.max_size == 1000
assert ddtrace.tracer.writer._encoder.max_item_size == 1000
assert ddtrace.tracer.writer._interval == 5.0
""",
        env=env,
    )
    assert status == 0, (out, err)
def test_writer_env_configuration_defaults(run_python_code_in_subprocess):
    """Without env overrides, the writer defaults to 8MB buffers and a 1s interval."""
    out, err, status, pid = run_python_code_in_subprocess(
        """
import ddtrace
assert ddtrace.tracer.writer._encoder.max_size == 8 << 20
assert ddtrace.tracer.writer._encoder.max_item_size == 8 << 20
assert ddtrace.tracer.writer._interval == 1.0
""",
    )
    assert status == 0, (out, err)
def test_writer_env_configuration_ddtrace_run(ddtrace_run_python_code_in_subprocess):
    """DD_TRACE_WRITER_* env vars are honored when running under ddtrace-run."""
    env = os.environ.copy()
    env["DD_TRACE_WRITER_BUFFER_SIZE_BYTES"] = "1000"
    env["DD_TRACE_WRITER_MAX_PAYLOAD_SIZE_BYTES"] = "5000"
    env["DD_TRACE_WRITER_INTERVAL_SECONDS"] = "5.0"
    out, err, status, pid = ddtrace_run_python_code_in_subprocess(
        """
import ddtrace
assert ddtrace.tracer.writer._encoder.max_size == 1000
assert ddtrace.tracer.writer._encoder.max_item_size == 1000
assert ddtrace.tracer.writer._interval == 5.0
""",
        env=env,
    )
    assert status == 0, (out, err)
def test_writer_env_configuration_ddtrace_run_defaults(ddtrace_run_python_code_in_subprocess):
    """Under ddtrace-run with no env overrides, writer defaults still apply."""
    out, err, status, pid = ddtrace_run_python_code_in_subprocess(
        """
import ddtrace
assert ddtrace.tracer.writer._encoder.max_size == 8 << 20
assert ddtrace.tracer.writer._encoder.max_item_size == 8 << 20
assert ddtrace.tracer.writer._interval == 1.0
""",
    )
    assert status == 0, (out, err)
def test_partial_flush_log(run_python_code_in_subprocess):
    """Partial flushing logs the span counts once min-spans finished spans accumulate."""
    # NOTE(review): the `run_python_code_in_subprocess` fixture is unused here —
    # confirm whether it can be dropped from the signature.
    partial_flush_min_spans = 2
    t = Tracer()
    t.configure(
        partial_flush_enabled=True,
        partial_flush_min_spans=partial_flush_min_spans,
    )
    s1 = t.trace("1")
    s2 = t.trace("2")
    s3 = t.trace("3")
    t_id = s3.trace_id
    with mock.patch("ddtrace.internal.processor.trace.log") as log:
        s3.finish()
        s2.finish()
    calls = [
        mock.call("trace %d has %d spans, %d finished", t_id, 3, 1),
        mock.call("Partially flushing %d spans for trace %d", partial_flush_min_spans, t_id),
    ]
    log.debug.assert_has_calls(calls)
    s1.finish()
    t.shutdown()
|
modbus_server.py | """This file emulate a modbus server for tests."""
import logging
import threading
import time
from collections import defaultdict
from socketserver import TCPServer
from umodbus import conf
from umodbus.server.tcp import RequestHandler, get_server
from umodbus.utils import log_to_stream
log_to_stream(level=logging.DEBUG)
data_store = defaultdict(int)
conf.SIGNED_VALUES = False
TCPServer.allow_reuse_address = True
app = get_server(TCPServer, ('localhost', 5002), RequestHandler)
@app.route(slave_ids=[1], function_codes=[1, 2, 3, 4], addresses=list(range(0, 10)))
def read_data_store(slave_id, function_code, address):
    """Return value of address."""
    # Registered for all Modbus read function codes on slave 1, addresses 0-9.
    # data_store is a defaultdict(int), so unset addresses read as 0.
    return data_store[address]
@app.route(slave_ids=[1], function_codes=[5, 15], addresses=list(range(0, 10)))
def write_data_store(slave_id, function_code, address, value):
    """Set value for address."""
    # Registered for the write function codes (5: single coil, 15: multiple coils).
    data_store[address] = value
if __name__ == '__main__':
    try:
        print("Start server...")
        # Seed address 0 with 0. The handler is called directly here, so the
        # function_code argument (6) bypasses route dispatch entirely.
        write_data_store(1, 6, 0, 0)
        t = threading.Thread(target=app.serve_forever)
        t.start()
        print("Server online")
        # Increment the value at address 0 once a minute so test clients can
        # observe changing data.
        while True:
            time.sleep(60)
            write_data_store(1, 6, 0, read_data_store(1, 6, 0) + 1)
    finally:
        app.shutdown()
        print("\nStop server")
        app.server_close()
        print("Server offline")
|
cli.py | import os
import sys
import threading
from contextlib import contextmanager
import click
import six
from gevent import pywsgi
from geventwebsocket.handler import WebSocketHandler
from dagster import check, seven
from dagster.cli.workspace import Workspace, get_workspace_from_kwargs, workspace_target_argument
from dagster.core.instance import DagsterInstance
from dagster.core.telemetry import START_DAGIT_WEBSERVER, log_action, log_repo_stats, upload_logs
from dagster.utils import DEFAULT_WORKSPACE_YAML_FILENAME
from .app import create_app_from_workspace
from .version import __version__
def create_dagit_cli():
    """Return the `ui` click command object used as the dagit CLI entry point."""
    return ui  # pylint: disable=no-value-for-parameter
# Shared help-text fragment for the mutually exclusive workspace target options.
REPO_TARGET_WARNING = "Can only use ONE of --workspace/-w, --python-file/-f, --module-name/-m."
# Defaults used when --host/--port are not provided on the command line.
DEFAULT_DAGIT_HOST = "127.0.0.1"
DEFAULT_DAGIT_PORT = 3000
@click.command(
    name="ui",
    help=(
        "Run dagit. Loads a repository or pipeline.\n\n{warning}".format(
            warning=REPO_TARGET_WARNING
        )
        + (
            "\n\n Examples:"
            "\n\n1. dagit (works if .{default_filename} exists)"
            "\n\n2. dagit -w path/to/{default_filename}"
            "\n\n3. dagit -f path/to/file.py"
            "\n\n4. dagit -m some_module"
            "\n\n5. dagit -f path/to/file.py -a define_repo"
            "\n\n6. dagit -m some_module -a define_repo"
            "\n\n7. dagit -p 3333"
            "\n\nOptions Can also provide arguments via environment variables prefixed with DAGIT_"
            "\n\n DAGIT_PORT=3333 dagit"
        ).format(default_filename=DEFAULT_WORKSPACE_YAML_FILENAME)
    ),
)
@workspace_target_argument
@click.option(
    "--host",
    "-h",
    type=click.STRING,
    default=DEFAULT_DAGIT_HOST,
    help="Host to run server on, default is {default_host}".format(default_host=DEFAULT_DAGIT_HOST),
)
@click.option(
    "--port",
    "-p",
    type=click.INT,
    help="Port to run server on, default is {default_port}".format(default_port=DEFAULT_DAGIT_PORT),
)
@click.option(
    "--path-prefix",
    "-l",
    type=click.STRING,
    default="",
    help="The path prefix where Dagit will be hosted (eg: /dagit), default is ''",
)
@click.option(
    "--storage-fallback",
    help="Base directory for dagster storage if $DAGSTER_HOME is not set",
    default=None,
    type=click.Path(),
)
@click.version_option(version=__version__, prog_name="dagit")
def ui(host, port, path_prefix, storage_fallback, **kwargs):
    """Click handler: resolve defaults, then host the dagit web UI."""
    # add the path for the cwd so imports in dynamically loaded code work correctly
    sys.path.append(os.getcwd())
    # When no port was given, allow falling back to nearby free ports later.
    if port is None:
        port_lookup = True
        port = DEFAULT_DAGIT_PORT
    else:
        port_lookup = False
    if storage_fallback is None:
        # Keep the temporary storage dir alive for the server's lifetime.
        with seven.TemporaryDirectory() as storage_fallback:
            host_dagit_ui(host, port, path_prefix, storage_fallback, port_lookup, **kwargs)
    else:
        host_dagit_ui(host, port, path_prefix, storage_fallback, port_lookup, **kwargs)
def host_dagit_ui(host, port, path_prefix, storage_fallback, port_lookup=True, **kwargs):
    """Resolve the DagsterInstance and workspace from CLI kwargs, then serve dagit."""
    with DagsterInstance.get(storage_fallback) as instance:
        with get_workspace_from_kwargs(kwargs, instance) as workspace:
            if not workspace:
                raise Exception("Unable to load workspace with cli_args: {}".format(kwargs))
            host_dagit_ui_with_workspace(instance, workspace, host, port, path_prefix, port_lookup)
def host_dagit_ui_with_workspace(instance, workspace, host, port, path_prefix, port_lookup=True):
    """Log repo telemetry (single-repo workspaces only), build the app, and serve it."""
    check.inst_param(instance, "instance", DagsterInstance)
    check.inst_param(workspace, "workspace", Workspace)
    if len(workspace.repository_location_handles) == 1:
        repository_location_handle = workspace.repository_location_handles[0]
        # Telemetry logic needs to be updated to support multi-repo / gRPC repo locations
        # See https://github.com/dagster-io/dagster/issues/2752
        if (
            hasattr(repository_location_handle, "repository_code_pointer_dict")
            and len(repository_location_handle.repository_code_pointer_dict) == 1
        ):
            pointer = next(iter(repository_location_handle.repository_code_pointer_dict.values()))
            from dagster.core.definitions.reconstructable import ReconstructableRepository
            recon_repo = ReconstructableRepository(pointer)
            log_repo_stats(instance=instance, repo=recon_repo, source="dagit")
    app = create_app_from_workspace(workspace, instance, path_prefix)
    start_server(instance, host, port, path_prefix, app, port_lookup)
@contextmanager
def uploading_logging_thread():
    """Run telemetry log upload in a background thread for the `with` block.

    On entry, starts a thread running ``upload_logs(stop_event)``; on exit,
    sets the stop event and joins the thread so uploads finish cleanly.
    """
    stop_event = threading.Event()
    # Fix: was `args=([stop_event])`, which passes a one-element *list*
    # disguised by redundant parens; a tuple is the idiomatic form and makes
    # the single-argument intent explicit. The call made is identical.
    logging_thread = threading.Thread(target=upload_logs, args=(stop_event,))
    try:
        logging_thread.start()
        yield
    finally:
        stop_event.set()
        logging_thread.join()
def start_server(instance, host, port, path_prefix, app, port_lookup, port_lookup_attempts=0):
    """Serve `app` over gevent WSGI, retrying on successive ports if the port is taken.

    Retries recursively: each attempt bumps `port_lookup_attempts` and listens on
    `port + port_lookup_attempts`; the user is only prompted before the first retry.
    """
    server = pywsgi.WSGIServer((host, port), app, handler_class=WebSocketHandler)
    print(  # pylint: disable=print-call
        "Serving on http://{host}:{port}{path_prefix} in process {pid}".format(
            host=host, port=port, path_prefix=path_prefix, pid=os.getpid()
        )
    )
    log_action(instance, START_DAGIT_WEBSERVER)
    with uploading_logging_thread():
        try:
            server.serve_forever()
        except OSError as os_error:
            if "Address already in use" in str(os_error):
                if port_lookup and (
                    port_lookup_attempts > 0
                    or click.confirm(
                        (
                            "Another process on your machine is already listening on port {port}. "
                            "Would you like to run the app at another port instead?"
                        ).format(port=port)
                    )
                ):
                    port_lookup_attempts += 1
                    start_server(
                        instance,
                        host,
                        port + port_lookup_attempts,
                        path_prefix,
                        app,
                        True,
                        port_lookup_attempts,
                    )
                else:
                    six.raise_from(
                        Exception(
                            (
                                "Another process on your machine is already listening on port {port}. "
                                "It is possible that you have another instance of dagit "
                                "running somewhere using the same port. Or it could be another "
                                "random process. Either kill that process or use the -p option to "
                                "select another port."
                            ).format(port=port)
                        ),
                        os_error,
                    )
            else:
                raise os_error
# Module-level command object; must be created after `ui` is defined.
cli = create_dagit_cli()
def main():
    """Console-script entry point for `dagit`."""
    # click magic
    cli(auto_envvar_prefix="DAGIT")  # pylint:disable=E1120
|
timertest.py | # import the necessary packages
from threading import Thread
import time
class pingtimer:
    """Repeating timer that prints a tick every `duration` seconds on a
    daemon thread, until stop() requests termination."""

    def __init__(self, src=0, name="timer"):
        # `src` is accepted but unused; kept for interface compatibility.
        self.name = name        # name given to the worker thread
        self.duration = 0       # seconds between ticks; set by start()
        # Flag polled by the worker loop; setting it requests shutdown.
        self.stopped = False

    def start(self, duration=0):
        """Spawn the daemon worker thread, ticking every `duration` seconds."""
        self.duration = duration
        worker = Thread(target=self.update, name=self.name, args=())
        worker.daemon = True
        worker.start()
        return self

    def update(self):
        """Worker loop: sleep one interval, print a tick, exit once stopped."""
        while True:
            time.sleep(self.duration)
            print(1)
            # The flag is only checked after the sleep, so stop() takes up to
            # one full interval to be observed.
            if self.stopped:
                print('thread exit successfully')
                return

    def stop(self):
        """Ask the worker loop to terminate after its current sleep."""
        self.stopped = True
# Demo: tick every 2 seconds for roughly 10 seconds, then request shutdown.
t = pingtimer()
t.start(duration=2)
time.sleep(10)
t.stop()
|
evaluator.py | """This file runs as a free-standing program within a sandbox, and processes
permutation requests. It communicates with the outside world on stdin/stdout."""
import base64
from dataclasses import dataclass
import math
from multiprocessing import Process, Queue
import os
import queue
import sys
from tempfile import mkstemp
import threading
import time
import traceback
from typing import Counter, Dict, List, Optional, Set, Tuple, Union
import zlib
from nacl.secret import SecretBox
from ..candidate import CandidateResult
from ..compiler import Compiler
from ..error import CandidateConstructionFailure
from ..helpers import exception_to_string, static_assert_unreachable
from ..permuter import EvalError, EvalResult, Permuter
from ..profiler import Profiler
from ..scorer import Scorer
from .core import (
FilePort,
PermuterData,
Port,
json_prop,
permuter_data_from_json,
)
def _fix_stdout() -> None:
    """Route stdout to stderr so print() debugging stays visible.

    Must be called at startup in every (sub)process: the real stdout is
    reserved for the permuter's own wire protocol. stderr is additionally
    switched to line buffering (Python 3.7+, which the sandbox guarantees)
    so piped output shows up promptly instead of being block-buffered.
    """
    sys.stdout = sys.stderr
    sys.stdout.reconfigure(line_buffering=True)  # type: ignore
def _setup_port(secret: bytes) -> Port:
    """Set up communication with the outside world."""
    # Encrypted framing over stdin/stdout using the shared-secret box.
    port = FilePort(
        sys.stdin.buffer,
        sys.stdout.buffer,
        SecretBox(secret),
        "server",
        is_client=False,
    )
    # Redirect stdout only *after* the port has captured the raw buffer.
    _fix_stdout()
    # Follow the controlling process's sanity check protocol.
    magic = port.receive()
    port.send(magic)
    return port
def _create_permuter(data: PermuterData) -> Permuter:
    """Build a Permuter from wire data, materializing its temp files.

    The target object file exists only long enough to construct the Scorer.
    The compile script persists at a temp path (it becomes the Permuter's
    compile_cmd and is removed later by _remove_permuter).
    """
    fd, path = mkstemp(suffix=".o", prefix="permuter", text=False)
    try:
        with os.fdopen(fd, "wb") as f:
            f.write(data.target_o_bin)
        scorer = Scorer(target_o=path, stack_differences=data.stack_differences)
    finally:
        os.unlink(path)
    fd, path = mkstemp(suffix=".sh", prefix="permuter", text=True)
    try:
        # chmod on the open fd; makes the compile script executable.
        os.chmod(fd, 0o755)
        with os.fdopen(fd, "w") as f2:
            f2.write(data.compile_script)
        compiler = Compiler(compile_cmd=path, show_errors=False)
        return Permuter(
            dir="unused",
            fn_name=data.fn_name,
            compiler=compiler,
            scorer=scorer,
            source_file=data.filename,
            source=data.source,
            force_seed=None,
            force_rng_seed=None,
            keep_prob=data.keep_prob,
            need_profiler=data.need_profiler,
            need_all_sources=False,
            show_errors=False,
            better_only=False,
            best_only=False,
        )
    except:
        # Intentionally broad: clean up the script on *any* failure, then
        # re-raise the original exception unchanged.
        os.unlink(path)
        raise
@dataclass
class AddPermuter:
    """Task: register a new permuter received from the client."""
    perm_id: str
    data: PermuterData
@dataclass
class AddPermuterLocal:
    """Per-worker task: install an already-constructed Permuter instance."""
    perm_id: str
    permuter: Permuter
@dataclass
class RemovePermuter:
    """Task: unregister the permuter with the given id."""
    perm_id: str
@dataclass
class WorkDone:
    """Task: a worker finished evaluating one candidate (result + timing)."""
    perm_id: str
    id: int
    time_us: int
    result: EvalResult
@dataclass
class Work:
    """Task: evaluate one candidate, identified by permuter id and RNG seed."""
    perm_id: str
    id: int
    seed: int
# (payload, timestamp) pairs: the timestamp orders queue items relative to
# permuter add/remove events so workers apply them consistently.
LocalWork = Tuple[Union[AddPermuterLocal, RemovePermuter], int]
GlobalWork = Tuple[Work, int]
# Every message kind that flows through the main task queue.
Task = Union[AddPermuter, RemovePermuter, Work, WorkDone]
def _remove_permuter(perm: Permuter) -> None:
    """Delete the temporary compile script backing this permuter's compiler."""
    script_path = perm.compiler.compile_cmd
    os.unlink(script_path)
def _send_result(item: WorkDone, port: Port) -> None:
    """Serialize one finished evaluation as a 'result' message on `port`.

    Errors are reported with just an "error" field; successful candidates
    send score/hash/profiler data, followed by the compressed source as a
    separate binary frame when present.
    """
    obj = {
        "type": "result",
        "permuter": item.perm_id,
        "id": item.id,
        "time_us": item.time_us,
    }
    res = item.result
    if isinstance(res, EvalError):
        obj["error"] = res.exc_str
        port.send_json(obj)
        return
    # Set by the worker (see multiprocess_worker), not a declared attribute.
    compressed_source = getattr(res, "compressed_source")
    obj["score"] = res.score
    obj["has_source"] = compressed_source is not None
    if res.hash is not None:
        obj["hash"] = res.hash
    if res.profiler is not None:
        obj["profiler"] = {
            st.name: res.profiler.time_stats[st] for st in Profiler.StatType
        }
    port.send_json(obj)
    # The binary frame must follow the JSON header that announced it.
    if compressed_source is not None:
        port.send(compressed_source)
def multiprocess_worker(
    worker_queue: "Queue[GlobalWork]",
    local_queue: "Queue[LocalWork]",
    task_queue: "Queue[Task]",
) -> None:
    """Worker process loop: evaluate permutation candidates.

    Pulls (Work, timestamp) items from the shared `worker_queue`. Before
    evaluating, drains `local_queue` until this process has seen every
    permuter add/remove event at least as new as the work item's timestamp,
    so the referenced permuter is guaranteed to be installed. Results are
    reported back as WorkDone items on `task_queue`.
    """
    _fix_stdout()
    # Prevent deadlocks in case the parent process dies.
    worker_queue.cancel_join_thread()
    local_queue.cancel_join_thread()
    task_queue.cancel_join_thread()
    permuters: Dict[str, Permuter] = {}
    timestamp = 0
    while True:
        work, required_timestamp = worker_queue.get()
        while True:
            try:
                # Block only while we are behind the work item's timestamp;
                # otherwise just opportunistically drain pending updates.
                block = timestamp < required_timestamp
                task, timestamp = local_queue.get(block=block)
            except queue.Empty:
                break
            if isinstance(task, AddPermuterLocal):
                permuters[task.perm_id] = task.permuter
            elif isinstance(task, RemovePermuter):
                del permuters[task.perm_id]
            else:
                static_assert_unreachable(task)
        time_before = time.time()
        permuter = permuters[work.perm_id]
        result = permuter.try_eval_candidate(work.seed)
        if isinstance(result, CandidateResult) and permuter.should_output(result):
            permuter.record_result(result)
        # Compress the source within the worker. (Why waste a free
        # multi-threading opportunity?)
        if isinstance(result, CandidateResult):
            compressed_source: Optional[bytes] = None
            if result.source is not None:
                compressed_source = zlib.compress(result.source.encode("utf-8"))
            setattr(result, "compressed_source", compressed_source)
            result.source = None
        time_us = int((time.time() - time_before) * 10 ** 6)
        task_queue.put(
            WorkDone(perm_id=work.perm_id, id=work.id, time_us=time_us, result=result)
        )
def read_loop(task_queue: "Queue[Task]", port: Port) -> None:
    """Reader thread: translate wire messages from `port` into queued Tasks.

    "add" messages carry two extra binary frames (source, target object);
    "remove" and "work" are pure JSON. Any error — including a clean EOF
    from the controller — terminates the whole process.
    """
    try:
        while True:
            item = port.receive_json()
            msg_type = json_prop(item, "type", str)
            if msg_type == "add":
                perm_id = json_prop(item, "permuter", str)
                source = port.receive().decode("utf-8")
                target_o_bin = port.receive()
                data = permuter_data_from_json(item, source, target_o_bin)
                task_queue.put(AddPermuter(perm_id=perm_id, data=data))
            elif msg_type == "remove":
                perm_id = json_prop(item, "permuter", str)
                task_queue.put(RemovePermuter(perm_id=perm_id))
            elif msg_type == "work":
                perm_id = json_prop(item, "permuter", str)
                id = json_prop(item, "id", int)
                seed = json_prop(item, "seed", int)
                task_queue.put(Work(perm_id=perm_id, id=id, seed=seed))
            else:
                raise Exception(f"Invalid message type {msg_type}")
    except Exception as e:
        # In case the port is closed from the other side, skip writing an ugly
        # error message.
        if not isinstance(e, EOFError):
            traceback.print_exc()
        # Exit the whole process, to improve the odds that the Docker container
        # really stops and gets removed.
        #
        # The parent server has a "finally:" that does that, but it's not 100%
        # trustworthy. In particular, pystray has a tendency to hard-crash
        # (which doesn't fire "finally"s), and also reverts the signal handler
        # for SIGINT to the default on Linux, making Ctrl+C not run cleanup.
        # Either way, defense in depth here doesn't hurt, since leaking Docker
        # containers is pretty bad.
        #
        # Unfortunately this still doesn't fix the problem, since we typically
        # don't get a port closure signal when the parent process stops...
        # TODO: listen to heartbeats as well.
        sys.exit(1)
def main() -> None:
    """Entry point: run the evaluation server inside the sandbox.

    Spawns one worker process per (rounded-up) core and a reader thread,
    then multiplexes permuter add/remove requests, work items and finished
    results between the controlling process (via `port`) and the workers.
    """
    secret = base64.b64decode(os.environ["SECRET"])
    # Drop the secret from the environment so child processes never see it.
    del os.environ["SECRET"]
    os.environ["PERMUTER_IS_REMOTE"] = "1"
    port = _setup_port(secret)
    obj = port.receive_json()
    num_cores = json_prop(obj, "num_cores", float)
    num_threads = math.ceil(num_cores)
    worker_queue: "Queue[GlobalWork]" = Queue()
    task_queue: "Queue[Task]" = Queue()
    local_queues: "List[Queue[LocalWork]]" = []
    for i in range(num_threads):
        local_queue: "Queue[LocalWork]" = Queue()
        p = Process(
            target=multiprocess_worker,
            args=(worker_queue, local_queue, task_queue),
            daemon=True,
        )
        p.start()
        local_queues.append(local_queue)
    reader_thread = threading.Thread(
        target=read_loop, args=(task_queue, port), daemon=True
    )
    reader_thread.start()
    remaining_work: Counter[str] = Counter()
    should_remove: Set[str] = set()
    permuters: Dict[str, Permuter] = {}
    timestamp = 0
    def try_remove(perm_id: str) -> None:
        # Deferred removal: only drop a permuter once all its outstanding
        # work items have come back, so workers never see a dangling id.
        nonlocal timestamp
        assert perm_id in permuters
        if perm_id not in should_remove or remaining_work[perm_id] != 0:
            return
        del remaining_work[perm_id]
        should_remove.remove(perm_id)
        timestamp += 1
        for q in local_queues:
            q.put((RemovePermuter(perm_id=perm_id), timestamp))
        _remove_permuter(permuters[perm_id])
        del permuters[perm_id]
    while True:
        item = task_queue.get()
        if isinstance(item, AddPermuter):
            assert item.perm_id not in permuters
            msg: Dict[str, object] = {
                "type": "init",
                "permuter": item.perm_id,
            }
            time_before = time.time()
            try:
                # Construct a permuter. This involves a compilation on the main
                # thread, which isn't great but we can live with it for now.
                permuter = _create_permuter(item.data)
                if permuter.base_score != item.data.base_score:
                    _remove_permuter(permuter)
                    score_str = f"{permuter.base_score} vs {item.data.base_score}"
                    if permuter.base_hash == item.data.base_hash:
                        hash_str = "same hash; different Python or permuter versions?"
                    else:
                        hash_str = "different hash; different objdump versions?"
                    raise CandidateConstructionFailure(
                        f"mismatching score: {score_str} ({hash_str})"
                    )
                permuters[item.perm_id] = permuter
                msg["success"] = True
                msg["base_score"] = permuter.base_score
                msg["base_hash"] = permuter.base_hash
                # Tell all the workers about the new permuter.
                # TODO: ideally we would also seed their Candidate lru_cache's
                # to avoid all workers having to parse the source...
                timestamp += 1
                for q in local_queues:
                    q.put(
                        (
                            AddPermuterLocal(perm_id=item.perm_id, permuter=permuter),
                            timestamp,
                        )
                    )
            except Exception as e:
                # This shouldn't practically happen, since the client compiled
                # the code successfully. Print a message if it does.
                msg["success"] = False
                msg["error"] = exception_to_string(e)
                if isinstance(e, CandidateConstructionFailure):
                    print(e.message)
                else:
                    traceback.print_exc()
            msg["time_us"] = int((time.time() - time_before) * 10 ** 6)
            port.send_json(msg)
        elif isinstance(item, RemovePermuter):
            # Silently ignore requests to remove permuters that have already
            # been removed, which can occur when AddPermuter fails.
            if item.perm_id in permuters:
                should_remove.add(item.perm_id)
                try_remove(item.perm_id)
        elif isinstance(item, WorkDone):
            remaining_work[item.perm_id] -= 1
            try_remove(item.perm_id)
            _send_result(item, port)
        elif isinstance(item, Work):
            remaining_work[item.perm_id] += 1
            worker_queue.put((item, timestamp))
        else:
            static_assert_unreachable(item)
if __name__ == "__main__":
    main()
|
test_base.py | import unittest
import time
from contextlib import suppress
from queue import Queue as ThreadQueue
from threading import Thread
from threading import Event as ThreadEvent
import numpy as np
from pdp.base import InterruptableQueue, StopEvent, start_one2one_transformer
DEFAULT_LOOP_TIMEOUT = 0.02
def set_event_after_timeout(event, timeout):
    """Arrange for `event` to be set after `timeout` seconds, without blocking."""
    def _delayed_set():
        time.sleep(timeout)
        event.set()

    Thread(target=_delayed_set).start()
class TestInterruptableQueue(unittest.TestCase):
    """Blocked get()/put() on InterruptableQueue must wake when the stop event fires."""
    def setUp(self):
        # Timings are multiples of the queue's polling loop timeout so the
        # worker thread can be observed alive before the event and dead
        # shortly after it.
        self.maxsize = 10
        self.loop_timeout = DEFAULT_LOOP_TIMEOUT
        self.wait_timeout = 7.5 * self.loop_timeout
        self.receive_timeout = 0.5 * self.loop_timeout
        self.stop_event = ThreadEvent()
        self.q = InterruptableQueue(ThreadQueue(self.maxsize), self.loop_timeout, self.stop_event)
    def test_get(self):
        # get() on an empty queue blocks until the stop event makes it raise
        # StopEvent (suppressed, so the thread simply exits).
        def target():
            with suppress(StopEvent):
                self.q.get()
        thread = Thread(target=target)
        thread.start()
        self.assertTrue(thread.is_alive())
        set_event_after_timeout(event=self.stop_event, timeout=self.wait_timeout + self.receive_timeout)
        self.assertTrue(thread.is_alive())
        time.sleep(self.wait_timeout)
        self.assertTrue(thread.is_alive())
        time.sleep(self.receive_timeout * 2)
        self.assertFalse(thread.is_alive())
    def test_put(self):
        # Fill the queue to capacity so put() blocks, then interrupt it.
        for i in range(self.maxsize):
            self.q.put(i)
        def target():
            with suppress(StopEvent):
                self.q.put(-1)
        thread = Thread(target=target)
        thread.start()
        self.assertTrue(thread.is_alive())
        set_event_after_timeout(event=self.stop_event, timeout=self.wait_timeout + self.receive_timeout)
        self.assertTrue(thread.is_alive())
        time.sleep(self.wait_timeout)
        self.assertTrue(thread.is_alive())
        time.sleep(self.receive_timeout * 2)
        self.assertFalse(thread.is_alive())
class testOne2One(unittest.TestCase):
    """End-to-end check that start_one2one_transformer maps inputs to outputs."""
    # NOTE(review): lowercase class name relies on the test loader discovering
    # TestCase subclasses by type rather than by name — confirm it is collected
    # under the project's runner.
    def setUp(self):
        self.buffer_size = 20
        self.loop_timeout = DEFAULT_LOOP_TIMEOUT
        self.stop_event = ThreadEvent()
        self.q_in = InterruptableQueue(ThreadQueue(self.buffer_size), self.loop_timeout, self.stop_event)
        self.q_out = InterruptableQueue(ThreadQueue(self.buffer_size), self.loop_timeout, self.stop_event)
    def tearDown(self):
        self.q_in.join()
        self.q_out.join()
    def data_pass(self, n_workers):
        # Push data in buffer-sized batches and drain the output between
        # batches so the bounded queues never block the producer.
        data_in = np.random.randn(self.buffer_size * 10)
        def f(x):
            return x ** 2
        data_out_true = f(data_in)
        start_one2one_transformer(f, q_in=self.q_in, q_out=self.q_out, stop_event=self.stop_event, n_workers=n_workers)
        i = 0
        data_out = []
        for d in data_in:
            self.q_in.put(d)
            i += 1
            if i == self.buffer_size:
                for j in range(self.buffer_size):
                    data_out.append(self.q_out.get())
                    self.q_out.task_done()
                i = 0
        # With multiple workers the output order is nondeterministic, so
        # compare as multisets via sorting.
        if n_workers > 1:
            data_out_true = sorted(data_out_true)
            data_out = sorted(data_out)
        np.testing.assert_equal(data_out, data_out_true)
    def test_data_pass(self):
        for n_workers in (1, 4, 10):
            with self.subTest(f'n_workers={n_workers}'):
                self.data_pass(n_workers=n_workers)
|
SetupDialog.py | import os
import time
import threading
import serial
import serial.tools.list_ports
import wx
from RomWack import RomWack
from SAD import SAD
from ExecLibrary import ExecLibrary
from AmigaSnippets import AmigaSnippets
class SetupDialog(wx.Frame):
    """Connection-setup window for amigaXfer.

    Lets the user pick a serial port, transfer baudrate and debugger
    (RomWack or SAD), then connects to the Amiga on a worker thread and
    hands the established session to *endcallback*.
    """
    # (CLK/BAUDRATE)-1 = SERPER
    # CLK/(SERPER+1) = BAUDRATE
    # (serper, bps)
    # 3546895 PALCLK Hz
    baudtablePAL = [
        (372, 9600),  # 368, but romwack/sad value to avoid trouble
        (183, 19200),
        (91, 38400),
        (60, 57600),
        (29, 115200),
        (13, 253349),
        (6, 506699),
        (4, 709379)
    ]
    # 3579545 NTSCCLK Hz
    baudtableNTSC = [
        (372, 9600),
        (185, 19200),
        (92, 38400),
        (61, 57600),
        (30, 115200),
        (13, 255681),
        (6, 511363),
        (4, 715909)
    ]

    def __init__(self, endcallback):
        """Build the dialog. *endcallback* receives the connected session."""
        self.endcallback = endcallback
        # Set to abort the debugger sync loops when the user quits.
        self.syncabort = threading.Event()
        super().__init__(None, id=wx.ID_ANY, title=u"amigaXfer Setup", pos=wx.DefaultPosition, size=wx.Size(512, 225), style=wx.DEFAULT_FRAME_STYLE | wx.TAB_TRAVERSAL)
        self.SetSizeHints(wx.DefaultSize, wx.DefaultSize)
        self.SetBackgroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DLIGHT))
        bSizer1 = wx.BoxSizer(wx.VERTICAL)
        gbSizer1 = wx.GridBagSizer(0, 0)
        gbSizer1.SetFlexibleDirection(wx.BOTH)
        gbSizer1.SetNonFlexibleGrowMode(wx.FLEX_GROWMODE_SPECIFIED)
        self.m_portmsg = wx.StaticText(self, wx.ID_ANY, u"Port", wx.DefaultPosition, wx.DefaultSize, 0)
        self.m_portmsg.Wrap(-1)
        gbSizer1.Add(self.m_portmsg, wx.GBPosition(0, 0), wx.GBSpan(1, 1), wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT, 5)
        # Most recently enumerated ports tend to be the adapter just
        # plugged in, so present them first.
        m_portChoices = [port for (port, desc, hwid) in serial.tools.list_ports.comports()]
        m_portChoices.reverse()
        self.m_port = wx.ComboBox(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, m_portChoices, 0)
        self.m_port.SetSelection(0)
        gbSizer1.Add(self.m_port, wx.GBPosition(0, 1), wx.GBSpan(1, 1), wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
        self.m_baudratemsg = wx.StaticText(self, wx.ID_ANY, u"Xfer Baudrate", wx.DefaultPosition, wx.DefaultSize, 0)
        self.m_baudratemsg.Wrap(-1)
        gbSizer1.Add(self.m_baudratemsg, wx.GBPosition(1, 0), wx.GBSpan(1, 1), wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT, 5)
        # Selection index into baudtablePAL/baudtableNTSC.
        m_baudrateChoices = [u"9600", u"19200", u"38400", u"57600", u"115200", u"256k", u"512k"]
        self.m_baudrate = wx.Choice(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, m_baudrateChoices, 0)
        self.m_baudrate.SetSelection(4)
        gbSizer1.Add(self.m_baudrate, wx.GBPosition(1, 1), wx.GBSpan(1, 1), wx.ALL, 5)
        bSizer1.Add(gbSizer1, 0, wx.EXPAND, 5)
        m_debuggerChoices = [u"RomWack (exec v37-)", u"SAD (exec v39+)"]
        self.m_debugger = wx.RadioBox(self, wx.ID_ANY, u"Debugger", wx.DefaultPosition, wx.DefaultSize, m_debuggerChoices, 1, wx.RA_SPECIFY_ROWS)
        self.m_debugger.SetSelection(0)
        bSizer1.Add(self.m_debugger, 0, wx.ALL | wx.EXPAND, 5)
        wSizer1 = wx.WrapSizer(wx.HORIZONTAL, wx.WRAPSIZER_DEFAULT_FLAGS)
        # Hidden unless Shift is pressed (see onKeyPress).
        self.m_dangerfast = wx.CheckBox(self, wx.ID_ANY, u"DangerFast", wx.DefaultPosition, wx.DefaultSize, 0)
        self.m_dangerfast.Hide()
        self.m_dangerfast.SetForegroundColour(wx.Colour(255, 0, 0))
        self.m_dangerfast.SetToolTip(u"Dev's hardcoded speeds")
        self.m_dangerfast.Bind(wx.EVT_CHECKBOX, self.onCheckBox)
        wSizer1.Add(self.m_dangerfast, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
        self.m_crashentry = wx.CheckBox(self, wx.ID_ANY, u"CrashEntry", wx.DefaultPosition, wx.DefaultSize, 0)
        self.m_crashentry.SetToolTip(u"Refer to bootstrap doc")
        self.m_crashentry.Bind(wx.EVT_CHECKBOX, self.onCheckBox)
        wSizer1.Add(self.m_crashentry, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
        self.m_resetfirst = wx.CheckBox(self, wx.ID_ANY, u"ResetFirst", wx.DefaultPosition, wx.DefaultSize, 0)
        self.m_resetfirst.SetToolTip(u"Reboots for cleaner env")
        self.m_resetfirst.Bind(wx.EVT_CHECKBOX, self.onCheckBox)
        wSizer1.Add(self.m_resetfirst, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
        self.m_paranoid = wx.CheckBox(self, wx.ID_ANY, u"Paranoid", wx.DefaultPosition, wx.DefaultSize, 0)
        self.m_paranoid.SetToolTip(u"AmigaSnippets verifyuse (slow)")
        wSizer1.Add(self.m_paranoid, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
        self.m_debug = wx.CheckBox(self, wx.ID_ANY, u"Debug", wx.DefaultPosition, wx.DefaultSize, 0)
        self.m_debug.SetToolTip(u"Extra debug text")
        wSizer1.Add(self.m_debug, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
        self.m_logwindow = wx.CheckBox(self, wx.ID_ANY, u"LogWindow", wx.DefaultPosition, wx.DefaultSize, 0)
        self.m_logwindow.SetToolTip(u"Redirect log to a window")
        self.m_logwindow.SetValue(True)
        wSizer1.Add(self.m_logwindow, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
        bSizer1.Add(wSizer1, 1, wx.EXPAND, 5)
        wSizer14 = wx.WrapSizer(wx.HORIZONTAL, wx.WRAPSIZER_DEFAULT_FLAGS)
        self.m_quit = wx.Button(self, wx.ID_ANY, u"Quit", wx.DefaultPosition, wx.DefaultSize, 0)
        self.m_quit.Bind(wx.EVT_BUTTON, self.onQuitPressed)
        wSizer14.Add(self.m_quit, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_CENTER_HORIZONTAL, 5)
        self.m_about = wx.Button(self, wx.ID_ANY, u"About", wx.DefaultPosition, wx.DefaultSize, 0)
        self.m_about.Bind(wx.EVT_BUTTON, self.onAbout)
        wSizer14.Add(self.m_about, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_CENTER_HORIZONTAL, 5)
        self.m_connect = wx.Button(self, wx.ID_ANY, u"Connect", wx.DefaultPosition, wx.DefaultSize, 0)
        self.m_connect.Bind(wx.EVT_BUTTON, self.DebuggerConnect)
        wSizer14.Add(self.m_connect, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_CENTER_HORIZONTAL, 5)
        bSizer1.Add(wSizer14, 0, wx.ALIGN_RIGHT, 5)
        self.SetSizer(bSizer1)
        self.Layout()
        self.Fit()
        self.Centre(wx.BOTH)
        self.Bind(wx.EVT_CLOSE, self.onClose)
        # FIXME: Secret tickboxes.
        self.Bind(wx.EVT_KEY_DOWN, self.onKeyPress)
        return

    def onCheckBox(self, event):
        """Re-evaluate widget enable/disable state after a checkbox change."""
        self.Enablement(True)
        return

    def onKeyPress(self, event):
        """Reveal the hidden DangerFast checkbox when Shift is pressed."""
        keycode = event.GetKeyCode()
        if keycode == wx.WXK_SHIFT:
            self.m_dangerfast.Show()
            self.Layout()
        event.Skip()
        return

    def onClose(self, event):
        """Window-close handler: veto the close and quit cleanly instead."""
        if event.CanVeto():
            event.Veto()
        self.Quit()
        return

    def onQuitPressed(self, event):
        self.Quit()
        return

    def Quit(self):
        """Abort any in-progress debugger sync and exit the application."""
        self.syncabort.set()
        wx.Exit()
        return

    def onAbout(self, event):
        app = wx.GetApp()
        app.ShowAboutDialog()
        return

    def DebuggerConnectFail(self):
        """Report a failed serial open and re-enable the UI."""
        wx.MessageBox("Serial port could not be opened.\n\nWrong serial port or incompatible baudrate\n\nNote the standard PC serial port is limited to 115200. For higher speeds, use USB-serial adapters.", "ERROR", wx.OK|wx.ICON_ERROR)
        self.Enablement(True)
        return

    def Enablement(self, enable):
        """Enable/disable controls; some combinations force-disable others.

        CrashEntry implies ResetFirst (so its box is locked off), and
        CrashEntry or DangerFast override the baudrate choice.
        """
        crashentry = self.m_crashentry.GetValue()
        dangerfast = self.m_dangerfast.GetValue()
        self.m_debugger.Enable(enable)
        self.m_paranoid.Enable(enable)
        self.m_debug.Enable(enable)
        self.m_crashentry.Enable(enable)
        self.m_dangerfast.Enable(enable)
        self.m_logwindow.Enable(enable)
        self.m_connect.Enable(enable)
        self.m_about.Enable(enable)
        self.m_port.Enable(enable)
        self.m_portmsg.Enable(enable)
        if crashentry:
            self.m_resetfirst.Enable(False)
        else:
            self.m_resetfirst.Enable(enable)
        if (not crashentry) and (not dangerfast):
            self.m_baudrate.Enable(enable)
            self.m_baudratemsg.Enable(enable)
        else:
            self.m_baudrate.Enable(False)
            self.m_baudratemsg.Enable(False)
        return

    def DebuggerConnect(self, event):  # Using name "Connect" would painfully override wx.
        """Open the serial port and kick off connection on a worker thread.

        The sync itself runs off the UI thread; the initial open happens
        here at 9600 (crash entry) or the NTSC rate for the selection.
        """
        self.Enablement(False)
        serialdev = self.m_port.GetValue()
        baudrate = self.m_baudrate.GetSelection()  # index into baud tables
        ntscbaudrate = self.baudtableNTSC[baudrate][1]
        debugger = self.m_debugger.GetSelection()
        paranoid = self.m_paranoid.GetValue()
        debug = self.m_debug.GetValue()
        dangerfast = self.m_dangerfast.GetValue()
        logwindow = self.m_logwindow.GetValue()
        crashentry = self.m_crashentry.GetValue()
        resetfirst = self.m_resetfirst.GetValue()
        if crashentry:
            resetfirst = True
        if logwindow:
            wx.GetApp().RedirectStdio()
        # Sanity-check that the m68k helper objects are present.
        with open("nativeobjs.list", "r") as fh:
            asmfiles = fh.read().splitlines()
        missing = False
        for asmfile in asmfiles:
            if not os.path.isfile(asmfile):
                missing = True
                print(f"Object file {asmfile} is missing.")
        if missing:
            wx.MessageBox("Some m68k object files are missing.\n\nBuild missing objects or copy them from matching Windows binary release archive.\n\namigaXfer might work partially or not at all until resolved.", "ERROR", wx.OK|wx.ICON_ERROR)
        print(f'port {serialdev}, baud {baudrate}, debugger {debugger}, paranoid {paranoid}, debug {debug}, dangerfast {dangerfast}, resetfirst {resetfirst}, crashentry {crashentry}')
        if crashentry:
            print("*** Crash entry mode: Refer to bootstrapping documentation.")
            print("Overriding settings for safety. Serial will run at 9600.")
            baudrate = 0
            #debug = True
            paranoid = False
            try:
                ser = serial.Serial(serialdev, 9600, timeout=None, exclusive=True)
            except Exception:  # was a bare except: don't swallow KeyboardInterrupt/SystemExit
                print("Serial device could not be opened.")
                wx.CallAfter(self.DebuggerConnectFail)
                return
        else:
            try:
                ser = serial.Serial(serialdev, ntscbaudrate, timeout=None, exclusive=True)
            except Exception:  # was a bare except: don't swallow KeyboardInterrupt/SystemExit
                print(f"Serial device {serialdev} could not be opened at {ntscbaudrate} baud.")
                wx.CallAfter(self.DebuggerConnectFail)
                return
        print("Serial device opened.")
        self.ser = ser
        if resetfirst:
            threading.Thread(target=self.ResetFirstWorker, args=(baudrate, debugger, paranoid, debug, dangerfast, resetfirst, crashentry)).start()
        else:
            threading.Thread(target=self.DebuggerConnectWorker, args=(baudrate, debugger, paranoid, debug, dangerfast, resetfirst, crashentry)).start()
        return

    def DebuggerConnectWorker(self, baudrate, debugger, paranoid, debug, dangerfast, resetfirst, crashentry):
        """Worker thread: sync with the on-Amiga debugger and bootstrap snippets.

        On success hands (serial, debugger, execlib, snippets, ...) to
        endcallback on the UI thread via wx.CallAfter.
        """
        if resetfirst:
            print('Syncing with debugger.')
        else:
            print('Syncing with debugger. Please have Amiga enter debugger now. Refer to README for help.')
        if debugger == 0:
            self.debugger = RomWack(syncabort=self.syncabort, serial=self.ser, Debug=debug)
        elif debugger == 1:
            self.debugger = SAD(syncabort=self.syncabort, serial=self.ser, Debug=debug)
        amiga = self.debugger
        if amiga.debugger == "SAD":
            print(f'In SAD debugger. Bugged: {amiga.sadbug}. Entry: {amiga.entry}.')
        else:
            print(f"In {amiga.debugger} debugger.")
        execlib = ExecLibrary(amiga)
        self.execlib = execlib
        print(f'Exec v{execlib.version}.{execlib.revision}. Base at {hex(execlib.base)}.')
        execlib.Disable()  # romwack/sad unreliable at wire speed if interrupts on, on 000/010 7MHz.
        print("Disable.")
        savedregs = []
        if not resetfirst:
            print("Saving non-scratch registers.")
            savedregs = amiga.getregs(["a2", "a3", "a4", "a5", "a6", "a7", "d2", "d3", "d4", "d5", "d6", "d7"])
        if amiga.debugger == "SAD":
            execlib.enterdebugloop()
        # Pick SERPER/baud from the table matching the machine's video clock.
        clkpal = execlib.is_pal()
        if clkpal:
            (serper, baudrate) = self.baudtablePAL[baudrate]
        else:
            (serper, baudrate) = self.baudtableNTSC[baudrate]
        readmemserper = serper
        writememserper = serper
        readmembaudrate = baudrate
        writemembaudrate = baudrate
        if dangerfast:
            # Hardcoded developer speeds; asymmetric read/write rates.
            serper = 6
            baudrate = 506699
            readmemserper = 3
            readmembaudrate = 886723
            writememserper = 5
            writemembaudrate = 591149
        print(f'clkpal: {clkpal}, SERPER: {serper}, baud: {baudrate}')
        print(f'AmigaSnippets initialization start.')
        snip = AmigaSnippets(
            debugger=amiga,
            serial=self.ser,
            execlib=execlib,
            allocmem=execlib.AllocMem,
            verifyupload=True,
            verifyuse=paranoid,
            debug=debug,
            serper=serper,
            baudrate=baudrate,
            readmemserper=readmemserper,
            readmembaudrate=readmembaudrate,
            writememserper=writememserper,
            writemembaudrate=writemembaudrate)
        if snip.reusing:
            print("Reusing snippets.")
        else:
            print("Bootstrapped snippets.")
        self.snip = snip
        wx.CallAfter(self.endcallback, self.ser, self.debugger, self.execlib, self.snip, resetfirst, crashentry, savedregs)
        return

    def ResetFirstWorker(self, baudrate, debugger, paranoid, debug, dangerfast, resetfirst, crashentry):
        """Worker thread: reboot the Amiga into the debugger, then connect.

        Installs CoolCapture so the machine re-enters the debugger after
        reset, patches the resident table to bypass strap, resumes, and
        finally chains into DebuggerConnectWorker.
        """
        ser = self.ser
        if crashentry:
            print("Waiting for Amiga to enter unrecoverable alert routine (the blink + reboot + guru sort).")
            # Poke the line until the alert handler echoes something back.
            while not ser.in_waiting:
                ser.write(b'\x7F')
                time.sleep(0.3)
            print("Amiga is alive. Attempting debugger entry.")
        else:
            print('Syncing with debugger. Please have Amiga enter debugger now. Refer to README for help.')
        if debugger == 0:
            amiga = RomWack(syncabort=self.syncabort, serial=self.ser, Debug=debug)
        elif debugger == 1:
            amiga = SAD(syncabort=self.syncabort, serial=self.ser, Debug=debug)
        if amiga.debugger == "SAD":
            print(f'In SAD debugger. Bugged: {amiga.sadbug}. Entry: {amiga.entry}.')
        else:
            print(f"In {amiga.debugger} debugger.")
        print("Obtaining exec.library base and probing.")
        execlib = ExecLibrary(amiga)
        print(f'Exec v{execlib.version}.{execlib.revision}. Base at {hex(execlib.base)}.')
        print("Setting up CoolCapture.")
        amiga.poke32(execlib.base + execlib.CoolCapture, execlib.base + execlib.LVODebug)
        print("Updating sysvars checksum.")
        execlib.sysvarschksum()
        print("Clearing 'HELP' guru flag.")
        amiga.poke32(0, 0)  # clear 'HELP' guru flag.
        print("Rebooting Amiga.")
        amiga.reboot()
        print("Waiting for debugger to show up.")
        if debugger == 0:
            amiga = RomWack(syncabort=self.syncabort, serial=self.ser, Debug=debug)
        elif debugger == 1:
            amiga = SAD(syncabort=self.syncabort, serial=self.ser, Debug=debug)
        if amiga.debugger == "SAD":
            print(f'In SAD debugger. Bugged: {amiga.sadbug}. Entry: {amiga.entry}.')
        else:
            print(f"In {amiga.debugger} debugger.")
        # execlib should be the same as before, else CoolCapture wouldn't be called, as execbase would have been rebuilt.
        if execlib.version < 36:
            print("Patching table of residents to disable strap.")
            execlib.removeresidentstrap()
        else:
            print("Preparing resident module structure with init pointing to Debug().")
            # Allocate chip just to ensure addr MSB is not set, as that has special meaning in resident table.
            debugromtag = execlib.AllocMem(execlib.rt_sizeof, execlib.MEMF_CLEAR | execlib.MEMF_CHIP)
            amiga.poke16(debugromtag + execlib.rt_MatchWord, execlib.RTC_MATCHWORD)
            amiga.poke32(debugromtag + execlib.rt_MatchTag, debugromtag)
            amiga.poke8(debugromtag + execlib.rt_Flags, 1)
            amiga.poke8(debugromtag + execlib.rt_Version, execlib.version)  # not actually needed as far as I can tell, but polite.
            amiga.poke32(debugromtag + execlib.rt_Init, amiga.execdebug)
            print("Patching table of residents to replace strap with the prepared resident module.")
            execlib.replaceresidentbyname("strap", debugromtag)
        print("Releasing Amiga.")
        if execlib.version < 36:
            print("If all went well, without strap, Amiga will call Debug() after resident initialization.")
        else:
            print("If all went well, in place of strap, Amiga will call Debug() during resident initialization.")
        amiga.resume()
        self.DebuggerConnectWorker(baudrate, debugger, paranoid, debug, dangerfast, resetfirst, crashentry)
        return
|
generate_data.py | #!/usr/bin/env python3
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import csv
import json
import multiprocessing
import os
import pprint
import re
import shutil
import subprocess
import sys
import tempfile
import threading
import time
import shlex
import math
from encoder_commands import *
import binary_vars
# Cache of previously resolved binaries: name -> absolute path.
binary_absolute_paths = {}


def find_absolute_path(use_system_path, binary):
    """Resolve *binary* to an absolute executable path, with caching.

    When *use_system_path* is true, search $PATH first; otherwise (or as
    a fallback) look next to this script. Exits the process with an
    error message if the binary cannot be found.
    """
    cached = binary_absolute_paths.get(binary)
    if cached is not None:
        return cached
    base = os.path.basename(binary)
    if use_system_path:
        for directory in os.environ["PATH"].split(os.pathsep):
            candidate = os.path.join(directory.strip('"'), base)
            if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
                binary_absolute_paths[binary] = candidate
                return candidate
    local = os.path.join(os.path.dirname(os.path.abspath(__file__)), binary)
    if os.path.isfile(local) and os.access(local, os.X_OK):
        if use_system_path:
            print(
                "WARNING: '%s' not in PATH (using --use-system-path), falling back on locally-compiled binary."
                % base)
        binary_absolute_paths[binary] = local
        return local
    sys.exit(
        "ERROR: '%s' missing, did you run the corresponding setup script?" %
        (base if use_system_path else local))
yuv_clip_pattern = re.compile(r"^(.*[\._](\d+)_(\d+).yuv):(\d+)$")
def clip_arg(clip):
(file_root, file_ext) = os.path.splitext(clip)
if file_ext != '.yuv':
width = int(
subprocess.check_output(
["mediainfo", "--Inform=Video;%Width%", clip],
encoding='utf-8'))
height = int(
subprocess.check_output(
["mediainfo", "--Inform=Video;%Height%", clip],
encoding='utf-8'))
fps = float(
subprocess.check_output(
["mediainfo", "--Inform=Video;%FrameRate%", clip],
encoding='utf-8'))
return {
'input_file': clip,
'height': height,
'width': width,
'fps': fps,
'file_type': 'y4m'
}
# Make sure YUV files are correctly formatted + look readable before actually
# running the script on them.
clip_match = yuv_clip_pattern.match(clip)
if not clip_match:
raise argparse.ArgumentTypeError(
"Argument '%s' doesn't match input format.\n" % clip)
input_file = clip_match.group(1)
if not os.path.isfile(input_file) or not os.access(input_file, os.R_OK):
raise argparse.ArgumentTypeError(
"'%s' is either not a file or cannot be opened for reading.\n" %
input_file)
return {
'input_file': clip_match.group(1),
'width': int(clip_match.group(2)),
'height': int(clip_match.group(3)),
'fps': float(clip_match.group(4)),
'file_type': 'yuv'
}
def psnr_to_dmos(score):
    """Map a PSNR score (dB) to a DMOS-like 0..1 value via a logistic fit.

    Higher PSNR yields a lower (better) DMOS value; the curve crosses
    0.5 at 26.19 dB with slope parameter 0.1657.
    """
    logistic = 1 / (1 + math.exp(-0.1657 * (score - 26.19)))
    return 1 - logistic
def encoder_pairs(string):
    """argparse type: parse 'encoder:codec,encoder:codec,...' pairs.

    Each encoder name is validated against the known encoder commands;
    returns a list of (encoder, codec) tuples.
    """
    pair_pattern = re.compile(r"^([\w\-]+):(\w+)$")
    pairs = []
    for chunk in string.split(','):
        match = pair_pattern.match(chunk)
        if not match:
            raise argparse.ArgumentTypeError(
                "Argument '%s' of '%s' doesn't match input format.\n" %
                (chunk, string))
        encoder = match.group(1)
        codec = match.group(2)
        if not get_encoder_command(encoder):
            raise argparse.ArgumentTypeError(
                "Unknown encoder: '%s' in pair '%s'\n" % (encoder, chunk))
        pairs.append((encoder, codec))
    return pairs
def writable_dir(directory):
    """argparse type: require *directory* to exist and be writable."""
    is_usable = os.path.isdir(directory) and os.access(directory, os.W_OK)
    if not is_usable:
        raise argparse.ArgumentTypeError(
            "'%s' is either not a directory or cannot be opened for writing.\n"
            % directory)
    return directory
def positive_int(num):
    """argparse type: parse *num* as a strictly positive integer.

    Raises argparse.ArgumentTypeError when the value is zero or
    negative.
    """
    num_int = int(num)
    if num_int <= 0:
        # Bug fix: formatting the original argument (a string under
        # argparse) with %d raised TypeError; format the parsed int.
        raise argparse.ArgumentTypeError("'%d' is not a positive integer.\n" %
                                         num_int)
    return num_int
# Module-level CLI definition; main() calls parser.parse_args() and the
# result is shared via the global `args`.
parser = argparse.ArgumentParser(
    description='Generate graph data for video-quality comparison.')
# Sweep bitrates instead of QP values.
parser.add_argument('--enable-bitrate', action='store_true')
parser.add_argument('clips',
                    nargs='+',
                    metavar='clip_WIDTH_HEIGHT.yuv:FPS|clip.y4m',
                    type=clip_arg)
parser.add_argument('--single-datapoint', action='store_true')
# Print encoder commands instead of running them.
parser.add_argument('--dump-commands', action='store_true')
parser.add_argument('--enable-vmaf', action='store_true')
# Keep encoded bitstreams here instead of deleting them.
parser.add_argument('--encoded-file-dir', default=None, type=writable_dir)
parser.add_argument('--encoders',
                    required=True,
                    metavar='encoder:codec,encoder:codec...',
                    type=encoder_pairs)
parser.add_argument('--frame-offset', default=0, type=positive_int)
parser.add_argument('--num-frames', default=-1, type=positive_int)
# TODO(pbos): Add support for multiple spatial layers.
parser.add_argument('--num-spatial-layers', type=int, default=1, choices=[1])
parser.add_argument('--num-temporal-layers',
                    type=int,
                    default=1,
                    choices=[1, 2, 3])
parser.add_argument('--out',
                    required=True,
                    metavar='output.txt',
                    type=argparse.FileType('w'))
parser.add_argument('--use-system-path', action='store_true')
parser.add_argument('--workers', type=int, default=multiprocessing.cpu_count())
def prepare_clips(args, temp_dir):
    """Normalize all input clips into raw .yuv (plus a .y4m copy).

    Converts containerized clips with ffmpeg, records sha1/frame counts,
    optionally truncates to --frame-offset/--num-frames, and writes a
    .y4m version of every clip into *temp_dir*.
    """
    clips = args.clips
    # Bug fix: clip_arg stores file_type as 'yuv'/'y4m' (no leading
    # dot); the old "!= '.yuv'" comparison matched every clip and pushed
    # raw .yuv inputs through ffmpeg as well.
    non_yuv_clips = [clip for clip in clips if clip['file_type'] != 'yuv']
    # Convert all non yuv clips to yuv using ffmpeg
    if non_yuv_clips:
        print("Converting %d clip%s to yuv..." %
              (len(non_yuv_clips), "" if len(non_yuv_clips) == 1 else "s"))
        for clip in non_yuv_clips:
            (fd, yuv_file) = tempfile.mkstemp(dir=temp_dir,
                suffix=".%d_%d.yuv" % (clip['width'], clip['height']))
            os.close(fd)
            with open(os.devnull, 'w') as devnull:
                subprocess.check_call(
                    ['ffmpeg', '-y', '-i', clip['input_file'], yuv_file], stdout=devnull, stderr=devnull)
            clip['yuv_file'] = yuv_file
    # Get sha1sum of file and other metadata
    for clip in clips:
        clip['sha1sum'] = subprocess.check_output(
            ['sha1sum', clip['input_file']], encoding='utf-8').split(' ', 1)[0]
        if 'yuv_file' not in clip:
            clip['yuv_file'] = clip['input_file']
        # I420: 1.5 bytes per pixel.
        frame_size = int(6 * clip['width'] * clip['height'] / 4)
        input_yuv_filesize = os.path.getsize(clip['yuv_file'])
        clip['input_total_frames'] = input_yuv_filesize / frame_size
        # Truncate file if necessary.
        # NOTE(review): with --frame-offset set but --num-frames left at
        # its -1 default, total_filesize is negative and the truncated
        # file ends up empty — confirm whether that combination is ever
        # used before changing it.
        if args.frame_offset > 0 or args.num_frames > 0:
            (fd, truncated_filename) = tempfile.mkstemp(
                dir=temp_dir, suffix=".yuv")
            blocksize = 2048 * 1024
            total_filesize = args.num_frames * frame_size
            with os.fdopen(fd, 'wb', blocksize) as truncated_file:
                with open(clip['yuv_file'], 'rb') as original_file:
                    original_file.seek(args.frame_offset * frame_size)
                    while total_filesize > 0:
                        data = original_file.read(
                            blocksize if blocksize < total_filesize else total_filesize)
                        truncated_file.write(data)
                        total_filesize -= blocksize
            clip['yuv_file'] = truncated_filename
        # Also provide a .y4m rendition for encoders that require it.
        (fd, y4m_file) = tempfile.mkstemp(dir=temp_dir, suffix='.y4m')
        os.close(fd)
        with open(os.devnull, 'w') as devnull:
            subprocess.check_call(
                ['ffmpeg', '-y', '-s', '%dx%d' % (clip['width'], clip['height']), '-r', str(int(clip['fps'] + 0.5)), '-pix_fmt', 'yuv420p', '-i', clip['yuv_file'], y4m_file],
                stdout=devnull,
                stderr=devnull
            )
        clip['y4m_file'] = y4m_file
def decode_file(job, temp_dir, encoded_file):
    """Decode *encoded_file* into a raw .yuv inside *temp_dir*.

    Returns (decoded_file, framestats_file); framestats_file is None for
    H.264, whose decoder produces no per-frame statistics here.
    """
    (fd, decoded_file) = tempfile.mkstemp(dir=temp_dir, suffix=".yuv")
    os.close(fd)
    (fd, framestats_file) = tempfile.mkstemp(dir=temp_dir, suffix=".csv")
    os.close(fd)
    with open(os.devnull, 'w') as devnull:
        if job['codec'] in ['av1', 'vp8', 'vp9']:
            # aomdec for AV1, vpxdec for VP8/VP9.
            decoder = binary_vars.AOM_DEC_BIN if job[
                'codec'] == 'av1' else binary_vars.VPX_DEC_BIN
            subprocess.check_call([
                decoder, '--i420',
                '--codec=%s' % job['codec'], '-o', decoded_file, encoded_file,
                '--framestats=%s' % framestats_file
            ],
                                  stdout=devnull,
                                  stderr=devnull,
                                  encoding='utf-8')
        elif job['codec'] == 'h264':
            subprocess.check_call(
                [binary_vars.H264_DEC_BIN, encoded_file, decoded_file],
                stdout=devnull,
                stderr=devnull,
                encoding='utf-8')
            # TODO(pbos): Generate H264 framestats.
            framestats_file = None
    return (decoded_file, framestats_file)
def add_framestats(results_dict, framestats_file, statstype):
    """Merge per-frame CSV columns into *results_dict*.

    Each CSV column <metric> becomes a 'frame-<metric>' list whose
    values are converted with *statstype* (e.g. int or float).
    """
    with open(framestats_file) as csvfile:
        for row in csv.DictReader(csvfile):
            for metric, value in row.items():
                key = 'frame-%s' % metric
                results_dict.setdefault(key, []).append(statstype(value))
def generate_metrics(results_dict, job, temp_dir, encoded_file):
    """Decode one encoded layer and fill *results_dict* with quality metrics.

    Runs the tiny_ssim tool (PSNR/SSIM), optionally VMAF, and derives
    per-layer fps/bitrate figures. Reads the module-level `args` global.
    """
    (decoded_file, decoder_framestats) = decode_file(job, temp_dir,
                                                    encoded_file['filename'])
    clip = job['clip']
    # Each lower temporal layer halves the frame rate.
    temporal_divide = 2**(job['num_temporal_layers'] - 1 -
                          encoded_file['temporal-layer'])
    temporal_skip = temporal_divide - 1
    # TODO(pbos): Perform SSIM on downscaled .yuv files for spatial layers.
    (fd, metrics_framestats) = tempfile.mkstemp(dir=temp_dir, suffix=".csv")
    os.close(fd)
    ssim_results = subprocess.check_output([
        binary_vars.TINY_SSIM_BIN, clip['yuv_file'], decoded_file,
        "%dx%d" % (results_dict['width'], results_dict['height']),
        str(temporal_skip), metrics_framestats
    ],
                                           encoding='utf-8').splitlines()
    # tiny_ssim output label -> results_dict key.
    metric_map = {
        'AvgPSNR': 'avg-psnr',
        'AvgPSNR-Y': 'avg-psnr-y',
        'AvgPSNR-U': 'avg-psnr-u',
        'AvgPSNR-V': 'avg-psnr-v',
        'GlbPSNR': 'glb-psnr',
        'GlbPSNR-Y': 'glb-psnr-y',
        'GlbPSNR-U': 'glb-psnr-u',
        'GlbPSNR-V': 'glb-psnr-v',
        'SSIM': 'ssim',
        'SSIM-Y': 'ssim-y',
        'SSIM-U': 'ssim-u',
        'SSIM-V': 'ssim-v',
        'VpxSSIM': 'vpx-ssim',
    }
    for line in ssim_results:
        if not line:
            continue
        (metric, value) = line.split(': ')
        if metric in metric_map:
            results_dict[metric_map[metric]] = float(value)
        elif metric == 'Nframes':
            # NOTE(review): layer_frames stays unbound if tiny_ssim
            # never prints 'Nframes' — the bitrate computation below
            # would then raise; confirm the tool always emits it.
            layer_frames = int(value)
            results_dict['frame-count'] = layer_frames
    results_dict['psnr-dmos'] = psnr_to_dmos(results_dict['avg-psnr'])
    if decoder_framestats:
        add_framestats(results_dict, decoder_framestats, int)
    add_framestats(results_dict, metrics_framestats, float)
    if args.enable_vmaf:
        (fd, results_file) = tempfile.mkstemp(
            dir=temp_dir,
            suffix="%s-%s-%d.json" %
            (job['encoder'], job['codec'], job['qp_value']))
        os.close(fd)
        # vmafossexec writes its scores to the JSON log read back below.
        vmaf_results = subprocess.check_output(['vmaf/libvmaf/build/tools/vmafossexec', 'yuv420p', str(results_dict['width']), str(
            results_dict['height']), clip['yuv_file'], decoded_file, 'vmaf/model/vmaf_v0.6.1.pkl', '--log-fmt', 'json', '--log', results_file], encoding='utf-8')
        with open(results_file, 'r') as results_file:
            vmaf_obj = json.load(results_file)
        results_dict['vmaf'] = float(vmaf_obj['VMAF score'])
        results_dict['frame-vmaf'] = []
        for frame in vmaf_obj['frames']:
            results_dict['frame-vmaf'].append(frame['metrics']['vmaf'])
    layer_fps = clip['fps'] / temporal_divide
    results_dict['layer-fps'] = layer_fps
    spatial_divide = 2**(job['num_spatial_layers'] - 1 -
                         encoded_file['spatial-layer'])
    results_dict['layer-width'] = results_dict['width'] // spatial_divide
    results_dict['layer-height'] = results_dict['height'] // spatial_divide
    # target_bitrate_bps = job['target_bitrates_kbps'][
    #     encoded_file['temporal-layer']] * 1000
    # Actual bitrate from file size and per-layer frame rate.
    bitrate_used_bps = os.path.getsize(
        encoded_file['filename']) * 8 * layer_fps / layer_frames
    # results_dict['target-bitrate-bps'] = target_bitrate_bps
    results_dict['actual-bitrate-bps'] = bitrate_used_bps
    results_dict['bitrate-utilization'] = float(bitrate_used_bps)
def run_command(job, encoder_command, job_temp_dir, encoded_file_dir):
    """Run one encoder command and compute metrics for each output layer.

    Returns (results, output) where results is a list of per-layer
    metric dicts, or (None, error_text) if the command failed. Encoded
    files are moved into *encoded_file_dir* when given, else deleted;
    *job_temp_dir* is removed on success.
    """
    (command, encoded_files) = encoder_command
    clip = job['clip']
    start_time = time.time()
    try:
        # Commands may chain sub-commands with '&&', so run via the
        # shell; every other arg is quoted individually.
        process = subprocess.Popen(' '.join(
            shlex.quote(arg) if arg != '&&' else arg for arg in command),
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT,
                                   encoding='utf-8',
                                   shell=True)
    except OSError as e:
        return (None, "> %s\n%s" % (" ".join(command), e))
    (output, _) = process.communicate()
    actual_encode_ms = (time.time() - start_time) * 1000
    input_yuv_filesize = os.path.getsize(clip['yuv_file'])
    # I420 is 1.5 bytes/pixel, hence 6*w*h/4.
    input_num_frames = int(input_yuv_filesize /
                           (6 * clip['width'] * clip['height'] / 4))
    # Realtime budget: encoding should not take longer than playback.
    target_encode_ms = float(input_num_frames) * 1000 / clip['fps']
    if process.returncode != 0:
        return (None, "> %s\n%s" % (" ".join(command), output))
    results = [{} for i in range(len(encoded_files))]
    for i in range(len(results)):
        results_dict = results[i]
        results_dict['input-file'] = os.path.basename(clip['input_file'])
        results_dict['input-file-sha1sum'] = clip['sha1sum']
        results_dict['input-total-frames'] = clip['input_total_frames']
        results_dict['frame-offset'] = args.frame_offset
        # results_dict['param'] = job['param']
        # results_dict['bitrate-config-kbps'] = job['target_bitrates_kbps']
        results_dict['layer-pattern'] = "%dsl%dtl" % (
            job['num_spatial_layers'], job['num_temporal_layers'])
        results_dict['encoder'] = job['encoder']
        results_dict['codec'] = job['codec']
        results_dict['height'] = clip['height']
        results_dict['width'] = clip['width']
        results_dict['fps'] = clip['fps']
        results_dict['actual-encode-time-ms'] = actual_encode_ms
        results_dict['target-encode-time-ms'] = target_encode_ms
        results_dict[
            'encode-time-utilization'] = actual_encode_ms / target_encode_ms
        layer = encoded_files[i]
        results_dict['temporal-layer'] = layer['temporal-layer']
        results_dict['spatial-layer'] = layer['spatial-layer']
        generate_metrics(results_dict, job, job_temp_dir, layer)
        if encoded_file_dir:
            # Preserve the bitstream under a descriptive name.
            param = job['qp_value'] if job['param'] == 'qp' else job[
                'target_bitrates_kbps'][-1]
            encoded_file_pattern = "%s-%s-%s-%dsl%dtl-%d-sl%d-tl%d%s" % (
                os.path.splitext(os.path.basename(clip['input_file']))[0],
                job['encoder'], job['codec'], job['num_spatial_layers'],
                job['num_temporal_layers'], param, layer['spatial-layer'],
                layer['temporal-layer'], os.path.splitext(layer['filename'])[1])
            shutil.move(layer['filename'],
                        os.path.join(encoded_file_dir, encoded_file_pattern))
        else:
            os.remove(layer['filename'])
    shutil.rmtree(job_temp_dir)
    return (results, output)
def find_qp():
    """Return the QP sweep values (a single point with --single-datapoint)."""
    return [55] if args.single_datapoint else [35, 40, 45, 48, 53, 55]
def find_bitrates(width, height):
    """Return the bitrate sweep (kbps) appropriate for a clip's resolution.

    Do multiples of 100, because grouping based on bitrate splits in
    generate_graphs.py doesn't round properly.
    TODO(pbos): Propagate the bitrate split in the data instead of
    inferring it from the job to avoid rounding errors.
    Bounds are significantly lower than exact values, so 800p still
    counts as 720p for instance.
    """
    if args.single_datapoint:
        return [100]
    ladder = [
        (176 * 144, [20, 40, 60, 80, 100, 120]),
        (640 * 360, [100, 150, 200, 250, 300, 350]),
        (854 * 480, [125, 250, 375, 500, 625, 750]),
        (1280 * 720, [400, 600, 800, 1000, 1200, 1400]),
        (1920 * 1080, [800, 1200, 1600, 2000, 2400, 2800]),
    ]
    pixel_bound = width * height
    for bound, rates in ladder:
        if pixel_bound <= bound:
            return rates
    return [1200, 1800, 2400, 3000, 3600, 4200]
# Cumulative bitrate fraction per temporal layer, indexed by
# (num_temporal_layers - 1); the top layer always carries the full rate.
layer_bitrates = [[1], [0.6, 1], [0.45, 0.65, 1]]


def split_temporal_bitrates_kbps(target_bitrate_kbps, num_temporal_layers):
    """Split a total bitrate into cumulative per-temporal-layer targets."""
    fractions = layer_bitrates[num_temporal_layers - 1]
    return [int(fraction * target_bitrate_kbps) for fraction in fractions]
def generate_jobs(args, temp_dir):
    """Expand clips x params x encoders into runnable job tuples.

    Each element is (job_dict, (command, encoded_files), job_temp_dir),
    with the encoder binary resolved to an absolute path.
    """
    jobs = []
    for clip in args.clips:
        # Sweep bitrate or QP depending on --enable-bitrate.
        params = find_bitrates(
            clip['width'],
            clip['height']) if args.enable_bitrate else find_qp()
        for param in params:
            for (encoder, codec) in args.encoders:
                job = {
                    'encoder': encoder,
                    'codec': codec,
                    'clip': clip,
                    'num_spatial_layers': args.num_spatial_layers,
                    'num_temporal_layers': args.num_temporal_layers,
                }
                if args.enable_bitrate:
                    job.update({
                        'param':
                        'bitrate',
                        'qp_value':
                        -1,
                        'target_bitrates_kbps':
                        split_temporal_bitrates_kbps(
                            param, args.num_temporal_layers)
                    })
                else:
                    job.update({
                        'param': 'qp',
                        'qp_value': param,
                        'target_bitrates_kbps': []
                    })
                job_temp_dir = tempfile.mkdtemp(dir=temp_dir)
                (command, encoded_files) = get_encoder_command(job['encoder'])(
                    job, job_temp_dir)
                # Replace the bare binary name with its absolute path.
                full_command = find_absolute_path(args.use_system_path,
                                                  command[0])
                command = [
                    full_command if word == command[0] else word
                    for word in command
                ]
                jobs.append((job, (command, encoded_files), job_temp_dir))
    return jobs
def start_daemon(func):
    """Start *func* on a new daemon thread and return the running Thread."""
    thread = threading.Thread(target=func, daemon=True)
    thread.start()
    return thread
def job_to_string(job):
    """Return a one-line human-readable summary of an encode job."""
    if job['param'] == 'bitrate':
        param = ":".join(str(rate) for rate in job['target_bitrates_kbps'])
    else:
        param = job['qp_value']
    return "%s:%s %dsl%dtl %s %s" % (
        job['encoder'], job['codec'], job['num_spatial_layers'],
        job['num_temporal_layers'], param,
        os.path.basename(job['clip']['input_file']))
def worker():
    """Thread body: pop jobs off the shared list and run them.

    Shared state (jobs, counters, args.out) is only touched while
    holding thread_lock; the encode itself runs unlocked.
    """
    global args
    global jobs
    global current_job
    global has_errored
    global total_jobs
    pp = pprint.PrettyPrinter(indent=2)
    while True:
        with thread_lock:
            if not jobs:
                return
            (job, command, job_temp_dir) = jobs.pop()
        (results, error) = run_command(job, command, job_temp_dir,
                                       args.encoded_file_dir)
        job_str = job_to_string(job)
        with thread_lock:
            current_job += 1
            run_ok = results is not None
            print(
                "[%d/%d] %s (%s)" %
                (current_job, total_jobs, job_str, "OK" if run_ok else "ERROR"))
            if not run_ok:
                has_errored = True
                print(error)
            else:
                # Emit each layer's result dict followed by a comma so
                # main() can bracket the stream into a list literal.
                for result in results:
                    args.out.write(pp.pformat(result))
                    args.out.write(',\n')
                args.out.flush()


# Guards the shared job list, progress counters and output file.
thread_lock = threading.Lock()
def main():
    """Parse args, prepare clips, fan jobs out to worker threads.

    Returns 0 on success (or after --dump-commands), 1 if any job
    errored.
    """
    global args
    global jobs
    global total_jobs
    global current_job
    global has_errored
    temp_dir = tempfile.mkdtemp()
    args = parser.parse_args()
    prepare_clips(args, temp_dir)
    jobs = generate_jobs(args, temp_dir)
    total_jobs = len(jobs)
    current_job = 0
    has_errored = False
    if args.dump_commands:
        # Print the commands that would run, then exit without encoding.
        for (job, (command, encoded_files), job_temp_dir) in jobs:
            current_job += 1
            print("[%d/%d] %s" % (current_job, total_jobs, job_to_string(job)))
            print("> %s" % " ".join(command))
            print()
        shutil.rmtree(temp_dir)
        return 0
    # Make sure commands for quality metrics are present.
    find_absolute_path(False, binary_vars.TINY_SSIM_BIN)
    for (encoder, codec) in args.encoders:
        if codec in ['vp8', 'vp9']:
            find_absolute_path(False, binary_vars.VPX_DEC_BIN)
        elif codec == 'av1':
            find_absolute_path(False, binary_vars.AOM_DEC_BIN)
        elif codec == 'h264':
            find_absolute_path(False, binary_vars.H264_DEC_BIN)
    if args.enable_vmaf:
        find_absolute_path(False, binary_vars.VMAF_BIN)
    print("[0/%d] Running jobs..." % total_jobs)
    # Workers append pprint'ed dicts + commas; bracket them into a list.
    args.out.write('[')
    workers = [start_daemon(worker) for i in range(args.workers)]
    [t.join() for t in workers]
    args.out.write(']\n')
    shutil.rmtree(temp_dir)
    return 1 if has_errored else 0
# Script entry point: exit status 0 on success, 1 if any job errored.
if __name__ == '__main__':
    sys.exit(main())
|
blacs_workers.py | #####################################################################
# #
# /NI_DAQmx/blacs_workers.py #
# #
# Copyright 2018, Monash University, JQI, Christopher Billington #
# #
# This file is part of the module labscript_devices, in the #
# labscript suite (see http://labscriptsuite.org), and is #
# licensed under the Simplified BSD License. See the license.txt #
# file in the root of the project for the full license. #
# #
#####################################################################
import sys
import time
import threading
from PyDAQmx import *
from PyDAQmx.DAQmxConstants import *
from PyDAQmx.DAQmxTypes import *
from PyDAQmx.DAQmxCallBack import *
import numpy as np
from numpy.lib.recfunctions import structured_to_unstructured
import labscript_utils.h5_lock
import h5py
from zprocess import Event
from zprocess.utils import _reraise
import labscript_utils.properties as properties
from labscript_utils import dedent
from labscript_utils.connections import _ensure_str
from blacs.tab_base_classes import Worker
from .utils import split_conn_port, split_conn_DO, split_conn_AI
from .daqmx_utils import incomplete_sample_detection
class NI_DAQmxOutputWorker(Worker):
    """BLACS worker that programs the analog (AO) and digital (DO) outputs of
    an NI DAQmx device, both for software-timed manual-mode control and for
    hardware-timed buffered shots."""

    def init(self):
        # BLACS worker setup hook (called instead of __init__). Attributes such
        # as self.MAX_name, self.num_AO, self.ports, self.Vmin and self.Vmax
        # are injected by BLACS before this runs.
        self.check_version()
        # Reset Device: clears previously added routes etc. Note: is insufficient for
        # some devices, which require power cycling to truly reset.
        DAQmxResetDevice(self.MAX_name)
        self.start_manual_mode_tasks()

    def stop_tasks(self):
        """Stop and clear the current AO and DO tasks, if any, and set the
        corresponding attributes to None."""
        if self.AO_task is not None:
            self.AO_task.StopTask()
            self.AO_task.ClearTask()
            self.AO_task = None
        if self.DO_task is not None:
            self.DO_task.StopTask()
            self.DO_task.ClearTask()
            self.DO_task = None

    def shutdown(self):
        # Called by BLACS when the worker is shut down; release DAQmx tasks.
        self.stop_tasks()

    def check_version(self):
        """Check the version of PyDAQmx is high enough to avoid a known bug"""
        major = uInt32()
        minor = uInt32()
        patch = uInt32()
        DAQmxGetSysNIDAQMajorVersion(major)
        DAQmxGetSysNIDAQMinorVersion(minor)
        DAQmxGetSysNIDAQUpdateVersion(patch)
        if major.value == 14 and minor.value < 2:
            msg = """There is a known bug with buffered shots using NI DAQmx v14.0.0.
                This bug does not exist on v14.2.0. You are currently using v%d.%d.%d.
                Please ensure you upgrade to v14.2.0 or higher."""
            raise Exception(dedent(msg) % (major.value, minor.value, patch.value))

    def start_manual_mode_tasks(self):
        """Create and start unbuffered (software-timed) AO and DO tasks for
        manual-mode (front panel) control."""
        # Create tasks:
        if self.num_AO > 0:
            self.AO_task = Task()
        else:
            self.AO_task = None
        if self.ports:
            self.DO_task = Task()
        else:
            self.DO_task = None
        # Setup AO channels
        for i in range(self.num_AO):
            con = self.MAX_name + "/ao%d" % i
            self.AO_task.CreateAOVoltageChan(
                con, "", self.Vmin, self.Vmax, DAQmx_Val_Volts, None
            )
        # Setup DO channels
        for port_str in sorted(self.ports, key=split_conn_port):
            if not self.ports[port_str]['num_lines']:
                continue
            # Add each port to the task:
            con = '%s/%s' % (self.MAX_name, port_str)
            self.DO_task.CreateDOChan(con, "", DAQmx_Val_ChanForAllLines)
        # Start tasks:
        if self.AO_task is not None:
            self.AO_task.StartTask()
        if self.DO_task is not None:
            self.DO_task.StartTask()

    def program_manual(self, front_panel_values):
        """Write the given front panel values to the manual-mode AO and DO
        tasks. Returns an (empty) dict of coerced values."""
        written = int32()
        if self.AO_task is not None:
            AO_data = np.zeros(self.num_AO, dtype=np.float64)
            for i in range(self.num_AO):
                AO_data[i] = front_panel_values['ao%d' % i]
            self.AO_task.WriteAnalogF64(
                1, True, 1, DAQmx_Val_GroupByChannel, AO_data, written, None
            )
        if self.DO_task is not None:
            # Due to two bugs in DAQmx, we will always pack our data into a uint32 and
            # write using WriteDigitalU32. The first bug is some kind of use of
            # uninitialised memory when using WriteDigitalLines, discussed here:
            # https://bitbucket.org/labscript_suite
            # /labscript_devices/pull-requests/56/#comment-83671312
            # The second is that using a smaller int dtype sometimes fails even though
            # it is the correct int size for the size of the port. Using a 32 bit int
            # always works, the additional bits are ignored. This is discussed here:
            # https://forums.ni.com/t5/Multifunction-DAQ
            # /problem-with-correlated-DIO-on-USB-6341/td-p/3344066
            DO_data = np.zeros(len(self.ports), dtype=np.uint32)
            for conn, value in front_panel_values.items():
                if conn.startswith('port'):
                    port, line = split_conn_DO(conn)
                    DO_data[port] |= value << line
            self.DO_task.WriteDigitalU32(
                1, True, 10.0, DAQmx_Val_GroupByChannel, DO_data, written, None
            )
        # TODO: return coerced/quantised values
        return {}

    def get_output_tables(self, h5file, device_name):
        """Return the AO and DO tables from the file, or None if they do not exist."""
        with h5py.File(h5file, 'r') as hdf5_file:
            group = hdf5_file['devices'][device_name]
            try:
                AO_table = group['AO'][:]
            except KeyError:
                AO_table = None
            try:
                DO_table = group['DO'][:]
            except KeyError:
                DO_table = None
        return AO_table, DO_table

    def set_mirror_clock_terminal_connected(self, connected):
        """Mirror the clock terminal on another terminal to allow daisy chaining of the
        clock line to other devices, if applicable"""
        if self.clock_mirror_terminal is None:
            return
        if connected:
            DAQmxConnectTerms(
                self.clock_terminal,
                self.clock_mirror_terminal,
                DAQmx_Val_DoNotInvertPolarity,
            )
        else:
            DAQmxDisconnectTerms(self.clock_terminal, self.clock_mirror_terminal)

    def program_buffered_DO(self, DO_table):
        """Create the DO task and program in the DO table for a shot. Return a
        dictionary of the final values of each channel in use"""
        if DO_table is None:
            return {}
        self.DO_task = Task()
        written = int32()
        ports = DO_table.dtype.names
        final_values = {}
        for port_str in ports:
            # Add each port to the task:
            con = '%s/%s' % (self.MAX_name, port_str)
            self.DO_task.CreateDOChan(con, "", DAQmx_Val_ChanForAllLines)
            # Collect the final values of the lines on this port:
            port_final_value = DO_table[port_str][-1]
            for line in range(self.ports[port_str]["num_lines"]):
                # Extract each digital value from the packed bits:
                line_final_value = bool((1 << line) & port_final_value)
                final_values['%s/line%d' % (port_str, line)] = int(line_final_value)
        # Convert DO table to a regular array and ensure it is C continguous:
        DO_table = np.ascontiguousarray(
            structured_to_unstructured(DO_table, dtype=np.uint32)
        )
        # Check if DOs are all zero for the whole shot. If they are this triggers a
        # bug in NI-DAQmx that throws a cryptic error for buffered output. In this
        # case, run it as a non-buffered task.
        self.DO_all_zero = not np.any(DO_table)
        if self.DO_all_zero:
            DO_table = DO_table[0:1]
        if self.static_DO or self.DO_all_zero:
            # Static DO. Start the task and write data, no timing configuration.
            self.DO_task.StartTask()
            # Write data. See the comment in self.program_manual as to why we are using
            # uint32 instead of the native size of each port
            self.DO_task.WriteDigitalU32(
                1,  # npts
                False,  # autostart
                10.0,  # timeout
                DAQmx_Val_GroupByScanNumber,
                DO_table,
                written,
                None,
            )
        else:
            # We use all but the last sample (which is identical to the second last
            # sample) in order to ensure there is one more clock tick than there are
            # samples. This is required by some devices to determine that the task has
            # completed.
            npts = len(DO_table) - 1
            # Set up timing:
            self.DO_task.CfgSampClkTiming(
                self.clock_terminal,
                self.clock_limit,
                DAQmx_Val_Rising,
                DAQmx_Val_FiniteSamps,
                npts,
            )
            # Write data. See the comment in self.program_manual as to why we are using
            # uint32 instead of the native size of each port.
            self.DO_task.WriteDigitalU32(
                npts,
                False,  # autostart
                10.0,  # timeout
                DAQmx_Val_GroupByScanNumber,
                DO_table[:-1],  # All but the last sample as mentioned above
                written,
                None,
            )
            # Go!
            self.DO_task.StartTask()
        return final_values

    def program_buffered_AO(self, AO_table):
        """Create the AO task and program in the AO table for a shot. Return a
        dictionary of the final values of each channel in use."""
        if AO_table is None:
            return {}
        self.AO_task = Task()
        written = int32()
        channels = ', '.join(self.MAX_name + '/' + c for c in AO_table.dtype.names)
        self.AO_task.CreateAOVoltageChan(
            channels, "", self.Vmin, self.Vmax, DAQmx_Val_Volts, None
        )
        # Collect the final values of the analog outs:
        final_values = dict(zip(AO_table.dtype.names, AO_table[-1]))
        # Convert AO table to a regular array and ensure it is C continguous:
        AO_table = np.ascontiguousarray(
            structured_to_unstructured(AO_table, dtype=np.float64)
        )
        # Check if AOs are all zero for the whole shot. If they are this triggers a
        # bug in NI-DAQmx that throws a cryptic error for buffered output. In this
        # case, run it as a non-buffered task.
        self.AO_all_zero = not np.any(AO_table)
        if self.AO_all_zero:
            AO_table = AO_table[0:1]
        if self.static_AO or self.AO_all_zero:
            # Static AO. Start the task and write data, no timing configuration.
            self.AO_task.StartTask()
            self.AO_task.WriteAnalogF64(
                1, True, 10.0, DAQmx_Val_GroupByChannel, AO_table, written, None
            )
        else:
            # We use all but the last sample (which is identical to the second last
            # sample) in order to ensure there is one more clock tick than there are
            # samples. This is required by some devices to determine that the task has
            # completed.
            npts = len(AO_table) - 1
            # Set up timing:
            self.AO_task.CfgSampClkTiming(
                self.clock_terminal,
                self.clock_limit,
                DAQmx_Val_Rising,
                DAQmx_Val_FiniteSamps,
                npts,
            )
            # Write data:
            self.AO_task.WriteAnalogF64(
                npts,
                False,  # autostart
                10.0,  # timeout
                DAQmx_Val_GroupByScanNumber,
                AO_table[:-1],  # All but the last sample as mentioned above
                written,
                None,
            )
            # Go!
            self.AO_task.StartTask()
        return final_values

    def transition_to_buffered(self, device_name, h5file, initial_values, fresh):
        """Program the buffered AO/DO tasks from the shot file and return the
        final value of every output channel in use."""
        # Store the initial values in case we have to abort and restore them:
        self.initial_values = initial_values
        # Stop the manual mode output tasks, if any:
        self.stop_tasks()
        # Get the data to be programmed into the output tasks:
        AO_table, DO_table = self.get_output_tables(h5file, device_name)
        # Mirror the clock terminal, if applicable:
        self.set_mirror_clock_terminal_connected(True)
        # Program the output tasks and retrieve the final values of each output:
        DO_final_values = self.program_buffered_DO(DO_table)
        AO_final_values = self.program_buffered_AO(AO_table)
        final_values = {}
        final_values.update(DO_final_values)
        final_values.update(AO_final_values)
        # If we are the wait timeout device, then the final value of the timeout line
        # should be its rearm value:
        if self.wait_timeout_device == self.device_name:
            final_values[self.wait_timeout_connection] = self.wait_timeout_rearm_value
        return final_values

    def transition_to_manual(self, abort=False):
        """Stop the buffered tasks and return to manual-mode control. If
        abort=True, also restore the pre-shot front panel values."""
        # Stop output tasks and call program_manual. Only call StopTask if not aborting.
        # Otherwise results in an error if output was incomplete. If aborting, call
        # ClearTask only.
        npts = uInt64()
        samples = uInt64()
        tasks = []
        if self.AO_task is not None:
            tasks.append([self.AO_task, self.static_AO or self.AO_all_zero, 'AO'])
            self.AO_task = None
        if self.DO_task is not None:
            tasks.append([self.DO_task, self.static_DO or self.DO_all_zero, 'DO'])
            self.DO_task = None
        for task, static, name in tasks:
            if not abort:
                if not static:
                    try:
                        # Wait for task completion with a 1 second timeout:
                        task.WaitUntilTaskDone(1)
                    finally:
                        # Log where we were up to in sample generation, regardless of
                        # whether the above succeeded:
                        task.GetWriteCurrWritePos(npts)
                        task.GetWriteTotalSampPerChanGenerated(samples)
                        # Detect -1 even though they're supposed to be unsigned ints, -1
                        # seems to indicate the task was not started:
                        current = samples.value if samples.value != 2 ** 64 - 1 else -1
                        total = npts.value if npts.value != 2 ** 64 - 1 else -1
                        msg = 'Stopping %s at sample %d of %d'
                        self.logger.info(msg, name, current, total)
                task.StopTask()
            task.ClearTask()
        # Remove the mirroring of the clock terminal, if applicable:
        self.set_mirror_clock_terminal_connected(False)
        # Set up manual mode tasks again:
        self.start_manual_mode_tasks()
        if abort:
            # Reprogram the initial states:
            self.program_manual(self.initial_values)
        return True

    def abort_transition_to_buffered(self):
        # Abort is identical to transitioning to manual with abort=True:
        return self.transition_to_manual(True)

    def abort_buffered(self):
        # Abort is identical to transitioning to manual with abort=True:
        return self.transition_to_manual(True)
class NI_DAQmxAcquisitionWorker(Worker):
    """BLACS worker that acquires analog input (AI) data: continuously in
    manual mode, and hardware-triggered during buffered shots, saving traces
    to the shot file afterward."""

    # Read from the device at least every MAX_READ_INTERVAL seconds or every
    # MAX_READ_PTS samples, whichever is more frequent:
    MAX_READ_INTERVAL = 0.2
    MAX_READ_PTS = 10000

    def init(self):
        # BLACS worker setup hook (called instead of __init__).
        # Prevent interference between the read callback and the shutdown code:
        self.tasklock = threading.RLock()
        # Assigned on a per-task basis and cleared afterward:
        self.read_array = None
        self.task = None
        # Assigned on a per-shot basis and cleared afterward:
        self.buffered_mode = False
        self.h5_file = None
        self.acquired_data = None
        self.buffered_rate = None
        self.buffered_chans = None
        # Hard coded for now. Perhaps we will add functionality to enable
        # and disable inputs in manual mode, and adjust the rate:
        self.manual_mode_chans = ['ai%d' % i for i in range(self.num_AI)]
        self.manual_mode_rate = 1000
        # An event for knowing when the wait durations are known, so that we may use
        # them to chunk up acquisition data:
        self.wait_durations_analysed = Event('wait_durations_analysed')
        # Start task for manual mode
        self.start_task(self.manual_mode_chans, self.manual_mode_rate)

    def shutdown(self):
        # Called by BLACS on worker shutdown; stop any running task.
        if self.task is not None:
            self.stop_task()

    def read(self, task_handle, event_type, num_samples, callback_data=None):
        """Called as a callback by DAQmx while task is running. Also called by us to get
        remaining data just prior to stopping the task. Since the callback runs
        in a separate thread, we need to serialise access to instance variables"""
        samples_read = int32()
        with self.tasklock:
            if self.task is None or task_handle != self.task.taskHandle.value:
                # Task stopped already.
                return 0
            self.task.ReadAnalogF64(
                num_samples,
                -1,
                DAQmx_Val_GroupByScanNumber,
                self.read_array,
                self.read_array.size,
                samples_read,
                None,
            )
            # Select only the data read, and downconvert to 32 bit:
            data = self.read_array[: int(samples_read.value), :].astype(np.float32)
            if self.buffered_mode:
                # Append to the list of acquired data:
                self.acquired_data.append(data)
            else:
                # TODO: Send it to the broker thingy.
                pass
        return 0

    def start_task(self, chans, rate):
        """Set up a task that acquires data with a callback every MAX_READ_PTS points or
        MAX_READ_INTERVAL seconds, whichever is faster. NI DAQmx calls callbacks in a
        separate thread, so this method returns, but data acquisition continues until
        stop_task() is called. Data is appended to self.acquired_data if
        self.buffered_mode=True, or (TODO) sent to the [whatever the AI server broker is
        called] if self.buffered_mode=False."""
        if self.task is not None:
            raise RuntimeError('Task already running')
        if chans is None:
            return
        # Get data MAX_READ_PTS points at a time or once every MAX_READ_INTERVAL
        # seconds, whichever is faster:
        num_samples = min(self.MAX_READ_PTS, int(rate * self.MAX_READ_INTERVAL))
        self.read_array = np.zeros((num_samples, len(chans)), dtype=np.float64)
        self.task = Task()
        for chan in chans:
            # NOTE(review): ai8 and above are configured referenced
            # single-ended, lower channels differential — this looks
            # device-specific; confirm it matches the hardware in use.
            if int(chan[2:]) >= 8:
                aiType = DAQmx_Val_RSE
            else:
                aiType = DAQmx_Val_Diff
            self.task.CreateAIVoltageChan(
                self.MAX_name + '/' + chan,
                "",
                aiType,
                self.AI_range[0],
                self.AI_range[1],
                DAQmx_Val_Volts,
                None,
            )
        self.task.CfgSampClkTiming(
            "", rate, DAQmx_Val_Rising, DAQmx_Val_ContSamps, num_samples
        )
        if self.buffered_mode:
            # Buffered acquisition starts on the pseudoclock's trigger edge:
            self.task.CfgDigEdgeStartTrig(self.clock_terminal, DAQmx_Val_Rising)
        # This must not be garbage collected until the task is:
        self.task.callback_ptr = DAQmxEveryNSamplesEventCallbackPtr(self.read)
        self.task.RegisterEveryNSamplesEvent(
            DAQmx_Val_Acquired_Into_Buffer, num_samples, 0, self.task.callback_ptr, 100
        )
        self.task.StartTask()

    def stop_task(self):
        """Read any remaining data, then stop and clear the current task."""
        with self.tasklock:
            if self.task is None:
                raise RuntimeError('Task not running')
            # Read remaining data:
            # NOTE(review): read() compares its first argument against
            # self.task.taskHandle.value, but here the Task object itself is
            # passed — confirm this comparison succeeds (a Task instance and a
            # raw handle value may not compare equal), otherwise the final
            # chunk of data would be silently dropped.
            self.read(self.task, None, -1)
            # Stop the task:
            self.task.StopTask()
            self.task.ClearTask()
            self.task = None
            self.read_array = None

    def transition_to_buffered(self, device_name, h5file, initial_values, fresh):
        """Read the AI table from the shot file and, if there are acquisitions,
        switch from the manual-mode task to a triggered buffered task."""
        self.logger.debug('transition_to_buffered')
        # read channels, acquisition rate, etc from H5 file
        with h5py.File(h5file, 'r') as f:
            group = f['/devices/' + device_name]
            if 'AI' not in group:
                # No acquisition
                return {}
            AI_table = group['AI'][:]
            device_properties = properties.get(f, device_name, 'device_properties')
        chans = [_ensure_str(c) for c in AI_table['connection']]
        # Remove duplicates and sort:
        if chans:
            self.buffered_chans = sorted(set(chans), key=split_conn_AI)
        self.h5_file = h5file
        self.buffered_rate = device_properties['acquisition_rate']
        self.acquired_data = []
        # Stop the manual mode task and start the buffered mode task:
        self.stop_task()
        self.buffered_mode = True
        self.start_task(self.buffered_chans, self.buffered_rate)
        return {}

    def transition_to_manual(self, abort=False):
        """Stop buffered acquisition, save acquired data to the shot file
        (unless aborting), and restart the manual-mode task."""
        self.logger.debug('transition_to_manual')
        # If we were doing buffered mode acquisition, stop the buffered mode task and
        # start the manual mode task. We might not have been doing buffered mode
        # acquisition if abort() was called when we are not in buffered mode, or if
        # there were no acuisitions this shot.
        if not self.buffered_mode:
            return True
        if self.buffered_chans is not None:
            self.stop_task()
        self.buffered_mode = False
        self.logger.info('transitioning to manual mode, task stopped')
        self.start_task(self.manual_mode_chans, self.manual_mode_rate)
        if abort:
            # Aborting: discard data and per-shot state.
            self.acquired_data = None
            self.buffered_chans = None
            self.h5_file = None
            self.buffered_rate = None
            return True
        with h5py.File(self.h5_file, 'a') as hdf5_file:
            data_group = hdf5_file['data']
            data_group.create_group(self.device_name)
            waits_in_use = len(hdf5_file['waits']) > 0
        if self.buffered_chans is not None and not self.acquired_data:
            msg = """No data was acquired. Perhaps the acquisition task was not
                triggered to start, is the device connected to a pseudoclock?"""
            raise RuntimeError(dedent(msg))
        # Concatenate our chunks of acquired data and recast them as a structured
        # array with channel names:
        if self.acquired_data:
            start_time = time.time()
            dtypes = [(chan, np.float32) for chan in self.buffered_chans]
            raw_data = np.concatenate(self.acquired_data).view(dtypes)
            raw_data = raw_data.reshape((len(raw_data),))
            self.acquired_data = None
            self.buffered_chans = None
            self.extract_measurements(raw_data, waits_in_use)
            self.h5_file = None
            self.buffered_rate = None
            msg = 'data written, time taken: %ss' % str(time.time() - start_time)
        else:
            msg = 'No acquisitions in this shot.'
        self.logger.info(msg)
        return True

    def extract_measurements(self, raw_data, waits_in_use):
        """Slice raw_data into the individual acquisitions requested in the
        shot file (adjusting times for waits if any) and save each as a trace
        dataset under /data/traces."""
        self.logger.debug('extract_measurements')
        if waits_in_use:
            # There were waits in this shot. We need to wait until the other process has
            # determined their durations before we proceed:
            self.wait_durations_analysed.wait(self.h5_file)
        with h5py.File(self.h5_file, 'a') as hdf5_file:
            if waits_in_use:
                # get the wait start times and durations
                waits = hdf5_file['/data/waits']
                wait_times = waits['time']
                wait_durations = waits['duration']
            try:
                acquisitions = hdf5_file['/devices/' + self.device_name + '/AI']
            except KeyError:
                # No acquisitions!
                return
            try:
                measurements = hdf5_file['/data/traces']
            except KeyError:
                # Group doesn't exist yet, create it:
                measurements = hdf5_file.create_group('/data/traces')
            t0 = self.AI_start_delay
            for connection, label, t_start, t_end, _, _, _ in acquisitions:
                connection = _ensure_str(connection)
                label = _ensure_str(label)
                if waits_in_use:
                    # add durations from all waits that start prior to t_start of
                    # acquisition
                    t_start += wait_durations[(wait_times < t_start)].sum()
                    # compare wait times to t_end to allow for waits during an
                    # acquisition
                    t_end += wait_durations[(wait_times < t_end)].sum()
                i_start = int(np.ceil(self.buffered_rate * (t_start - t0)))
                i_end = int(np.floor(self.buffered_rate * (t_end - t0)))
                # np.ceil does what we want above, but float errors can miss the
                # equality:
                if t0 + (i_start - 1) / self.buffered_rate - t_start > -2e-16:
                    i_start -= 1
                # We want np.floor(x) to yield the largest integer < x (not <=):
                if t_end - t0 - i_end / self.buffered_rate < 2e-16:
                    i_end -= 1
                t_i = t0 + i_start / self.buffered_rate
                t_f = t0 + i_end / self.buffered_rate
                times = np.linspace(t_i, t_f, i_end - i_start + 1, endpoint=True)
                values = raw_data[connection][i_start : i_end + 1]
                dtypes = [('t', np.float64), ('values', np.float32)]
                data = np.empty(len(values), dtype=dtypes)
                data['t'] = times
                data['values'] = values
                measurements.create_dataset(label, data=data)

    def abort_buffered(self):
        # Abort is identical to transitioning to manual with abort=True:
        return self.transition_to_manual(True)

    def abort_transition_to_buffered(self):
        # Abort is identical to transitioning to manual with abort=True:
        return self.transition_to_manual(True)

    def program_manual(self, values):
        # This worker has no outputs to program; nothing to do.
        return {}
class NI_DAQmxWaitMonitorWorker(Worker):
    """BLACS worker that monitors the wait monitor line with a semiperiod
    counter input, retriggers the master pseudoclock on wait timeouts, and
    computes wait durations for the shot file."""

    def init(self):
        # BLACS worker setup hook (called instead of __init__).
        self.all_waits_finished = Event('all_waits_finished', type='post')
        self.wait_durations_analysed = Event('wait_durations_analysed', type='post')
        self.wait_completed = Event('wait_completed', type='post')
        # Set on a per-shot basis and cleared afterward:
        self.h5_file = None
        self.CI_task = None
        self.DO_task = None
        self.wait_table = None
        self.semiperiods = None
        self.wait_monitor_thread = None
        # Saved error in case one occurs in the thread, we can raise it later in
        # transition_to_manual:
        self.wait_monitor_thread_exception = None
        # To trigger early shutdown of the wait monitor thread:
        self.shutting_down = False
        # Does this device have the "incomplete sample detection" feature? This
        # determines whether the first sample on our semiperiod counter input task will
        # be automatically discarded before we see it, or whether we will have to
        # discard it ourselves
        self.incomplete_sample_detection = incomplete_sample_detection(self.MAX_name)
        # Data for timeout triggers:
        if self.timeout_trigger_type == 'rising':
            trigger_value = 1
            rearm_value = 0
        elif self.timeout_trigger_type == 'falling':
            trigger_value = 0
            rearm_value = 1
        else:
            msg = 'timeout_trigger_type must be "rising" or "falling", not "{}".'
            raise ValueError(msg.format(self.timeout_trigger_type))
        self.timeout_trigger = np.array([trigger_value], dtype=np.uint8)
        self.timeout_rearm = np.array([rearm_value], dtype=np.uint8)

    def shutdown(self):
        # Called by BLACS on worker shutdown; abort any tasks/threads.
        self.stop_tasks(True)

    def read_edges(self, npts, timeout=None):
        """Wait up to the given timeout in seconds for an edge on the wait monitor and
        and return the duration since the previous edge. Return None upon timeout."""
        samples_read = int32()
        # If no timeout, call read repeatedly with a 0.2 second timeout to ensure we
        # don't block indefinitely and can still abort.
        if timeout is None:
            read_timeout = 0.2
        else:
            read_timeout = timeout
        read_array = np.zeros(npts)
        while True:
            if self.shutting_down:
                raise RuntimeError('Stopped before expected number of samples acquired')
            try:
                self.CI_task.ReadCounterF64(
                    npts, read_timeout, read_array, npts, samples_read, None
                )
            except SamplesNotYetAvailableError:
                if timeout is None:
                    continue
                return None
            return read_array

    def wait_monitor(self):
        """Thread target: track the experiment's waits via counter edges,
        retriggering the clock on timeouts and posting completion events.
        Exceptions are saved for re-raising in transition_to_manual."""
        try:
            # Read edge times from the counter input task, indiciating the times of the
            # pulses that occur at the start of the experiment and after every wait. If a
            # timeout occurs, pulse the timeout output to force a resume of the master
            # pseudoclock. Save the resulting
            self.logger.debug('Wait monitor thread starting')
            with self.kill_lock:
                self.logger.debug('Waiting for start of experiment')
                # Wait for the pulse indicating the start of the experiment:
                if self.incomplete_sample_detection:
                    semiperiods = self.read_edges(1, timeout=None)
                else:
                    semiperiods = self.read_edges(2, timeout=None)
                self.logger.debug('Experiment started, got edges:' + str(semiperiods))
                # May have been one or two edges, depending on whether the device has
                # incomplete sample detection. We are only interested in the second one
                # anyway, it tells us how long the initial pulse was. Store the pulse width
                # for later, we will use it for making timeout pulses if necessary. Note
                # that the variable current_time is labscript time, so it will be reset
                # after each wait to the time of that wait plus pulse_width.
                current_time = pulse_width = semiperiods[-1]
                self.semiperiods.append(semiperiods[-1])
                # Alright, we're now a short way into the experiment.
                for wait in self.wait_table:
                    # How long until when the next wait should timeout?
                    timeout = wait['time'] + wait['timeout'] - current_time
                    timeout = max(timeout, 0)  # ensure non-negative
                    # Wait that long for the next pulse:
                    self.logger.debug('Waiting for pulse indicating end of wait')
                    semiperiods = self.read_edges(2, timeout)
                    # Did the wait finish of its own accord, or time out?
                    if semiperiods is None:
                        # It timed out. If there is a timeout device, send a trigger to
                        # resume the clock!
                        if self.DO_task is not None:
                            msg = """Wait timed out; retriggering clock with {:.3e} s
                                pulse ({} edge)"""
                            msg = msg.format(pulse_width, self.timeout_trigger_type)
                            self.logger.debug(dedent(msg))
                            self.send_resume_trigger(pulse_width)
                        else:
                            msg = """Specified wait timeout exceeded, but there is no
                                timeout device with which to resume the experiment.
                                Continuing to wait."""
                            self.logger.warning(dedent(msg))
                        # Keep waiting for the clock to resume:
                        self.logger.debug('Waiting for pulse indicating end of wait')
                        semiperiods = self.read_edges(2, timeout=None)
                    # Alright, now we're at the end of the wait.
                    self.semiperiods.extend(semiperiods)
                    self.logger.debug('Wait completed')
                    current_time = wait['time'] + semiperiods[-1]
                    # Inform any interested parties that a wait has completed:
                    postdata = _ensure_str(wait['label'])
                    self.wait_completed.post(self.h5_file, data=postdata)
                # Inform any interested parties that waits have all finished:
                self.logger.debug('All waits finished')
                self.all_waits_finished.post(self.h5_file)
        except Exception:
            self.logger.exception('Exception in wait monitor thread:')
            # Save the exception so it can be raised in transition_to_manual
            self.wait_monitor_thread_exception = sys.exc_info()

    def send_resume_trigger(self, pulse_width):
        """Pulse the timeout output to resume the master pseudoclock after a
        wait timeout, holding the trigger for pulse_width seconds."""
        written = int32()
        # Trigger:
        self.DO_task.WriteDigitalLines(
            1, True, 1, DAQmx_Val_GroupByChannel, self.timeout_trigger, written, None
        )
        # Wait however long we observed the first pulse of the experiment to be. In
        # practice this is likely to be negligible compared to the other software delays
        # here, but in case it is larger we'd better wait:
        time.sleep(pulse_width)
        # Rearm trigger:
        self.DO_task.WriteDigitalLines(
            1, True, 1, DAQmx_Val_GroupByChannel, self.timeout_rearm, written, None
        )

    def stop_tasks(self, abort):
        """Join the wait monitor thread (forcing early exit if abort=True),
        re-raise any exception it saved (unless aborting), then stop and clear
        the counter-input and timeout tasks."""
        self.logger.debug('stop_tasks')
        if self.wait_monitor_thread is not None:
            if abort:
                # This will cause the wait_monitor thread to raise an exception within a
                # short time, allowing us to join it before it would otherwise be done.
                self.shutting_down = True
            self.wait_monitor_thread.join()
            self.wait_monitor_thread = None
            self.shutting_down = False
            if not abort and self.wait_monitor_thread_exception is not None:
                # Raise any unexpected errors from the wait monitor thread:
                _reraise(*self.wait_monitor_thread_exception)
            self.wait_monitor_thread_exception = None
        if not abort:
            # Don't want errors about incomplete task to be raised if we are aborting:
            # NOTE(review): CI_task.StopTask() is called without a None check,
            # unlike DO_task below — if a shot had no waits (start_tasks never
            # called, CI_task is None) this would raise AttributeError;
            # confirm whether stop_tasks can be reached in that state.
            self.CI_task.StopTask()
            if self.DO_task is not None:
                self.DO_task.StopTask()
        if self.CI_task is not None:
            self.CI_task.ClearTask()
            self.CI_task = None
        if self.DO_task is not None:
            self.DO_task.ClearTask()
            self.DO_task = None
        self.logger.debug('finished stop_tasks')

    def start_tasks(self):
        """Create and start the semiperiod counter-input task, and (if a
        timeout device is configured) the timeout digital-output task."""
        # The counter acquisition task:
        self.CI_task = Task()
        CI_chan = self.MAX_name + '/' + self.wait_acq_connection
        # What is the longest time in between waits, plus the timeout of the
        # second wait?
        interwait_times = np.diff([0] + list(self.wait_table['time']))
        max_measure_time = max(interwait_times + self.wait_table['timeout'])
        # Allow for software delays in timeouts.
        max_measure_time += 1.0
        min_measure_time = self.min_semiperiod_measurement
        self.logger.debug(
            "CI measurement range is: min: %f max: %f",
            min_measure_time,
            max_measure_time,
        )
        self.CI_task.CreateCISemiPeriodChan(
            CI_chan, '', min_measure_time, max_measure_time, DAQmx_Val_Seconds, ""
        )
        num_edges = 2 * (len(self.wait_table) + 1)
        self.CI_task.CfgImplicitTiming(DAQmx_Val_ContSamps, num_edges)
        self.CI_task.StartTask()
        # The timeout task:
        if self.wait_timeout_MAX_name is not None:
            self.DO_task = Task()
            DO_chan = self.wait_timeout_MAX_name + '/' + self.wait_timeout_connection
            self.DO_task.CreateDOChan(DO_chan, "", DAQmx_Val_ChanForAllLines)
            # Ensure timeout trigger is armed:
            written = int32()
            # Writing autostarts the task:
            self.DO_task.WriteDigitalLines(
                1, True, 1, DAQmx_Val_GroupByChannel, self.timeout_rearm, written, None
            )

    def transition_to_buffered(self, device_name, h5file, initial_values, fresh):
        """Read the wait table from the shot file and, if there are waits,
        start the monitoring tasks and the wait monitor thread."""
        self.logger.debug('transition_to_buffered')
        self.h5_file = h5file
        with h5py.File(h5file, 'r') as hdf5_file:
            dataset = hdf5_file['waits']
            if len(dataset) == 0:
                # There are no waits. Do nothing.
                self.logger.debug('There are no waits, not transitioning to buffered')
                self.wait_table = None
                return {}
            self.wait_table = dataset[:]
        self.start_tasks()
        # An array to store the results of counter acquisition:
        self.semiperiods = []
        self.wait_monitor_thread = threading.Thread(target=self.wait_monitor)
        # Not a daemon thread, as it implements wait timeouts - we need it to stay alive
        # if other things die.
        self.wait_monitor_thread.start()
        self.logger.debug('finished transition to buffered')
        return {}

    def transition_to_manual(self, abort=False):
        """Stop the monitoring tasks and, unless aborting, compute each wait's
        duration from the measured semiperiods, save them to /data/waits and
        post the wait_durations_analysed event."""
        self.logger.debug('transition_to_manual')
        self.stop_tasks(abort)
        if not abort and self.wait_table is not None:
            # Let's work out how long the waits were. The absolute times of each edge on
            # the wait monitor were:
            edge_times = np.cumsum(self.semiperiods)
            # Now there was also a rising edge at t=0 that we didn't measure:
            edge_times = np.insert(edge_times, 0, 0)
            # Ok, and the even-indexed ones of these were rising edges.
            rising_edge_times = edge_times[::2]
            # Now what were the times between rising edges?
            periods = np.diff(rising_edge_times)
            # How does this compare to how long we expected there to be between the
            # start of the experiment and the first wait, and then between each pair of
            # waits? The difference will give us the waits' durations.
            resume_times = self.wait_table['time']
            # Again, include the start of the experiment, t=0:
            resume_times = np.insert(resume_times, 0, 0)
            run_periods = np.diff(resume_times)
            wait_durations = periods - run_periods
            waits_timed_out = wait_durations > self.wait_table['timeout']
            # Work out how long the waits were, save them, post an event saying so:
            dtypes = [
                ('label', 'a256'),
                ('time', float),
                ('timeout', float),
                ('duration', float),
                ('timed_out', bool),
            ]
            data = np.empty(len(self.wait_table), dtype=dtypes)
            data['label'] = self.wait_table['label']
            data['time'] = self.wait_table['time']
            data['timeout'] = self.wait_table['timeout']
            data['duration'] = wait_durations
            data['timed_out'] = waits_timed_out
            with h5py.File(self.h5_file, 'a') as hdf5_file:
                hdf5_file.create_dataset('/data/waits', data=data)
            self.wait_durations_analysed.post(self.h5_file)
        self.h5_file = None
        self.semiperiods = None
        return True

    def abort_buffered(self):
        # Abort is identical to transitioning to manual with abort=True:
        return self.transition_to_manual(True)

    def abort_transition_to_buffered(self):
        # Abort is identical to transitioning to manual with abort=True:
        return self.transition_to_manual(True)

    def program_manual(self, values):
        # This worker has no outputs to program; nothing to do.
        return {}
|
background_caching_job.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Module to build and run background source recording jobs.
For internal use only; no backwards-compatibility guarantees.
A background source recording job is a job that records events for all
recordable sources of a given pipeline. With Interactive Beam, one such job is
started when a pipeline run happens (which produces a main job in contrast to
the background source recording job) and meets the following conditions:
#. The pipeline contains recordable sources, configured through
interactive_beam.options.recordable_sources.
#. No such background job is running.
#. No such background job has completed successfully and the cached events are
still valid (invalidated when recordable sources change in the pipeline).
Once started, the background source recording job runs asynchronously until it
hits some recording limit configured in interactive_beam.options. Meanwhile,
the main job and future main jobs from the pipeline will run using the
deterministic replayable recorded events until they are invalidated.
"""
# pytype: skip-file
import logging
import threading
import time
import apache_beam as beam
from apache_beam.runners.interactive import interactive_environment as ie
from apache_beam.runners.interactive import utils
from apache_beam.runners.interactive.caching import streaming_cache
from apache_beam.runners.runner import PipelineState
_LOGGER = logging.getLogger(__name__)
class BackgroundCachingJob(object):
  """A simple abstraction that controls necessary components of a timed and
  space limited background source recording job.

  A background source recording job successfully completes source data
  recording in 2 conditions:

  #. The job is finite and runs into DONE state;
  #. The job is infinite but hits an interactive_beam.options configured limit
     and gets cancelled into CANCELLED/CANCELLING state.

  In both situations, the background source recording job should be treated as
  done successfully.
  """
  def __init__(self, pipeline_result, limiters):
    # PipelineResult of the background recording pipeline; guarded by
    # _result_lock because the checker thread below polls it concurrently.
    self._pipeline_result = pipeline_result
    self._result_lock = threading.RLock()
    # Daemon thread so it never blocks interpreter shutdown.
    self._condition_checker = threading.Thread(
        target=self._background_caching_job_condition_checker, daemon=True)

    # Limiters are checks s.t. if any are triggered then the background caching
    # job gets cancelled.
    self._limiters = limiters
    self._condition_checker.start()

  def _background_caching_job_condition_checker(self):
    # Polls twice a second: exits when the job reaches a terminal state on
    # its own, or cancels the job as soon as any limiter triggers.
    while True:
      with self._result_lock:
        if PipelineState.is_terminal(self._pipeline_result.state):
          break

      if self._should_end_condition_checker():
        self.cancel()
        break
      time.sleep(0.5)

  def _should_end_condition_checker(self):
    # True when at least one recording limiter has been triggered.
    return any([l.is_triggered() for l in self._limiters])

  def is_done(self):
    # Done means either the job terminated (DONE/CANCELLED), or a limiter
    # triggered a cancellation that is still in flight (CANCELLING).
    with self._result_lock:
      is_terminated = self._pipeline_result.state in (
          PipelineState.DONE, PipelineState.CANCELLED)
      is_triggered = self._should_end_condition_checker()
      is_cancelling = self._pipeline_result.state is PipelineState.CANCELLING
      return is_terminated or (is_triggered and is_cancelling)

  def is_running(self):
    # True only while the underlying pipeline is actively RUNNING.
    with self._result_lock:
      return self._pipeline_result.state is PipelineState.RUNNING

  def cancel(self):
    """Cancels this background source recording job.
    """
    with self._result_lock:
      if not PipelineState.is_terminal(self._pipeline_result.state):
        try:
          self._pipeline_result.cancel()
        except NotImplementedError:
          # Ignore the cancel invocation if it is never implemented by the
          # runner.
          pass

  @property
  def state(self):
    # Current PipelineState of the underlying background job.
    with self._result_lock:
      return self._pipeline_result.state
def attempt_to_run_background_caching_job(
    runner, user_pipeline, options=None, limiters=None):
  """Attempts to run a background source recording job for a user-defined
  pipeline.

  Returns True if a job was started, False otherwise.

  The pipeline result is automatically tracked by Interactive Beam in case
  future cancellation/cleanup is needed.
  """
  if not is_background_caching_job_needed(user_pipeline):
    return False

  # Tear down leftovers from any previous recording: a non-terminal job and
  # the gRPC server serving the test stream, if present.
  attempt_to_cancel_background_caching_job(user_pipeline)
  attempt_to_stop_test_stream_service(user_pipeline)

  # TODO(BEAM-8335): refactor background source recording job logic from
  # pipeline_instrument module to this module and aggregate tests.
  from apache_beam.runners.interactive import pipeline_instrument as instr

  runner_pipeline = beam.pipeline.Pipeline.from_runner_api(
      user_pipeline.to_runner_api(), runner, options)
  ie.current_env().add_derived_pipeline(user_pipeline, runner_pipeline)

  # Build and launch the recording pipeline derived from the user pipeline.
  caching_pipeline_proto = instr.build_pipeline_instrument(
      runner_pipeline).background_caching_pipeline_proto()
  background_caching_job_result = beam.pipeline.Pipeline.from_runner_api(
      caching_pipeline_proto, runner, options).run()

  recording_limiters = (
      limiters or ie.current_env().options.capture_control.limiters())
  ie.current_env().set_background_caching_job(
      user_pipeline,
      BackgroundCachingJob(
          background_caching_job_result, limiters=recording_limiters))
  return True
def is_background_caching_job_needed(user_pipeline):
  """Determines if a background source recording job needs to be started.

  It does several state checks and recording state changes throughout the
  process. It is not idempotent to simplify the usage.
  """
  job = ie.current_env().get_background_caching_job(user_pipeline)
  # Both helpers below are side-effecting (cache-manager wrapping, signature
  # tracking) and must run unconditionally, in this order.
  need_cache = has_source_to_cache(user_pipeline)
  cache_changed = is_source_to_cache_changed(user_pipeline)

  if not need_cache:
    return False
  # When recording replay is disabled, cache is always needed for recordable
  # sources (if any).
  if not ie.current_env().options.enable_recording_replay:
    from apache_beam.runners.interactive.options import capture_control
    capture_control.evict_captured_data()
    return True
  # First job ever started from this pipeline.
  if not job:
    return True
  # Sources changed: the previous job is invalidated.
  if cache_changed:
    return True
  # A previous job exists and is still valid only if it either completed
  # successfully (DONE) or is still running; otherwise a new job is needed.
  return not (job.is_done() or job.is_running())
def is_cache_complete(pipeline_id):
  # type: (str) -> bool

  """Returns True if the background cache for the given pipeline is done.
  """
  user_pipeline = ie.current_env().pipeline_id_to_pipeline(pipeline_id)
  job = ie.current_env().get_background_caching_job(user_pipeline)
  # job may be None when no background job was ever started for the pipeline.
  is_done = job and job.is_done()
  # Read-only check: do not re-track the signature here.
  cache_changed = is_source_to_cache_changed(
      user_pipeline, update_cached_source_signature=False)

  # Stop reading from the cache if the background job is done or the underlying
  # cache signature changed that requires a new background source recording
  # job.
  return is_done or cache_changed
def has_source_to_cache(user_pipeline):
  """Returns True if the user-defined pipeline contains any source that needs
  to be cached.

  As a side effect, when such sources exist, the cache manager held by the
  current interactive environment is wrapped into a StreamingCache once (the
  wrapping doesn't invalidate existing cache in any way). This helps decide
  whether a background source recording job is needed to write cache for
  sources and whether a test stream service is needed to serve the cache.
  """
  # TODO(BEAM-8335): we temporarily only cache replaceable unbounded sources.
  # Add logic for other cacheable sources here when they are available.
  if not utils.has_unbounded_sources(user_pipeline):
    return False

  current_cm = ie.current_env().get_cache_manager(
      user_pipeline, create_if_absent=True)
  if not isinstance(current_cm, streaming_cache.StreamingCache):
    # Wrap the existing file-based cache manager into a streaming cache,
    # preserving its directory and coders.
    file_based_cm = ie.current_env().get_cache_manager(user_pipeline)
    ie.current_env().set_cache_manager(
        streaming_cache.StreamingCache(
            file_based_cm._cache_dir,
            is_cache_complete=is_cache_complete,
            sample_resolution_sec=1.0,
            saved_pcoders=file_based_cm._saved_pcoders),
        user_pipeline)
  return True
def attempt_to_cancel_background_caching_job(user_pipeline):
  """Cancels the background source recording job of the given user-defined
  pipeline if one exists; otherwise this is a no-op.
  """
  job = ie.current_env().get_background_caching_job(user_pipeline)
  if not job:
    return
  job.cancel()
def attempt_to_stop_test_stream_service(user_pipeline):
  """Stops the gRPC server/service serving the test stream if one is running
  for the given user-defined pipeline; otherwise this is a no-op.
  """
  if not is_a_test_stream_service_running(user_pipeline):
    return
  # Evicting returns the controller, which is then shut down.
  controller = ie.current_env().evict_test_stream_service_controller(
      user_pipeline)
  controller.stop()
def is_a_test_stream_service_running(user_pipeline):
  """Checks whether a gRPC server/service is running that serves the test
  stream to any job started from the given user_pipeline.
  """
  controller = ie.current_env().get_test_stream_service_controller(
      user_pipeline)
  return controller is not None
def is_source_to_cache_changed(
    user_pipeline, update_cached_source_signature=True):
  """Determines if there is any change in the sources that need to be cached
  used by the user-defined pipeline.

  Due to the expensiveness of computations and for the simplicity of usage, this
  function is not idempotent because Interactive Beam automatically discards
  previously tracked signature of transforms and tracks the current signature of
  transforms for the user-defined pipeline if there is any change.

  When it's True, there is addition/deletion/mutation of source transforms that
  requires a new background source recording job.
  """
  # By default gets empty set if the user_pipeline is first time seen because
  # we can treat it as adding transforms.
  recorded_signature = ie.current_env().get_cached_source_signature(
      user_pipeline)
  current_signature = extract_source_to_cache_signature(user_pipeline)
  # Changed when the current sources are not all covered by the recorded ones.
  is_changed = not current_signature.issubset(recorded_signature)
  # The computation of extract_unbounded_source_signature is expensive, track on
  # change by default.
  if is_changed and update_cached_source_signature:
    options = ie.current_env().options
    # No info needed when recording replay is disabled.
    if options.enable_recording_replay:
      if not recorded_signature:

        def sizeof_fmt(num, suffix='B'):
          # Human-readable size with decimal (1000) steps.
          # NOTE(review): the final fallback suffix 'Yi' is a binary-prefix
          # spelling while the loop divides by 1000 — confirm intended.
          for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
            if abs(num) < 1000.0:
              return "%3.1f%s%s" % (num, unit, suffix)
            num /= 1000.0
          return "%.1f%s%s" % (num, 'Yi', suffix)

        _LOGGER.info(
            'Interactive Beam has detected unbounded sources in your pipeline. '
            'In order to have a deterministic replay, a segment of data will '
            'be recorded from all sources for %s seconds or until a total of '
            '%s have been written to disk.',
            options.recording_duration.total_seconds(),
            sizeof_fmt(options.recording_size_limit))
      else:
        _LOGGER.info(
            'Interactive Beam has detected a new streaming source was '
            'added to the pipeline. In order for the cached streaming '
            'data to start at the same time, all recorded data has been '
            'cleared and a new segment of data will be recorded.')
    # Invalidate previously recorded data and track the new signature.
    ie.current_env().cleanup(user_pipeline)
    ie.current_env().set_cached_source_signature(
        user_pipeline, current_signature)
    ie.current_env().add_user_pipeline(user_pipeline)
  return is_changed
def extract_source_to_cache_signature(user_pipeline):
  """Extracts a set of signature for sources that need to be cached in the
  user-defined pipeline.

  A signature is a str representation of urn and payload of a source.
  """
  # TODO(BEAM-8335): we temporarily only cache replaceable unbounded sources.
  # Add logic for other cacheable sources here when they are available.
  applied_unbounded_sources = utils.unbounded_sources(user_pipeline)
  _, context = user_pipeline.to_runner_api(return_context=True)
  # One signature string per distinct source transform.
  return {
      str(applied.transform.to_runner_api(context))
      for applied in applied_unbounded_sources
  }
|
SerialClient.py | #!/usr/bin/env python
#####################################################################
# Software License Agreement (BSD License)
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__author__ = "mferguson@willowgarage.com (Michael Ferguson)"
# Missing imports added: this module uses sys (load_pkg_module, txStopRequest)
# and errno (RosSerialServer.inWaiting) but never imported them.
import errno
import sys

# Existing imports kept in their original order: the wildcard imports make
# reordering unsafe (later imports may deliberately shadow earlier names).
import roslib
import rospy
import thread
import multiprocessing
from serial import *
import StringIO
from std_msgs.msg import Time
from rosserial_msgs.msg import *
from rosserial_msgs.srv import *
import diagnostic_msgs.msg
import socket
import time
import struct
import signal
def load_pkg_module(package, directory):
    """Import a ROS package's generated submodule (e.g. <package>.msg).

    Bootstraps the package via roslib.load_manifest() when its source
    directory is not already on sys.path.

    Returns the imported package module, or None when the import fails.
    """
    #check if its in the python path
    in_path = False
    path = sys.path
    pkg_src = package+'/src' #check for the source directory which
                             # is added to path by roslib boostrapping
    for entry in sys.path:
        if pkg_src in entry:
            in_path = True
    if not in_path:
        roslib.load_manifest(package)
    try:
        m = __import__( package + '.' + directory )
    except Exception:
        # Was a bare 'except:': narrowed so SystemExit/KeyboardInterrupt are
        # no longer swallowed, while still logging any import-time failure.
        rospy.logerr( "Cannot import package : %s"% package )
        rospy.logerr( "sys.path was " + str(path) )
        return None
    return m
def load_message(package, message):
    """Return the message class for <package>/<message>."""
    pkg = load_pkg_module(package, 'msg')
    msg_module = getattr(pkg, 'msg')
    return getattr(msg_module, message)
def load_service(package, service):
    """Return (service class, request class, response class) for
    <package>/<service>."""
    srv_module = getattr(load_pkg_module(package, 'srv'), 'srv')
    srv = getattr(srv_module, service)
    request_cls = getattr(srv_module, service + "Request")
    response_cls = getattr(srv_module, service + "Response")
    return srv, request_cls, response_cls
class Publisher:
    """
    Publisher forwards messages from the serial device to ROS.
    """
    def __init__(self, topic_info):
        """ Create a new publisher.

        topic_info: a TopicInfo message (topic name, message type, md5sum)
        negotiated with the serial device.
        Raises Exception when the device's md5sum does not match the locally
        loaded message definition.
        """
        self.topic = topic_info.topic_name
        # find message type
        package, message = topic_info.message_type.split('/')
        self.message = load_message(package, message)
        if self.message._md5sum == topic_info.md5sum:
            self.publisher = rospy.Publisher(self.topic, self.message, queue_size=2)
        else:
            raise Exception('Checksum does not match: ' + self.message._md5sum + ',' + topic_info.md5sum)

    def handlePacket(self, data):
        """ Forward message to ROS network. """
        # data is the serialized message payload received from the device.
        m = self.message()
        m.deserialize(data)
        self.publisher.publish(m)
class Subscriber:
    """
    Subscriber forwards messages from ROS to the serial device.
    """
    def __init__(self, topic_info, parent):
        """topic_info: TopicInfo negotiated with the device; parent: the
        SerialClient used to forward serialized messages to the device.
        Raises Exception when md5 checksums do not match."""
        self.topic = topic_info.topic_name
        self.id = topic_info.topic_id
        self.parent = parent
        # find message type
        package, message = topic_info.message_type.split('/')
        self.message = load_message(package, message)
        if self.message._md5sum == topic_info.md5sum:
            self.subscriber = rospy.Subscriber(self.topic, self.message, self.callback)
        else:
            raise Exception('Checksum does not match: ' + self.message._md5sum + ',' + topic_info.md5sum)

    def unregister(self):
        # Fix: unregister() used to be defined twice in this class; the second
        # (silent) definition shadowed this one and dropped the log message.
        # The duplicate is removed and the logging variant kept.
        rospy.loginfo("Removing subscriber: %s", self.topic)
        self.subscriber.unregister()

    def callback(self, msg):
        """ Forward message to serial device. """
        data_buffer = StringIO.StringIO()
        msg.serialize(data_buffer)
        self.parent.send(self.id, data_buffer.getvalue())
class ServiceServer:
    """
    ServiceServer responds to requests from ROS.
    """
    def __init__(self, topic_info, parent):
        self.topic = topic_info.topic_name
        self.parent = parent
        # find message type
        package, service = topic_info.message_type.split('/')
        s = load_pkg_module(package, 'srv')
        s = getattr(s, 'srv')
        self.mreq = getattr(s, service+"Request")
        self.mres = getattr(s, service+"Response")
        srv = getattr(s, service)
        self.service = rospy.Service(self.topic, srv, self.callback)
        # response message
        self.data = None
        # NOTE(review): self.id (used by callback below) is not set here; it
        # appears to be assigned externally by
        # SerialClient.setupServiceServerSubscriber — confirm before calling
        # the service earlier than that.

    def unregister(self):
        rospy.loginfo("Removing service: %s", self.topic)
        self.service.shutdown()

    def callback(self, req):
        """ Forward request to serial device. """
        data_buffer = StringIO.StringIO()
        req.serialize(data_buffer)
        self.response = None
        if self.parent.send(self.id, data_buffer.getvalue()) >= 0:
            # Busy-waits until handlePacket() stores the device's response.
            while self.response == None:
                pass
        return self.response

    def handlePacket(self, data):
        """ Forward response to ROS network. """
        r = self.mres()
        r.deserialize(data)
        self.response = r
class ServiceClient:
    """
    ServiceClient forwards service requests from the serial device to the
    ROS network and sends the response back to the device.
    """
    def __init__(self, topic_info, parent):
        self.topic = topic_info.topic_name
        self.parent = parent
        # find message type
        package, service = topic_info.message_type.split('/')
        s = load_pkg_module(package, 'srv')
        s = getattr(s, 'srv')
        self.mreq = getattr(s, service+"Request")
        self.mres = getattr(s, service+"Response")
        srv = getattr(s, service)
        rospy.loginfo("Starting service client, waiting for service '" + self.topic + "'")
        # Blocks until the ROS service becomes available.
        rospy.wait_for_service(self.topic)
        self.proxy = rospy.ServiceProxy(self.topic, srv)
        # NOTE(review): self.id (used by handlePacket below) is not set here;
        # it appears to be assigned externally by
        # SerialClient.setupServiceClientSubscriber — confirm.

    def handlePacket(self, data):
        """ Forward request to ROS network. """
        req = self.mreq()
        req.deserialize(data)
        # call service proxy
        resp = self.proxy(req)
        # serialize and publish
        data_buffer = StringIO.StringIO()
        resp.serialize(data_buffer)
        self.parent.send(self.id, data_buffer.getvalue())
class RosSerialServer:
    """
    RosSerialServer waits for a socket connection then passes itself, forked as a
    new process, to SerialClient which uses it as a serial port. It continues to listen
    for additional connections. Each forked process is a new ros node, and proxies ros
    operations (e.g. publish/subscribe) from its connection to the rest of ros.
    """
    def __init__(self, tcp_portnum, fork_server=False):
        # tcp_portnum: TCP port to listen on.
        # fork_server: when True, each accepted connection is served by a
        # separate forked process instead of this one.
        print "Fork_server is: ", fork_server
        self.tcp_portnum = tcp_portnum
        self.fork_server = fork_server

    def listen(self):
        """Accept connections forever, serving each via SerialClient."""
        self.serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        #bind the socket to a public host, and a well-known port
        self.serversocket.bind(("", self.tcp_portnum)) #become a server socket
        self.serversocket.listen(1)

        while True:
            #accept connections
            print "waiting for socket connection"
            (clientsocket, address) = self.serversocket.accept()

            #now do something with the clientsocket
            rospy.loginfo("Established a socket connection from %s on port %s" % (address))
            self.socket = clientsocket
            self.isConnected = True

            if (self.fork_server == True): # if configured to launch server in a separate process
                rospy.loginfo("Forking a socket server process")
                # NOTE(review): args=(address) is NOT a one-tuple, so the
                # (host, port) pair from accept() gets unpacked into the two
                # parameters of startSocketServer — confirm intended before
                # changing (args=(address,) would break the current call).
                process = multiprocessing.Process(target=self.startSocketServer, args=(address))
                process.daemon = True
                process.start()
                rospy.loginfo("launched startSocketServer")
            else:
                rospy.loginfo("calling startSerialClient")
                self.startSerialClient()
                rospy.loginfo("startSerialClient() exited")

    def startSerialClient(self):
        # Runs a SerialClient over this object (it quacks like a serial port:
        # read/write/flushInput/inWaiting) until the connection drops, then
        # unregisters everything the client created.
        client = SerialClient(self)
        try:
            client.run()
        except KeyboardInterrupt:
            pass
        except RuntimeError:
            rospy.loginfo("RuntimeError exception caught")
            self.isConnected = False
        except socket.error:
            rospy.loginfo("socket.error exception caught")
            self.isConnected = False
        finally:
            self.socket.close()
            for sub in client.subscribers.values():
                sub.unregister()
            for srv in client.services.values():
                srv.unregister()
            #pass

    def startSocketServer(self, port, address):
        # Entry point of the forked per-connection process; starts a fresh
        # ROS node named after the remote address.
        rospy.loginfo("starting ROS Serial Python Node serial_node-%r" % (address,))
        rospy.init_node("serial_node_%r" % (address,))
        self.startSerialClient()

    def flushInput(self):
        # Serial-port API shim: nothing to flush on a socket.
        pass

    def write(self, data):
        # Serial-port API shim: send all bytes over the socket.
        if (self.isConnected == False):
            return
        length = len(data)
        totalsent = 0

        while totalsent < length:
            sent = self.socket.send(data[totalsent:])
            if sent == 0:
                raise RuntimeError("RosSerialServer.write() socket connection broken")
            totalsent = totalsent + sent

    def read(self, rqsted_length):
        # Serial-port API shim: block until exactly rqsted_length bytes read.
        self.msg = ''
        if (self.isConnected == False):
            return self.msg

        while len(self.msg) < rqsted_length:
            chunk = self.socket.recv(rqsted_length - len(self.msg))
            if chunk == '':
                raise RuntimeError("RosSerialServer.read() socket connection broken")
            self.msg = self.msg + chunk
        return self.msg

    def close(self):
        # NOTE(review): self.port is never assigned anywhere on this class;
        # this likely should be self.socket.close() — confirm before relying
        # on close() (it would raise AttributeError as written).
        self.port.close()

    def inWaiting(self):
        try: # the caller checks just for <1, so we'll peek at just one byte
            chunk = self.socket.recv(1, socket.MSG_DONTWAIT|socket.MSG_PEEK)
            if chunk == '':
                raise RuntimeError("RosSerialServer.inWaiting() socket connection broken")
            return len(chunk)
        except socket.error, e:
            if e.args[0] == errno.EWOULDBLOCK:
                return 0
            raise
class SerialClient:
"""
ServiceServer responds to requests from the serial device.
"""
def __init__(self, port=None, baud=57600, timeout=5.0):
""" Initialize node, connect to bus, attempt to negotiate topics. """
self.mutex = thread.allocate_lock()
self.lastsync = rospy.Time(0)
self.lastsync_lost = rospy.Time(0)
self.timeout = timeout
self.synced = False
self.pub_diagnostics = rospy.Publisher('/diagnostics', diagnostic_msgs.msg.DiagnosticArray, queue_size=2)
if port== None:
# no port specified, listen for any new port?
pass
elif hasattr(port, 'read'):
#assume its a filelike object
self.port=port
else:
# open a specific port
try:
self.port = Serial(port, baud, timeout=self.timeout*0.5)
except SerialException as e:
rospy.logerr("Error opening serial: %s", e)
rospy.signal_shutdown("Error opening serial: %s" % e)
raise SystemExit
self.port.timeout = 0.05 # Edit the port timeout
time.sleep(0.1) # Wait for ready (patch for Uno)
# hydro introduces protocol ver2 which must match node_handle.h
# The protocol version is sent as the 2nd sync byte emitted by each end
self.protocol_ver1 = '\xff'
self.protocol_ver2 = '\xfe'
self.protocol_ver = self.protocol_ver2
self.publishers = dict() # id:Publishers
self.subscribers = dict() # topic:Subscriber
self.services = dict() # topic:Service
self.buffer_out = -1
self.buffer_in = -1
self.callbacks = dict()
# endpoints for creating new pubs/subs
self.callbacks[TopicInfo.ID_PUBLISHER] = self.setupPublisher
self.callbacks[TopicInfo.ID_SUBSCRIBER] = self.setupSubscriber
# service client/servers have 2 creation endpoints (a publisher and a subscriber)
self.callbacks[TopicInfo.ID_SERVICE_SERVER+TopicInfo.ID_PUBLISHER] = self.setupServiceServerPublisher
self.callbacks[TopicInfo.ID_SERVICE_SERVER+TopicInfo.ID_SUBSCRIBER] = self.setupServiceServerSubscriber
self.callbacks[TopicInfo.ID_SERVICE_CLIENT+TopicInfo.ID_PUBLISHER] = self.setupServiceClientPublisher
self.callbacks[TopicInfo.ID_SERVICE_CLIENT+TopicInfo.ID_SUBSCRIBER] = self.setupServiceClientSubscriber
# custom endpoints
self.callbacks[TopicInfo.ID_PARAMETER_REQUEST] = self.handleParameterRequest
self.callbacks[TopicInfo.ID_LOG] = self.handleLoggingRequest
self.callbacks[TopicInfo.ID_TIME] = self.handleTimeRequest
rospy.sleep(2.0) # TODO
self.requestTopics()
self.lastsync = rospy.Time.now()
signal.signal(signal.SIGINT, self.txStopRequest)
def requestTopics(self):
""" Determine topics to subscribe/publish. """
self.port.flushInput()
# request topic sync
self.port.write("\xff" + self.protocol_ver + "\x00\x00\xff\x00\x00\xff")
def txStopRequest(self, signal, frame):
""" send stop tx request to arduino when receive SIGINT(Ctrl-c)"""
self.port.flushInput()
self.port.write("\xff" + self.protocol_ver + "\x00\x00\xff\x0b\x00\xf4")
# tx_stop_request is x0b
rospy.loginfo("Send tx stop request")
sys.exit(0)
def tryRead(self, length):
try:
bytes_read = self.port.read(length)
if len(bytes_read) < length:
rospy.logwarn("Serial Port read returned short (expected %d bytes, received %d instead)."
% (length, len(bytes_read)))
raise IOError()
return bytes_read
except Exception as e:
rospy.logwarn("Serial Port read failure: %s", e)
raise IOError()
def run(self):
""" Forward recieved messages to appropriate publisher. """
data = ''
while not rospy.is_shutdown():
if (rospy.Time.now() - self.lastsync).to_sec() > (self.timeout):
if (self.synced == True):
rospy.logerr("Lost sync with device, restarting...")
else:
rospy.logerr("Unable to sync with device; possible link problem or link software version mismatch such as hydro rosserial_python with groovy Arduino")
self.lastsync_lost = rospy.Time.now()
self.sendDiagnostics(diagnostic_msgs.msg.DiagnosticStatus.ERROR, "no sync with device")
self.requestTopics()
self.lastsync = rospy.Time.now()
# This try-block is here because we make multiple calls to read(). Any one of them can throw
# an IOError if there's a serial problem or timeout. In that scenario, a single handler at the
# bottom attempts to reconfigure the topics.
try:
if self.port.inWaiting() < 1:
time.sleep(0.001)
continue
flag = [0,0]
flag[0] = self.tryRead(1)
if (flag[0] != '\xff'):
continue
flag[1] = self.tryRead(1)
if ( flag[1] != self.protocol_ver):
self.sendDiagnostics(diagnostic_msgs.msg.DiagnosticStatus.ERROR, "Mismatched protocol version in packet: lost sync or rosserial_python is from different ros release than the rosserial client")
rospy.logerr("Mismatched protocol version in packet: lost sync or rosserial_python is from different ros release than the rosserial client")
protocol_ver_msgs = {'\xff': 'Rev 0 (rosserial 0.4 and earlier)', '\xfe': 'Rev 1 (rosserial 0.5+)', '\xfd': 'Some future rosserial version'}
if (flag[1] in protocol_ver_msgs):
found_ver_msg = 'Protocol version of client is ' + protocol_ver_msgs[flag[1]]
else:
found_ver_msg = "Protocol version of client is unrecognized"
rospy.loginfo("%s, expected %s" % (found_ver_msg, protocol_ver_msgs[self.protocol_ver]))
continue
msg_len_bytes = self.tryRead(2)
msg_length, = struct.unpack("<h", msg_len_bytes)
# checksum of msg_len
msg_len_chk = self.tryRead(1)
msg_len_checksum = sum(map(ord, msg_len_bytes)) + ord(msg_len_chk)
if msg_len_checksum % 256 != 255:
rospy.loginfo("wrong checksum for msg length, length %d" %(msg_length))
rospy.loginfo("chk is %d" % ord(msg_len_chk))
continue
# topic id (2 bytes)
topic_id_header = self.tryRead(2)
topic_id, = struct.unpack("<h", topic_id_header)
try:
msg = self.tryRead(msg_length)
except IOError:
self.sendDiagnostics(diagnostic_msgs.msg.DiagnosticStatus.ERROR, "Packet Failed : Failed to read msg data")
rospy.loginfo("Packet Failed : Failed to read msg data")
rospy.loginfo("msg len is %d",len(msg))
raise
# checksum for topic id and msg
chk = self.tryRead(1)
checksum = sum(map(ord, topic_id_header) ) + sum(map(ord, msg)) + ord(chk)
if checksum % 256 == 255:
self.synced = True
try:
self.callbacks[topic_id](msg)
except KeyError:
rospy.logerr("Tried to publish before configured, topic id %d" % topic_id)
rospy.sleep(0.001)
else:
rospy.loginfo("wrong checksum for topic id and msg")
except IOError:
# One of the read calls had an issue. Just to be safe, request that the client
# reinitialize their topics.
self.requestTopics()
def setPublishSize(self, bytes):
if self.buffer_out < 0:
self.buffer_out = bytes
rospy.loginfo("Note: publish buffer size is %d bytes" % self.buffer_out)
def setSubscribeSize(self, bytes):
if self.buffer_in < 0:
self.buffer_in = bytes
rospy.loginfo("Note: subscribe buffer size is %d bytes" % self.buffer_in)
def setupPublisher(self, data):
""" Register a new publisher. """
try:
msg = TopicInfo()
msg.deserialize(data)
pub = Publisher(msg)
# test if topic id already in use
if msg.topic_id in self.publishers:
rospy.logwarn("Setting up duplicate topic %d" % msg.topic_id)
self.publishers[msg.topic_id] = pub
self.callbacks[msg.topic_id] = pub.handlePacket
self.setPublishSize(msg.buffer_size)
rospy.loginfo("Setup publisher on %s [%s] as id %d" % (msg.topic_name, msg.message_type, msg.topic_id) )
except Exception as e:
rospy.logerr("Creation of publisher failed: %s", e)
def setupSubscriber(self, data):
""" Register a new subscriber. """
try:
msg = TopicInfo()
msg.deserialize(data)
# test if topic id already registered
if msg.topic_id in [sub.id for sub in self.subscribers.values()]:
rospy.logwarn("Ignored duplicate subscriber on %s [%s] as id %d" % (msg.topic_name, msg.message_type, msg.topic_id) )
return
if not msg.topic_name in self.subscribers.keys():
sub = Subscriber(msg, self)
self.subscribers[msg.topic_name] = sub
self.setSubscribeSize(msg.buffer_size)
rospy.loginfo("Setup subscriber on %s [%s]" % (msg.topic_name, msg.message_type) )
elif msg.message_type != self.subscribers[msg.topic_name].message._type:
old_message_type = self.subscribers[msg.topic_name].message._type
self.subscribers[msg.topic_name].unregister()
sub = Subscriber(msg, self)
self.subscribers[msg.topic_name] = sub
self.setSubscribeSize(msg.buffer_size)
rospy.loginfo("Change the message type of subscriber on %s from [%s] to [%s]" % (msg.topic_name, old_message_type, msg.message_type) )
except Exception as e:
rospy.logerr("Creation of subscriber failed: %s", e)
def setupServiceServerPublisher(self, data):
""" Register a new service server. """
try:
msg = TopicInfo()
msg.deserialize(data)
self.setPublishSize(msg.buffer_size)
try:
srv = self.services[msg.topic_name]
except:
srv = ServiceServer(msg, self)
rospy.loginfo("Setup service server on %s [%s]" % (msg.topic_name, msg.message_type) )
self.services[msg.topic_name] = srv
if srv.mres._md5sum == msg.md5sum:
self.callbacks[msg.topic_id] = srv.handlePacket
else:
raise Exception('Checksum does not match: ' + srv.mres._md5sum + ',' + msg.md5sum)
except Exception as e:
rospy.logerr("Creation of service server failed: %s", e)
def setupServiceServerSubscriber(self, data):
""" Register a new service server. """
try:
msg = TopicInfo()
msg.deserialize(data)
self.setSubscribeSize(msg.buffer_size)
try:
srv = self.services[msg.topic_name]
except:
srv = ServiceServer(msg, self)
rospy.loginfo("Setup service server on %s [%s]" % (msg.topic_name, msg.message_type) )
self.services[msg.topic_name] = srv
if srv.mreq._md5sum == msg.md5sum:
srv.id = msg.topic_id
else:
raise Exception('Checksum does not match: ' + srv.mreq._md5sum + ',' + msg.md5sum)
except Exception as e:
rospy.logerr("Creation of service server failed: %s", e)
def setupServiceClientPublisher(self, data):
""" Register a new service client. """
try:
msg = TopicInfo()
msg.deserialize(data)
self.setPublishSize(msg.buffer_size)
try:
srv = self.services[msg.topic_name]
except:
srv = ServiceClient(msg, self)
rospy.loginfo("Setup service client on %s [%s]" % (msg.topic_name, msg.message_type) )
self.services[msg.topic_name] = srv
if srv.mreq._md5sum == msg.md5sum:
self.callbacks[msg.topic_id] = srv.handlePacket
else:
raise Exception('Checksum does not match: ' + srv.mreq._md5sum + ',' + msg.md5sum)
except Exception as e:
rospy.logerr("Creation of service client failed: %s", e)
def setupServiceClientSubscriber(self, data):
""" Register a new service client. """
try:
msg = TopicInfo()
msg.deserialize(data)
self.setSubscribeSize(msg.buffer_size)
try:
srv = self.services[msg.topic_name]
except:
srv = ServiceClient(msg, self)
rospy.loginfo("Setup service client on %s [%s]" % (msg.topic_name, msg.message_type) )
self.services[msg.topic_name] = srv
if srv.mres._md5sum == msg.md5sum:
srv.id = msg.topic_id
else:
raise Exception('Checksum does not match: ' + srv.mres._md5sum + ',' + msg.md5sum)
except Exception as e:
rospy.logerr("Creation of service client failed: %s", e)
def handleTimeRequest(self, data):
""" Respond to device with system time. """
t = Time()
t.data = rospy.Time.now()
data_buffer = StringIO.StringIO()
t.serialize(data_buffer)
self.send( TopicInfo.ID_TIME, data_buffer.getvalue() )
self.lastsync = rospy.Time.now()
def handleParameterRequest(self, data):
""" Send parameters to device. Supports only simple datatypes and arrays of such. """
req = RequestParamRequest()
req.deserialize(data)
resp = RequestParamResponse()
try:
param = rospy.get_param(req.name)
except KeyError:
rospy.logerr("Parameter %s does not exist"%req.name)
return
if param == None:
rospy.logerr("Parameter %s does not exist"%req.name)
return
if (type(param) == dict):
rospy.logerr("Cannot send param %s because it is a dictionary"%req.name)
return
if (type(param) != list):
param = [param]
#check to make sure that all parameters in list are same type
t = type(param[0])
for p in param:
if t!= type(p):
rospy.logerr('All Paramers in the list %s must be of the same type'%req.name)
return
if (t == int):
resp.ints= param
if (t == float):
resp.floats=param
if (t == str):
resp.strings = param
data_buffer = StringIO.StringIO()
resp.serialize(data_buffer)
self.send(TopicInfo.ID_PARAMETER_REQUEST, data_buffer.getvalue())
def handleLoggingRequest(self, data):
""" Forward logging information from serial device into ROS. """
msg = Log()
msg.deserialize(data)
if (msg.level == Log.ROSDEBUG):
rospy.logdebug(msg.msg)
elif(msg.level== Log.INFO):
rospy.loginfo(msg.msg)
elif(msg.level== Log.WARN):
rospy.logwarn(msg.msg)
elif(msg.level== Log.ERROR):
rospy.logerr(msg.msg)
elif(msg.level==Log.FATAL):
rospy.logfatal(msg.msg)
    def send(self, topic, msg):
        """ Send a message on a particular topic to the device.

        Returns the message length on success, or -1 if the message was
        dropped because it exceeds the device's receive buffer.
        """
        # The mutex serializes writes so frames from different publishers
        # cannot interleave on the port.
        with self.mutex:
            length = len(msg)
            if self.buffer_in > 0 and length > self.buffer_in:
                rospy.logerr("Message from ROS network dropped: message larger than buffer.")
                # NOTE(review): looks like leftover debug output -- consider
                # routing the payload through rospy logging instead.
                print msg
                return -1
            else:
                #modified frame : header(2 bytes) + msg_len(2 bytes) + msg_len_chk(1 byte) + topic_id(2 bytes) + msg(x bytes) + msg_topic_id_chk(1 byte)
                # second byte of header is protocol version
                # Checksums are 255 minus the byte-sum modulo 256 (one's
                # complement style), matching the rosserial wire protocol.
                msg_len_checksum = 255 - ( ((length&255) + (length>>8))%256 )
                msg_checksum = 255 - ( ((topic&255) + (topic>>8) + sum([ord(x) for x in msg]))%256 )
                data = "\xff" + self.protocol_ver + chr(length&255) + chr(length>>8) + chr(msg_len_checksum) + chr(topic&255) + chr(topic>>8)
                data = data + msg + chr(msg_checksum)
                #print "sending msg on topic %d at %f"%(topic, time.time())
                self.port.write(data)
                return length
def sendDiagnostics(self, level, msg_text):
msg = diagnostic_msgs.msg.DiagnosticArray()
status = diagnostic_msgs.msg.DiagnosticStatus()
status.name = "rosserial_python"
msg.header.stamp = rospy.Time.now()
msg.status.append(status)
status.message = msg_text
status.level = level
status.values.append(diagnostic_msgs.msg.KeyValue())
status.values[0].key="last sync"
if self.lastsync.to_sec()>0:
status.values[0].value=time.ctime(self.lastsync.to_sec())
else:
status.values[0].value="never"
status.values.append(diagnostic_msgs.msg.KeyValue())
status.values[1].key="last sync lost"
status.values[1].value=time.ctime(self.lastsync_lost.to_sec())
self.pub_diagnostics.publish(msg)
|
run_tests.py | #!/usr/bin/env python
# Copyright 2013 splinter authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# -*- coding: utf-8 -*-
import argparse
import sys
import unittest
from multiprocessing import Process
from urllib import urlopen
from tests import TESTS_ROOT
from tests.fake_webapp import start_flask_app, EXAMPLE_APP
# Command-line interface: select modules, stop on first failure, verbosity.
parser = argparse.ArgumentParser('Run splinter tests')
parser.add_argument('-w', '--which', action='store')
parser.add_argument('-f', '--failfast', action='store_true')
parser.add_argument('-v', '--verbosity', type=int, default=1)
# Simple attribute-bag namespace for module-level mutable state.
class Env(object):
    pass
# Holds the test-server subprocess handle and the address it listens on.
env = Env()
env.process = None
env.host, env.port = 'localhost', 5000
def wait_until_start():
    """Block until the test web app answers at EXAMPLE_APP.

    Polls the URL until any successful response is returned. A 404
    response raises, since that means the server is up but the expected
    routes are missing.
    """
    import time
    while True:
        try:
            results = urlopen(EXAMPLE_APP)
            if results.code == 404:
                raise Exception('%s returned unexpected 404' % EXAMPLE_APP)
            break
        except IOError:
            # Server not accepting connections yet; back off briefly
            # instead of busy-spinning at 100% CPU.
            time.sleep(0.05)
def wait_until_stop():
    """Block until the test web app stops responding.

    Returns once the URL either refuses connections (IOError) or starts
    answering 404.
    """
    stopped = False
    while not stopped:
        try:
            response = urlopen(EXAMPLE_APP)
            stopped = (response.code == 404)
        except IOError:
            stopped = True
def start_server():
    """Launch the Flask test app in a daemon subprocess and wait for it.

    Redirects stderr to the null device so Flask's startup chatter does
    not pollute test output.
    """
    import os
    # os.devnull is portable; a literal '/dev/null' breaks on Windows.
    sys.stderr = open(os.devnull, 'w')
    env.process = Process(target=start_flask_app, args=(env.host, env.port))
    env.process.daemon = True
    env.process.start()
    wait_until_start()
def stop_server():
    """ Terminate the Flask test-app subprocess and wait until it is gone. """
    env.process.terminate()
    env.process.join()
    # Poll the app URL until it genuinely stops answering.
    wait_until_stop()
def get_modules(modules_str):
    """Import a comma-separated list of test-module names or paths.

    Each entry may be a path like ``tests/test_foo.py``; slashes are
    converted to dots and the ``.py`` suffix stripped before importing.
    Modules that fail to import are reported to stdout and skipped.

    Returns the list of successfully imported module objects.
    """
    names = modules_str.split(',')
    modules = []
    for name in names:
        name = name.replace('/', '.').replace('.py', '')
        try:
            module = __import__(name, fromlist='tests')
        except Exception:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            print('Error importing module %s:' % name)
            import traceback
            traceback.print_exception(exc_type, exc_value, exc_traceback,
                                      file=sys.stdout)
            # Skip the broken module. Previously the loop fell through and
            # appended the stale (or unbound) `module` name anyway.
            continue
        modules.append(module)
    return modules
def run_suite(suite, args):
    """Run *suite* with a text runner configured from the parsed CLI args."""
    runner = unittest.TextTestRunner(
        stream=sys.stdout,
        descriptions=True,
        verbosity=args.verbosity,
        failfast=args.failfast,
    )
    return runner.run(suite)
def get_suite_from_modules(modules):
loader = unittest.TestLoader()
suite = unittest.TestSuite()
for module in modules:
suite.addTest(loader.loadTestsFromModule(module))
return suite
def get_complete_suite():
    """ Discover and return every test found under TESTS_ROOT. """
    loader = unittest.TestLoader()
    return loader.discover(TESTS_ROOT)
if __name__ == '__main__':
    # Bring up the shared Flask test app before running anything.
    try:
        start_server()
    except Exception as e:
        sys.stdout.write("Failed to start test server: %s\n\n" % e)
        sys.exit(1)
    args = parser.parse_args()
    # -w/--which selects specific test modules; otherwise discover everything.
    # (An unused TestLoader instantiation was removed here.)
    if args.which and args.which != 'tests':
        modules = get_modules(args.which)
        suite = get_suite_from_modules(modules)
    else:
        suite = get_complete_suite()
    result = run_suite(suite, args)
    stop_server()
    # Exit status is the number of failing/erroring tests (0 == success).
    sys.exit(len(result.errors) + len(result.failures))
|
__main__.py | #####################################################################
# #
# /main.pyw #
# #
# Copyright 2014, Monash University #
# #
# This file is part of the program runviewer, in the labscript #
# suite (see http://labscriptsuite.org), and is licensed under the #
# Simplified BSD License. See the license.txt file in the root of #
# the project for the full license. #
# #
#####################################################################
from __future__ import division, unicode_literals, print_function, absolute_import
from labscript_utils import PY2
import os
import labscript_utils.excepthook
try:
from labscript_utils import check_version
except ImportError:
raise ImportError('Require labscript_utils > 2.1.0')
check_version('labscript_utils', '2.10.0', '3')
# Splash screen
from labscript_utils.splash import Splash
splash = Splash(os.path.join(os.path.dirname(__file__), 'runviewer.svg'))
splash.show()
splash.update_text('importing standard library modules')
import sys
import time
import threading
import logging
import ctypes
import socket
if PY2:
str = unicode
from Queue import Queue
else:
from queue import Queue
import ast
import pprint
import signal
# Quit on ctrl-c
signal.signal(signal.SIGINT, signal.SIG_DFL)
# Set working directory to runviewer folder, resolving symlinks
runviewer_dir = os.path.dirname(os.path.realpath(__file__))
os.chdir(runviewer_dir)
splash.update_text('importing Qt')
check_version('qtutils', '2.0.0', '3.0.0')
splash.update_text('importing labscript suite modules')
from labscript_utils.setup_logging import setup_logging
logger = setup_logging('runviewer')
labscript_utils.excepthook.set_logger(logger)
splash.update_text('importing h5_lock and h5py')
import labscript_utils.h5_lock
import h5py
# This must be bumped until after the h5_lock import
# This is because the check imports pyqtgraph, which imports h5py
# h5py must be imported after h5_lock, thus we do the check here
splash.update_text('importing pyqtgraph')
check_version('pyqtgraph', '0.9.10', '1')
from qtutils.qt.QtCore import *
from qtutils.qt.QtGui import *
from qtutils.qt.QtWidgets import *
from qtutils.qt.QtCore import pyqtSignal as Signal
splash.update_text('importing numpy')
import numpy
splash.update_text('importing scipy')
from scipy import interpolate
# must be imported after PySide/PyQt4
import pyqtgraph as pg
pg.setConfigOption('background', 'w')
pg.setConfigOption('foreground', 'k')
from qtutils import *
import qtutils.icons
splash.update_text('importing labscript suite modules')
from labscript_utils.connections import ConnectionTable
import labscript_devices
from labscript_utils.labconfig import LabConfig, config_prefix
check_version('labscript_utils', '2.11.0', '3')
from labscript_utils.ls_zprocess import ZMQServer, ProcessTree
process_tree = ProcessTree.instance()
process_tree.zlock_client.set_process_name('runviewer')
from runviewer.resample import resample as _resample
def set_win_appusermodel(window_id):
    """Give a runviewer window its own Windows taskbar identity.

    Windows-only: sets the AppUserModelID, icon and relaunch command for
    the window so it groups/pins correctly on the taskbar.
    """
    from labscript_utils.winshell import set_appusermodel, appids, app_descriptions
    icon_path = os.path.abspath('runviewer.ico')
    executable = sys.executable.lower()
    # Prefer pythonw.exe so relaunching does not open a console window.
    if not executable.endswith('w.exe'):
        executable = executable.replace('.exe', 'w.exe')
    relaunch_command = executable + ' ' + os.path.abspath(__file__.replace('.pyc', '.py'))
    relaunch_display_name = app_descriptions['runviewer']
    set_appusermodel(window_id, appids['runviewer'], icon_path, relaunch_command, relaunch_display_name)
# Column indices for the shot tree model (colour swatch, shutter checkbox,
# shot enable checkbox / path).
SHOT_MODEL__COLOUR_INDEX = 0
SHOT_MODEL__SHUTTER_INDEX = 1
SHOT_MODEL__CHECKBOX_INDEX = 2
# NOTE(review): PATH_INDEX duplicates SHUTTER_INDEX (both 1) -- confirm
# whether the shot path really lives in the same column as the shutter box.
SHOT_MODEL__PATH_INDEX = 1
# Channel tree model: the checkbox and the channel name share column 0.
CHANNEL_MODEL__CHECKBOX_INDEX = 0
CHANNEL_MODEL__CHANNEL_INDEX = 0
def format_time(input_sec):
    """Render a duration in seconds as a short string with an SI suffix.

    E.g. 0.002 -> "2ms", 3.5e-9 -> "3.5ns". Values smaller than 1e-18
    (including 0 and negatives) fall back to the plain repr plus "s".
    """
    # (threshold, multiplier, unit) from largest to smallest prefix.
    scales = (
        (1, 1, "s"),
        (1e-3, 1e3, "ms"),
        (1e-6, 1e6, "us"),
        (1e-9, 1e9, "ns"),
        (1e-12, 1e12, "ps"),
        (1e-15, 1e15, "fs"),
        (1e-18, 1e18, "as"),
    )
    for threshold, multiplier, unit in scales:
        if input_sec >= threshold:
            return "{:.3g}".format(input_sec * multiplier) + unit
    return str(input_sec) + "s"
def int_to_enum(enum_list, value):
    """Recover the original enum object for a value a Qt model coerced to int.

    PySide/PyQt models store enums as plain ints, which e.g. QColor cannot
    interpret. Given the list of candidate enum members, return the first
    member comparing equal to *value*; if none matches, return *value*
    unchanged. (Qt provides no enum-member list, so callers build their own.)
    """
    matches = (member for member in enum_list if member == value)
    return next(matches, value)
class ScaleHandler():
    """Bidirectional piecewise-linear map between real shot time and display time.

    Built from a set of marker times (``input_times``) and where those
    markers should appear on screen (``target_positions``); both mappings
    extrapolate linearly outside the marker range. E.g. inputs [1,2,4,6]
    mapped to evenly spaced targets give get_scaled_time(3) -> 2.5.
    """

    def __init__(self, input_times, target_positions, stop_time):
        """Construct the interpolators.

        Raises if any input time lies outside [0, stop_time], which
        non-linear time does not currently support.
        """
        self.org_stop_time = float(stop_time)
        if not all(0 <= x <= self.org_stop_time for x in input_times):
            raise Exception('shot contains at least one marker before t=0 and/or after the stop time. Non-linear time currently does not support this.')
        source = sorted(input_times)
        target = sorted(target_positions)
        # Pad both ends by 1ns so interp1d has a defined slope for linear
        # extrapolation just outside the marker range.
        source = [min(source) - 1e-9] + source + [max(source) + 1e-9]
        target = [min(target) - 1e-9] + target + [max(target) + 1e-9]
        self.get_scaled_time = interpolate.interp1d(source, target, assume_sorted=True, bounds_error=False, fill_value='extrapolate')
        self.get_unscaled_time = interpolate.interp1d(target, source, assume_sorted=True, bounds_error=False, fill_value='extrapolate')
        self.scaled_stop_time = self.get_scaled_time(self.org_stop_time)
class ColourDelegate(QItemDelegate):
    """Item delegate showing a colour-swatch combo box editor for the shot tree."""
    def __init__(self, view, *args, **kwargs):
        QItemDelegate.__init__(self, *args, **kwargs)
        self._view = view
        # Palette cycled through when shots are assigned default colours.
        self._colours = [Qt.black, Qt.red, Qt.green, Qt.blue, Qt.cyan, Qt.magenta, Qt.yellow, Qt.gray, Qt.darkRed, Qt.darkGreen, Qt.darkBlue, Qt.darkCyan, Qt.darkMagenta, Qt.darkYellow, Qt.darkGray, Qt.lightGray]
        self._current_colour_index = 0
    def get_next_colour(self):
        """Return the next palette colour, wrapping round-robin."""
        colour = self._colours[self._current_colour_index]
        self._current_colour_index += 1
        if self._current_colour_index >= len(self._colours):
            self._current_colour_index = 0
        return colour
    def createEditor(self, parent, option, index):
        """Build a combo box whose entries are solid colour swatches."""
        editor = QComboBox(parent)
        #colours = QColor.colorNames()
        for colour in self._colours:
            pixmap = QPixmap(20, 20)
            pixmap.fill(colour)
            editor.addItem(QIcon(pixmap), '', colour)
        # Commit and close the editor as soon as the user picks an entry.
        editor.activated.connect(lambda index, editor=editor: self._view.commitData(editor))
        editor.activated.connect(lambda index, editor=editor: self._view.closeEditor(editor, QAbstractItemDelegate.NoHint))
        # Pop the list open immediately after the editor appears.
        QTimer.singleShot(10, editor.showPopup)
        return editor
    def setEditorData(self, editor, index):
        # UserRole holds a zero-argument callable returning the colour
        # (see setModelData), hence the value() call below.
        value = index.model().data(index, Qt.UserRole)
        for i in range(editor.count()):
            if editor.itemData(i) == value():
                editor.setCurrentIndex(i)
                break
    def setModelData(self, editor, model, index):
        icon = editor.itemIcon(editor.currentIndex())
        colour = editor.itemData(editor.currentIndex())
        # Note, all data being written to the model must be read out of the editor PRIOR to calling model.setData()
        # This is because a call to model.setData() triggers setEditorData(), which messes up subsequent
        # calls to the editor to determine the currently selected item/data
        model.setData(index, icon, Qt.DecorationRole)
        # Store a callable so the enum identity survives the model's int coercion.
        model.setData(index, lambda clist=self._colours, colour=colour: int_to_enum(clist, colour), Qt.UserRole)
    def updateEditorGeometry(self, editor, option, index):
        editor.setGeometry(option.rect)
class RunviewerMainWindow(QMainWindow):
    """Main window that signals whenever the window manager recreates its native window."""
    # A signal for when the window manager has created a new window for this widget:
    newWindow = Signal(int)
    def event(self, event):
        # Forward all events to QMainWindow, but also emit newWindow with the
        # native window id whenever it changes (used for Windows taskbar setup).
        result = QMainWindow.event(self, event)
        if event.type() == QEvent.WinIdChange:
            self.newWindow.emit(self.effectiveWinId())
        return result
class RunViewer(object):
    def __init__(self, exp_config):
        """Build the runviewer main window, wire up models/plots/signals,
        read config paths from *exp_config*, and start the worker threads.
        """
        splash.update_text('loading graphical interface')
        self.ui = UiLoader().load(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'main.ui'), RunviewerMainWindow())
        # setup shot treeview model
        self.shot_model = QStandardItemModel()
        self.shot_model.setHorizontalHeaderLabels(['colour', 'shutters', 'path'])
        self.ui.shot_treeview.setModel(self.shot_model)
        self.ui.shot_treeview.resizeColumnToContents(1)
        self.shot_model.itemChanged.connect(self.on_shot_selection_changed)
        self.shot_colour_delegate = ColourDelegate(self.ui.shot_treeview)
        self.ui.shot_treeview.setItemDelegateForColumn(0, self.shot_colour_delegate)
        # setup channel treeview model
        self.channel_model = QStandardItemModel()
        self.channel_model.setHorizontalHeaderLabels(['channel'])
        self.ui.channel_treeview.setModel(self.channel_model)
        self.channel_model.itemChanged.connect(self.update_plots)
        # create a hidden plot widget that all plots can link their x-axis too
        hidden_plot = pg.PlotWidget(name='runviewer - time axis link')
        hidden_plot.setMinimumHeight(1)
        hidden_plot.setMaximumHeight(1)
        hidden_plot.setLabel('bottom', 'Time', units='s')
        hidden_plot.setLabel('left', " ")
        hidden_plot.showAxis('right', True)
        hidden_plot_item = hidden_plot.plot([0, 1], [0, 0])
        self._hidden_plot = (hidden_plot, hidden_plot_item)
        self.ui.hidden_plot_layout.addWidget(hidden_plot)
        # Dedicated strip at the bottom showing only the (possibly rescaled) time axis.
        time_axis_plot = pg.PlotWidget()
        time_axis_plot.setMinimumHeight(120)
        time_axis_plot.setMaximumHeight(120)
        time_axis_plot.setLabel('bottom', 'Time', units='s')
        time_axis_plot.showAxis('right', True)
        time_axis_plot.setXLink('runviewer - time axis link')
        time_axis_plot.setMouseEnabled(y=False)
        time_axis_plot.getAxis('left').setTicks([]) # hide y ticks in the left & right side. only show time axis
        time_axis_plot.getAxis('right').setTicks([])
        time_axis_plot.setLabel('left', 'Slots')
        time_axis_plot.scene().sigMouseMoved.connect(lambda pos: self.mouseMovedEvent(pos, time_axis_plot, "Slots"))
        time_axis_plot_item = time_axis_plot.plot([0, 1], [0, 0], pen=(255, 255, 255))
        self._time_axis_plot = (time_axis_plot, time_axis_plot_item)
        # Marker bookkeeping: {time: marker-dict}, plus the InfiniteLine items
        # drawn for them (fixed lines vs user-draggable ones).
        self.all_markers = {}
        self.all_marker_items = {}
        self.movable_marker_items = {}
        markers_plot = pg.PlotWidget(name='runviewer - markers')
        markers_plot.setMinimumHeight(120)
        markers_plot.setMaximumHeight(120)
        markers_plot.showAxis('top', False)
        markers_plot.showAxis('bottom', False)
        markers_plot.showAxis('left', True)
        markers_plot.showAxis('right', True)
        markers_plot.getAxis('left').setTicks([])
        markers_plot.getAxis('right').setTicks([])
        markers_plot.setLabel('left', 'Markers')
        markers_plot.setXLink('runviewer - time axis link')
        markers_plot.setMouseEnabled(y=False)
        markers_plot.scene().sigMouseMoved.connect(lambda pos: self.mouseMovedEvent(pos, markers_plot, "Markers"))
        markers_plot_item = markers_plot.plot([])
        self._markers_plot = (markers_plot, markers_plot_item)
        self.ui.verticalLayout_9.insertWidget(1,markers_plot)
        self.ui.plot_layout.addWidget(time_axis_plot)
        # add some icons
        self.ui.add_shot.setIcon(QIcon(':/qtutils/fugue/plus'))
        self.ui.remove_shots.setIcon(QIcon(':/qtutils/fugue/minus'))
        self.ui.enable_selected_shots.setIcon(QIcon(':/qtutils/fugue/ui-check-box'))
        self.ui.disable_selected_shots.setIcon(QIcon(':/qtutils/fugue/ui-check-box-uncheck'))
        self.ui.group_channel.setIcon(QIcon(':/qtutils/fugue/layers-group'))
        self.ui.delete_group.setIcon(QIcon(':/qtutils/fugue/layers-ungroup'))
        self.ui.channel_move_to_top.setIcon(QIcon(':/qtutils/fugue/arrow-stop-090'))
        self.ui.channel_move_up.setIcon(QIcon(':/qtutils/fugue/arrow-090'))
        self.ui.channel_move_down.setIcon(QIcon(':/qtutils/fugue/arrow-270'))
        self.ui.channel_move_to_bottom.setIcon(QIcon(':/qtutils/fugue/arrow-stop-270'))
        self.ui.reset_x_axis.setIcon(QIcon(':/qtutils/fugue/layer-resize-replicate'))
        self.ui.reset_y_axis.setIcon(QIcon(':/qtutils/fugue/layer-resize-replicate-vertical'))
        self.ui.toggle_tooltip.setIcon(QIcon(':/qtutils/fugue/ui-tooltip-balloon'))
        self.ui.linear_time.setIcon(QIcon(':/qtutils/fugue/clock-history'))
        self.ui.equal_space_time.setIcon(QIcon(':/qtutils/fugue/border-vertical-all'))
        # Non-linear time controls stay disabled until a shot with markers is chosen.
        self.ui.linear_time.setEnabled(False)
        self.ui.equal_space_time.setEnabled(False)
        self.ui.actionOpen_Shot.setIcon(QIcon(':/qtutils/fugue/plus'))
        self.ui.actionQuit.setIcon(QIcon(':/qtutils/fugue/cross-button'))
        self.ui.actionLoad_channel_config.setIcon(QIcon(':/qtutils/fugue/folder-open'))
        self.ui.actionSave_channel_config.setIcon(QIcon(':/qtutils/fugue/disk'))
        # disable buttons that are not yet implemented to help avoid confusion!
        self.ui.group_channel.setEnabled(False)
        self.ui.delete_group.setEnabled(False)
        # connect signals
        self.ui.reset_x_axis.clicked.connect(self.on_x_axis_reset)
        self.ui.reset_y_axis.clicked.connect(self.on_y_axes_reset)
        self.ui.channel_move_up.clicked.connect(self._move_up)
        self.ui.channel_move_down.clicked.connect(self._move_down)
        self.ui.channel_move_to_top.clicked.connect(self._move_top)
        self.ui.channel_move_to_bottom.clicked.connect(self._move_bottom)
        self.ui.enable_selected_shots.clicked.connect(self._enable_selected_shots)
        self.ui.disable_selected_shots.clicked.connect(self._disable_selected_shots)
        self.ui.add_shot.clicked.connect(self.on_add_shot)
        self.ui.markers_comboBox.currentIndexChanged.connect(self._update_markers)
        # self.ui.non_linear_time.toggled.connect(self._toggle_non_linear_time)
        self.ui.linear_time.clicked.connect(self._reset_linear_time)
        self.ui.equal_space_time.clicked.connect(self._space_markers_evenly)
        self.ui.remove_shots.clicked.connect(self.on_remove_shots)
        self.ui.actionOpen_Shot.triggered.connect(self.on_add_shot)
        self.ui.actionQuit.triggered.connect(self.ui.close)
        self.ui.actionLoad_channel_config.triggered.connect(self.on_load_channel_config)
        self.ui.actionSave_channel_config.triggered.connect(self.on_save_channel_config)
        # Keyboard shortcuts:
        QShortcut('Del', self.ui.shot_treeview, lambda: self.on_remove_shots(confirm=True))
        QShortcut('Shift+Del', self.ui.shot_treeview, lambda: self.on_remove_shots(confirm=False))
        if os.name == 'nt':
            self.ui.newWindow.connect(set_win_appusermodel)
        splash.update_text('done')
        self.ui.show()
        # internal variables
        #self._channels_list = {}
        self.plot_widgets = {}
        self.plot_items = {}
        self.shutter_lines = {}
        try:
            self.default_config_path = os.path.join(exp_config.get('DEFAULT', 'app_saved_configs'), 'runviewer')
        except LabConfig.NoOptionError:
            # Option missing from labconfig: write the default location back, then re-read.
            exp_config.set('DEFAULT', 'app_saved_configs', os.path.join('%(labscript_suite)s', 'userlib', 'app_saved_configs', '%(experiment_name)s'))
            self.default_config_path = os.path.join(exp_config.get('DEFAULT', 'app_saved_configs'), 'runviewer')
        if not os.path.exists(self.default_config_path):
            os.makedirs(self.default_config_path)
        self.last_opened_shots_folder = exp_config.get('paths', 'experiment_shot_storage')
        # start resample thread
        self._resample = False
        self._thread = threading.Thread(target=self._resample_thread)
        self._thread.daemon = True
        self._thread.start()
        # start shots_to_process_queue monitoring thread
        self._shots_to_process_thread = threading.Thread(target=self._process_shots)
        self._shots_to_process_thread.daemon = True
        self._shots_to_process_thread.start()
        # Non-linear-time state: disabled until the user rescales the axis.
        self.scale_time = False
        self.scalehandler = None
    def _update_markers(self, index):
        """Rebuild all marker lines after the markers combo box changes.

        *index* is the new combo index; index 0 means "no shot" and clears
        everything. Otherwise the selected shot's markers are drawn as
        draggable lines in the markers strip and fixed, delta-labelled
        lines in the time-axis strip.
        """
        # Remove every previously drawn marker line from its plot.
        for line, plot in self.all_marker_items.items():
            # line.blockSignals(True)
            plot.removeItem(line)
        self.all_marker_items = {}
        for line, plot in self.movable_marker_items.items():
            # line.blockSignals(True)
            plot.removeItem(line)
        self.movable_marker_items = {}
        self.marker_times_unscaled = {}
        marker_index = self.ui.markers_comboBox.currentIndex()
        shot = self.ui.markers_comboBox.itemData(marker_index)
        if index == 0:
            self.ui.linear_time.setEnabled(False)
            self.ui.equal_space_time.setEnabled(False)
            self.all_markers = {}
        else:
            self.ui.linear_time.setEnabled(True)
            self.ui.equal_space_time.setEnabled(True)
            self.all_markers = shot.markers
            # self._update_non_linear_time(changed_shot=True)
        times = sorted(list(self.all_markers.keys()))
        last_time = 0
        for i, (t, m) in enumerate(sorted(self.all_markers.items())):
            # delta_t labels the gap to the next marker.
            if i < len(times)-1:
                delta_t = times[i+1] - t
            # Now always have a marker at stop time
            # else:
            #     delta_t = shot.stop_time - t
            unscaled_t = t
            if self.scale_time:
                t = self.scalehandler.get_scaled_time(t)
            color = m['color']
            r, g, b = color
            if (r, g, b) == (-1, -1, -1):
                # Default colour, black:
                r, g, b = (0, 0, 0)
            color = QColor(r, g, b)
            label = m['label'].decode() if isinstance( m['label'], bytes) else str(m['label'])
            # The first marker (t=0) is fixed; all later markers are draggable.
            if i == 0:
                line = self._markers_plot[0].addLine(x=t, pen=pg.mkPen(color=color, width=1.5, style=Qt.DashLine), label=label, labelOpts= {"color": color, "fill": QColor(255, 255, 255, 255), "rotateAxis":(1, 0), "anchors": [(0.5, 0),(0.5, 0)]}, movable=False )
            else:
                line = self._markers_plot[0].addLine(x=t, pen=pg.mkPen(color=color, width=1.5, style=Qt.DashLine), label=label, labelOpts= {"color": color, "fill": QColor(255, 255, 255, 255), "rotateAxis":(1, 0), "anchors": [(0.5, 0),(0.5, 0)]}, movable=True )
                line.setBounds([last_time+1e-9 if last_time !=0 else last_time ,None])
                line.sigPositionChanged.connect(self._marker_moving)
                line.sigPositionChangeFinished.connect(self._marker_moved)
            # self.all_marker_items[line] = self._markers_plot[0]
            self.movable_marker_items[line] = self._markers_plot[0]
            self.marker_times_unscaled[line] = unscaled_t
            # Matching fixed line in the time-axis strip, labelled with the gap.
            line = self._time_axis_plot[0].addLine(x=t, pen=pg.mkPen(color=color, width=1.5, style=Qt.DashLine), label=format_time(delta_t), labelOpts= {"color": color, "fill": QColor(255, 255, 255, 255), "rotateAxis":(1, 0), "anchors": [(0.5, 0),(0.5, 0)]}, movable=False )
            self.all_marker_items[line] = self._time_axis_plot[0]
            last_time = t
        self.update_plots()
    def mouseMovedEvent(self, position, ui, name):
        """Show a tooltip with plot name, time and nearest sample value.

        Connected to each plot's sigMouseMoved; only active while the
        tooltip toggle button is checked. *position* is a scene position,
        *ui* the plot widget under the cursor, *name* its display name.
        """
        if self.ui.toggle_tooltip.isChecked():
            v = ui.scene().views()[0]
            viewP = v.mapFromScene(position)
            glob_pos = ui.mapToGlobal(viewP) # convert to Screen x
            glob_zero = ui.mapToGlobal(QPoint(0, 0))
            self._global_start_x = glob_zero.x()
            self._global_start_y = glob_zero.y()
            self._global_width = ui.width()
            self._global_height = ui.height()
            # Convert to data coordinates of this plot's viewbox.
            coord_pos = ui.plotItem.vb.mapSceneToView(position)
            if len(self.get_selected_shots_and_colours()) > 0:
                scaled_t = float(coord_pos.x())
                # Report unscaled (real) time even when the axis is rescaled.
                if self.scale_time and self.scalehandler is not None:
                    unscaled_t = float(self.scalehandler.get_unscaled_time(scaled_t))
                else:
                    unscaled_t = scaled_t
                if unscaled_t is not None:
                    pos = QPoint(glob_pos.x(), glob_pos.y())
                    plot_data = ui.plotItem.listDataItems()[0].getData()
                    if plot_data[0] is not None and scaled_t is not None:
                        # NOTE(review): the trailing "- 1" biases towards the
                        # previous sample (wraps to the last sample at index 0)
                        # -- confirm this offset is intentional.
                        nearest_index = numpy.abs(plot_data[0] - scaled_t).argmin() - 1
                        y_val = "{:.2f}".format(plot_data[1][nearest_index])
                    else:
                        y_val = '-'
                    text = "Plot: {} \nTime: {:.9f}s\nValue: {}".format(name, unscaled_t, y_val)
                    QToolTip.showText(pos, text)
def _reset_linear_time(self):
self.scale_time = False
markers_unscaled = sorted(list(self.all_markers.keys()))
marker_index = self.ui.markers_comboBox.currentIndex()
shot = self.ui.markers_comboBox.itemData(marker_index)
scalehandler = ScaleHandler(markers_unscaled, markers_unscaled, shot.stop_time)
self._update_non_linear_time(new_scalehandler=scalehandler)
self.on_x_axis_reset()
self._resample = True
def _space_markers_evenly(self):
self.scale_time = True
marker_index = self.ui.markers_comboBox.currentIndex()
shot = self.ui.markers_comboBox.itemData(marker_index)
markers_unscaled = sorted(list(self.all_markers.keys()))
target_length = shot.stop_time / float(len(markers_unscaled) - 1)
scaled_times = [target_length * i for i in range(len(markers_unscaled))]
scalehandler = ScaleHandler(markers_unscaled, scaled_times, shot.stop_time)
self._update_non_linear_time(new_scalehandler=scalehandler)
self.on_x_axis_reset()
self._resample = True
    def _marker_moving(self, line):
        """Live-update the time scaling while the user drags marker *line*.

        Rebuilds the ScaleHandler so the dragged marker sits at its new
        position and every marker to its right shifts by the same amount.
        """
        self.scale_time = True
        marker_index = self.ui.markers_comboBox.currentIndex()
        shot = self.ui.markers_comboBox.itemData(marker_index)
        markers_unscaled = sorted(list(self.all_markers.keys()))
        # What was the unscaled time of the marker that moved, and where is it now?
        moved_marker_unscaled_t = self.marker_times_unscaled[line]
        moved_marker_new_pos = line.pos().x()
        # Where was the marker just before it was moved? This is given by the current scalehandler
        if self.scalehandler is not None:
            moved_marker_last_pos = self.scalehandler.get_scaled_time(moved_marker_unscaled_t)
        else:
            moved_marker_last_pos = moved_marker_unscaled_t
        # How far has the marker moved?
        delta_marker = moved_marker_new_pos - moved_marker_last_pos
        # Now we want to shift the other markers if the are at a higher position than this one
        markers = list(self.marker_times_unscaled.keys())
        new_scaled_times = []
        for marker in markers:
            if marker == line:
                new_scaled_times.append(moved_marker_new_pos)
            else:
                x = marker.pos().x()
                if x > moved_marker_last_pos:
                    x += delta_marker
                new_scaled_times.append(x)
        new_scaled_times = sorted(new_scaled_times)
        scalehandler = ScaleHandler(markers_unscaled,new_scaled_times, shot.stop_time)
        self._update_non_linear_time(new_scalehandler=scalehandler)
    def _marker_moved(self, line):
        """ Drag finished: request a resample at the final marker positions. """
        self._resample = True
    def _update_non_linear_time(self, changed_shot=False, new_scalehandler=None):
        """Swap in *new_scalehandler* and remap every drawn item to it.

        Moves all marker and shutter lines, repositions the draggable
        markers (re-applying their ordering bounds), adjusts axis ticks,
        and rescales the x-data of every plotted curve from the old
        scalehandler's coordinates into the new one's.
        """
        marker_index = self.ui.markers_comboBox.currentIndex()
        shot = self.ui.markers_comboBox.itemData(marker_index)
        if new_scalehandler is None:
            # make a 1:1 scalehandler using the hidden_plot
            self.scale_time = False
            end_t = self._hidden_plot[1].getData()[0][-1]
            new_scalehandler = ScaleHandler([0,end_t],[0,end_t],end_t)
        old_scalehandler = self.scalehandler
        self.scalehandler = new_scalehandler
        # combine markers and shutter lines
        # NOTE(review): the loop variable `shot` below shadows the combo-box
        # `shot` read above -- confirm the later tick-setting code really
        # means to use the last shutter-dict key here.
        markers = list(self.all_marker_items.keys())
        for channel in self.shutter_lines:
            for shot in self.shutter_lines[channel]:
                for line in self.shutter_lines[channel][shot][0]:
                    markers.append(line)
                for line in self.shutter_lines[channel][shot][1]:
                    markers.append(line)
        # Move all Markes/Shutter Lines to new position
        for marker in markers:
            pos = marker.pos()
            # Recover real time via the old mapping, then re-project with the new one.
            if old_scalehandler is None:
                unscaled_x = pos.x()
            else:
                unscaled_x = old_scalehandler.get_unscaled_time(pos.x())
            if self.scale_time and self.scalehandler is not None:
                new_x = self.scalehandler.get_scaled_time(unscaled_x)
            else:
                new_x = unscaled_x
            pos.setX(new_x)
            marker.setPos(pos)
        # Move the movable lines in the upper graph
        mv_markers = list(self.movable_marker_items.keys())
        new_marker_times = {}
        for marker in mv_markers:
            if self.scale_time and self.scalehandler is not None:
                new_x = self.scalehandler.get_scaled_time(self.marker_times_unscaled[marker])
            else:
                new_x = self.marker_times_unscaled[marker]
            new_marker_times[float(new_x)] = marker
        # Re-place the draggable markers in ascending order, keeping each one
        # bounded strictly to the right of its predecessor; signals are
        # blocked so setPos does not re-enter _marker_moving.
        last_time = None
        for t in sorted(list(new_marker_times.keys())):
            marker = new_marker_times[t]
            marker.blockSignals(True)
            marker.setBounds([None, None])
            marker.setPos(t)
            marker.setBounds([last_time+1e-9 if last_time is not None else 0.0, None])
            marker.blockSignals(False)
            last_time = t
        # With non-linear time active the numeric axis is meaningless except
        # at the endpoints, so only tick 0 and stop_time.
        if shot is not None and self.scale_time:
            self._time_axis_plot[0].getAxis("bottom").setTicks([[[0, 0], [shot.stop_time, shot.stop_time]]])
            for plot in self.plot_widgets.values():
                plot.getAxis("bottom").setTicks([[[0, 0], [shot.stop_time, shot.stop_time]]])
        else:
            self._time_axis_plot[0].getAxis("bottom").setTicks(None)
            for plot in self.plot_widgets.values():
                plot.getAxis("bottom").setTicks(None)
        # Rescale the x-data of every plotted trace into the new time mapping.
        for plot in self.plot_widgets.values():
            for item in plot.getPlotItem().items:
                if isinstance(item, pg.PlotDataItem):
                    if old_scalehandler is not None:
                        unscaled_t = old_scalehandler.get_unscaled_time(item.xData)
                    else:
                        unscaled_t = item.xData
                    if self.scalehandler is not None:
                        item.setData(self.scalehandler.get_scaled_time(unscaled_t), item.yData)
                    else:
                        item.setData(unscaled_t, item.yData)
    def _process_shots(self):
        """ Worker loop: forward queued shot filepaths to load_shot on the main thread. """
        while True:
            filepath = shots_to_process_queue.get()
            # inmain_later marshals the call onto the Qt main thread.
            inmain_later(self.load_shot, filepath)
    def on_load_channel_config(self):
        """Load a saved channel list/order/check-state from an .ini file.

        Channels already in the model are re-checked and moved to the saved
        position; unknown channels are inserted disabled (they become
        active if a shot later provides them).
        """
        config_file = QFileDialog.getOpenFileName(self.ui, "Select file to load", self.default_config_path, "Config files (*.ini)")
        # Qt5 returns (filename, filter) tuples; Qt4 returns a plain string.
        if isinstance(config_file, tuple):
            config_file, _ = config_file
        if config_file:
            runviewer_config = LabConfig(config_file)
            try:
                channels = ast.literal_eval(runviewer_config.get('runviewer_state', 'Channels'))
            except (LabConfig.NoOptionError, LabConfig.NoSectionError):
                channels = {}
            for row, (channel, checked) in enumerate(channels):
                check_items = self.channel_model.findItems(channel)
                if len(check_items) == 0:
                    # Channel not present yet: insert it disabled at the saved row.
                    items = []
                    check_item = QStandardItem(channel)
                    check_item.setEditable(False)
                    check_item.setCheckable(True)
                    items.append(check_item)
                    check_item.setCheckState(Qt.Checked if checked else Qt.Unchecked)
                    check_item.setEnabled(False)
                    self.channel_model.insertRow(row, items)
                else:
                    # Existing channel: restore check state and saved ordering.
                    check_item = check_items[0]
                    check_item.setCheckState(Qt.Checked if checked else Qt.Unchecked)
                    self.channel_model.takeRow(check_item.row())
                    self.channel_model.insertRow(row, check_item)
    def on_save_channel_config(self):
        """Save the current channel order and check states to an .ini file."""
        save_file = QFileDialog.getSaveFileName(self.ui, 'Select file to save current channel configuration', self.default_config_path, "config files (*.ini)")
        # Qt5 returns (filename, filter) tuples; Qt4 returns a plain string.
        if type(save_file) is tuple:
            save_file, _ = save_file
        if save_file:
            runviewer_config = LabConfig(save_file)
            # Store as a literal list of (name, checked) pairs, readable back
            # with ast.literal_eval in on_load_channel_config.
            channels = []
            for row in range(self.channel_model.rowCount()):
                item = self.channel_model.item(row)
                channels.append((item.text(), item.checkState() == Qt.Checked))
            runviewer_config.set('runviewer_state', 'Channels', pprint.pformat(channels))
def on_toggle_shutter(self, checked, current_shot):
for channel in self.shutter_lines:
for shot in self.shutter_lines[channel]:
if shot == current_shot:
for line in self.shutter_lines[channel][shot][0]:
if checked:
line.show()
else:
line.hide()
for line in self.shutter_lines[channel][shot][1]:
if checked:
line.show()
else:
line.hide()
    def on_add_shot(self):
        """Prompt for one or more shot HDF5 files and load each of them.

        Files without an .h5/.hdf5 extension are skipped and reported with
        a single warning dialog at the end.
        """
        selected_files = QFileDialog.getOpenFileNames(self.ui, "Select file to load", self.last_opened_shots_folder, "HDF5 files (*.h5 *.hdf5)")
        popup_warning = False
        # Qt5 returns (filenames, filter) tuples; Qt4 returns a plain list.
        if isinstance(selected_files, tuple):
            selected_files, _ = selected_files
        # Convert to standard platform specific path, otherwise Qt likes forward slashes:
        selected_files = [os.path.abspath(str(shot_file)) for shot_file in selected_files]
        # Remember the folder for the next open dialog.
        if len(selected_files) > 0:
            self.last_opened_shots_folder = os.path.dirname(selected_files[0])
        for file in selected_files:
            try:
                filepath = str(file)
                # Qt has this weird behaviour where if you type in the name of a file that exists
                # but does not have the extension you have limited the dialog to, the OK button is greyed out
                # but you can hit enter and the file will be selected.
                # So we must check the extension of each file here!
                if filepath.endswith('.h5') or filepath.endswith('.hdf5'):
                    self.load_shot(filepath)
                else:
                    popup_warning = True
            except:
                # Still warn the user, but let the exception propagate so it
                # is logged by the excepthook.
                popup_warning = True
                raise
        if popup_warning:
            message = QMessageBox()
            message.setText("Warning: Some shots were not loaded because they were not valid hdf5 files")
            message.setIcon(QMessageBox.Warning)
            message.setWindowTitle("Runviewer")
            message.setStandardButtons(QMessageBox.Ok)
            message.exec_()
def on_remove_shots(self, confirm=True):
    """Remove the selected shot rows, optionally after a confirmation dialog.

    Rows are removed highest-index first so lower indices stay valid, and
    each shot is unticked first so its plot items and shutter markers are
    cleaned up via the itemChanged handler before the row disappears.
    """
    # Get the selection model from the treeview
    selection_model = self.ui.shot_treeview.selectionModel()
    # Create a list of select row indices
    selected_row_list = [index.row() for index in selection_model.selectedRows()]
    # sort in descending order to prevent index changes of rows to be deleted
    selected_row_list.sort(reverse=True)
    if confirm:
        reply = QMessageBox.question(self.ui, 'Runviewer', 'Remove {} shots?'.format(len(selected_row_list)),
                                     QMessageBox.Yes | QMessageBox.No)
        if reply == QMessageBox.No:
            return
    for row in selected_row_list:
        item = self.shot_model.item(row, SHOT_MODEL__CHECKBOX_INDEX)
        shutter_item = self.shot_model.item(row, SHOT_MODEL__SHUTTER_INDEX)
        # unselect shot so plots/markers are torn down by the model callback
        item.setCheckState(Qt.Unchecked)
        shutter_item.setCheckState(Qt.Unchecked)
        # remove row (unused locals colour_item/shot and the no-op `del shot`
        # from the original have been dropped)
        self.shot_model.removeRow(row)
def on_shot_selection_changed(self, item):
    """itemChanged handler for the shot model.

    Dispatches on which column changed: the tick box (enable/disable the
    shot, assign a colour, sync the markers combobox and shutter checkbox),
    the colour swatch (re-pen existing plot curves), or the shutter tick
    (show/hide shutter marker lines).
    """
    if self.shot_model.indexFromItem(item).column() == SHOT_MODEL__CHECKBOX_INDEX:
        # add or remove a colour for this shot
        checked = item.checkState()
        row = self.shot_model.indexFromItem(item).row()
        colour_item = self.shot_model.item(row, SHOT_MODEL__COLOUR_INDEX)
        check_shutter = self.shot_model.item(row, SHOT_MODEL__SHUTTER_INDEX)
        if checked:
            # UserRole holds a zero-arg callable returning the colour (or None
            # on first tick, in which case the delegate supplies the next one)
            colour = colour_item.data(Qt.UserRole)
            if colour is not None:
                colour = colour()
            else:
                colour = self.shot_colour_delegate.get_next_colour()
            colour_item.setEditable(True)
            pixmap = QPixmap(20, 20)
            pixmap.fill(colour)
            icon = QIcon(pixmap)
            # store the colour back as a callable (defaults bind current values)
            colour_item.setData(lambda clist=self.shot_colour_delegate._colours, colour=colour: int_to_enum(clist, colour), Qt.UserRole)
            colour_item.setData(icon, Qt.DecorationRole)
            shot_combobox_index = self.ui.markers_comboBox.findText(os.path.basename(item.data().path))
            self.ui.markers_comboBox.model().item(shot_combobox_index).setEnabled(True)
            if self.ui.markers_comboBox.currentIndex() == 0:
                self.ui.markers_comboBox.setCurrentIndex(shot_combobox_index)
            # shutter checkbox only makes sense if the shot has shutter markers
            if item.data().shutter_times != {}:
                check_shutter.setEnabled(True)
            else:
                check_shutter.setEnabled(False)
                check_shutter.setToolTip("This shot doesn't contain shutter markers")
        else:
            # colour = None
            # icon = None
            shot_combobox_index = self.ui.markers_comboBox.findText(os.path.basename(item.data().path))
            self.ui.markers_comboBox.model().item(shot_combobox_index).setEnabled(False)
            if shot_combobox_index == self.ui.markers_comboBox.currentIndex():
                self.ui.markers_comboBox.setCurrentIndex(0)
            colour_item.setEditable(False)
            check_shutter.setEnabled(False)
        # model.setData(index, editor.itemIcon(editor.currentIndex()),
        # model.setData(index, editor.itemData(editor.currentIndex()), Qt.UserRole)
        self.update_channels_treeview()
    elif self.shot_model.indexFromItem(item).column() == SHOT_MODEL__COLOUR_INDEX:
        # update the plot colours
        # get reference to the changed shot
        current_shot = self.shot_model.item(self.shot_model.indexFromItem(item).row(), SHOT_MODEL__CHECKBOX_INDEX).data()
        # find and update the pen of the plot items
        for channel in self.plot_items.keys():
            for shot in self.plot_items[channel]:
                if shot == current_shot:
                    colour = item.data(Qt.UserRole)
                    self.plot_items[channel][shot].setPen(pg.mkPen(QColor(colour()), width=2))
    elif self.shot_model.indexFromItem(item).column() == SHOT_MODEL__SHUTTER_INDEX:
        current_shot = self.shot_model.item(self.shot_model.indexFromItem(item).row(), SHOT_MODEL__CHECKBOX_INDEX).data()
        self.on_toggle_shutter(item.checkState(), current_shot)
def load_shot(self, filepath):
    """Create a Shot from filepath and append its row (colour, shutter,
    checkbox) to the shot model; also register it in the markers combobox
    (disabled until the shot is ticked)."""
    shot = Shot(filepath)
    # add shot to shot list
    # Create Items
    items = []
    colour_item = QStandardItem('')
    colour_item.setEditable(False)
    colour_item.setToolTip('Double-click to change colour')
    items.append(colour_item)
    check_shutter = QStandardItem()
    check_shutter.setCheckable(True)
    check_shutter.setCheckState(Qt.Unchecked)  # options are Qt.Checked OR Qt.Unchecked
    check_shutter.setEnabled(False)
    check_shutter.setToolTip("Toggle shutter markers")
    items.append(check_shutter)
    check_item = QStandardItem(shot.path)
    check_item.setEditable(False)
    check_item.setCheckable(True)
    check_item.setCheckState(Qt.Unchecked)  # options are Qt.Checked OR Qt.Unchecked
    check_item.setData(shot)
    check_item.setToolTip(filepath)
    items.append(check_item)
    # script name
    # path_item = QStandardItem(shot.path)
    # path_item.setEditable(False)
    # items.append(path_item)
    self.shot_model.appendRow(items)
    self.ui.markers_comboBox.addItem(os.path.basename(shot.path), shot)
    shot_combobox_index = self.ui.markers_comboBox.findText(os.path.basename(shot.path))
    self.ui.markers_comboBox.model().item(shot_combobox_index).setEnabled(False)
    # only do this if we are checking the shot we are adding
    # self.update_channels_treeview()
def get_selected_shots_and_colours(self):
    """Return {shot: (colour, shutter_check_state)} for every ticked shot row.

    The colour column stores a zero-arg callable in Qt.UserRole; it is
    invoked here so callers receive the actual colour value.
    """
    ticked = {}
    for row in range(self.shot_model.rowCount()):
        check_item = self.shot_model.item(row, SHOT_MODEL__CHECKBOX_INDEX)
        if check_item.checkState() != Qt.Checked:
            continue
        colour_fn = self.shot_model.item(row, SHOT_MODEL__COLOUR_INDEX).data(Qt.UserRole)
        shutter_state = self.shot_model.item(row, SHOT_MODEL__SHUTTER_INDEX).checkState()
        ticked[check_item.data()] = (colour_fn(), shutter_state)
    return ticked
def update_channels_treeview(self):
    """Sync the channel treeview with the union of channels of all ticked
    shots: add new channels, re-enable ones that reappeared, disable ones
    no ticked shot provides, then refresh the plots."""
    ticked_shots = self.get_selected_shots_and_colours()
    # get set of channels
    channels = {}
    for shot in ticked_shots.keys():
        channels[shot] = set(shot.channels)
    channels_set = frozenset().union(*channels.values())
    # now find channels in channels_set which are not in the treeview, and add them
    # now find channels in channels set which are already in the treeview, but deactivated, and activate them
    treeview_channels_dict = {}
    deactivated_treeview_channels_dict = {}
    for i in range(self.channel_model.rowCount()):
        item = self.channel_model.item(i, CHANNEL_MODEL__CHECKBOX_INDEX)
        # Sanity check
        if str(item.text()) in treeview_channels_dict:
            raise RuntimeError("A duplicate channel name was detected in the treeview due to an internal error. Please lodge a bugreport detailing how the channels with the same name appeared in the channel treeview. Please restart the application")
        treeview_channels_dict[str(item.text())] = i
        if not item.isEnabled():
            deactivated_treeview_channels_dict[str(item.text())] = i
    treeview_channels = set(treeview_channels_dict.keys())
    deactivated_treeview_channels = set(deactivated_treeview_channels_dict.keys())
    # speed up working with self.channel_model by blocking signals and later reenabeling them
    self.channel_model.blockSignals(True)
    # find list of channels to work with
    channels_to_add = channels_set.difference(treeview_channels)
    for channel in sorted(channels_to_add):
        items = []
        check_item = QStandardItem(channel)
        check_item.setEditable(False)
        check_item.setCheckable(True)
        check_item.setCheckState(Qt.Unchecked)
        items.append(check_item)
        # channel_name_item = QStandardItem(channel)
        # channel_name_item.setEditable(False)
        # items.append(channel_name_item)
        self.channel_model.appendRow(items)
    channels_to_reactivate = deactivated_treeview_channels.intersection(channels_set)
    for channel in channels_to_reactivate:
        for i in range(self.channel_model.columnCount()):
            item = self.channel_model.item(deactivated_treeview_channels_dict[channel], i)
            item.setEnabled(True)
            item.setSelectable(True)
    # now find channels in the treeview which are not in the channels_set and deactivate them
    channels_to_deactivate = treeview_channels.difference(channels_set)
    for channel in channels_to_deactivate:
        for i in range(self.channel_model.columnCount()):
            item = self.channel_model.item(treeview_channels_dict[channel], i)
            item.setEnabled(False)
            item.setSelectable(False)
    # signals were blocked above, so emit layoutChanged manually
    self.channel_model.blockSignals(False)
    self.channel_model.layoutChanged.emit()
    # TODO: Also update entries in groups
    self.update_plots()
def update_plots(self):
    """Reconcile plot widgets/curves with the currently ticked shots and
    checked channels: set the shared x range, remove curves of unticked
    shots, add empty curves for newly ticked ones (real data is filled in
    by the resample thread), and create/hide plots as needed."""
    # get list of selected shots
    ticked_shots = self.get_selected_shots_and_colours()
    # SHould we rescale the x-axis?
    # if self._hidden_plot[0].getViewBox.getState()['autoRange'][0]:
    #     self._hidden_plot[0].enableAutoRange(axis=pg.ViewBox.XAxis)
    # else:
    #     self._hidden_plot[0].enableAutoRange(axis=pg.ViewBox.XAxis, enable=False)
    # find stop time of longest ticked shot
    largest_stop_time = 0
    stop_time_set = False
    for shot in ticked_shots.keys():
        if self.scale_time:
            st = self.scalehandler.get_scaled_time(shot.stop_time)
        else:
            st = shot.stop_time
        if st > largest_stop_time:
            largest_stop_time = st
            stop_time_set = True
    if not stop_time_set:
        # no shots ticked: fall back to a 1 second span
        largest_stop_time = 1.0
    # Update the range of the link plot
    self._hidden_plot[1].setData([0, largest_stop_time], [0, 1e-9])
    # Update plots
    for i in range(self.channel_model.rowCount()):
        check_item = self.channel_model.item(i, CHANNEL_MODEL__CHECKBOX_INDEX)
        channel = str(check_item.text())
        if check_item.checkState() == Qt.Checked and check_item.isEnabled():
            # we want to show this plot
            # does a plot already exist? If yes, show it
            if channel in self.plot_widgets:
                self.plot_widgets[channel].show()
                # update the plot
                # are there are plot items for this channel which are shown that should not be?
                to_delete = []
                for shot in self.plot_items[channel]:
                    if shot not in ticked_shots.keys():
                        self.plot_widgets[channel].removeItem(self.plot_items[channel][shot])
                        # Remove Shutter Markers of unticked Shots
                        if shot in self.shutter_lines[channel]:
                            for line in self.shutter_lines[channel][shot][0]:
                                self.plot_widgets[channel].removeItem(line)
                            for line in self.shutter_lines[channel][shot][1]:
                                self.plot_widgets[channel].removeItem(line)
                            self.shutter_lines[channel].pop(shot)
                        to_delete.append(shot)
                for shot in to_delete:
                    del self.plot_items[channel][shot]
                # do we need to add any plot items for shots that were not previously selected?
                for shot, (colour, shutters_checked) in ticked_shots.items():
                    if shot not in self.plot_items[channel]:
                        # plot_item = self.plot_widgets[channel].plot(shot.traces[channel][0], shot.traces[channel][1], pen=pg.mkPen(QColor(colour), width=2))
                        # Add empty plot as it the custom resampling we do will happen quicker if we don't attempt to first plot all of the data
                        plot_item = self.plot_widgets[channel].plot([0, 0], [0], pen=pg.mkPen(QColor(colour), width=2), stepMode=True)
                        self.plot_items[channel][shot] = plot_item
                        # Add Shutter Markers of newly ticked Shots
                        self.add_shutter_markers(shot, channel, shutters_checked)
                        # add vertical time-marker lines for this shot's markers
                        for t, m in self.all_markers.items():
                            color = m['color']
                            color = QColor(color[0], color[1], color[2])
                            if self.scale_time and self.scalehandler is not None:
                                t = self.scalehandler.get_scaled_time(t)
                            line = self.plot_widgets[channel].addLine(x=t, pen=pg.mkPen(color=color, width=1.5, style=Qt.DashLine))
                            self.all_marker_items[line] = self.plot_widgets[channel]
            # If no, create one
            else:
                self.create_plot(channel, ticked_shots)
        else:
            # channel unchecked/disabled: make sure a (hidden) plot exists
            if channel not in self.plot_widgets:
                self.create_plot(channel, ticked_shots)
            self.plot_widgets[channel].hide()
    # tell the resample thread to (re)fill curve data
    self._resample = True
def create_plot(self, channel, ticked_shots):
    """Create the PlotWidget for `channel`, x-linked to the shared time
    axis, insert it into the layout, and add one (initially empty) curve
    per ticked shot that has a trace for this channel."""
    self.plot_widgets[channel] = pg.PlotWidget()  # name=channel)
    self.plot_widgets[channel].setMinimumHeight(200)
    self.plot_widgets[channel].setMaximumHeight(200)
    self.plot_widgets[channel].setLabel('bottom', 'Time', units='s')
    self.plot_widgets[channel].showAxis('right', True)
    self.plot_widgets[channel].showAxis('bottom', True)
    self.plot_widgets[channel].setXLink('runviewer - time axis link')
    self.plot_widgets[channel].sigXRangeChanged.connect(self.on_x_range_changed)
    self.plot_widgets[channel].scene().sigMouseMoved.connect(lambda pos: self.mouseMovedEvent(pos, self.plot_widgets[channel], channel))
    # insert above the time-axis plot, which stays last in the layout
    self.ui.plot_layout.insertWidget(self.ui.plot_layout.count() - 1, self.plot_widgets[channel])
    self.shutter_lines[channel] = {}  # initialize Storage for shutter lines
    self.plot_items.setdefault(channel, {})
    has_units = False
    units = ''
    for shot, (colour, shutters_checked) in ticked_shots.items():
        if channel in shot.traces:
            # plot_item = self.plot_widgets[channel].plot(shot.traces[channel][0], shot.traces[channel][1], pen=pg.mkPen(QColor(colour), width=2))
            # Add empty plot as it the custom resampling we do will happen quicker if we don't attempt to first plot all of the data
            plot_item = self.plot_widgets[channel].plot([0, 0], [0], pen=pg.mkPen(QColor(colour), width=2), stepMode=True)
            self.plot_items[channel][shot] = plot_item
            # a 3-element trace carries a units string as its third element
            if len(shot.traces[channel]) == 3:
                has_units = True
                units = shot.traces[channel][2]
            # Add Shutter Markers of ticked Shots
            self.add_shutter_markers(shot, channel, shutters_checked)
    if has_units:
        self.plot_widgets[channel].setLabel('left', channel, units=units)
    else:
        self.plot_widgets[channel].setLabel('left', channel)
def add_shutter_markers(self, shot, channel, shutters_checked):
    """Add vertical dotted lines for each shutter open (green) / close (red)
    event of `shot` on `channel`; hidden unless the shot's shutter tick is
    on. Lines are stored as self.shutter_lines[channel][shot] = [close, open]."""
    if shot not in self.shutter_lines[channel] and channel in shot.shutter_times:
        self.shutter_lines[channel][shot] = [[], []]
        open_color = QColor(0, 255, 0)
        close_color = QColor(255, 0, 0)
        for t, val in shot.shutter_times[channel].items():
            scaled_t = t
            if val:  # val != 0, shutter open
                line = self.plot_widgets[channel].addLine(x=scaled_t, pen=pg.mkPen(color=open_color, width=4., style=Qt.DotLine))
                self.shutter_lines[channel][shot][1].append(line)
                if not shutters_checked:
                    line.hide()
            else:  # else shutter close
                line = self.plot_widgets[channel].addLine(x=scaled_t, pen=pg.mkPen(color=close_color, width=4., style=Qt.DotLine))
                self.shutter_lines[channel][shot][0].append(line)
                if not shutters_checked:
                    line.hide()
def on_x_range_changed(self, *args):
    """Flag the background resample thread to recompute curve data after
    any x-range change (extra signal arguments are ignored)."""
    self._resample = True
@inmain_decorator(wait_for_return=True)
def _get_resample_params(self, channel, shot):
    """Return (xmin, xmax, dx) of the current view for channel/shot.

    Runs in the Qt main thread (inmain_decorator) since it touches widgets;
    called from the resample thread.
    """
    rect = self.plot_items[channel][shot].getViewBox().viewRect()
    xmin, xmax = rect.left(), rect.width() + rect.left()
    dx = xmax - xmin
    view_range = self.plot_widgets[channel].viewRange()
    return view_range[0][0], view_range[0][1], dx
def resample(self, data_x, data_y, xmin, xmax, stop_time, num_pixels):
    """This is a function for downsampling the data before plotting
    it. Unlike using nearest neighbour interpolation, this method
    preserves the features of the plot. It chooses what value to
    use based on what values within a region are most different
    from the values it's already chosen. This way, spikes of a short
    duration won't just be skipped over as they would with any sort
    of interpolation.

    Returns (x_out, y_out) suitable for stepMode plotting
    (len(y_out) == len(x_out) - 1). Delegates the actual work to the
    module-level `_resample` (C extension). `num_pixels` is currently
    unused - the output grid is fixed at 3*2000+2 points.
    """
    # TODO: Only finely sample the currently visible region. Coarsely sample the rest
    # x_out = numpy.float32(numpy.linspace(data_x[0], data_x[-1], 4000*(data_x[-1]-data_x[0])/(xmax-xmin)))
    x_out = numpy.float64(numpy.linspace(xmin, xmax, 3 * 2000 + 2))
    y_out = numpy.empty(len(x_out) - 1, dtype=numpy.float64)
    data_x = numpy.float64(data_x)
    data_y = numpy.float64(data_y)
    # TODO: investigate only resampling when necessary.
    # Currently pyqtgraph sometimes has trouble rendering things
    # if you don't resample. If a point is far off the graph,
    # and this point is the first that should be drawn for stepMode,
    # because there is a long gap before the next point (which is
    # visible) then there is a problem.
    # Also need to explicitly handle cases where none of the data
    # is visible (which resampling does by setting NaNs)
    #
    # x_data_slice = data_x[(data_x>=xmin)&(data_x<=xmax)]
    # print len(data_x)
    # if len(x_data_slice) < 3*2000+2:
    #     x_out = x_data_slice
    #     y_out = data_y[(data_x>=xmin)&(data_x<=xmax)][:-1]
    #     logger.info('skipping resampling')
    # else:
    resampling = True
    if resampling:
        _resample(data_x, data_y, x_out, y_out, numpy.float64(stop_time))
        # self.__resample4(data_x, data_y, x_out, y_out, numpy.float32(stop_time))
    else:
        x_out, y_out = data_x, data_y
    return x_out, y_out
def __resample4(self, x_in, y_in, x_out, y_out, stop_time):
    """Downsample (x_in, y_in) onto the grid x_out, writing into y_out in place.

    Feature-preserving step-mode resampling: for each output bucket the
    samples producing the largest positive and negative jumps (relative to
    the previous output value) are kept, so short spikes survive. Regions
    before the first data point, and after stop_time, are filled with NaN.
    y_out must have length len(x_out) - 1 (stepMode plotting convention).

    Fix: `numpy.float('NaN')` / `.fill('NaN')` relied on the `numpy.float`
    alias removed in NumPy 1.20+; replaced with `numpy.nan`.
    """
    # A couple of special cases that I don't want to have to put extra checks in for:
    if x_out[-1] < x_in[0] or x_out[0] > stop_time:
        # We're all the way to the left of the data or all the way to the right. Fill with NaNs:
        y_out.fill(numpy.nan)
    elif x_out[0] > x_in[-1]:
        # We're after the final clock tick, but before stop_time
        i = 0
        while i < len(x_out) - 1:
            if x_out[i] < stop_time:
                y_out[i] = y_in[-1]
            else:
                y_out[i] = numpy.nan
            i += 1
    else:
        i = 0
        j = 1
        # Until we get to the data, fill the output array with NaNs (which
        # get ignored when plotted)
        while x_out[i] < x_in[0]:
            y_out[i] = numpy.nan
            y_out[i + 1] = numpy.nan
            y_out[i + 2] = numpy.nan
            i += 3
        # If we're some way into the data, we need to skip ahead to where
        # we want to get the first datapoint from:
        while x_in[j] < x_out[i]:
            j += 1
        # Get values until we get to the end of the data:
        while j < len(x_in) and i < len(x_out) - 2:  # Leave one spare for the final data point and one because stepMode=True requires len(y)=len(x)-1
            # This is 'nearest neighbour on the left' interpolation. It's
            # what we want if none of the source values checked in the
            # upcoming loop are used:
            y_out[i] = y_in[j - 1]
            i += 2
            positive_jump_value = 0
            positive_jump_index = j - 1
            negative_jump_value = 0
            negative_jump_index = j - 1
            # now find the max and min values between this x_out time point and the next x_out timepoint
            while j < len(x_in) and x_in[j] < x_out[i]:
                jump = y_in[j] - y_out[i - 2]
                # would using this source value cause a bigger positive jump?
                if jump > 0 and jump > positive_jump_value:
                    positive_jump_value = jump
                    positive_jump_index = j
                # would using this source value cause a bigger negative jump?
                elif jump < 0 and jump < negative_jump_value:
                    negative_jump_value = jump
                    negative_jump_index = j
                j += 1
            # emit the two extreme samples in time order
            if positive_jump_index < negative_jump_index:
                y_out[i - 1] = y_in[positive_jump_index]
                y_out[i] = y_in[negative_jump_index]
                # TODO: We could override the x_out values with x_in[jump_index]
            else:
                y_out[i - 1] = y_in[negative_jump_index]
                y_out[i] = y_in[positive_jump_index]
            i += 1
        # Get the last datapoint:
        if j < len(x_in):
            # If the sample rate of the raw data is low, then the current
            # j point could be outside the current plot view range
            # If so, decrease j so that we take a value that is within the
            # plot view range.
            if x_in[j] > x_out[-1] and j > 0:
                j -= 1
            y_out[i] = y_in[j]
            i += 1
        # Fill the remainder of the array with the last datapoint,
        # if t < stop_time, and then NaNs after that:
        while i < len(x_out) - 1:
            if x_out[i] < stop_time:
                y_out[i] = y_in[-1]
            else:
                y_out[i] = numpy.nan
            i += 1
    # method modifies y_out in place; no return value
def __resample3(self, x_in, y_in, x_out, stop_time):
    """This is a Python implementation of the C extension. For
    debugging and developing the C extension.

    Returns a new array y_out with len(x_out) values: nearest-neighbour-
    on-the-left interpolation, keeping within each bucket the source value
    that makes the biggest jump. NaN outside the data / after stop_time.

    Fix: `numpy.float('NaN')` relied on the `numpy.float` alias removed in
    NumPy 1.20+; replaced with `numpy.nan`.
    """
    y_out = numpy.empty(len(x_out))
    i = 0
    j = 1
    # A couple of special cases that I don't want to have to put extra checks in for:
    if x_out[-1] < x_in[0] or x_out[0] > stop_time:
        # We're all the way to the left of the data or all the way to the right. Fill with NaNs:
        while i < len(x_out):
            y_out[i] = numpy.nan
            i += 1
    elif x_out[0] > x_in[-1]:
        # We're after the final clock tick, but before stop_time
        while i < len(x_out):
            if x_out[i] < stop_time:
                y_out[i] = y_in[-1]
            else:
                y_out[i] = numpy.nan
            i += 1
    else:
        # Until we get to the data, fill the output array with NaNs (which
        # get ignored when plotted)
        while x_out[i] < x_in[0]:
            y_out[i] = numpy.nan
            i += 1
        # If we're some way into the data, we need to skip ahead to where
        # we want to get the first datapoint from:
        while x_in[j] < x_out[i]:
            j += 1
        # Get the first datapoint:
        y_out[i] = y_in[j - 1]
        i += 1
        # Get values until we get to the end of the data:
        while j < len(x_in) and i < len(x_out):
            # This is 'nearest neighbour on the left' interpolation. It's
            # what we want if none of the source values checked in the
            # upcoming loop are used:
            y_out[i] = y_in[j - 1]
            while j < len(x_in) and x_in[j] < x_out[i]:
                # Would using this source value cause the interpolated values
                # to make a bigger jump?
                if numpy.abs(y_in[j] - y_out[i - 1]) > numpy.abs(y_out[i] - y_out[i - 1]):
                    # If so, use this source value:
                    y_out[i] = y_in[j]
                j += 1
            i += 1
        # Get the last datapoint:
        if i < len(x_out):
            y_out[i] = y_in[-1]
            i += 1
        # Fill the remainder of the array with the last datapoint,
        # if t < stop_time, and then NaNs after that:
        while i < len(x_out):
            if x_out[i] < stop_time:
                y_out[i] = y_in[-1]
            else:
                y_out[i] = numpy.nan
            i += 1
    return y_out
def _resample_thread(self):
    """Background worker: whenever self._resample is flagged, re-downsample
    every visible trace for the current view range and push the data to the
    plot items in the main thread. Polls twice a second."""
    logger = logging.getLogger('runviewer.resample_thread')
    while True:
        if self._resample:
            self._resample = False
            ticked_shots = inmain(self.get_selected_shots_and_colours)
            for shot, (colour, shutters_checked) in ticked_shots.items():
                for channel in shot.traces:
                    if self.channel_checked_and_enabled(channel):
                        try:
                            xmin, xmax, dx = self._get_resample_params(channel, shot)
                            # We go a bit outside the visible range so that scrolling
                            # doesn't immediately go off the edge of the data, and the
                            # next resampling might have time to fill in more data before
                            # the user sees any empty space.
                            if self.scale_time:
                                xnew, ynew = self.resample(shot.scaled_times(channel), shot.traces[channel][1], xmin, xmax, shot.stop_time, dx)
                            else:
                                xnew, ynew = self.resample(shot.traces[channel][0], shot.traces[channel][1], xmin, xmax, shot.stop_time, dx)
                            inmain(self.plot_items[channel][shot].setData, xnew, ynew, pen=pg.mkPen(QColor(colour), width=2), stepMode=True)
                        except Exception:
                            # NOTE(review): deliberate best-effort swallow - a plot item
                            # may have been removed mid-iteration; the next flagged
                            # resample will redraw. Consider logging at debug level.
                            #self._resample = True
                            pass
                    else:
                        logger.info('ignoring channel %s' % channel)
        time.sleep(0.5)
@inmain_decorator(wait_for_return=True)
def channel_checked_and_enabled(self, channel):
    """Return True if `channel` has a row in the channel model that is both
    ticked and enabled. Runs in the Qt main thread (inmain_decorator)."""
    logger.info('is channel %s enabled' % channel)
    index = self.channel_model.index(0, CHANNEL_MODEL__CHANNEL_INDEX)
    indexes = self.channel_model.match(index, Qt.DisplayRole, channel, 1, Qt.MatchExactly)
    logger.info('number of matches %d' % len(indexes))
    if len(indexes) == 1:
        check_item = self.channel_model.itemFromIndex(indexes[0])
        if check_item.checkState() == Qt.Checked and check_item.isEnabled():
            return True
    return False
def on_x_axis_reset(self):
    """Reset the shared x axis: span the link plot to the longest ticked
    shot's stop time (1 s if none) and re-enable x autoranging."""
    ticked_shots = self.get_selected_shots_and_colours()
    largest_stop_time = 0
    stop_time_set = False
    for shot in ticked_shots.keys():
        if self.scale_time:
            st = self.scalehandler.get_scaled_time(shot.stop_time)
        else:
            st = shot.stop_time
        if st > largest_stop_time:
            largest_stop_time = st
            stop_time_set = True
    if not stop_time_set:
        largest_stop_time = 1.0
    # Update the range of the link plot
    self._hidden_plot[1].setData([0, largest_stop_time], [0, 1e-9])
    self._hidden_plot[0].enableAutoRange(axis=pg.ViewBox.XAxis)
def on_y_axes_reset(self):
    """Re-enable Y autoranging on every channel plot widget."""
    for widget in self.plot_widgets.values():
        widget.enableAutoRange(axis=pg.ViewBox.YAxis)
def _enable_selected_shots(self):
    """Tick (enable) every shot row currently selected in the treeview."""
    self.update_ticks_of_selected_shots(Qt.Checked)
def _disable_selected_shots(self):
    """Untick (disable) every shot row currently selected in the treeview."""
    self.update_ticks_of_selected_shots(Qt.Unchecked)
def update_ticks_of_selected_shots(self, state):
    """Set the checkbox of every selected shot row to `state`
    (Qt.Checked or Qt.Unchecked)."""
    selection_model = self.ui.shot_treeview.selectionModel()
    for index in sorted(selection_model.selectedRows()):
        check_item = self.shot_model.item(index.row(), SHOT_MODEL__CHECKBOX_INDEX)
        check_item.setCheckState(state)
def _move_up(self):
    """Move each selected channel row up one position (keeping selection),
    then re-sync the plot layout order."""
    # Get the selection model from the treeview
    selection_model = self.ui.channel_treeview.selectionModel()
    # Create a list of select row indices
    selected_row_list = [index.row() for index in sorted(selection_model.selectedRows())]
    # For each row selected
    for i, row in enumerate(selected_row_list):
        # only move the row if it is not element 0, and the row above it is not selected
        # (note that while a row above may have been initially selected, it should by now, be one row higher
        # since we start moving elements of the list upwards starting from the lowest index)
        if row > 0 and (row - 1) not in selected_row_list:
            # Remove the selected row
            items = self.channel_model.takeRow(row)
            # Add the selected row into a position one above
            self.channel_model.insertRow(row - 1, items)
            # Since it is now a newly inserted row, select it again
            selection_model.select(self.channel_model.indexFromItem(items[0]), QItemSelectionModel.SelectCurrent)
            # reupdate the list of selected indices to reflect this change
            selected_row_list[i] -= 1
    self.update_plot_positions()
def _move_down(self):
    """Move each selected channel row down one position (keeping selection),
    then re-sync the plot layout order."""
    # Get the selection model from the treeview
    selection_model = self.ui.channel_treeview.selectionModel()
    # Create a list of select row indices
    selected_row_list = [index.row() for index in reversed(sorted(selection_model.selectedRows()))]
    # For each row selected
    for i, row in enumerate(selected_row_list):
        # only move the row if it is not the last element, and the row above it is not selected
        # (note that while a row below may have been initially selected, it should by now, be one row lower
        # since we start moving elements of the list upwards starting from the highest index)
        if row < self.channel_model.rowCount() - 1 and (row + 1) not in selected_row_list:
            # Remove the selected row
            items = self.channel_model.takeRow(row)
            # Add the selected row into a position one above
            self.channel_model.insertRow(row + 1, items)
            # Since it is now a newly inserted row, select it again
            selection_model.select(self.channel_model.indexFromItem(items[0]), QItemSelectionModel.SelectCurrent)
            # reupdate the list of selected indices to reflect this change
            selected_row_list[i] += 1
    self.update_plot_positions()
def _move_top(self):
    """Move the selected channel rows to the top (stable, keeping selection),
    then re-sync the plot layout order."""
    # Get the selection model from the treeview
    selection_model = self.ui.channel_treeview.selectionModel()
    # Create a list of select row indices
    selected_row_list = [index.row() for index in sorted(selection_model.selectedRows())]
    # For each row selected
    for i, row in enumerate(selected_row_list):
        # only move the row while it is not element 0, and the row above it is not selected
        # (note that while a row above may have been initially selected, it should by now, be one row higher
        # since we start moving elements of the list upwards starting from the lowest index)
        while row > 0 and (row - 1) not in selected_row_list:
            # Remove the selected row
            items = self.channel_model.takeRow(row)
            # Add the selected row into a position one above
            self.channel_model.insertRow(row - 1, items)
            # Since it is now a newly inserted row, select it again
            selection_model.select(self.channel_model.indexFromItem(items[0]), QItemSelectionModel.SelectCurrent)
            # reupdate the list of selected indices to reflect this change
            selected_row_list[i] -= 1
            row -= 1
    self.update_plot_positions()
def _move_bottom(self):
    """Move the selected channel rows to the bottom (stable, keeping
    selection), then re-sync the plot layout order."""
    selection_model = self.ui.channel_treeview.selectionModel()
    # Create a list of select row indices
    selected_row_list = [index.row() for index in reversed(sorted(selection_model.selectedRows()))]
    # For each row selected
    for i, row in enumerate(selected_row_list):
        # only move the row while it is not the last element, and the row above it is not selected
        # (note that while a row below may have been initially selected, it should by now, be one row lower
        # since we start moving elements of the list upwards starting from the highest index)
        while row < self.channel_model.rowCount() - 1 and (row + 1) not in selected_row_list:
            # Remove the selected row
            items = self.channel_model.takeRow(row)
            # Add the selected row into a position one above
            self.channel_model.insertRow(row + 1, items)
            # Since it is now a newly inserted row, select it again
            selection_model.select(self.channel_model.indexFromItem(items[0]), QItemSelectionModel.SelectCurrent)
            # reupdate the list of selected indices to reflect this change
            selected_row_list[i] += 1
            row += 1
    self.update_plot_positions()
def update_plot_positions(self):
    """Reorder plot widgets in the layout to match the channel treeview order.

    Fix: the original iterated `takeAt(i)` over a fixed `range` while the
    layout shrank, which skips every other item; repeatedly taking index 1
    removes them all reliably (index 0 is intentionally left in place, as
    before).
    """
    # detach every layout item except the first
    while self.ui.plot_layout.count() > 1:
        self.ui.plot_layout.takeAt(1)
    # re-add the plot widgets in treeview order
    for i in range(self.channel_model.rowCount()):
        check_item = self.channel_model.item(i, CHANNEL_MODEL__CHECKBOX_INDEX)
        channel = str(check_item.text())
        if channel in self.plot_widgets:
            self.ui.plot_layout.addWidget(self.plot_widgets[channel])
            if check_item.checkState() == Qt.Checked and check_item.isEnabled():
                self.plot_widgets[channel].show()
            else:
                self.plot_widgets[channel].hide()
    # the shared time axis always goes last
    self.ui.plot_layout.addWidget(self._time_axis_plot[0])
class Shot(object):
def __init__(self, path):
    """Open the shot's hdf5 file and read lightweight metadata (master
    pseudoclock, stop time, device names, shutter calibrations). Traces,
    channels and markers are loaded lazily on first property access."""
    self.path = path
    # Store list of traces
    self._traces = None
    # store list of channels
    self._channels = None
    # store list of markers
    self._markers = None
    self.cached_scaler = None
    self._scalehandler = None
    self._scaled_x = {}
    # store list of shutter changes and callibrations
    self._shutter_times = None
    self._shutter_calibrations = {}
    # TODO: Get this dynamically
    # NOTE(review): device_list is currently unused
    device_list = ['PulseBlaster', 'NI_PCIe_6363', 'NI_PCI_6733']
    # Load connection table
    self.connection_table = ConnectionTable(path)
    # open h5 file
    with h5py.File(path, 'r') as file:
        # Get master pseudoclock
        self.master_pseudoclock_name = file['connection table'].attrs['master_pseudoclock']
        # h5py may hand back bytes or str depending on version; normalise to str
        if isinstance(self.master_pseudoclock_name, bytes):
            self.master_pseudoclock_name = self.master_pseudoclock_name.decode('utf8')
        else:
            self.master_pseudoclock_name = str(self.master_pseudoclock_name)
        # get stop time
        self.stop_time = file['devices'][self.master_pseudoclock_name].attrs['stop_time']
        self.device_names = list(file['devices'].keys())
        # Get Shutter Calibrations
        if 'calibrations' in file and 'Shutter' in file['calibrations']:
            for name, open_delay, close_delay in numpy.array(file['calibrations']['Shutter']):
                name = name.decode('utf8') if isinstance(name, bytes) else str(name)
                self._shutter_calibrations[name] = [open_delay, close_delay]
def delete_cache(self):
    """Drop the cached channels and traces so the next property access
    reloads them from the hdf5 file."""
    self._traces = None
    self._channels = None
def _load(self):
    """Populate the caches (channels, traces, markers, shutter times) by
    loading markers and recursively walking the device tree from the
    master pseudoclock."""
    if self._channels is None:
        self._channels = {}
    if self._traces is None:
        self._traces = {}
    if self._markers is None:
        self._markers = {}
    if self._shutter_times is None:
        self._shutter_times = {}
    self._load_markers()
    # Let's walk the connection table, starting with the master pseudoclock
    master_pseudoclock_device = self.connection_table.find_by_name(self.master_pseudoclock_name)
    self._load_device(master_pseudoclock_device)
    # self._scalehandler = ScaleHandler(self._markers.keys(), self.stop_time)
def _load_markers(self):
    """Read time markers from the hdf5 file into self._markers.

    Supports the current 'time_markers' table and the legacy 'runviewer'
    group (string-encoded attrs). Ensures Start (t=0) and End (stop_time)
    markers always exist.
    """
    with h5py.File(self.path, 'r') as file:
        if "time_markers" in file:
            for row in file["time_markers"]:
                self._markers[row['time']] = {'color': row['color'].tolist()[0], 'label': row['label']}
        elif "runviewer" in file:
            # legacy format: attrs values look like "{color: (r,g,b), label: x}"
            # NOTE(review): loop variable `time` shadows the imported time module
            # within this method
            for time, val in file["runviewer"]["markers"].attrs.items():
                props = val.strip('{}}').rsplit(",", 1)
                color = list(map(int, props[0].split(":")[1].strip(" ()").split(",")))
                label = props[1].split(":")[1]
                self._markers[float(time)] = {'color': color, 'label': label}
    if 0 not in self._markers:
        self._markers[0] = {'color': [0,0,0], 'label': 'Start'}
    if self.stop_time not in self._markers:
        self._markers[self.stop_time] = {'color': [0,0,0], 'label' : 'End'}
def add_trace(self, name, trace, parent_device_name, connection):
    """Register a channel trace (callback handed to device parsers).

    Stores the trace and channel metadata; if the connection table says the
    channel is a Shutter with an 'open_state' property, shutter transition
    times are derived from the trace as well.
    """
    name = str(name)
    self._channels[name] = {'device_name': parent_device_name, 'port': connection}
    self._traces[name] = trace
    # add shutter times
    con = self.connection_table.find_by_name(name)
    if con.device_class == "Shutter" and 'open_state' in con.properties:
        self.add_shutter_times([(name, con.properties['open_state'])])
# Temporary solution to physical shutter times
def add_shutter_times(self, shutters):
    """For each (name, open_state) pair, find the transition points of the
    channel's trace and store {physical_time: 1/0} in self._shutter_times,
    shifting each transition by the shutter's open/close calibration delay."""
    for name, open_state in shutters:
        x_values, y_values = self._traces[name]
        if len(x_values) > 0:
            # indices where the digital value changes
            change_indices = numpy.where(y_values[:-1] != y_values[1:])[0]
            change_indices += 1  # use the index of the value that is changed to
            change_values = list(zip(x_values[change_indices], y_values[change_indices]))
            change_values.insert(0, (x_values[0], y_values[0]))  # insert first value
            self._shutter_times[name] = {x_value + (self._shutter_calibrations[name][0] if y_value == open_state else self._shutter_calibrations[name][1]): 1 if y_value == open_state else 0 for x_value, y_value in change_values}
def _load_device(self, device, clock=None):
    """Recursively load traces for `device` and its descendants.

    Obtains the device's runviewer parser class, lets it register traces via
    the add_trace callback, then recurses into children clocked by each
    returned clockline/trigger. Failures for a single device are reported
    and skipped so the rest of the tree still loads.
    """
    try:
        print('loading %s' % device.name)
        module = device.device_class
        # Load the master pseudoclock class
        # labscript_devices.import_device(module)
        device_class = labscript_devices.get_runviewer_parser(module)
        device_instance = device_class(self.path, device)
        clocklines_and_triggers = device_instance.get_traces(self.add_trace, clock)
        for name, trace in clocklines_and_triggers.items():
            child_device = self.connection_table.find_by_name(name)
            for grandchild_device_name, grandchild_device in child_device.child_list.items():
                self._load_device(grandchild_device, trace)
    except Exception:
        # TODO: print/log exception traceback
        # if device.name == 'ni_card_0' or device.name == 'pulseblaster_0' or device.name == 'pineblaster_0' or device.name == 'ni_card_1' or device.name == 'novatechdds9m_0':
        #     raise
        # raise
        if hasattr(device, 'name'):
            print('Failed to load device %s' % device.name)
        else:
            print('Failed to load device (unknown name, device object does not have attribute name)')
    # backwards compat: pick up shutter times stored in the legacy format
    with h5py.File(self.path, 'r') as file:
        if "runviewer" in file:
            if "shutter_times" in file["runviewer"]:
                for name, val in file["runviewer"]["shutter_times"].attrs.items():
                    self._shutter_times[name] = {float(key_value.split(":")[0]): int(key_value.split(":")[1]) for key_value in val.strip('{}}').split(",")}
    def scaled_times(self, channel):
        """Return the channel's x-values mapped through the app's current scale handler.

        Results are cached per channel; the cache is discarded whenever the
        global app's scalehandler instance changes.
        """
        # NOTE(review): relies on the module-global `app` -- confirm it is
        # always set before any shot is drawn.
        if self.cached_scaler != app.scalehandler:
            self.cached_scaler = app.scalehandler
            self._scaled_x = {}
        if channel not in self._scaled_x:
            self._scaled_x[channel] = self.cached_scaler.get_scaled_time(self._traces[channel][0])
        return self._scaled_x[channel]
    @property
    def channels(self):
        """Names of all channels in this shot; loads the shot file lazily."""
        if self._channels is None:
            self._load()
        return self._channels.keys()
    def clear_cache(self):
        # Intended to drop cached trace data to cut memory usage;
        # currently a no-op placeholder.
        pass
    @property
    def markers(self):
        """Time markers ({time: {'color', 'label'}}); loads the shot lazily."""
        if self._markers is None:
            self._load()
        return self._markers
    @property
    def traces(self):
        """All channel traces ({name: (x_values, y_values)}); loads the shot lazily."""
        if self._traces is None:
            self._load()
        return self._traces
    @property
    def shutter_times(self):
        """Calibrated shutter open/close times per channel; loads the shot lazily."""
        if self._shutter_times is None:
            self._load()
        return self._shutter_times
# @property
# def scalehandler(self):
# if self._scalehandler is None:
# self._load()
# return self._scalehandler
class TempShot(Shot):
    """Synthetic Shot for testing/demo: four sine-wave channels, no HDF5 file."""
    def __init__(self, i):
        Shot.__init__(self, 'shot %d' % i)
        self._channels = ['Bx', 'By', 'Bz', 'Bq']
        self.stop_time = i + 1
        # Fill the backing store directly: Shot.traces is a read-only
        # property, so `self.traces = {}` would raise AttributeError.
        self._traces = {}
        no_x_points = 10000
        # The x grid is identical for every channel, so compute it once.
        x_points = numpy.linspace(0, self.stop_time, no_x_points)
        for channel in self.channels:
            self._traces[channel] = (x_points, (i + 1) * numpy.sin(x_points * numpy.pi + i / 11.0 * 2 * numpy.pi))

    @property
    def channels(self):
        """Channel names (overrides Shot's lazy-loading property)."""
        return self._channels

    def get_traces(self):
        """Return the {channel: (x_values, y_values)} trace dict."""
        return self._traces
class RunviewerServer(ZMQServer):
    """ZMQ server accepting h5 shot filepaths (e.g. from runmanager) and
    queueing them for loading once the GUI is up."""
    def __init__(self, *args, **kwargs):
        ZMQServer.__init__(self, *args, **kwargs)
        self.logger = logging.getLogger('runviewer.server')

    def handler(self, h5_filepath):
        """Handle one request: 'hello' is a ping; anything else is a shot path."""
        if h5_filepath == 'hello':
            return 'hello'
        self.logger.info('Received hdf5 file: %s' % h5_filepath)
        # Convert path to local slashes and shared drive prefix:
        h5_filepath = labscript_utils.shared_drive.path_to_local(h5_filepath)
        # Use the server's own logger consistently (previously this line used
        # the module-level logger while the rest of the method used self.logger).
        self.logger.info('local filepath: %s' % h5_filepath)
        # we add the shot to a queue so that we don't have to wait for the app
        # to come up before responding to runmanager
        shots_to_process_queue.put(h5_filepath)
        return 'ok'
if __name__ == "__main__":
    qapplication = QApplication(sys.argv)
    shots_to_process_queue = Queue()
    exp_config = LabConfig(required_params = {"DEFAULT": ["experiment_name"], "paths": ["shared_drive", "experiment_shot_storage"], 'ports': ['runviewer']})
    port = int(exp_config.get('ports', 'runviewer'))

    # Group taskbar windows under our own app id (Windows only).
    myappid = 'monashbec.runviewer' # arbitrary string
    try:
        ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)
    except AttributeError:
        # ctypes.windll only exists on Windows; catching AttributeError
        # narrowly (instead of a bare except) avoids hiding real errors.
        logger.info('Not on a windows machine')

    # Start experiment server
    experiment_server = RunviewerServer(port)

    app = RunViewer(exp_config)
    splash.hide()

    def execute_program():
        qapplication.exec_()

    sys.exit(execute_program())
|
channelapihelper.py | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper methods and classes for using the App Engine Channel API.
The Python App Engine API (documented at
http://code.google.com/appengine/docs/python/channel/overview.html)
provides a way of communicating from a web server to browser clients in near
real time. There's significant overhead involved in saving state and keeping
track of which clients need to be notified of which messages, though. The goal
of this module is to help with some common tasks related to that.
The module takes care of generating new channel tokens which can be returned to
a browser client. The corresponding channel id and a unique "channel key" (used
to determine what's being listened to) is stored in both the datastore and
memcache. Custom namespaces are used to prevent any collisions with existing
datastore or memcache entries.
"""
from datetime import datetime, timedelta
import functools
from uuid import uuid4

from google.appengine.api import channel, memcache, namespace_manager
from google.appengine.ext import db
DATASTORE_NAMESPACE = "channelapihelper"
# This corresponds to the current lifetime of a given channel connection.
CHANNEL_LIFETIME_HOURS = 2
CHANNEL_LIFETIME_SECONDS = CHANNEL_LIFETIME_HOURS * 60 * 60
# Sender used when threading isn't supported: deliver the message inline.
def _SendChannelMessageSync(channel_id, message):
  channel.send_message(channel_id, message)

SendChannelMessage = _SendChannelMessageSync

# The Python 2.7 App Engine runtime supports threading, so let's see if we can
# use it: fire each send off on its own thread so callers don't block.
try:
  import threading

  def _SendChannelMessageAsync(channel_id, message):
    threading.Thread(
        target=channel.send_message, args=(channel_id, message)).start()

  SendChannelMessage = _SendChannelMessageAsync
except ImportError:
  pass
def customnamespace(original_method):
  """Decorator to use a custom App Engine namespace temporarily.

  The namespace applies to all memcache and datastore calls made while the
  wrapped function runs; the caller's namespace is always restored, even on
  exception.
  See http://code.google.com/appengine/docs/python/multitenancy/overview.html
  """
  @functools.wraps(original_method)  # preserve name/docstring for debugging
  def wrapped_method(*args, **kwargs):
    original_namespace = namespace_manager.get_namespace()
    try:
      namespace_manager.set_namespace(DATASTORE_NAMESPACE)
      return original_method(*args, **kwargs)
    finally:
      namespace_manager.set_namespace(original_namespace)
  return wrapped_method
def EarliestActiveTimestamp():
  """Calculates the earliest timestamp of an active channel.

  Returns:
    A DateTime; any channel created before this moment is past its lifetime
    and therefore expired.
  """
  lifetime = timedelta(hours=CHANNEL_LIFETIME_HOURS)
  return datetime.now() - lifetime
@customnamespace
def GetExpiredChannels():
  """Gets all Channel entities that are expired.

  The list of expired Channel objects doesn't depend on a specific
  channel_key.

  Returns:
    A list of Channel objects whose created timestamp indicates that they're
    old enough to be expired.
  """
  query = Channel.all().filter("created <=", EarliestActiveTimestamp())
  # list() instead of a copy-comprehension; this also avoids shadowing the
  # imported `channel` module with a loop variable.
  return list(query)
class ChannelApiHelper(object):
  """Methods to help with creating and keeping track of Channel API channels.

  Attributes:
    channel_key: A string used to identify what an instance is keeping track
      of. The exact value to use here is domain-specific; however, as a
      shortcut, you can pass in a db.Model instance to the constructor and
      its key will be used.
  """

  def __init__(self, channel_key):
    """Creates a new ChannelApiHelper to track the resource channel_key.

    Args:
      channel_key: A string or db.Model instance (its key() is used).
    """
    if isinstance(channel_key, db.Model):
      channel_key = str(channel_key.key())
    self.channel_key = channel_key

  @customnamespace
  def CreateChannel(self):
    """Creates a new channel for self.channel_key.

    This should be called when a browser client wants to receive updates
    about the resource identified by self.channel_key. A new Channel entity
    is saved to the datastore and the memcache entry is refreshed.

    Returns:
      A string that can be used by browser clients as a token for connecting
      to the new channel. This corresponds to the "token" value which is
      passed to the JavaScript goog.appengine.Channel() constructor,
      documented at
      http://code.google.com/appengine/docs/python/channel/javascript.html
    """
    # uuid4().hex instead of the Python-2-only get_hex() accessor.
    channel_id = uuid4().hex
    new_channel = Channel(channel_key=self.channel_key,
                          channel_id=channel_id)
    new_channel.put()
    current_channels = self.GetActiveChannels()
    # The id is freshly generated, but guard against a duplicate anyway.
    if not any(c.channel_id == channel_id for c in current_channels):
      current_channels.append(new_channel)
    # Give the cache entry the same expiry GetActiveChannels uses, so stale
    # channels cannot outlive the channel lifetime (it was previously cached
    # without an expiry).
    memcache.set(self.channel_key, current_channels,
                 time=CHANNEL_LIFETIME_SECONDS)
    return channel.create_channel(channel_id)

  @customnamespace
  def GetActiveChannels(self):
    """Gets a list of active channels for self.channel_key.

    Returns:
      A list of Channel objects, each of which has self.channel_key as their
      channel_key attribute. The list should normally only contain channels
      that haven't expired, but if read from memcache it's possible that a
      few expired channels could sneak in.
    """
    channels = memcache.get(self.channel_key)
    if channels is not None:
      return channels
    query = Channel.all().filter("channel_key =", self.channel_key).filter(
        "created >", EarliestActiveTimestamp())
    channels = list(query)
    memcache.set(self.channel_key, channels, time=CHANNEL_LIFETIME_SECONDS)
    return channels

  def NotifyChannels(self, message):
    """Sends a message to all active channels with self.channel_key.

    If threading is enabled (e.g. because the App Engine instance is using
    the Python 2.7 runtime) then new threads will be created to send the
    message to each connected client. Otherwise, the clients will be
    notified sequentially.

    Args:
      message: A string to send to all active browser clients. A
        best-practice is to use a JSON-encoded string to represent a complex
        Python object.
    """
    for active_channel in self.GetActiveChannels():
      SendChannelMessage(active_channel.channel_id, message)
class Channel(db.Model):
  """A class to keep track of relevant channel info in the datastore.

  Attributes:
    channel_id: A globally-unique string that identifies a particular
      connection with a browser client. Using a uuid or similar is
      recommended.
    channel_key: A string that uniquely identifies what the channel is
      keeping track of. Use something that makes sense for your application,
      like the key corresponding to a particular db.Model instance.
    created: A DateTime that is automatically set to the current time upon
      creation. It is used to keep track of whether a given Channel is
      expired.
  """
  channel_id = db.StringProperty(required=True)
  channel_key = db.StringProperty(required=True)
  created = db.DateTimeProperty(auto_now_add=True)
handlers.py | import logging
import queue
import threading
import time
import traceback
from typing import Any, Dict, List
import sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.orm import sessionmaker
from sqlalchemy_utils import create_database, database_exists
from c2cwsgiutils.sqlalchemylogger._filters import ContainsExpression, DoesNotContainExpression
from c2cwsgiutils.sqlalchemylogger._models import Base, create_log_class
LOG = logging.getLogger(__name__)
class SQLAlchemyHandler(logging.Handler):
    """Write the logs into a database.

    Records are queued in emit() and written asynchronously by a daemon
    thread, in batches of up to MAX_NB_LOGS records or every MAX_TIMEOUT
    seconds, whichever comes first.
    """

    # Flush a batch once it reaches this many records...
    MAX_NB_LOGS = 100
    # ...or once this many seconds have elapsed since the batch started.
    MAX_TIMEOUT = 1

    def __init__(
        self,
        sqlalchemy_url: Dict[str, str],
        does_not_contain_expression: str = "",
        contains_expression: str = "",
    ) -> None:
        """Create the handler.

        Args:
            sqlalchemy_url: dict holding the DB "url" and optionally
                "tablename" and "tableargs" for the log table.
            does_not_contain_expression: drop records matching this expression.
            contains_expression: keep only records matching this expression.
        """
        super().__init__()
        # initialize DB session
        self.engine = create_engine(sqlalchemy_url["url"])
        self.Log = create_log_class(
            tablename=sqlalchemy_url.get("tablename", "logs"),
            tableargs=sqlalchemy_url.get("tableargs", None),  # type: ignore
        )
        Base.metadata.bind = self.engine
        session_factory = sessionmaker(bind=self.engine)
        self.session = session_factory()
        # initialize log queue
        self.log_queue: Any = queue.Queue()
        # initialize a daemon thread to process the logs asynchronously
        self.condition = threading.Condition()
        self.processor_thread = threading.Thread(target=self._processor, daemon=True)
        self.processor_thread.start()
        # initialize filters
        if does_not_contain_expression:
            self.addFilter(DoesNotContainExpression(does_not_contain_expression))
        if contains_expression:
            self.addFilter(ContainsExpression(contains_expression))

    def _processor(self) -> None:
        """Daemon loop: drain the queue and flush batches to the DB."""
        LOG.debug("%s: starting processor thread", __name__)
        while True:
            logs = []
            time_since_last = time.monotonic()
            while True:
                with self.condition:
                    # Woken by emit() or after MAX_TIMEOUT, whichever is first.
                    self.condition.wait(timeout=self.MAX_TIMEOUT)
                if not self.log_queue.empty():
                    logs.append(self.log_queue.get())
                    self.log_queue.task_done()
                if logs:
                    # try to reduce the number of INSERT requests to the DB
                    # by writing chunks of self.MAX_NB_LOGS size,
                    # but also do not wait forever before writing stuff
                    # (self.MAX_TIMEOUT)
                    if (len(logs) >= self.MAX_NB_LOGS) or (
                        time.monotonic() >= (time_since_last + self.MAX_TIMEOUT)
                    ):
                        self._write_logs(logs)
                        break
        LOG.debug("%s: stopping processor thread", __name__)

    def _write_logs(self, logs: List[Any]) -> None:
        """Bulk-insert *logs*; on failure, (re)create the DB and retry once."""
        try:
            self.session.bulk_save_objects(logs)
            self.session.commit()
        except SQLAlchemyError:
            try:
                self.create_db()
                self.session.rollback()
                self.session.bulk_save_objects(logs)
                self.session.commit()
            except Exception as e:  # pylint: disable=broad-except
                # if we really cannot commit the log to DB, do not lock the
                # thread and do not crash the application
                LOG.critical(e)
        finally:
            self.session.expunge_all()

    def create_db(self) -> None:
        """Create the database, schema and log table if they do not exist."""
        LOG.info("%s: creating new database", __name__)
        if not database_exists(self.engine.url):
            create_database(self.engine.url)
        # FIXME: we should not access directly the private __table_args__
        # variable, but add an accessor method in models.Log class
        if not isinstance(self.Log.__table_args__, type(None)) and self.Log.__table_args__.get(
            "schema", None
        ):
            if not self.engine.dialect.has_schema(self.engine, self.Log.__table_args__["schema"]):
                self.engine.execute(sqlalchemy.schema.CreateSchema(self.Log.__table_args__["schema"]))
        Base.metadata.create_all(self.engine)

    def emit(self, record: Any) -> None:
        """Queue a log record for asynchronous insertion into the DB."""
        trace = None
        exc = record.exc_info
        if exc:
            # Format the record's own exception info rather than relying on
            # sys.exc_info() still being set: traceback.format_exc() would
            # return "NoneType: None" when emit runs outside an except block.
            trace = "".join(traceback.format_exception(*exc))
        log = self.Log(
            logger=record.name,
            level=record.levelname,
            trace=trace,
            msg=record.msg,
        )
        with self.condition:
            # put the log in the asynchronous queue and wake the processor
            self.log_queue.put(log)
            self.condition.notify()
|
abandonedanimal.py | import sys
import threading
def main():
    """Read supermarkets and a shopping list from stdin and report whether
    the list can be bought in 0 ('impossible'), 1 ('unique') or more
    ('ambiguous') ways.
    """
    n = int(input(''))  # read but never used -- kept to consume the input line
    k = int(input(''))
    supermarkets = []
    # NOTE(review): assumes supermarket indices arrive in non-decreasing,
    # dense order; a gap or out-of-range index would mis-assign foods.
    for i in range(k):
        index, food = input('').split(' ')
        index = int(index)
        if index >= len(supermarkets):
            temp = {}
            temp[food] = -1
            supermarkets.append(temp)
        else:
            supermarkets[index][food] = -1
    m = int(input(''))
    items = []
    for i in range(m):
        items.append(input(''))
    def recurse(root, foodIndex):
        # Appears to count the ways the ordered shopping list items[foodIndex:]
        # can be covered by the ordered supermarkets[root:]; exponential without
        # memoization -- presumably acceptable for the input bounds.
        if root >= len(supermarkets):
            return 0
        if foodIndex >= len(items):
            return 1
        if items[foodIndex] in supermarkets[root]:
            # either skip this supermarket, or buy the current item here
            return recurse(root + 1, foodIndex) + recurse(root, foodIndex + 1)
        return recurse(root + 1, foodIndex)
    result = recurse(0, 0)
    if result == 0:
        print('impossible')
    elif result == 1:
        print('unique')
    else:
        print('ambiguous')
if __name__ == '__main__':
    # recurse() goes deep: raise both the interpreter recursion limit and the
    # thread stack size, then run main() on a fresh thread so the larger
    # stack actually applies.  (Also removes a stray " |" dump artifact that
    # made the last line a syntax error.)
    threading.stack_size(67108864)
    sys.setrecursionlimit(2 ** 20)
    thread = threading.Thread(target=main)
    thread.start()
main.py | import json
import traceback
from json import JSONDecodeError
from spotifyAPI import SpotifyAPI
from soundcloudAPI import SoundcloudAPI
from downloader import Downloader
import os
import threading
import PySimpleGUI as sg
import logging
from sys import exit
LOG_FORMAT = "%(levelname)s %(asctime)s - %(message)s"
logging.basicConfig(filename="LOG.log", level=logging.INFO, format=LOG_FORMAT)
logger = logging.getLogger()
class Main:
def __init__(self):
logging.info("[MAIN] Starting Application")
self.url = "https://open.spotify.com/playlist/3rAF2TSX5OEaizE6o8op34?si=59c270ab42004510"
#self.debug = []
self.show_chrome = False
self.save_debug = False
    def loop(self):
        """Run the GUI: build the window, then dispatch events until exit.

        Loads persisted settings, writes a default config.json on first run,
        and spawns a background daemon thread per download request so the
        GUI stays responsive.
        """
        logging.info("[MAIN] Starting Loop")
        settings = self.load_settings()
        output_dir = settings['result_dir']
        url = settings['url']
        # First run: write default API credentials for both services.
        if not os.path.isfile("config.json"):
            configdata = dict()
            configdata["soundcloud"] = {
                "client_id": "lfCwve78235Iw2UNbGezTfUWMB5zHKmb"
            }
            configdata["spotify"] = {
                "client_id": "9556378171da437f969055d0db88f817",
                "client_secret": "9939e0ca51314408ac50ae58db7e4d21"
            }
            with open('config.json', "w") as f:
                json.dump(configdata, f, indent=2)
        layout = self.get_layout(output_dir, url)
        window = sg.Window('Slider Downloader v1.3', layout)
        # Main Loop
        try:
            while True:
                event, values = window.read()  # read events
                # exit button clicked
                if event == sg.WINDOW_CLOSED or event == 'Exit':
                    logging.info("[MAIN] Exiting Application")
                    exit(0)
                # download button clicked
                elif event == 'Download':
                    logging.info("[MAIN] Download Button pressed")
                    window['-OUTPUT-'].Update('')
                    # check if output directory is set
                    if not values['-FOLDER-']:
                        logging.error("[MAIN] Kein Zielordner angegeben!")
                        print("[ERR] Keinen Zielordner angegeben!")
                    # check if playlist url is set
                    elif not values['-LINK-']:
                        logging.error("[MAIN] Kein Link angegeben!")
                        print("[ERR] Keinen Playlistlink angegeben!")
                    # if all tests pass, start download
                    else:
                        self.show_chrome = values['-SHOWCHROME-']
                        output_dir = values['-FOLDER-']
                        url = values['-LINK-']
                        logging.info(f"[MAIN] Saving Settings [output_dir: {output_dir}, url: {url}, showchrome: {self.show_chrome}]")
                        self.save_settings(output_dir, url)
                        # start download
                        try:
                            progress_bar = window['-PROGRESS BAR-']
                            logging.info("[MAIN] Starting Download Thread")
                            # daemon thread so it dies with the GUI
                            thread = threading.Thread(target=self.run, args=(url, output_dir, progress_bar), daemon=True)
                            thread.start()
                            logging.info("[MAIN] Thread started successfully")
                        except Exception as e:
                            logging.error(f"[MAIN] {e}")
                            print(f"[ERR] {e}")
                            tb = traceback.format_exc()
                            logging.error(f"[MAIN] {tb}")
        except Exception as e:
            tb = traceback.format_exc()
            logging.error(f"[MAIN] {tb}")
        finally:
            logging.info("[MAIN] Exiting..")
            print("[LOG] Exiting..")
            # NOTE(review): __del__ is invoked explicitly to tear down the
            # Output element before closing -- confirm this is still required.
            window['-OUTPUT-'].__del__()
            window.close()
    def run(self, url, output_dir, progress_bar=None):
        """Worker-thread entry point: fetch the track list for *url* and
        download every track not already present in the playlist folder.

        Args:
            url: Soundcloud or Spotify playlist URL.
            output_dir: base folder; a per-playlist subfolder is created.
            progress_bar: optional PySimpleGUI progress bar to update.
        """
        # get Tracks
        try:
            print("[LOG] Getting Tracks")
            logging.info("[MAIN] Getting Tracks")
            tracks = self.get_tracks(url)
        except Exception as e:
            print(f"[ERR] {e}")
            tb = traceback.format_exc()
            logging.error(f"[MAIN] {e}")
            logging.error(f"[MAIN] {tb}")
            # NOTE(review): sys.exit in a daemon thread only ends this
            # thread, not the whole application.
            exit(-1)
        else:
            if tracks is not None:
                logging.info("[MAIN] Getting Playlist Name")
                # get playlist name
                playlist_name = self.get_playlist_name(tracks, url)
                playlist_dir = os.path.join(output_dir, playlist_name)
                # make directory for playlist download if doesnt exist already
                if not os.path.isdir(playlist_dir):
                    print(f"[LOG] Creating {playlist_dir}")
                    logging.info(f"[MAIN] Creating Playlist Dir {playlist_dir}")
                    os.mkdir(playlist_dir)
                # check for existing tracks if directory exists
                else:
                    tracks = self.filter_existing_tracks(playlist_dir, tracks)
                if tracks:
                    # download Tracks
                    self.download(tracks, playlist_dir, progress_bar)
                # if tracks is empty
                else:
                    print("[ERR] No new Tracks found!")
                    logging.warning(f"[MAIN] No new Tracks found!")
# get name of playlist
def get_playlist_name(self, tracks, url):
if "soundcloud" in url:
logging.info(f"[MAIN] Getting Playlist Name from Soundcloud")
soundcloud = SoundcloudAPI()
# get Name
return soundcloud.get_playlist_name(url)
elif "spotify" in url:
logging.info(f"[MAIN] Getting Playlist Name from Spotify")
spotify = SpotifyAPI()
# get Name
return spotify.get_playlist_name(url)
# save output_dir and url
def save_settings(self, result_dir, url):
logging.info(f"[MAIN] Saving Settings file")
file = "slider_settings.json"
#if os.path.isfile(file):
# os.remove(file)
settings = {'result_dir': result_dir, 'url': url}
with open(file, "w") as f:
json.dump(settings, f)
# load previous output_dir and url
def load_settings(self):
logging.info(f"[MAIN] Load Settings file")
file = "slider_settings.json"
if not os.path.isfile(file):
return {'result_dir': '', 'url': ''}
try:
with open(file, "r") as f:
settings = json.load(f)
except JSONDecodeError as e:
print("[ERR] Error while reading settings! Check file!")
logging.error("[MAIN] Error while reading settings! Check file!")
logging.error(f"[MAIN] Error: {e}")
settings = {'result_dir': '', 'url': ''}
return settings
    # get Layout for GUI
    def get_layout(self, result_dir=None, url=None):
        """Build the PySimpleGUI layout (list of widget rows) for the window.

        Args:
            result_dir: preset value for the output-folder input field.
            url: preset value for the playlist-link input field.
        """
        logging.info(f"[MAIN] Getting Layout")
        sg.theme("Dark")
        # folder-picker row
        dir_input = [
            sg.Text("Zielordner auswählen"),
            sg.In(size=(90, 10), enable_events=True, key="-FOLDER-", default_text=result_dir),
            sg.FolderBrowse()
        ]
        # playlist-url row
        link_input = [
            sg.Text("Playlistlink eingeben "),
            sg.In(size=(90, 10), enable_events=True, key="-LINK-", default_text=url),
        ]
        # console-style output pane
        output = [
            sg.Output(size=(120, 30), background_color='white', text_color='black', key='-OUTPUT-')
        ]
        sg.SetOptions(progress_meter_color=('green', 'white'))
        progress_bar = [
            sg.ProgressBar(1000, orientation='h', size=(78, 20), key='-PROGRESS BAR-')
        ]
        buttons = [
            sg.Button('Exit'),
            sg.Checkbox('I wanna see the magic happening', pad=((450, 0), 0), default=False, key='-SHOWCHROME-'),
            sg.Button('Download', pad=((50, 0), 0)),
        ]
        layout = [
            [dir_input],
            [link_input],
            [output],
            [progress_bar],
            [buttons]
        ]
        return layout
# check for songs in directory to prevent double downloading
def filter_existing_tracks(self, playlist_dir, tracks):
logging.info(f"[MAIN] Filter existing Tracks")
erg = []
for track in tracks:
# if Track does not exist already, save it in erg
if not os.path.isfile(os.path.join(playlist_dir, track.print_filename())):
erg.append(track)
else:
print(f"[LOG] {track.print_filename()} exists already and will not be downloaded.")
logging.info(f"[MAIN] {track.print_filename()} exists already and will not be downloaded.")
#self.debug.append(f"[LOG] {track.print_filename()} exists already and will not be downloaded.")
return erg
# def print_debug(self):
# # os.remove("debug.txt")
# i = 0
# with open("debug.txt", "w") as f:
# for message in self.debug:
# print("[" + str(i) + "] " + message)
# f.write("[" + str(i) + "] " + message + "\n")
# i += 1
def get_tracks(self, url):
# determine if soundcloud or spotify
if "soundcloud" in url:
logging.info(f"[MAIN] Fetching Tracks from Soundcloud")
soundcloud = SoundcloudAPI()
# get Tracks
return soundcloud.get_playlist_tracks(url)
elif "spotify" in url:
logging.info(f"[MAIN] Fetching Tracks from Spotify")
spotify = SpotifyAPI()
# get Tracks
return spotify.get_playlist_tracks(url)
else:
logging.error(f"[MAIN] Wrong Link used {url}")
raise Exception("Soundcloud oder Spotify URL plsss")
    # Threading function for downloading
    def download(self, tracks, playlist_dir, progress_bar=None):
        """Download *tracks* into *playlist_dir* and report problem songs.

        Always tears the Downloader down, then prints/logs the tracks that
        were not found and those only available at low bitrate.
        """
        logging.info(f"[MAIN] Creating Downloader")
        downloader = Downloader(playlist_dir, self.show_chrome)
        try:
            logging.info(f"[MAIN] Trying Download")
            not_found, low_bitrate = downloader.download(tracks, progress_bar)
        finally:
            downloader.tear_down()
        print("<------------ Download Finished! Fasching Mafensen ------------>")
        logging.info("[MAIN] <------------ Download Finished! Fasching Mafensen ------------>")
        if not_found:
            print("\n<------------------------Songs not found------------------------>")
            logging.info("Songs not found:")
            for song in not_found:
                print(song)
                logging.info(song)
            print("-----------------------------------------------------------------------")
        if low_bitrate:
            print("\n<----------------------Low Bitrate---------------------------------->")
            logging.info("Songs with low bitrate:")
            for song in low_bitrate:
                print(song)
                logging.info(song)
            print("---------------------------------------------------------------------------")
if __name__ == "__main__":
    # Build the controller and hand over to the GUI event loop.
    main = Main()
    main.loop()
#url = "https://open.spotify.com/playlist/3rAF2TSX5OEaizE6o8op34?si=cd257ac5ed824873"
#output_dir = "/home/jannik/code/SliderDownloader/app/music"
#main.run(url, output_dir)
#print("<------------ Finished! Fasching Mafensen ------------>")
|
crawler1.py | #!/usr/bin/env python
# encoding=utf-8
#codeby ้้ฟไธ้ป
#email ydhcui@suliu.net/QQ664284092
#website http://www.suliu.net
import time
import re
import os
import sys
import queue
import urllib.parse as urlparse
'''
from lib import requests
from core.util import CoroutinePool as ThreadPool
from core.cmsfind import AppFind
from core.log import logging
from core.base import BaseWebSite,ConnectionError
import settings
'''
#APP = AppFind(settings.DATAPATH + '/appdata.json')
class BaseRequest(requests.Request):
    '''Base class for a single HTTP request, hashable and comparable by its
    string form so the crawler can de-duplicate requests.'''
    def __init__(self, *args, **kwargs):
        # The original called super(BaseRequest).__init__(Request, ...),
        # which neither binds self nor forwards the arguments correctly.
        super(BaseRequest, self).__init__(*args, **kwargs)
    def response(self):
        # NOTE(review): the original returned the undefined name `req`; the
        # intended behaviour (presumably sending the request through its
        # session) cannot be confirmed from this file.
        raise NotImplementedError('BaseRequest.response is not implemented')
    def __str__(self):
        # Stable textual identity. The original returned None, which made
        # every request compare (and hash) equal to every other one, so the
        # de-duplication in Crawler.addreq dropped all but the first request.
        return '%s %s' % (getattr(self, 'method', ''), getattr(self, 'url', ''))
    def __eq__(self, req):
        return str(self) == str(req)
    def __hash__(self):
        return hash(str(self))
class Crawler(object):
    """Same-origin site crawler: BFS over links and forms scraped by regex."""
    # URL prefixes/schemes that are never navigable.
    HEADBLOCK = ('#','data:','javascript:','mailto:','about:','magnet:')
    # Binary/file extensions skipped early (compared upper-cased).
    TYPEBLOCK = ('.SWF','.JPEG','.JPG','.PNG','.GIF','.EXE','.PDF','.ZIP','.RAR','.TAR.GZ','.TAR','.GZ')
    # Exhaustive extension blacklist (currently unreferenced in this class).
    CRAWL_EXCLUDE_EXTENSIONS = ("3ds", "3g2", "3gp", "7z", "DS_Store", "a", "aac", "adp", "ai", "aif", "aiff", "apk", "ar", "asf", "au", "avi", "bak", "bin", "bk", "bmp", "btif", "bz2", "cab", "caf", "cgm", "cmx", "cpio", "cr2", "dat", "deb", "djvu", "dll", "dmg", "dmp", "dng", "doc", "docx", "dot", "dotx", "dra", "dsk", "dts", "dtshd", "dvb", "dwg", "dxf", "ear", "ecelp4800", "ecelp7470", "ecelp9600", "egg", "eol", "eot", "epub", "exe", "f4v", "fbs", "fh", "fla", "flac", "fli", "flv", "fpx", "fst", "fvt", "g3", "gif", "gz", "h261", "h263", "h264", "ico", "ief", "image", "img", "ipa", "iso", "jar", "jpeg", "jpg", "jpgv", "jpm", "jxr", "ktx", "lvp", "lz", "lzma", "lzo", "m3u", "m4a", "m4v", "mar", "mdi", "mid", "mj2", "mka", "mkv", "mmr", "mng", "mov", "movie", "mp3", "mp4", "mp4a", "mpeg", "mpg", "mpga", "mxu", "nef", "npx", "o", "oga", "ogg", "ogv", "otf", "pbm", "pcx", "pdf", "pea", "pgm", "pic", "png", "pnm", "ppm", "pps", "ppt", "pptx", "ps", "psd", "pya", "pyc", "pyo", "pyv", "qt", "rar", "ras", "raw", "rgb", "rip", "rlc", "rz", "s3m", "s7z", "scm", "scpt", "sgi", "shar", "sil", "smv", "so", "sub", "swf", "tar", "tbz2", "tga", "tgz", "tif", "tiff", "tlz", "ts", "ttf", "uvh", "uvi", "uvm", "uvp", "uvs", "uvu", "viv", "vob", "war", "wav", "wax", "wbmp", "wdp", "weba", "webm", "webp", "whl", "wm", "wma", "wmv", "wmx", "woff", "woff2", "wvx", "xbm", "xif", "xls", "xlsx", "xlt", "xm", "xpi", "xpm", "xwd", "xz", "z", "zip", "zipx")
def __init__(
self,
url,
headers = {},
threads = 10,
timeout = 60,
sleep = 10,
proxy = {},
session = None,
level = False,
isdomain= True):
if isdomain:
url = '/'.join(url.split('/')[:3])+'/'
else:
url = url
self.basereq = BaseRequest(url,session=session,proxy=proxy,headers=headers)
self.website = BaseWebSite(url)
self.pag404 = self.website.pag404
self.session = self.basereq.session
self.settings = {}
self.settings['threads'] = int(threads)
self.settings['timeout'] = int(timeout)
self.settings['sleep'] = int(sleep)
self.settings['proxy'] = proxy
self.settings['level'] = level
self.basereq.headers.update(headers)
self.settings['headers'] = self.basereq.headers
self.block = []#set()
self.ISSTART = True
self.ReqQueue = queue.Queue()
self.ResQueue = queue.Queue()
self.SubDomain = set() #ๅญๅๅๅ่กจ
self.Directory = {} #็ฎๅฝ็ปๆ
def reqhook(self,req):
'''็จไบ่ฏทๆฑๆถ้ๅhook
x = Crawler(...)
x.reqhook = lambda i: i
x.run()
'''
return req
    def addreq(self,req):
        """Queue *req* for crawling unless it is malformed or already seen.

        De-duplication relies on BaseRequest equality; `self.block` is a
        list, so this membership test is O(n) per request.
        """
        if(req.scheme)and(req.netloc)and(req not in self.block):
            self.block.append(req)
            self.ReqQueue.put(req)
    def urljoin(self,url):
        """Normalise a scraped href/src into an absolute same-site URL.

        Returns None for binary files, foreign hosts (recorded in
        self.SubDomain instead) and non-navigable schemes.
        """
        if url:
            if url.upper().endswith(self.TYPEBLOCK): # skip images and other binary files
                return
            elif url.upper().startswith(('//','HTTP')):
                if BaseRequest(url).netloc.upper() == self.basereq.netloc.upper(): # same host
                    if url.startswith('//'):
                        url = self.basereq.scheme+':'+url
                    return url
                else:
                    # foreign host: record it as a discovered sub-domain
                    u = BaseRequest(url)
                    self.SubDomain.add((u.scheme,u.netloc.replace('//','')))
            elif url.startswith('/') or url.startswith(('./','../')):
                return urlparse.urljoin(self.basereq.url,url)
            elif '://' not in url and not url.startswith(self.HEADBLOCK):
                return urlparse.urljoin(self.basereq.url,url)
    def request(self,req):
        """Send one request, record its response and feed it to the parser.

        Connection failures back off for settings['sleep'] seconds; any other
        error is logged and swallowed so one bad URL cannot stop the crawl.
        """
        req = self.reqhook(req)
        try:
            res = req.response()
            self.ResQueue.put((req,res))
            self.parse(res)
            # fingerprint the web application from the response
            for app in APP.find(res):
                self.website.content = app
        except ConnectionError:
            logging.warn('ConnectionError')
            time.sleep(self.settings['sleep'])
        except Exception as e:
            logging.warn(str(e))
    def parse(self,response):
        """Extract links (and, when settings['level'] is set, HTML forms)
        from *response* and queue them as new requests."""
        content_type = response.headers.get('content-type','text')
        if content_type not in ("image","octet-stream"):
            response = response.text
            urls = set()
            # harvest URL-ish strings from src/href/url attributes, relative
            # paths, query strings and absolute http(s) URLs
            urls = urls.union(set(re.findall(r"""src[\s]*:[\s]*["'](.*?)["']""",response)))
            urls = urls.union(set(re.findall(r"""src[\s]*=[\s]*["'](.*?)["']""",response)))
            urls = urls.union(set(re.findall(r"""href[\s]*:[\s]*["'](.*?)["']""",response)))
            urls = urls.union(set(re.findall(r"""href[\s]*=[\s]*["'](.*?)["']""",response)))
            urls = urls.union(set(re.findall(r"""url[\s]*:[\s]*['"](.*?)['"]""",response)))
            urls = urls.union(set(re.findall(r"""url[\s]*=[\s]*['"](.*?)['"]""",response)))
            urls = urls.union(set(re.findall(r'''['"](/[^/\*'"][A-Za-z0-9\.\\/_-]{1,255})['"]''',response)))
            urls = urls.union(set(re.findall(r"""['"]([A-Za-z0-9\.\\/_-]{1,255}[a-zA-Z]\?[a-zA-Z].*?)['"]""",response)))
            urls = urls.union(set(re.findall("""(http[s]?://(?:[-a-zA-Z0-9_]+\.)+[a-zA-Z]+(?::\d+)?(?:/[-a-zA-Z0-9_%./]+)*\??[-a-zA-Z0-9_&%=.]*)""",response)))
            for url in urls:
                if url:
                    req = BaseRequest(self.urljoin(url),headers=self.settings['headers'],proxy=self.settings['proxy'],session=self.session)
                    self.addreq(req)
            if self.settings['level']:
                posts = []
                # scrape <form> blocks: action, method and input name/value pairs
                for f in re.findall(r"""<form([\s\S]*?)</form>""",response):
                    post = {}
                    post['action'] = ''.join(re.findall(r"""action[\s]*=[\s]*["'](.*?)["']""",f)) or './'
                    post['method'] = ''.join(re.findall(r"""method[\s]*=[\s]*["'](.*?)["']""",f)) or 'POST'
                    post['data'] = {}
                    for d in re.findall(r"""<input[\s\S]*?>""",f):
                        name = ''.join(re.findall(r"""name[\s]*=[\s]*["'](.*?)["']""",d))
                        value = ''.join(re.findall(r"""value[\s]*=[\s]*["'](.*?)["']""",d))
                        if not value:value = name
                        post['data'].update({name:value})
                    posts.append(post)
                for post in posts:
                    req = BaseRequest(self.urljoin(post['action']),method=post['method'],data=post['data'],headers=self.settings['headers'],proxy=self.settings['proxy'],session=self.session)
                    self.addreq(req)
    def run1(self):
        """Crawl loop: seed with the base request, then drain ReqQueue on a
        thread pool until it stays empty for settings['timeout'] seconds."""
        pool = ThreadPool(self.settings['threads'])
        self.FLAG = self.settings['timeout']
        try:
            self.addreq(self.basereq)
            self.parse(self.basereq.response())
        except:
            # NOTE(review): bare except hides the real failure reason -- it
            # silently marks the crawl as finished.
            self.ISSTART = False
            return
        # if no task arrives for `timeout` seconds, consider the crawl done
        while self.ISSTART and self.FLAG > 0:
            logging.load('Reload ... Wait for %s'%self.FLAG)
            try:
                req = self.ReqQueue.get(block=False)
                pool.spawn(self.request,req)
            except queue.Empty:
                time.sleep(1)
                self.FLAG -= 1
        self.ISSTART = False
        pool.join()
if __name__ == '__main__':
    import threading
    # Crawl one target host; level=True also enumerates HTML <form> targets.
    x=Crawler('http://59.41.129.37:8080/',timeout=10,threads=1000,level=True)
    # Route all traffic through a local proxy (e.g. an intercepting proxy).
    x.settings.update(timeout=10,threads=100,proxy={'http':'http://127.0.0.1:1111','https':'http://127.0.0.1:1111'})
    threading.Thread(target=x.run1).start()
    # Drain results while the crawler is running (busy-polls the queue).
    while x.ISSTART or not x.ResQueue.empty():
        try:
            q,r = x.ResQueue.get(block=False)
            print(r.status_code,q.method,q.url)
        except queue.Empty:
            pass
|
x32.py | from . import Handler
import hug
import socket
import threading
from collections import Iterable
from pythonosc import udp_client, osc_server, dispatcher
from pythonosc.osc_message_builder import OscMessageBuilder
# The included SimpleUDPClient class does not allow you to build messages
# with no values, which X32 requires. Therefore we add a check in this class
# to pass an empty string as no values. (An empty string cannot be sent in
# an OSC message anyway so it is fine to do this). Other than that, this is
# a rewrite of the SimpleUDPClient class.
class ExtendedUDPClient(udp_client.UDPClient):
    """UDP client that treats an empty-string value as "send no arguments".

    The stock SimpleUDPClient refuses to build argument-less messages, which
    the X32 requires; this client fills that gap.
    """

    def send_message(self, address, value):
        """Build and send one OSC message for *address* carrying *value*.

        An empty string means the message has no arguments at all; a bare
        scalar (including str/bytes, which are technically iterable) is sent
        as a single argument; any other iterable contributes one argument
        per element.
        """
        builder = OscMessageBuilder(address=address)
        if value != '':
            if isinstance(value, Iterable) and not isinstance(value, (str, bytes)):
                osc_args = value
            else:
                osc_args = [value]
            for osc_arg in osc_args:
                builder.add_arg(osc_arg)
        self.send(builder.build())
class ReuseAddressServer(osc_server.ThreadingOSCUDPServer):
    # The X32 replies to the port the request came from, so the server must
    # bind the same port the client already holds; SO_REUSEADDR permits that.
    allow_reuse_address = True
class X32Handler(Handler):
    """Handler that routes channels to mix buses on a Behringer X32 via OSC."""

    def __init__(self):
        """Connect to the console, start an OSC listener thread, and build
        name->number lookup tables for channels and buses."""
        self.name = 'x32'
        super().__init__()
        self.client = ExtendedUDPClient(
            self.config['console']['ip'],
            int(self.config['console']['rx-port'])
        )
        # Because the X32 returns OSC messages to the port from which they
        # were sent, we need to reuse the socket for both the client and
        # server. Therefore we manually bind the socket to a port, set the
        # socket reuse flags on the socket and UDPServer, and then bind the
        # server to the same socket. For explanation, see this SO thread and
        # GitHub issue: https://bit.ly/2FmvVap
        # https://github.com/attward/python-osc/issues/41
        self.client._sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.client._sock.bind(('0.0.0.0', int(self.config['server']['port'])))
        client_address, client_port = self.client._sock.getsockname()
        # Upper-cased scribble-strip name -> channel/bus number. Populated
        # asynchronously by the OSC callbacks below as replies arrive.
        self.data = {'ch': {}, 'bus': {}}
        def channel_handler(address, *args):
            # e.g. '/ch/01/config/name' -> record name->'01'
            address = address.split('/')
            ch = address[2]
            if address[4] == 'name' and args:
                self.data['ch'][args[0].upper()] = ch
        def bus_handler(address, *args):
            # e.g. '/bus/01/config/name' -> record name->'01'
            address = address.split('/')
            bus = address[2]
            if address[4] == 'name' and args:
                self.data['bus'][args[0].upper()] = bus
        router = dispatcher.Dispatcher()
        router.map('/ch/*', channel_handler)
        router.map('/bus/*', bus_handler)
        # Listen for OSC packets in a new thread
        server = ReuseAddressServer((
            self.config['server']['listen-ip'],
            client_port),
            router
        )
        server_thread = threading.Thread(target=server.serve_forever)
        try:
            server_thread.start()
        except (KeyboardInterrupt, SystemExit):
            server.shutdown()
        self.generate_label_dict()
        self.x32_subscribe()

    def x32_subscribe(self):
        """Keep the console's /xremote subscription alive.

        NOTE(review): this re-arms a Timer every 9 seconds forever and is
        never cancelled; there is no clean-shutdown path.
        """
        threading.Timer(9, self.x32_subscribe).start()
        self.send_osc_message('/xremote', '')

    def generate_label_dict(self):
        """Request the names of all 32 channels and 16 buses; replies are
        captured by the handlers registered in __init__."""
        for i in range(1, 33):
            address = '/ch/{0}/config/name'.format(str(i).zfill(2))
            self.send_osc_message(address, '')
        for i in range(1, 17):
            address = '/bus/{0}/config/name'.format(str(i).zfill(2))
            self.send_osc_message(address, '')

    def send_osc_message(self, addr, *args):
        """Send an OSC message with the specified address and arguments to the
        client as defined in the config"""
        self.client.send_message(addr, *args)

    def ch_route_label(self, src, dest, switch):
        """Route channel named *src* to bus named *dest* ('on'/'off').

        Returns an error string on unknown names; NOTE(review): the success
        path falls through and returns None, so callers cannot distinguish
        success from "no message".
        """
        if src.upper() not in self.data['ch']:
            return 'No source called '+src
        if dest.upper() not in self.data['bus']:
            return 'No output called '+dest
        ch_n = self.data['ch'][src.upper()]
        bus_n = self.data['bus'][dest.upper()]
        self.ch_route_number(ch_n, bus_n, switch)

    def ch_route_number(self, ch_n, bus_n, switch):
        """Set channel ch_n's send into bus bus_n on (1) or off (0)."""
        if switch == 'on':
            self.send_osc_message('/ch/{0}/mix/{1}/on'.format(ch_n, bus_n), 1)
        if switch == 'off':
            self.send_osc_message('/ch/{0}/mix/{1}/on'.format(ch_n, bus_n), 0)
# Module-level singleton used by the HTTP endpoints below.
handler = X32Handler()

@hug.put('/ch/on/label')
def bus_on(src, dest):
    """HTTP endpoint: route channel *src* into bus *dest*, by name."""
    return handler.ch_route_label(src, dest, 'on')

@hug.put('/ch/off/label')
def bus_off(src, dest):
    """HTTP endpoint: remove channel *src* from bus *dest*, by name."""
    return handler.ch_route_label(src,dest, 'off')
|
kfold-cv-pool-answer.py | import numpy as np
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
from multiprocessing import Pool, Process
from sklearn import svm
import time
# Load the optdigits dataset: each row is 64 pixel features plus a label.
test = np.loadtxt("optdigits.txt", delimiter = ",")
X = test[:, 0:64]
y = test[:, 64]
# Plot some of the digits
#fig = plt.figure(figsize=(8, 6))
#fig.tight_layout()
#for i in range(0, 20):
#    ax = fig.add_subplot(5, 5, i + 1)
#    ax.imshow(X[i].reshape((8,8)), cmap = "Greys", vmin = 0, vmax = 16)
#plt.show()
def cvkfold(X, y, tuning_params, partitions, k):
    """Score a linear SVM on fold *k* for every candidate C value.

    Trains on all rows except partitions[k] and evaluates on partitions[k].
    Returns a 1-D array of test accuracies, one per entry of tuning_params.
    """
    held_out = partitions[k]
    train_idx = np.delete(np.arange(0, X.shape[0]), held_out)
    X_train, y_train = X[train_idx, :], y[train_idx]
    X_test, y_test = X[held_out, :], y[held_out]
    scores = np.zeros(tuning_params.shape[0])
    for j, c in enumerate(tuning_params):
        model = svm.SVC(C = c, kernel = "linear")
        scores[j] = model.fit(X_train, y_train).score(X_test, y_test)
    return scores
# ----- Serial K-fold cross-validation baseline -----
K = 5
tuning_params = np.logspace(-6, -1, 10)
# Random K-way split of the row indices.
partitions = np.array_split(np.random.permutation(np.arange(X.shape[0])), K)
t1 = time.time()
# BUG FIX: the original loop overwrote Accuracies on every iteration, so only
# the LAST fold's scores survived and the subsequent mean over axis 0 averaged
# across tuning parameters instead of folds. Collect all K folds so that
# CV_accuracy is a true cross-validated mean per tuning parameter.
Accuracies = np.array([cvkfold(X, y, tuning_params, partitions, k) for k in range(K)])
ts1 = time.time() - t1
print('Serial runs %0.3f seconds.' %ts1)
CV_accuracy = np.mean(Accuracies, axis = 0)
best_tuning_param = tuning_params[np.argmax(CV_accuracy)]
print('Best tuning param %0.6f.'% best_tuning_param)
#using Pool
# Each fold becomes one starmap task; the Pool hands tasks to workers as they
# free up, so with fewer workers than folds the folds queue up.
#using 2 processors
pool = Pool(processes=2)
t1 =time.time()
args = [(X, y, tuning_params, partitions, k) for k in range(0, K)]
Accuracies = np.array(pool.starmap(cvkfold, args))
tp2 = time.time()-t1
print("Pool for 2 processors runs %0.3f seconds. "%tp2)
# NOTE(review): close() is never followed by join(), so worker shutdown is
# not awaited before the next Pool is created.
pool.close()
CV_accuracy = np.mean(Accuracies, axis = 0)
best_tuning_param = tuning_params[np.argmax(CV_accuracy)]
print("Best tuning parmeter is %0.6f." %best_tuning_param)
#for 4 processors
pool = Pool(processes=4)
t1 =time.time()
args = [(X, y, tuning_params, partitions, k) for k in range(0, K)]
Accuracies = np.array(pool.starmap(cvkfold, args))
tp4 = time.time()-t1
print("Pool for 4 processors runs %0.3f seconds. "%tp4)
pool.close()
CV_accuracy = np.mean(Accuracies, axis = 0)
best_tuning_param = tuning_params[np.argmax(CV_accuracy)]
print("Best tuning parmeter is %0.6f." %best_tuning_param)
#for 8 processors
pool = Pool(processes = 8)
t1 =time.time()
args = [(X, y, tuning_params, partitions, k) for k in range(0, K)]
Accuracies = np.array(pool.starmap(cvkfold, args))
tp8 = time.time()-t1
print("Pool for 8 processors runs %0.3f seconds. "%tp8)
pool.close()
CV_accuracy = np.mean(Accuracies, axis = 0)
best_tuning_param = tuning_params[np.argmax(CV_accuracy)]
print("Best tuning parmeter is %0.6f." %best_tuning_param)
#using process
# BUG FIX: the original called p.Process(target=cvkfold, args=args) on a Pool,
# which merely CONSTRUCTED one (never-started) Process with the wrong argument
# arity and then wrapped the Process object itself in np.array -- no work was
# ever run and the timings measured nothing. It also skipped the fold mean
# (CV_accuracy = Accuracies). The rewrite below runs each fold in its own
# multiprocessing.Process and collects results over a Queue.
# NOTE: one Process per fold is spawned regardless of the '2/4/8 processors'
# labels (kept for report continuity); assumes a fork start method (Linux).
def _run_fold(q, k):
    """Worker: compute fold k's accuracies and report (k, scores) on q."""
    q.put((k, cvkfold(X, y, tuning_params, partitions, k)))

def _process_cv():
    """Run all K folds in separate Processes; return stacked (K, n_params) scores."""
    q = Queue()
    workers = [Process(target=_run_fold, args=(q, k)) for k in range(K)]
    for w in workers:
        w.start()
    # Drain the queue BEFORE joining to avoid a deadlock on a full pipe.
    results = dict(q.get() for _ in range(K))
    for w in workers:
        w.join()
    return np.array([results[k] for k in range(K)])

#using 2 processors
t1=time.time()
Accuracies = _process_cv()
tpr2 = time.time()-t1
print("Process class for 2 processors runs %0.3f seconds."%tpr2)
CV_accuracy = np.mean(Accuracies, axis = 0)
best_tuning_param = tuning_params[np.argmax(CV_accuracy)]
print("Best tuning parmeter is %0.6f." %best_tuning_param)
#using 4 processors
t1=time.time()
Accuracies = _process_cv()
tpr4 = time.time()-t1
print("Process class for 4 processors runs %0.3f seconds."%tpr4)
CV_accuracy = np.mean(Accuracies, axis = 0)
best_tuning_param = tuning_params[np.argmax(CV_accuracy)]
print("Best tuning parmeter is %0.6f." %best_tuning_param)
#using 8 processors
t1=time.time()
Accuracies = _process_cv()
tpr8 = time.time()-t1
print("Process class for 8 processors runs %0.3f seconds."%tpr8)
CV_accuracy = np.mean(Accuracies, axis = 0)
best_tuning_param = tuning_params[np.argmax(CV_accuracy)]
print("Best tuning parmeter is %0.6f." %best_tuning_param)
# Compare wall-clock time of each strategy across processor counts.
# The serial time is repeated so it draws as a flat reference line.
processors = [1,2,4,8]
elapsed_ser = [ts1,ts1,ts1,ts1]
elapsed_Pool = [0,tp2,tp4,tp8]
elapsed_Process= [0,tpr2,tpr4,tpr8]
plt.plot(processors,elapsed_ser, label='Serial')
plt.plot(processors,elapsed_Pool, label='Pool')
plt.plot(processors,elapsed_Process, label='Process')
plt.xlabel('Processors')
plt.ylabel('Elapsed Time')
plt.legend()
plt.savefig('Elapsed_time.png',transparent=True, bbox_inches='tight')
# plt.show() is a no-op under the 'pdf' backend selected at the top.
plt.show()
|
parser.py | import sublime, sublime_plugin
from http.server import BaseHTTPRequestHandler, HTTPServer
import json, _thread, threading
import platform, os, sys, time
# Suffix appended to a solution file's name for its testcases file
# (loaded from settings in update_settings()).
TESTS_FILE_SUFFIX = ''
# Parsing state lives at module level because the HTTP request handler
# classes are created per-request and hold no state of their own.
totalProblems, problems_parsed, successful_problems = 1, 0, 0
settings, user_settings = None, None
contest_name, contest_dir, working_dir, error, parse_in_view_file, view_file_name, sep = None, None, None, False, False, None, False
problems = []
def reset():
    """Restore every module-level parsing flag and counter to its initial state."""
    global totalProblems, problems_parsed, successful_problems, contest_name, contest_dir, working_dir, error, parse_in_view_file, view_file_name, sep, problems
    totalProblems = 1
    problems_parsed = successful_problems = 0
    contest_name = contest_dir = working_dir = view_file_name = None
    error = parse_in_view_file = sep = False
    problems = []
def plugin_loaded():
    """Sublime entry point: load settings now and reload them on change."""
    update_settings()
    settings.add_on_change('extensions_path', update_settings)
def show_msg(msg):
    """Print *msg* to Sublime's console, opening the panel on first use."""
    global sep
    if not sep:
        # First message of this run: print a banner and show the console.
        sep = True
        print(5 * '\n' + '------------------------------START------------------------------')
        sublime.active_window().run_command('show_panel', {"panel": "console"})
    print(msg)
def close_panel():
    """Hide the console five seconds after the run, unless an error occurred
    (errors keep the panel open so the user can read them)."""
    if not error:
        show_msg('closing console...')
        time.sleep(5)
        sublime.active_window().run_command('hide_panel')
        print('closed console')
        print('------------------------------END------------------------------')
def GetSettings(key):
    """Look up *key* in the user's settings, falling back to the defaults."""
    global settings, user_settings
    value = user_settings.get(key)
    if value != None:
        return value
    return settings.get(key)
def update_settings():
    """(Re)load the per-OS default settings plus the user's overrides.

    Raises if TESTS_FILE_SUFFIX is absent from both settings files, since
    the parser cannot name testcase files without it.
    """
    global settings, user_settings, TESTS_FILE_SUFFIX
    settings = sublime.load_settings('CompetitiveProgrammingParser ({os}).sublime-settings'.format(
        os={ 'windows': 'Windows', 'linux': 'Linux', 'osx': 'OSX' }[sublime.platform().lower()])
    )
    user_settings = sublime.load_settings('CompetitiveProgrammingParser.sublime-settings')
    if GetSettings('TESTS_FILE_SUFFIX') != None:
        TESTS_FILE_SUFFIX = GetSettings('TESTS_FILE_SUFFIX')
    else:
        raise Exception('TESTS_FILE_SUFFIX not found in settings file')
    print("CompetitiveProgrammingParser Settings loaded successfully")
# fetch current and working directories
def fetch_directory(oj, action):
    """Resolve the directory to parse into, create it, and open it in Sublime.

    Args:
        oj: online-judge name, used as the settings key for its directory.
        action: 'contest' creates/opens a per-contest subdirectory;
            anything else opens the base directory.

    Raises Exception (and sets the module-level `error` flag) when the
    directory is unset or cannot be created.
    """
    global contest_name, contest_dir, working_dir, error
    if contest_dir is None:  # implies that command wasn't invoked from the sidebar
        key = 'default' if GetSettings('use_default_directory') else oj
        directories = GetSettings('directory')
        if key not in directories or directories[key] == '':
            error = True
            raise Exception(key + ' directory not set. Please update your CompetitiveProgrammingParser settings')
        contest_dir = directories[key]
        if not os.path.exists(contest_dir):
            # makedirs: also create missing parents (os.mkdir would fail).
            os.makedirs(contest_dir)
    working_dir = contest_dir
    if action == 'contest':
        working_dir = os.path.join(working_dir, contest_name)
        try:
            if not os.path.exists(working_dir):
                os.mkdir(working_dir)
        except Exception as e:
            error = True
            raise Exception(str(e) + '\nPlease update your CompetitiveProgrammingParser settings.')
    # SECURITY/ROBUSTNESS FIX: launch via an argument list instead of an
    # os.system() shell string, so paths containing quotes, spaces or shell
    # metacharacters can neither break nor inject into the command.
    flag = '-n' if (GetSettings('open_in_new_window') and action == 'contest') else '-a'
    subprocess.call(['subl', flag, working_dir])
# create file and testcases
def parse_testcases(tests, problem, action):
    """Create the solution file (if missing) and write its testcases JSON.

    Increments the module-level successful_problems counter on completion.
    """
    global successful_problems
    if parse_in_view_file:
        target = view_file_name
    else:
        target = problem + GetSettings('lang_extension')
    target = os.path.join(working_dir, target)
    # Touch the solution file so the user can start coding immediately.
    if not os.path.exists(target):
        open(target, 'w').close()
    cases = [
        {"test": case["input"], "correct_answers": [case["output"].strip()]}
        for case in tests["tests"]
    ]
    with open(target + TESTS_FILE_SUFFIX, "w") as f:
        f.write(json.dumps(cases))
    successful_problems += 1
def check_page_correctness(action):
    """Reject a multi-problem (contest) page when a single problem was expected."""
    global error
    if action != 'contest' and totalProblems > 1:
        error = True
        raise Exception('It seems that you are trying to parse a contest page. Please open a problem page!')
def get_problem_name(tests, oj):
    """Derive a filesystem-friendly problem name for the given judge.

    Also records the contest name (from the 'group' field) the first time
    it is seen, announcing it in the console.
    """
    global contest_name
    if contest_name is None:
        contest_name = tests["group"].split('-', 1)[-1].strip()
        show_msg('Contest: ' + contest_name)
    if oj == "CodeChef":
        # CodeChef: the URL's last path segment is the problem code.
        problem = tests["url"].split('/')[-1]
    elif oj in ("Codeforces", "Yandex"):
        # 'A. Problem Title' -> 'A'
        problem = tests["name"].split('.')[0]
    elif oj == "AtCoder":
        # 'A - Problem Title' -> 'A'
        problem = tests["name"].split(' ')[0]
    else:
        problem = tests["name"]
    return problem.replace(" ", "_")
def handle(tests, action):
    """Process one problem payload POSTed by Competitive Companion.

    Tracks retries via the module-level `problems` list: a duplicate payload
    is treated as a retry of a slow transfer; five duplicates abort the run.
    """
    global totalProblems, problems_parsed, error
    problems_parsed += 1
    totalProblems = tests["batch"]['size']
    try:
        check_page_correctness(action)
    except Exception as e:
        raise Exception(e)
    # 'group' looks like '<judge> - <contest name>'.
    oj = tests["group"].split('-')[0].strip()
    problem = get_problem_name(tests, oj)
    cnt = problems.count(problem)
    if cnt == 5:
        # Fifth duplicate of the same problem: give up on the whole run.
        error = True
        show_msg('❌ Aborting the process. Please check your internet connection and try again')
        return
    elif cnt > 0:
        # Duplicate payload: count it as a retry, not as progress.
        problems.append(problem)
        show_msg('❌ Could not parse the next problem.(possibly due to slow internet connection).\nTrying again(' + str(cnt) + ')...')
        problems_parsed -= 1
        return
    else:
        problems.append(problem)
    # Resolve/open the destination directory once, on the first problem.
    if problems_parsed == 1 and action != 'testcase':
        try:
            fetch_directory(oj, action)
        except Exception as e:
            raise Exception(e)
    show_msg('parsing ' + action + "...")
    try:
        parse_testcases(tests, problem, action)
        show_msg("✔️ Problem " + problem + " (" + str(problems_parsed) + "/" + str(totalProblems) + ")" + " success")
    except Exception as e:
        show_msg("❌ Problem " + problem + " (" + str(problems_parsed) + "/" + str(totalProblems) + ")" + " fail")
def MakeHandlerClass(action):
    """Build a BaseHTTPRequestHandler subclass bound to *action*.

    The handler class is created per server so each server instance knows
    which parse action ('problem'/'contest'/'testcase') it is serving.
    """
    class HandleRequests(BaseHTTPRequestHandler):
        def do_POST(self):
            try:
                handle(json.loads(self.rfile.read(int(self.headers['Content-Length'])).decode('utf8')), action)
            except Exception as e:
                show_msg("❌ error: " + str(e))
            # Stop serve_forever() after every POST; the caller's loop decides
            # whether to serve again. Shutdown must run on another thread
            # because shutdown() blocks while a request is being handled.
            threading.Thread(target=self.server.shutdown, daemon=True).start()
    return HandleRequests
class CompetitiveCompanionServer:
    """Receives problem payloads from the Competitive Companion extension."""

    # NOTE(review): defined without 'self' and invoked as a plain function
    # via CompetitiveCompanionServer.startServer(action); consider
    # @staticmethod for clarity.
    def startServer(action):
        """Serve POSTs on localhost:12345 until all problems arrive or an
        error is flagged, then report the outcome and close the panel."""
        try:
            httpd = HTTPServer(('localhost', 12345), MakeHandlerClass(action))
            # Each serve_forever() call handles one POST (the handler shuts
            # the server down after every request) and the loop re-checks.
            while problems_parsed < totalProblems and not error:
                httpd.serve_forever()
            if action == 'contest' and successful_problems > 0:
                x = "All" if successful_problems == totalProblems else "Only"
                show_msg(x + " (" + str(successful_problems) + "/" + str(totalProblems) + ") Problems of \'" + str(contest_name) + "\'" + ' were parsed successfully')
            close_panel()
        except Exception as e:
            # NOTE(review): silently swallows failures such as the port
            # already being in use; nothing is reported to the user.
            pass
class CompetitiveProgrammingParserFileCommand(sublime_plugin.TextCommand):
    """Command palette entry: parse a problem/contest/testcases for the
    currently open file."""

    def run(self, edit, action):
        global error
        reset()
        try:
            # 'testcase' writes next to the current file, so it must be saved.
            if action == 'testcase' and self.view.file_name() == None:
                error = True
                raise Exception("Can't parse testcases for an untitled file.")
            if action == 'testcase':
                global parse_in_view_file, view_file_name
                parse_in_view_file = True
                view_file_name = self.view.file_name()
            if GetSettings('lang_extension') == None:
                error = True
                raise Exception('Language not set. Update your CompetitiveProgrammingParser settings.')
            # Serve in the background so the UI thread is not blocked.
            _thread.start_new_thread(CompetitiveCompanionServer.startServer, (action,))
        except Exception as e:
            show_msg("❌ error: " + str(e))
            close_panel()
class CompetitiveProgrammingParserSidebarCommand(sublime_plugin.WindowCommand):
    """Sidebar entry: parse into the single directory selected in the sidebar."""

    def run(self, dirs, action, **kwargs):
        reset()
        global contest_dir, error
        # The selected sidebar directory overrides the configured one.
        contest_dir = dirs[0]
        try:
            if GetSettings('lang_extension') == None:
                error = True
                raise Exception('language extension not set. Update your CompetitiveProgrammingParser settings.')
            # Serve in the background so the UI thread is not blocked.
            _thread.start_new_thread(CompetitiveCompanionServer.startServer, (action,))
        except Exception as e:
            show_msg("❌ error: " + str(e))
            close_panel()

    def is_enabled(self, dirs, action):
        # Only offer the command when exactly one directory is selected.
        return len(dirs) == 1

    def is_visible(self, dirs, action):
        return len(dirs) == 1
loader.py | # Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Detectron data loader. The design is generic and abstracted away from any
details of the minibatch. A minibatch is a dictionary of blob name keys and
their associated numpy (float32 or int32) ndarray values.
Outline of the data loader design:
loader thread\
loader thread \ / GPU 1 enqueue thread -> feed -> EnqueueOp
... -> minibatch queue -> ...
loader thread / \ GPU N enqueue thread -> feed -> EnqueueOp
loader thread/
<---------------------------- CPU -----------------------------|---- GPU ---->
A pool of loader threads construct minibatches that are put onto the shared
minibatch queue. Each GPU has an enqueue thread that pulls a minibatch off the
minibatch queue, feeds the minibatch blobs into the workspace, and then runs
an EnqueueBlobsOp to place the minibatch blobs into the GPU's blobs queue.
During each fprop the first thing the network does is run a DequeueBlobsOp
in order to populate the workspace with the blobs from a queued minibatch.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import deque
from collections import OrderedDict
import logging
import Queue
import signal
import threading
import time
import uuid
import numpy as np
from caffe2.python import core, workspace
from detectron.core.config import cfg
from detectron.roi_data.minibatch import get_minibatch
from detectron.roi_data.minibatch import get_minibatch_blob_names
from detectron.utils.coordinator import coordinated_get
from detectron.utils.coordinator import coordinated_put
from detectron.utils.coordinator import Coordinator
import detectron.utils.c2 as c2_utils
logger = logging.getLogger(__name__)
class RoIDataLoader(object):
    """Multi-threaded RoI data loader.

    Loader threads build (partial) minibatches onto a host-side queue; one
    enqueuer thread per GPU moves them into that GPU's BlobsQueue (see the
    module docstring for the full pipeline diagram).
    """

    def __init__(
        self,
        roidb,
        num_loaders=4,
        minibatch_queue_size=64,
        blobs_queue_capacity=8
    ):
        self._roidb = roidb
        self._lock = threading.Lock()
        self._perm = deque(range(len(self._roidb)))
        self._cur = 0  # _perm cursor
        # The minibatch queue holds prepared training data in host (CPU) memory
        # When training with N > 1 GPUs, each element in the minibatch queue
        # is actually a partial minibatch which contributes 1 / N of the
        # examples to the overall minibatch
        self._minibatch_queue = Queue.Queue(maxsize=minibatch_queue_size)
        self._blobs_queue_capacity = blobs_queue_capacity
        # Random queue name in case one instantiates multiple RoIDataLoaders
        self._loader_id = uuid.uuid4()
        self._blobs_queue_name = 'roi_blobs_queue_{}'.format(self._loader_id)
        # Loader threads construct (partial) minibatches and put them on the
        # minibatch queue
        self._num_loaders = num_loaders
        self._num_gpus = cfg.NUM_GPUS
        self.coordinator = Coordinator()
        self._output_names = get_minibatch_blob_names()
        self._shuffle_roidb_inds()
        self.create_threads()

    def minibatch_loader_thread(self):
        """Load mini-batches and put them onto the mini-batch queue."""
        with self.coordinator.stop_on_exception():
            while not self.coordinator.should_stop():
                blobs = self.get_next_minibatch()
                # Blobs must be queued in the order specified by
                # self.get_output_names
                ordered_blobs = OrderedDict()
                for key in self.get_output_names():
                    assert blobs[key].dtype in (np.int32, np.float32), \
                        'Blob {} of dtype {} must have dtype of ' \
                        'np.int32 or np.float32'.format(key, blobs[key].dtype)
                    ordered_blobs[key] = blobs[key]
                coordinated_put(
                    self.coordinator, self._minibatch_queue, ordered_blobs
                )
            logger.info('Stopping mini-batch loading thread')

    def enqueue_blobs_thread(self, gpu_id, blob_names):
        """Transfer mini-batches from a mini-batch queue to a BlobsQueue."""
        with self.coordinator.stop_on_exception():
            while not self.coordinator.should_stop():
                # BUG FIX: qsize is a method; the original compared the bound
                # method object itself to 0 (always False), so the empty-queue
                # warning could never fire.
                if self._minibatch_queue.qsize() == 0:
                    logger.warning('Mini-batch queue is empty')
                blobs = coordinated_get(self.coordinator, self._minibatch_queue)
                self.enqueue_blobs(gpu_id, blob_names, blobs.values())
                logger.debug(
                    'batch queue size {}'.format(self._minibatch_queue.qsize())
                )
            logger.info('Stopping enqueue thread')

    def get_next_minibatch(self):
        """Return the blobs to be used for the next minibatch. Thread safe."""
        valid = False
        while not valid:
            db_inds = self._get_next_minibatch_inds()
            minibatch_db = [self._roidb[i] for i in db_inds]
            blobs, valid = get_minibatch(minibatch_db)
        return blobs

    def _shuffle_roidb_inds(self):
        """Randomly permute the training roidb. Not thread safe."""
        if cfg.TRAIN.ASPECT_GROUPING:
            # Group horizontal and vertical images so each minibatch contains
            # images of similar aspect ratio (reduces padding waste).
            widths = np.array([r['width'] for r in self._roidb])
            heights = np.array([r['height'] for r in self._roidb])
            horz = (widths >= heights)
            vert = np.logical_not(horz)
            horz_inds = np.where(horz)[0]
            vert_inds = np.where(vert)[0]
            horz_inds = np.random.permutation(horz_inds)
            vert_inds = np.random.permutation(vert_inds)
            mb = cfg.TRAIN.IMS_PER_BATCH
            # Drop the remainder so every row of `inds` is a full minibatch.
            horz_inds = horz_inds[:(len(horz_inds) // mb) * mb]
            vert_inds = vert_inds[:(len(vert_inds) // mb) * mb]
            inds = np.hstack((horz_inds, vert_inds))
            inds = np.reshape(inds, (-1, mb))
            row_perm = np.random.permutation(np.arange(inds.shape[0]))
            inds = np.reshape(inds[row_perm, :], (-1, ))
            self._perm = inds
        else:
            self._perm = np.random.permutation(np.arange(len(self._roidb)))
        self._perm = deque(self._perm)
        self._cur = 0

    def _get_next_minibatch_inds(self):
        """Return the roidb indices for the next minibatch. Thread safe."""
        with self._lock:
            # We use a deque and always take the *first* IMS_PER_BATCH items
            # followed by *rotating* the deque so that we see fresh items
            # each time. If the length of _perm is not divisible by
            # IMS_PER_BATCH, then we end up wrapping around the permutation.
            db_inds = [self._perm[i] for i in range(cfg.TRAIN.IMS_PER_BATCH)]
            self._perm.rotate(-cfg.TRAIN.IMS_PER_BATCH)
            self._cur += cfg.TRAIN.IMS_PER_BATCH
            if self._cur >= len(self._perm):
                self._shuffle_roidb_inds()
        return db_inds

    def get_output_names(self):
        """Ordered blob names every minibatch must supply."""
        return self._output_names

    def enqueue_blobs(self, gpu_id, blob_names, blobs):
        """Put a mini-batch on a BlobsQueue."""
        assert len(blob_names) == len(blobs)
        t = time.time()
        dev = c2_utils.CudaDevice(gpu_id)
        if gpu_id < 0:
            # CPU-only mode: blob names are unscoped.
            queue_name = self._blobs_queue_name
            blob_names = blob_names
        else:
            queue_name = 'gpu_{}/{}'.format(gpu_id, self._blobs_queue_name)
            blob_names = ['gpu_{}/{}'.format(gpu_id, b) for b in blob_names]
        for (blob_name, blob) in zip(blob_names, blobs):
            workspace.FeedBlob(blob_name, blob, device_option=dev)
        logger.debug(
            'enqueue_blobs {}: workspace.FeedBlob: {}'.
            format(gpu_id, time.time() - t)
        )
        t = time.time()
        op = core.CreateOperator(
            'SafeEnqueueBlobs', [queue_name] + blob_names,
            blob_names + [queue_name + '_enqueue_status'],
            device_option=dev
        )
        workspace.RunOperatorOnce(op)
        logger.debug(
            'enqueue_blobs {}: workspace.RunOperatorOnce: {}'.
            format(gpu_id, time.time() - t)
        )

    def create_threads(self):
        """
        Create mini-batch loader threads, each of which builds mini-batches
        and places them into a queue in CPU memory
        """
        self._workers = [
            threading.Thread(target=self.minibatch_loader_thread)
            for _ in range(self._num_loaders)
        ]
        # Create one BlobsQueue per GPU
        # (enqueue_blob_names are unscoped)
        enqueue_blob_names = self.create_blobs_queues()
        # Create one enqueuer thread per GPU
        self._enqueuers = [
            threading.Thread(
                target=self.enqueue_blobs_thread,
                args=(gpu_id, enqueue_blob_names)
            ) for gpu_id in range(self._num_gpus)
        ]

    def start(self, prefill=False):
        """Start all loader/enqueuer threads; optionally block until the
        host-side minibatch queue is full before returning."""
        for w in self._workers + self._enqueuers:
            w.start()
        if prefill:
            logger.info('Pre-filling mini-batch queue...')
            while not self._minibatch_queue.full():
                logger.info(
                    '  [{:d}/{:d}]'.format(
                        self._minibatch_queue.qsize(),
                        self._minibatch_queue.maxsize
                    )
                )
                time.sleep(0.1)
                # Detect failure and shutdown
                if self.coordinator.should_stop():
                    self.shutdown()
                    break

    def shutdown(self):
        """Stop all threads, close the BlobsQueues, and join the threads."""
        self.coordinator.request_stop()
        self.coordinator.wait_for_stop()
        self.close_blobs_queues()
        for w in self._workers + self._enqueuers:
            w.join()

    def create_blobs_queues(self):
        """Create one BlobsQueue for each GPU to hold mini-batches."""
        for gpu_id in range(self._num_gpus):
            with c2_utils.GpuNameScope(gpu_id):
                workspace.RunOperatorOnce(
                    core.CreateOperator(
                        'CreateBlobsQueue', [], [self._blobs_queue_name],
                        num_blobs=len(self.get_output_names()),
                        capacity=self._blobs_queue_capacity
                    )
                )
        if self._num_gpus == 0:
            # CPU-only mode: a single unscoped queue.
            workspace.RunOperatorOnce(
                core.CreateOperator(
                    'CreateBlobsQueue', [], [self._blobs_queue_name],
                    num_blobs=len(self.get_output_names()),
                    capacity=self._blobs_queue_capacity
                )
            )
        return self.create_enqueue_blobs()

    def close_blobs_queues(self):
        """Close a BlobsQueue."""
        for gpu_id in range(self._num_gpus):
            with core.NameScope('gpu_{}'.format(gpu_id)):
                workspace.RunOperatorOnce(
                    core.CreateOperator(
                        'CloseBlobsQueue', [self._blobs_queue_name], []
                    )
                )
        if self._num_gpus == 0:
            workspace.RunOperatorOnce(
                core.CreateOperator(
                    'CloseBlobsQueue', [self._blobs_queue_name], []
                )
            )

    def create_enqueue_blobs(self):
        """Create the per-GPU workspace blobs that feed the BlobsQueues."""
        blob_names = self.get_output_names()
        enqueue_blob_names = [
            '{}_enqueue_{}'.format(b, self._loader_id) for b in blob_names
        ]
        for gpu_id in range(self._num_gpus):
            with c2_utils.NamedCudaScope(gpu_id):
                for blob in enqueue_blob_names:
                    workspace.CreateBlob(core.ScopedName(blob))
        if self._num_gpus == 0:
            for blob in enqueue_blob_names:
                workspace.CreateBlob(core.ScopedName(blob))
        return enqueue_blob_names

    def register_sigint_handler(self):
        """Install a SIGINT handler that shuts the loader down cleanly."""
        def signal_handler(signal, frame):
            logger.info(
                'SIGINT: Shutting down RoIDataLoader threads and exiting...'
            )
            self.shutdown()
        signal.signal(signal.SIGINT, signal_handler)
|
traffic_util.py | """
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import copy
import ctypes
import ipaddress
import iperf3
import os
import pyroute2
import socket
import threading
import shlex
import subprocess
import s1ap_types
from util.traffic_messages import (
TrafficTestInstance,
TrafficRequest,
TrafficRequestType,
TrafficResponseType,
TrafficMessage,
)
# Tests shouldn't take longer than a few minutes, so cap waits at 3 minutes.
TRAFFIC_TEST_TIMEOUT_SEC = 180
"""
Using TrafficUtil
=================
TrafficUtil is designed to have one main entry point: generate_traffic_test.
This function sets up the necessary legwork to configuring the trfgen framework
in the S1AP tester and generating a TrafficTest object that represents the
configurations and constraints of the traffic that is to be generated.
Once generated, the TrafficTest object can be run -- either directly with the
start() function or as a context, using the `with' keyword. The wait() function
gives the tester the option to wait on the test completing before continuing.
Essentially, TrafficUtil is just a bridge for packaging together the parameters
of a given test. Once packaged, the actual testing is done via the TrafficTest
API.
"""
class TrafficUtil(object):
    """ Utility wrapper for tests requiring traffic generation """

    # Trfgen library setup
    _trfgen_lib_name = "libtrfgen.so"
    _trfgen_tests = ()  # cache of already-configured struct_test entries
    # Traffic setup
    _remote_ip = ipaddress.IPv4Address("192.168.129.42")
    def __init__(self):
        """ Initialize the trfgen library and its callbacks """
        # _test_lib is the private variable containing the ctypes reference to
        # the trfgen library.
        self._test_lib = None
        self._init_lib()
        # _config_test is the private variable containing the ctypes reference
        # to the trfgen_configure_test() function in trfgen. This function is
        # called to inform the S1AP tester of the parameters of a test suite,
        # and is used to pass along configuration options to the tester.
        self._config_test = None
        self._setup_configure_test()
        # _start_test is the private variable containing the ctypes reference
        # to the trfgen_start_test() function in trfgen. This function is
        # called to begin a single trfgen instance on a given address, using
        # the predefined configuration options set with configure_test().
        self._start_test = None
        self._setup_start_test()
        # We collect references to the data we pass into ctypes to prevent
        # Python's garbage collection system from coming in and cleaning up the
        # memory used, which can result in unspecified behavior.
        self._data = ()
        # Configuration for triggering shell commands in TRF server VM.
        # NOTE(review): plaintext VM credentials are embedded here and passed
        # to sshpass on the command line (visible in the process list).
        self._cmd_data = {
            "user": "vagrant",
            "host": "192.168.60.144",
            "password": "vagrant",
            "command": "test",
        }
        # Template expanded with _cmd_data to run a remote command over ssh.
        self._command = (
            "sshpass -p {password} ssh "
            "-o UserKnownHostsFile=/dev/null "
            "-o StrictHostKeyChecking=no "
            "{user}@{host} {command}"
        )
def exec_command(self, command):
"""
Run a command remotely on magma_trfserver VM.
Args:
command: command (str) to be executed on remote host
e.g. 'sed -i \'s/str1/str2/g\' /usr/local/bin/traffic_server.py'
"""
data = self._cmd_data
data["command"] = '"' + command + '"'
param_list = shlex.split(self._command.format(**data))
return subprocess.call(
param_list,
shell=False,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
def update_dl_route(self, ue_ip_block):
""" Update downlink route in TRF server """
ret_code = self.exec_command(
"sudo ip route flush via 192.168.129.1 && sudo ip route "
"add " + ue_ip_block + " via 192.168.129.1 dev eth2"
)
if ret_code != 0:
return False
return True
    def _init_lib(self):
        """ Initialize the trfgen library by loading in binary compiled from C
        """
        lib_path = os.environ["S1AP_TESTER_ROOT"]
        lib = os.path.join(lib_path, "bin", TrafficUtil._trfgen_lib_name)
        # NOTE(review): changes the process-wide working directory as a side
        # effect; presumably required so the library can find its resources.
        os.chdir(lib_path)
        self._test_lib = ctypes.cdll.LoadLibrary(lib)
        self._test_lib.trfgen_init()
    def _setup_configure_test(self):
        """ Set up the call to trfgen_configure_test
        The function prototype is:
            void trfgen_configure_test(int test_id, struct_test test_parms)
        This function call caches the test configurations specified in the
        struct to be called upon and run from the S1AP tester binary.
        """
        self._config_test = self._test_lib.trfgen_configure_test
        # Declare the C signature so ctypes marshals arguments correctly.
        self._config_test.restype = None
        self._config_test.argtypes = (ctypes.c_int32, s1ap_types.struct_test)
    def _setup_start_test(self):
        """ Set up the call to trfgen_start_test
        The function prototype is:
            void trfgen_start_test(
                int test_id, char *host_ip, char *bind_ip, char *host_port)
        This function provides a configuration ID and bind address to the S1AP
        tester for it to start a trfgen test. This function returns practically
        immediately, as the iperf3 process is called on a separate fork.
        """
        self._start_test = self._test_lib.trfgen_start_test
        # Declare the C signature so ctypes marshals arguments correctly.
        self._start_test.restype = None
        self._start_test.argtypes = (
            ctypes.c_int,
            ctypes.c_char_p,
            ctypes.c_char_p,
            ctypes.c_char_p,
        )
def cleanup(self):
""" Cleanup the dll loaded explicitly so the next run doesn't reuse the
same globals as ctypes LoadLibrary uses dlopen under the covers """
# self._test_lib.dlclose(self._test_lib._handle)
self._test_lib = None
self._data = None
    def configure_test(self, is_uplink, duration, is_udp):
        """ Returns the test configuration index for the configurations
        provided. This is the index that is in the trfgen internal state. If a
        configuration is new, will attempt to create a new one in trfgen

        Args:
            is_uplink (bool): uplink if True, downlink if False
            duration (int): test duration, in seconds
            is_udp (bool): use UDP if True, TCP if False

        Returns: an int, the index of the test configuration in trfgen, a.k.a.
            the test_id

        Raises MemoryError if return test index would exceed
            s1ap_types.MAX_TEST_CFG
        """
        test = s1ap_types.struct_test()
        # Uplink means the trfgen side acts as iperf client; downlink, server.
        test.trfgen_type = (
            s1ap_types.trfgen_type.CLIENT.value
            if is_uplink
            else s1ap_types.trfgen_type.SERVER.value
        )
        test.traffic_type = (
            s1ap_types.trf_type.UDP.value
            if is_udp
            else s1ap_types.trf_type.TCP.value
        )
        test.duration = duration
        test.server_timeout = duration
        # First we see if this test has already been configured. If so, just
        # reuse that configuration
        for t in self._trfgen_tests:
            if (
                t.trfgen_type == test.trfgen_type
                and t.traffic_type == test.traffic_type
                and t.duration == test.duration
                and t.server_timeout == test.server_timeout
            ):
                return t.test_id
        # Otherwise, we just create the new test
        # NOTE(review): `>=` also admits the case len() == MAX_TEST_CFG, so up
        # to MAX_TEST_CFG + 1 configs can be created with the largest test_id
        # equal to MAX_TEST_CFG. Confirm whether `<` was intended here.
        if s1ap_types.MAX_TEST_CFG >= len(self._trfgen_tests):
            test.test_id = len(self._trfgen_tests)
            self._trfgen_tests += (test,)
            self._config_test(test.test_id, test)
            return test.test_id
        # If we get here, then we've reached the limit on the number of tests
        # that we can configure, so send an error. Eventually, come up with an
        # eviction scheme
        raise MemoryError(
            "Reached limit on number of configurable tests: %d"
            % s1ap_types.MAX_TEST_CFG
        )
def generate_traffic_test(
self, ips, is_uplink=False, duration=120, is_udp=False
):
""" Creates a TrafficTest object for the given UE IPs and test type
Args:
ips (list(ipaddress.ip_address)): the IP addresses of the UEs to
which to connect
is_uplink (bool): whether to do an uplink test. Defaults to False
duration (int): duration, in seconds, of the test. Defaults to 120
is_udp (bool): whether to use UDP. If False, uses TCP. Defaults to
False
Returns: a TrafficTest object, which is used to interact with the
trfgen test
"""
test_id = self.configure_test(is_uplink, duration, is_udp)
instances = tuple(
TrafficTestInstance(is_uplink, is_udp, duration, ip, 0)
for ip in ips
)
return TrafficTest(self._start_test, instances, (test_id,) * len(ips))
class TrafficTest(object):
    ''' Class for representing a trfgen test with which to interact

    This is the class that directly interacts with the TrafficTestServer via a
    socketed connection, when the test starts (i.e. the "client" for the
    "server").
    '''

    # Counter + lock used to generate unique network iface alias names
    _alias_counter = 0
    _alias_lock = threading.Lock()
    _iproute = pyroute2.IPRoute()
    _net_iface = 'eth2'
    # Next local port to hand out; guarded by _port_lock
    _port = 7000
    _port_lock = threading.Lock()
    # Remote iperf3 superserver (IP, port) tuple. Port 62462 is chosen because
    # 'MAGMA' translates to 62462 on a 12-key phone pad
    _remote_server = ('192.168.60.144', 62462)

    def __init__(self, test_runner, instances, test_ids):
        ''' Creates a new TrafficTest object for running the test instance(s)
        with the associated test_ids

        Ports will be assigned when the test is run by communicating with the
        test server responsible for iperf3 test servers

        Args:
            test_runner: the ctypes hook into the traffic gen trfgen_start_test
                function
            instances (list(TrafficTestInstance)): the instances to run
            test_ids (list(int)): the associated trfgen test configuration
                indices; must be the same length as instances
        '''
        # BUGFIX: compare lengths with ==, not `is`. Identity comparison of
        # ints only "works" via CPython's small-int cache and silently fails
        # for larger lengths.
        assert len(instances) == len(test_ids)
        self._done = threading.Event()
        self._instances = tuple(instances)
        self._results = None  # Cached list(iperf3.TestResult) objects
        self._runner = test_runner
        self._test_ids = tuple(test_ids)
        self._test_lock = threading.RLock()  # Provide mutex between tests

    def __enter__(self):
        ''' Starts execution of the test '''
        self.start()
        return self

    def __exit__(self, *_):
        ''' Waits for test to end '''
        self.wait()

    @staticmethod
    def _get_port():
        ''' Returns the next port for testing '''
        with TrafficTest._port_lock:
            TrafficTest._port += 1
            return TrafficTest._port

    @staticmethod
    def _iface_up(ip):
        ''' Brings up an iface for the given IP

        Args:
            ip (ipaddress.ip_address): the IP address to use for bringing up
                the iface

        Returns the iface name with alias that was brought up
        '''
        # Generate a unique alias
        with TrafficTest._alias_lock:
            TrafficTest._alias_counter += 1
            net_iface = TrafficTest._net_iface
            alias = TrafficTest._alias_counter
        net_alias = '%s:UE%d' % (net_iface, alias)
        # Bring up the iface alias
        net_iface_index = TrafficTest._iproute.link_lookup(
            ifname=TrafficTest._net_iface)[0]
        TrafficTest._iproute.addr(
            'add', index=net_iface_index, label=net_alias, address=ip.exploded)
        return net_alias

    @staticmethod
    def _network_from_ip(ip, mask_len):
        ''' Returns the ipaddress.ip_network with the given mask that contains
        the given IP address

        Args:
            ip (ipaddress.ip_address): the IP address for which we want to find
                the network
            mask_len (int): the number of bits to mask

        Returns an ipaddress.ip_network; works agnostic to IPv4 or IPv6
        '''
        # Convert to int to make bit shifting easier
        ip_int = int.from_bytes(ip.packed, 'big')  # Packed is big-endian
        ip_masked = ipaddress.ip_address(ip_int >> mask_len << mask_len)
        # Compute the appropriate prefix length
        prefix_len = ip.max_prefixlen - mask_len
        return ipaddress.ip_network('%s/%d' % (ip_masked.exploded, prefix_len))

    def _run(self):
        ''' Run the traffic test

        Sets up traffic test with remote traffic server and local ifaces, then
        runs the runner hook into the trfgen binary and collects the results to
        cache

        Will block until the test ends
        '''
        # Create a snapshot of the test's states, in case they get changed or
        # wiped in a later operation. Basically, render tests immune to later
        # operations after the test has started.
        with self._test_lock:
            instances = copy.deepcopy(self._instances)
            test_ids = copy.deepcopy(self._test_ids)
        try:
            # Set up sockets and associated streams
            sc = socket.create_connection(self._remote_server)
            sc_in = sc.makefile('rb')
            sc_out = sc.makefile('wb')
            # Set up network ifaces and get UL port assignments for DL
            # NOTE(review): `aliases` is collected but never read afterwards;
            # the flush_addr loop below tears ifaces down by address instead.
            aliases = ()
            for instance in instances:
                aliases += (TrafficTest._iface_up(instance.ip),)
                if not instance.is_uplink:
                    # Assign a local port for the downlink UE server
                    instance.port = TrafficTest._get_port()
            # Create and send TEST message
            msg = TrafficRequest(
                TrafficRequestType.TEST, payload=instances)
            msg.send(sc_out)
            # Receive SERVER message and update test instances
            msg = TrafficMessage.recv(sc_in)
            assert msg.message is TrafficResponseType.SERVER
            r_id = msg.id  # Remote server test identifier
            server_instances = msg.payload  # (TrafficServerInstance, ...)
            # Locally keep references to arguments passed into trfgen
            args = [None] * len(instances)
            # Post-SERVER, pre-START logic
            for i in range(len(instances)):
                instance = instances[i]
                server_instance = server_instances[i]
                # Add ip network route
                net_iface_index = TrafficTest._iproute.link_lookup(
                    ifname=TrafficTest._net_iface)[0]
                server_instance_network = \
                    TrafficTest._network_from_ip(server_instance.ip, 8)
                TrafficTest._iproute.route(
                    'replace', dst=server_instance_network.exploded,
                    iif=net_iface_index, oif=net_iface_index, scope='link')
                # Add arp table entry
                os.system('/usr/sbin/arp -s %s %s' % (
                    server_instance.ip.exploded, server_instance.mac))
                if instance.is_uplink:
                    # Port should be the port of the remote for uplink
                    instance.port = server_instance.port
                else:
                    # Downlink servers (local) can start before START
                    args[i] = self._run_test(
                        test_ids[i], server_instance.ip, instance.ip,
                        instance.port)
            # Send START for the given r_id
            msg = TrafficRequest(
                TrafficRequestType.START, identifier=r_id)
            msg.send(sc_out)
            # Wait for STARTED response
            msg = TrafficMessage.recv(sc_in)
            assert msg.message is TrafficResponseType.STARTED
            assert msg.id == r_id
            # Post-STARTED, pre-RESULTS logic: uplink clients connect to the
            # now-running remote servers
            for i in range(len(instances)):
                instance = instances[i]
                if instance.is_uplink:
                    args[i] = self._run_test(
                        test_ids[i], server_instances[i].ip, instance.ip,
                        server_instances[i].port)
            # Wait for RESULTS message
            msg = TrafficMessage.recv(sc_in)
            assert msg.message is TrafficResponseType.RESULTS
            assert msg.id == r_id
            results = msg.payload
            # Signal to end connection
            msg = TrafficRequest(TrafficRequestType.EXIT)
            msg.send(sc_out)
            # Close out network ifaces
            net_iface_index = TrafficTest._iproute.link_lookup(
                ifname=TrafficTest._net_iface)[0]
            # For some reason the first call to flush this address flushes all
            # the addresses brought up during testing. But subsequent flushes
            # do nothing if the address doesn't exist
            for i in range(len(instances)):
                TrafficTest._iproute.flush_addr(index=net_iface_index,
                                                address=instances[i].ip.exploded)
            # Do socket cleanup
            sc_in.close()
            sc_out.close()
            sc.shutdown(socket.SHUT_RDWR)  # Ensures safe socket closure
            sc.close()
            # Cache results after cleanup
            with self._test_lock:
                self._results = results
        finally:
            # Signal that we're done
            self._done.set()

    def _run_test(self, test_id, host_ip, ue_ip, port):
        ''' Run the test at the given index by calling the test runner on the
        test parameters for the instance at the given index and port

        Args:
            test_id (int): the trfgen configuration index to use
            host_ip (ipaddress.ip_address): the remote iperf3 server's IP
                address [-c, for uplink]
            ue_ip (ipaddress.ip_address): the local UE's IP address to which to
                bind [-B]
            port (int): the UE's port (downlink) or the remote server's port
                (uplink) [-p]

        Returns the raw arguments passed into the trfgen binary, for the caller
        to keep track of and avoid garbage collection
        '''
        args = (test_id, host_ip.exploded.encode(), ue_ip.exploded.encode(),
                str(port).encode())
        self._runner(*args)
        return args

    @staticmethod
    def combine(test, *tests):
        ''' Combines TrafficTest objects to produce a single test object that
        will run the parameters given in the tests all at the same time

        All tests in the argument will become unrunnable, as their instances
        will be stripped!

        Args:
            test (TrafficTest): a test, included to force at least one test to
                be passed as an argument
            tests (list(TrafficTest)): any remaining tests to combine

        Return a single TrafficTest that will run all the instances together
        '''
        runner = test._runner
        tests = (test,) + tests
        instances = ()
        test_ids = ()
        for test in tests:
            with test._test_lock:
                instances += test._instances
                test_ids += test._test_ids
                # Now disable the test from later runs
                test._instances = ()
                test._test_ids = ()
        # Create and return the new test
        return TrafficTest(runner, instances, test_ids)

    @property
    def results(self):
        # Cached tuple(iperf3.TestResult); None until a run completes
        return self._results

    def start(self):
        ''' Start this test by spinning off runner thread '''
        self._done.clear()
        threading.Thread(target=self._run).start()

    def verify(self):
        ''' Verify the results of this test

        Raises a RuntimeError if any tests returned with an error message
        '''
        self.wait()
        with self._test_lock:
            if not isinstance(self.results, tuple):
                raise RuntimeError(
                    'Cached results object is not a tuple : {}'.format(
                        self.results),
                )
            for result in self.results:
                if not isinstance(result, iperf3.TestResult):
                    raise RuntimeError(
                        'Cached results are not iperf3.TestResult objects')
                if result.error:
                    # iPerf dumps out-of-order packet information on stderr,
                    # ignore these while verifying the test results
                    if "OUT OF ORDER" not in result.error:
                        raise RuntimeError(result.error)

    def wait(self):
        ''' Wait for this test to complete '''
        self._done.wait(timeout=TRAFFIC_TEST_TIMEOUT_SEC)
|
run_experiments.py | #!/usr/bin/env python3
#
# Copyright (C)ย ย 2022ย ย Intel Corporation.ย
#
# This software and the related documents are Intel copyrighted materials, and your use of them is governed by the express license under which they were provided to you ("License"). Unless the License provides otherwise, you may not use, modify, copy, publish, distribute, disclose or transmit this software or the related documents without Intel's prior written permission.
# This software and the related documents are provided as is, with no express or implied warranties, other than those that are expressly stated in the License.
#
# SPDX-License-Identifier: MIT
import re
import os
import pwd
import sys
import subprocess
import time
import argparse
import tempfile
import glob
from timeit import default_timer as timer
from datetime import timedelta
from threading import Thread, Lock
import threading, queue
import multiprocessing
# Absolute path to the kAFL launcher script, resolved from $BKC_ROOT.
FUZZ_SH_PATH = os.path.expandvars("$BKC_ROOT/bkc/kafl/fuzz.sh")
# Default fuzzing / coverage-gathering time budgets, in hours.
DEFAULT_TIMEOUT_HOURS=2
DEFAULT_COV_TIMEOUT_HOURS=2
# Number of repeated campaigns per setup.
REPEATS=1
# Default seed corpora: one subdirectory per harness name.
SEEDS_DIR = os.path.expanduser("~/seeds/harnesses/")
#KAFL_EXTRA_FLAGS="-t 8 --t-soft 3 -tc --trace --log-crashes --kickstart 16"
KAFL_EXTRA_FLAGS="--trace --log-crashes"
# Kernel config prefix shared by all harness options.
HARNESS_PREFIX="CONFIG_TDX_FUZZ_HARNESS_"
# Extra KCFLAGS for the kernel build — disables some IPA optimizations,
# presumably to keep symbols mappable for coverage/smatch (confirm).
KCFLAGS = "-fno-ipa-sra -fno-ipa-cp-clone -fno-ipa-cp"
#HARNESSES = ["DOINITCALLS_LEVEL_3", "DOINITCALLS_LEVEL_4", "DOINITCALLS_LEVEL_5", "DOINITCALLS_LEVEL_6", "DOINITCALLS_LEVEL_7", "CONFIG_TDX_FUZZ_HARNESS_POST_TRAP", "CONFIG_TDX_FUZZ_HARNESS_EARLYBOOT", "CONFIG_TDX_FUZZ_HARNESS_DOINITCALLS_PCI", "CONFIG_TDX_FUZZ_HARNESS_DOINITCALLS_VIRTIO", "CONFIG_TDX_FUZZ_HARNESS_DOINITCALLS_ACPI", "CONFIG_TDX_FUZZ_HARNESS_FULL_BOOT", "CONFIG_TDX_FUZZ_HARNESS_REST_INIT", "CONFIG_TDX_FUZZ_HARNESS_VIRTIO_BLK_PROBE", "BPH_VIRTIO_CONSOLE_INIT", "BPH_EARLY_PCI_SERIAL", "CONFIG_TDX_FUZZ_HARNESS_START_KERNEL", "CONFIG_TDX_FUZZ_HARNESS_DO_BASIC", "CONFIG_TDX_FUZZ_HARNESS_ACPI_EARLY_INIT"]
# Harnesses selected via CONFIG_TDX_FUZZ_HARNESS_* kernel config options
# (DOINITCALLS_LEVEL_* entries are expanded by generate_setups).
HARNESSES = [
    "DOINITCALLS_LEVEL_3",
    "DOINITCALLS_LEVEL_4",
    "DOINITCALLS_LEVEL_5",
    "DOINITCALLS_LEVEL_6",
    "DOINITCALLS_LEVEL_7",
    "CONFIG_TDX_FUZZ_HARNESS_POST_TRAP",
    "CONFIG_TDX_FUZZ_HARNESS_EARLYBOOT",
    "CONFIG_TDX_FUZZ_HARNESS_DOINITCALLS_PCI",
    "CONFIG_TDX_FUZZ_HARNESS_DOINITCALLS_VIRTIO",
    "CONFIG_TDX_FUZZ_HARNESS_DOINITCALLS_ACPI",
    "CONFIG_TDX_FUZZ_HARNESS_FULL_BOOT",
    "CONFIG_TDX_FUZZ_HARNESS_REST_INIT",
    "CONFIG_TDX_FUZZ_HARNESS_VIRTIO_BLK_PROBE",
    "CONFIG_TDX_FUZZ_HARNESS_START_KERNEL",
    "CONFIG_TDX_FUZZ_HARNESS_DO_BASIC",
    "CONFIG_TDX_FUZZ_HARNESS_ACPI_EARLY_INIT"]
#HARNESSES = ["DOINITCALLS_LEVEL_4"]
# Boot-param harnesses: run via extra kernel boot parameters instead of a
# dedicated CONFIG_TDX_FUZZ_HARNESS_* option (see BOOT_PARAM_HARNESSES).
BPH_HARNESSES = [
    "BPH_ACPI_INIT",
    "BPH_VP_MODERN_PROBE",
    "BPH_VIRTIO_CONSOLE_INIT",
    "BPH_P9_VIRTIO_PROBE",
    "BPH_PCI_SUBSYS_INIT",
    "BPH_HANDLE_CONTROL_MESSAGE",
    "BPH_VIRTIO_PCI_PROBE",
    "BPH_PCIBIOS_FIXUP_IRQS"]
HARNESSES = HARNESSES + BPH_HARNESSES
# Per-harness fuzzing time budgets (hours); any harness not listed here uses
# DEFAULT_TIMEOUT_HOURS.
HARNESS_TIMEOUT_OVERRIDES = {
    "FULL_BOOT": 24,
    "DOINITCALLS_LEVEL_6": 24,
    "DOINITCALLS_LEVEL_4": 24,
    "DO_BASIC": 24,
}
# Harnesses that run FULL_BOOT with extra kernel boot params
BOOT_PARAM_HARNESSES = {
    "BPH_ACPI_INIT": "fuzzing_func_harness=acpi_init",
    "BPH_VP_MODERN_PROBE": "fuzzing_func_harness=vp_modern_probe fuzzing_disallow=virtio_pci_find_capability",
    "BPH_VIRTIO_CONSOLE_INIT": "fuzzing_func_harness=init",
    "BPH_VIRTIO_PCI_PROBE": "fuzzing_func_harness=virtio_pci_probe",
    "BPH_P9_VIRTIO_PROBE": "fuzzing_func_harness=p9_virtio_probe",
    "BPH_PCI_SUBSYS_INIT": "fuzzing_func_harness=pci_subsys_init",
    # TODO: kprobes not avail, do manual harness
    # "BPH_EARLY_PCI_SERIAL": "fuzzing_func_harness=setup_early_printk earlyprintk=pciserial,force,00:18.1,115200",
    "BPH_PCIBIOS_FIXUP_IRQS": "fuzzing_func_harness=pcibios_fixup_irqs acpi=noirq",
    "BPH_HANDLE_CONTROL_MESSAGE": "fuzzing_func_harness=handle_control_message fuzzing_disallow=virtio_pci_find_capability,pci_read_config_dword",
    #"FULL_BOOT": "tsc_early_khz=2600",
}
KAFL_PARAM_HARNESSES = {
"FULL_BOOT": "-t 8 -ts 3"
}
DISABLE_HARNESSES = []
command_log = []
"""
# SET these in .config.tmpl
default_config_options = {"CONFIG_TDX_FUZZ_KAFL_DETERMINISTIC": "y",
"CONFIG_TDX_FUZZ_KAFL_DISABLE_CPUID_FUZZ": "y",
"CONFIG_TDX_FUZZ_KAFL_SKIP_IOAPIC_READS": "n",
"CONFIG_TDX_FUZZ_KAFL_SKIP_ACPI_PIO": "n",
"CONFIG_TDX_FUZZ_KAFL_SKIP_RNG_SEEDING": "y",
"CONFIG_TDX_FUZZ_KAFL_SKIP_MSR": "n",
"CONFIG_TDX_FUZZ_KAFL_SKIP_PARAVIRT_REWRITE": "n",
}
"""
# Per-harness kernel config tweaks applied on top of .config.tmpl by
# build_kernel. NOTE: the CONFIG_TDX_FUZZ_HARNESS_DOINITCALLS_VIRTIO key
# previously appeared twice with the same value; the duplicate was removed.
harness_config_options = {
    "CONFIG_TDX_FUZZ_HARNESS_EARLYBOOT": {"CONFIG_TDX_FUZZ_KAFL_SKIP_PARAVIRT_REWRITE": "n"},
    "CONFIG_TDX_FUZZ_HARNESS_DOINITCALLS": {"CONFIG_TDX_FUZZ_KAFL_SKIP_IOAPIC_READS": "y", "CONFIG_TDX_FUZZ_KAFL_SKIP_ACPI_PIO": "y"},
    "CONFIG_TDX_FUZZ_HARNESS_FULL_BOOT": {"CONFIG_TDX_FUZZ_KAFL_SKIP_PARAVIRT_REWRITE": "y"},
    "CONFIG_TDX_FUZZ_HARNESS_POST_TRAP": {"CONFIG_TDX_FUZZ_KAFL_SKIP_ACPI_PIO": "y", "CONFIG_TDX_FUZZ_KAFL_SKIP_PARAVIRT_REWRITE": "y"},
    "DOINITCALLS_LEVEL_7": {"CONFIG_TDX_FUZZ_KAFL_VIRTIO": "y"},
    "DOINITCALLS_LEVEL_6": {"CONFIG_TDX_FUZZ_KAFL_VIRTIO": "y"},
    "CONFIG_TDX_FUZZ_HARNESS_DOINITCALLS_VIRTIO": {"CONFIG_TDX_FUZZ_KAFL_VIRTIO": "y"},
    "BPH_VIRTIO_CONSOLE_INIT": {"CONFIG_TDX_FUZZ_KAFL_VIRTIO": "y"},
    "CONFIG_TDX_FUZZ_HARNESS_DOINITCALLS_PCI": {"CONFIG_TDX_FUZZ_KAFL_SKIP_ACPI_PIO": "y"},
    "CONFIG_TDX_FUZZ_HARNESS_START_KERNEL": {"CONFIG_TDX_FUZZ_KAFL_SKIP_ACPI_PIO": "y"},
}
# Placeholder for config-option dependency handling (appears unused in this file).
config_options_dependencies = {}
# Serializes kernel builds, which all share the same source tree / .config.
kernel_build_mutex = Lock()
# Work queue of built campaigns waiting to be fuzzed by worker threads.
q = queue.Queue()
"""
Strips the CONFIG_TDX_HARNESS_ part from the harness name
"""
def normalize_harness_name(s):
return s[len(HARNESS_PREFIX):] if s.startswith(HARNESS_PREFIX) else s
def linux_conf_harness_name(s):
    """Return the full CONFIG_TDX_FUZZ_HARNESS_* kernel config name for *s*."""
    normalized = normalize_harness_name(s)
    return HARNESS_PREFIX + normalized
def name_to_harness(s):
    """Map a campaign name back to its harness identifier.

    The -tmpXXXX suffix added by build_kernel is dropped; BPH_* and
    DOINITCALLS_LEVEL_* names are already complete, everything else gets the
    CONFIG_TDX_FUZZ_HARNESS_ prefix restored.
    """
    base, _, _ = s.partition("-")  # Remove -tmpXXX
    if base.startswith(("BPH_", "DOINITCALLS_LEVEL_")):
        return base
    return HARNESS_PREFIX + base
def get_work_parallelism():
    """Extract the fuzzer's '-p <n>' parallelism value from fuzz.sh.

    Scans FUZZ_SH_PATH for the KAFL_FULL_OPTS line and returns its -p value
    as an int, or None when no such option is found.
    """
    with open(FUZZ_SH_PATH, "r") as fh:
        # Raw string: '\s'/'\d' are regex escapes, not string escapes
        # (non-raw form triggers invalid-escape warnings on newer Pythons).
        match = re.search(r"KAFL_FULL_OPTS=.*-p\s*(\d+)", fh.read())
    return int(match.group(1)) if match else None
def parse_linux_config(fname):
    """Return the list of harnesses to run.

    Dynamic parsing of the kernel .config is currently disabled: the static
    HARNESSES list is returned and *fname* is ignored. The removed (dead)
    implementation scanned *fname* for CONFIG_TDX_FUZZ_HARNESS_* options and
    filtered out DISABLE_HARNESSES entries.
    """
    return HARNESSES
def generate_setups(harnesses):
    """Build the set of kernel-config setups, one per harness.

    Each setup is a tuple of (CONFIG_NAME, value) pairs: the harness option
    itself, any extras from harness_config_options, plus special expansion
    for DOINITCALLS_LEVEL_* and BPH_* pseudo-harnesses.
    """
    setups = set()
    for name in harnesses:
        conf = [(name, "y")]
        extra = harness_config_options.get(name)
        if extra:
            conf.extend(extra.items())
        if name.startswith("DOINITCALLS_LEVEL"):
            # The level number rides in the harness name itself.
            level = name[len("DOINITCALLS_LEVEL_"):]
            conf.append(("CONFIG_TDX_FUZZ_HARNESS_DOINITCALLS", "y"))
            conf.append(("CONFIG_TDX_FUZZ_HARNESS_DOINITCALLS_LEVEL", level))
        if name.startswith("BPH_"):
            # Boot-param harnesses build a "no harness" kernel; the harness is
            # selected via kernel boot parameters at run time.
            conf.append(("CONFIG_TDX_FUZZ_HARNESS_NONE", "y"))
        setups.add(tuple(conf))
    return setups
def build_kernel(setup, linux_source, global_storage_dir, debug=False):
    """Configure and build a kernel for one experiment setup.

    Creates a fresh campaign directory under *global_storage_dir*, applies the
    setup's config options on top of .config.tmpl, builds the kernel, and
    copies the artifacts into <campaign>/build.

    Args:
        setup: tuple of (CONFIG_NAME, value) pairs; setup[0][0] names the harness
        linux_source: path to the kernel source tree
        global_storage_dir: directory collecting all campaign results
        debug: when True, let the build's stdout/stderr through

    Returns: the generated campaign name (basename of the campaign dir)
    """
    out_stdout = subprocess.DEVNULL
    out_stderr = subprocess.DEVNULL
    if debug:
        out_stdout = None
        out_stderr = None
    harness = normalize_harness_name(setup[0][0])
    storage_dir = tempfile.mkdtemp(dir=global_storage_dir, prefix=harness+"-")
    campaign_name = os.path.basename(storage_dir)
    print(f"Configuring kernel for campaign '{campaign_name}'")
    old_cwd = os.getcwd()
    # Enter Linux CWD
    os.chdir(linux_source)
    try:
        # The kernel tree and its .config are shared state: serialize builds,
        # and (FIX) make sure the lock is released even when a step raises —
        # the previous acquire()/release() pair leaked the lock on exceptions.
        with kernel_build_mutex:
            subprocess.run(f"cp .config.tmpl .config", shell=True, stdout=out_stdout, stderr=out_stderr)
            print(f"Generating config for {setup}")
            for conf,val in setup:
                if val is None:
                    # Handle after all values have been set
                    pass
                else:
                    subprocess.run(f"./scripts/config --set-val {conf} {val}", shell=True, stdout=out_stdout, stderr=out_stderr)
            # Unsets need to happen after setting vals
            for conf,val in setup:
                if val is None:
                    subprocess.run(f"./scripts/config -d {conf}", shell=True, stdout=out_stdout, stderr=out_stderr)
            print("Building kernel")
            kernel_build_path = os.path.join(storage_dir, "build")
            os.makedirs(kernel_build_path, exist_ok=True)
            subprocess.run(f"make -j $(nproc) KCFLAGS=\"{KCFLAGS}\"", shell=True, stdout=out_stdout, stderr=out_stderr)
            time.sleep(1)
            # Copy over built kernel to own directory
            subprocess.run(f"cp vmlinux System.map arch/x86/boot/bzImage .config {kernel_build_path}", shell=True, stdout=out_stdout, stderr=out_stderr)
        print(f"Copied kernel for campaign '{campaign_name}' to {kernel_build_path}")
    finally:
        # Always restore the caller's working directory (FIX: previously not
        # restored when an exception escaped the build).
        os.chdir(old_cwd)
    return campaign_name
def run_setup(campaign_name, setup, linux_source, global_storage_dir, debug=False, cpu_offset=0, dry_run=False):
    """Run one fuzzing campaign via fuzz.sh and collect its workdir.

    Args:
        campaign_name: name returned by build_kernel (campaign dir basename)
        setup: tuple of (CONFIG_NAME, value) pairs; setup[0][0] names the harness
        linux_source: kernel source path (not referenced in this body; kept
            for symmetry with the queue payload produced by do_run)
        global_storage_dir: directory holding all campaign dirs
        debug: when True, let fuzz.sh's stdout/stderr through
        cpu_offset: CPU offset handed to fuzz.sh so parallel workers don't overlap
        dry_run: when True, abort the fuzzer after 10000 executions
    """
    out_stdout = subprocess.DEVNULL
    out_stderr = subprocess.DEVNULL
    if debug:
        out_stdout = None
        out_stderr = None
    harness = normalize_harness_name(setup[0][0])
    print(f"Preparing campaign '{campaign_name}'")
    #campaign_name = time.strftime("%Y%m%d-%H%M%S")
    storage_dir = os.path.join(global_storage_dir, campaign_name)
    username = pwd.getpwuid(os.getuid()).pw_name
    # Fuzzing workdir lives in shared memory for speed; moved to storage_dir
    # once the campaign ends.
    workdir_path = f"/dev/shm/{username}_tdfl-{campaign_name}"
    kernel_build_path = os.path.join(storage_dir, "build")
    # NOTE(review): old_cwd is assigned but never used in this function.
    old_cwd = os.getcwd()
    # Get default seeds for harness
    seeds_dir = None
    harness_seeds = os.path.join(SEEDS_DIR, harness)
    if os.path.exists(harness_seeds):
        seeds_dir = harness_seeds
    else:
        print(f"Could not find seed dir {harness_seeds}")
    seed_str = f"--seed-dir {seeds_dir}" if seeds_dir else ""
    print(f"Running campaign {workdir_path} with seeds '{seeds_dir}'")
    dry_run_flags = "--abort-exec=10000" if dry_run else ""
    # Per-harness overrides for timeout, boot params and extra kAFL flags.
    timeout = HARNESS_TIMEOUT_OVERRIDES.get(harness, DEFAULT_TIMEOUT_HOURS)
    kernel_boot_params = BOOT_PARAM_HARNESSES.get(harness, "")
    kafl_harness_extra_params = KAFL_PARAM_HARNESSES.get(harness, "")
    try:
        exc_cmd = f"KAFL_WORKDIR={workdir_path} KERNEL_BOOT_PARAMS=\"{kernel_boot_params}\" {FUZZ_SH_PATH} full {kernel_build_path} --abort-time={timeout} --cpu-offset={cpu_offset} {seed_str} {KAFL_EXTRA_FLAGS} {kafl_harness_extra_params} {dry_run_flags}"
        command_log.append(exc_cmd)
        #with open(os.path.join(workdir_path, "cmd"), "w") as f:
        #    print(exc_cmd, file=f)
        # Hard kill one minute past the fuzzer's own --abort-time budget.
        subprocess.run(exc_cmd, shell=True, timeout=timeout * 3600 + 60, stdout=out_stdout, stderr=out_stderr)
    except subprocess.TimeoutExpired as e:
        print(e)
    # Wait for stuff to settle down... might not be necessary
    print(f"Done running campaign {workdir_path}")
    time.sleep(2)
    # Archive PT ranges, then move the shm workdir into persistent storage.
    subprocess.run(f"{FUZZ_SH_PATH} ranges {workdir_path} > {workdir_path}/pt_ranges.txt", shell=True, stdout=out_stdout, stderr=out_stderr)
    subprocess.run(f"mv {workdir_path}/* {storage_dir}", shell=True, stdout=out_stdout, stderr=out_stderr)
    subprocess.run(f"rm -r {workdir_path}", shell=True, stdout=out_stdout, stderr=out_stderr)
    target_dir = os.path.join(storage_dir, "target")
    if not os.path.isdir(target_dir):
        print(f"Could not find ./target/ in '{storage_dir}'. Something most likely went wrong. Doing a manual copy.")
        os.makedirs(target_dir, exist_ok=True)
    ## HACK: overwrite ./target/ copied by fuzz.sh since vmlinux could have changed due to parallel campaign compilation
    #subprocess.run(f"cp {kernel_build_path}/* {target_dir}", shell=True, stdout=out_stdout, stderr=out_stderr)
def worker(i, work_parallelism, stop, dry_run):
    """Worker thread body: pull campaigns off the shared queue and run them.

    Polls the queue with a 1s timeout; exits once the queue stays empty and
    *stop()* reports True.
    """
    cpu_offset = i*work_parallelism
    print(f"Starting worker thread {i} with cpu-offset {cpu_offset} (work_parallelism={work_parallelism})")
    while True:
        try:
            work_args = q.get(timeout=1)
        except queue.Empty:
            if stop():
                break
            continue
        run_setup(*work_args, cpu_offset=cpu_offset, dry_run=dry_run)
        q.task_done()
def do_cov(args):
    """Gather coverage (and a smatch report) for every campaign directory
    under args.storage_dir, skipping harnesses listed in args.skip_harness."""
    out_stdout = subprocess.DEVNULL
    out_stderr = subprocess.DEVNULL
    if args.debug:
        out_stdout = None
        out_stderr = None
    for d in glob.glob(args.storage_dir + "/*/"):
        # Campaign dir basename encodes the harness (e.g. FULL_BOOT-tmpXYZ).
        exp_name = os.path.basename(os.path.normpath(d))
        harness = normalize_harness_name(name_to_harness(exp_name))
        if harness in args.skip_harness:
            continue
        # Skip coverage gathering for campaigns that already have linecov.lst
        if (not args.rerun) and os.path.exists(os.path.join(d, "traces/linecov.lst")):
            continue
        ncpu = args.work_parallelism * args.p
        kernel_boot_params = BOOT_PARAM_HARNESSES.get(harness, "")
        cmd_cov = f"KERNEL_BOOT_PARAMS=\"{kernel_boot_params}\" {FUZZ_SH_PATH} cov {d} -p {ncpu}"
        cmd_smatch = f"USE_GHIDRA=1 {FUZZ_SH_PATH} smatch {d}"
        print(f"Gathering coverage for '{d}' with -p {ncpu}")
        subprocess.run(cmd_cov, shell=True, stdout=out_stdout, stderr=out_stderr)
        subprocess.run(cmd_smatch, shell=True, stdout=out_stdout, stderr=out_stderr)
        #print(cmd_cov)
        #print(cmd_smatch)
        print(f"DONE Gathering coverage for '{d}' with -p {ncpu}\n")
def do_run(args):
    """Run the full experiment matrix: build one kernel per setup, fuzz each
    campaign on a worker-thread pool, then optionally gather coverage."""
    linux_src = args.linux_src
    storage_dir = args.storage_dir
    if not args.allow_existing_dir and os.path.isdir(storage_dir):
        print(f"Storage path '{storage_dir}' already exists. Please choose a new dir.")
        sys.exit(1)
    os.makedirs(storage_dir, exist_ok=True)
    linux_config_path = os.path.join(linux_src, ".config")
    linux_config_tmpl_path = os.path.join(linux_src, ".config.tmpl")
    linux_config_bak_path = os.path.join(linux_src, ".config.fuzz.bak")
    # Preserve the user's .config; it is restored once kernels are built.
    print(f"Backing up .config to {linux_config_bak_path}")
    subprocess.run(f"cp {linux_config_path} {linux_config_bak_path}", shell=True)
    if os.path.isfile(linux_config_tmpl_path):
        print(f"Using Kernel config template '{linux_config_tmpl_path}'")
    else:
        print(f"Kernel .config template file '{linux_config_tmpl_path}' does not exists, using ' {linux_config_path}'")
        subprocess.run(f"cp {linux_config_path} {linux_config_tmpl_path}", shell=True)
    harnesses = parse_linux_config(linux_config_path)
    setups = generate_setups(harnesses)
    print("Campaign will run {} different setups".format(len(setups)))
    # Start up workers
    work_parallelism = args.work_parallelism
    if args.overcommit is False and work_parallelism * args.p > multiprocessing.cpu_count():
        print(f"Using more parallelism than cores available ({work_parallelism} * {args.p} > {multiprocessing.cpu_count()})!! If you really want this, specify --overcommit")
        sys.exit(1)
    start = timer()
    for setup in setups:
        #run_setup(setup, linux_src, storage_dir, debug=args.debug)
        for i in range(REPEATS):
            # TODO: no need to build separate kernels for repeats. Needs refactoring
            campaign_name = build_kernel(setup, linux_src, storage_dir, debug=True)
            q.put((campaign_name, setup, linux_src, storage_dir, args.debug))
    threads = []
    # Condition variable. No need for it to be atomic..
    stop_threads = False
    for i in range(args.p):
        t = threading.Thread(target=worker, args=(i, work_parallelism, lambda: stop_threads, args.dry_run))
        threads.append(t)
        t.start()
    # All kernels are built; give the user their .config back.
    subprocess.run(f"mv {linux_config_bak_path} {linux_config_path}", shell=True)
    # block until all campaigns are done
    q.join()
    end = timer()
    print("Campaign ran {} different setups in {}".format(len(setups), (timedelta(seconds=end-start))))
    stop_threads = True
    for t in threads:
        t.join()
    out_stdout = subprocess.DEVNULL
    out_stderr = subprocess.DEVNULL
    if args.debug:
        out_stdout = None
        out_stderr = None
    print("Command log:")
    for cmd in command_log:
        print(cmd)
    print("END command log")
    if args.coverage:
        for d in glob.glob(storage_dir + "/*"):
            ncpu = work_parallelism * args.p
            #ncpu = args.p
            # NOTE(review): unlike do_cov, `d` here is a full path, not a
            # basename — name_to_harness likely needs
            # os.path.basename(os.path.normpath(d)); verify.
            harness = name_to_harness(d)
            if harness in args.skip_harness:
                continue
            kernel_boot_params = BOOT_PARAM_HARNESSES.get(harness, "")
            cmd_cov = f"KERNEL_BOOT_PARAMS=\"{kernel_boot_params}\" {FUZZ_SH_PATH} cov {d} -p {ncpu}"
            cmd_smatch = f"USE_GHIDRA=1 {FUZZ_SH_PATH} smatch {d}"
            print(f"Gathering coverage for '{d}' with -p {ncpu}")
            try:
                subprocess.run(cmd_cov, shell=True, stdout=out_stdout, stderr=out_stderr, timeout=DEFAULT_COV_TIMEOUT_HOURS*3600)
                subprocess.run(cmd_smatch, shell=True, stdout=out_stdout, stderr=out_stderr, timeout=DEFAULT_COV_TIMEOUT_HOURS*3600)
            except subprocess.TimeoutExpired as e:
                print(f"TIMEOUT while getting coverage for '{d}'")
            print(f"DONE Gathering coverage for '{d}' with -p {ncpu}")
def parse_args():
    """Parse command line arguments for the experiments runner.

    Sub-commands: 'cov' (collect coverage for an existing run) and 'run'
    (build and fuzz all campaigns). Global options live on the main parser.
    """
    def parse_as_path(pathname):
        # Expand ~ and $VARS and normalize to an absolute path.
        return os.path.abspath(
            os.path.expanduser(
                os.path.expandvars(pathname)))
    def parse_as_file(filename):
        # Like parse_as_path, but the expanded path must exist.
        expanded = parse_as_path(filename)
        if not os.path.exists(expanded):
            raise argparse.ArgumentTypeError("Failed to find file argument %s (expanded: %s)" % (filename, expanded))
        return expanded
    def parse_as_dir(dirname):
        # Like parse_as_path, but the expanded path must exist.
        expanded = parse_as_path(dirname)
        if not os.path.exists(expanded):
            raise argparse.ArgumentTypeError("Failed to find file argument %s (expanded: %s)" % (dirname, expanded))
        return expanded
    main_parser = argparse.ArgumentParser(description='kAFL TDX fuzzing experiments runner.')
    subparsers = main_parser.add_subparsers(dest='action', metavar='<action>', required=True)
    cov_parser = subparsers.add_parser("cov", help="collect coverage")
    run_parser = subparsers.add_parser("run", help="run campaigns")
    cov_parser.add_argument('storage_dir', metavar='<dir>', type=str,
                            help='target dir containing the results of prior fuzzing run')
    cov_parser.add_argument('--rerun', action="store_true",
                            help='Force rerun of coverage gathering')
    run_parser.add_argument('linux_src', metavar='<dir>', type=parse_as_dir,
                            help='path to your linux kernel tree')
    run_parser.add_argument('storage_dir', metavar='<dir>', type=parse_as_path,
                            help='target dir to store the results. will be created / must not exist.')
    run_parser.add_argument('--allow-existing-dir', action="store_true",
                            help='Allow storing results in existing dir')
    run_parser.add_argument('--dry-run', action="store_true",
                            help='Perform dry run')
    run_parser.add_argument('-c', '--coverage', action="store_true",
                            help='Gather coverage + smatch after running campaigns')
    run_parser.add_argument('--launcher', type=parse_as_file, default="$BKC_ROOT/bkc/kafl/fuzz.sh",
                            help='fuzzer launch script (default: $BKC_ROOT/bkc/kafl/fuzz.sh)')
    main_parser.add_argument('--debug', action='store_true',
                             help='Turn on debug output (show fuzzer stdout/stderr)')
    main_parser.add_argument('-p', metavar='<n>', type=int, default=1,
                             help='Parallelize workload')
    main_parser.add_argument('--work_parallelism', metavar='<n>', type=int, default=get_work_parallelism(),
                             help='Parallelism used by fuzzer. Only use for manual override, automatically obtained from fuzz.sh')
    # BUGFIX: was `type=bool, default=False`, which made any non-empty value
    # (including "--overcommit False") parse as True. A plain flag is what
    # the help text and do_run's check expect.
    main_parser.add_argument('--overcommit', action='store_true',
                             help='Overcommit parallelization')
    main_parser.add_argument('--skip-harness', nargs="*", type=str, default=[],
                             help='Skip processing for specified harnesses')
    return main_parser.parse_args()
def main():
    """Entry point: parse arguments and dispatch to the chosen sub-command."""
    args = parse_args()
    if not os.path.exists(FUZZ_SH_PATH):
        print("Could not find kAFL launcher in %s. Exit" % FUZZ_SH_PATH)
        return
    # Dispatch table instead of an if-chain; action is constrained by argparse.
    handler = {"cov": do_cov, "run": do_run}.get(args.action)
    if handler is not None:
        handler(args)
if __name__ == "__main__":
main()
|
fake_wsdl_server.py | from threading import Thread
from wsgiref.simple_server import make_server
from spyne import (Application, ComplexModel, Integer, Iterable, ServiceBase,
String, Unicode, rpc)
from spyne.protocol.soap import Soap11
from spyne.server.wsgi import WsgiApplication
class Person(ComplexModel):
    # WSDL-visible fields: the spyne ComplexModel metaclass turns these class
    # attributes into the SOAP type's schema.
    name = String
    address = String
    def __init__(self, name="person", address="personville"):
        # Defaults yield a generic placeholder person.
        self.name = name
        self.address = address
class Dog(ComplexModel):
    # WSDL-visible fields (read by the spyne ComplexModel metaclass).
    name = String
    address = String
    toys = Iterable(String)
    def __init__(self, name, address, toys=None):
        # BUGFIX: `toys=[]` was a shared mutable default — every
        # default-constructed Dog aliased the same list. Use None as the
        # sentinel and create a fresh list per instance instead.
        self.name = name
        self.address = address
        self.toys = toys if toys is not None else []
class Neighborhood(ComplexModel):
    # WSDL-visible fields (read by the spyne ComplexModel metaclass).
    name = String
    people = Iterable(Person)
    dogs = Iterable(Dog)
    phone_numbers = Iterable(String)
    def __init__(self, name, people=None, dogs=None, phone_numbers=None):
        # BUGFIX: the three list parameters used shared mutable defaults
        # ([]); each instance now gets its own fresh empty list.
        self.name = name
        self.people = people if people is not None else []
        self.dogs = dogs if dogs is not None else []
        self.phone_numbers = phone_numbers if phone_numbers is not None else []
class HelloWorldService(ServiceBase):
    """Toy SOAP service exposing a handful of RPCs for WSDL-client tests."""
    @rpc(Unicode, _returns=Unicode)
    def say_my_name(ctx, name):
        # Echo the given name back.
        return name
    @rpc(Unicode, Integer, _returns=Iterable(Unicode))
    def say_hello(ctx, name, times):
        # Yield the greeting `times` times.
        for i in range(times):
            yield "Hello, %s" % name
    @rpc(Person, _returns=Dog)
    def person_to_dog(ctx, person):
        # Reuse the person's name/address; the dog gets no toys.
        return Dog(person.name, person.address)
    @rpc(_returns=Iterable(Dog))
    def good_dogs(_ctx):
        return [
            Dog("Pi", "123 Bork Street", ["Food", "Socks"]),
            Dog("Cricket", "123 Bork Street", ["Llama"]),
        ]
    @rpc(_returns=Iterable(Neighborhood))
    def neighborhoods(_ctx):
        # BUGFIX: Neighborhood.__init__ takes (name, people, dogs,
        # phone_numbers); the people and dogs lists were previously passed
        # in swapped order, violating the declared Iterable(Person)/
        # Iterable(Dog) field types.
        return [
            Neighborhood(
                "Meadows",
                [Person("Joe", "123 American Way")],
                [Dog("Pi", "123 Bork Street", ["Food", "Kitchen Towel"])],
                ["555-123-4567"],
            ),
            Neighborhood(
                "Montana",
                [Person("Jim", "123 Western Way")],
                [Dog("Max", "123 Dump Road", ["Beggin Strips"])],
                ["555-890-1234"],
            ),
        ]
# Spyne application wiring: SOAP 1.1 in/out, with lxml validation on input.
application = Application(
    [HelloWorldService],
    tns="spyne.examples.hello",
    in_protocol=Soap11(validator="lxml"),
    out_protocol=Soap11(),
)
def create_fake_server(port=8000, daemon=True):
    """Start the fake SOAP server on *port* in a background thread.

    With ``daemon=True`` (the default) the server thread will not keep the
    process alive on its own.

    Returns the URL of the generated WSDL document.
    """
    wsgi_app = WsgiApplication(application)
    server = make_server("0.0.0.0", port, wsgi_app)
    Thread(target=server.serve_forever, daemon=daemon).start()
    # f-string formatting converts the int itself; the str() call was redundant.
    return f"http://localhost:{port}?WSDL"
if __name__ == "__main__":
    # Run in the foreground (non-daemon thread) when invoked as a script.
    create_fake_server(daemon=False)
|
test_utils.py | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for swift.common.utils"""
from __future__ import print_function
from test.unit import temptree, debug_logger
import ctypes
import contextlib
import errno
import eventlet
import eventlet.event
import functools
import grp
import logging
import os
import mock
import random
import re
import socket
import sys
import json
import math
import six
from six import BytesIO, StringIO
from six.moves.queue import Queue, Empty
from six.moves import range
from textwrap import dedent
import tempfile
import time
import unittest
import fcntl
import shutil
from getpass import getuser
from shutil import rmtree
from functools import partial
from tempfile import TemporaryFile, NamedTemporaryFile, mkdtemp
from netifaces import AF_INET6
from mock import MagicMock, patch
from six.moves.configparser import NoSectionError, NoOptionError
from uuid import uuid4
from swift.common.exceptions import Timeout, MessageTimeout, \
ConnectionTimeout, LockTimeout, ReplicationLockTimeout, \
MimeInvalid
from swift.common import utils
from swift.common.utils import is_valid_ip, is_valid_ipv4, is_valid_ipv6
from swift.common.container_sync_realms import ContainerSyncRealms
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.swob import Request, Response
from test.unit import FakeLogger, requires_o_tmpfile_support
# Use the original (unpatched) threading module rather than eventlet's
# green-thread replacement, so these tests get real OS threads.
threading = eventlet.patcher.original('threading')
class MockOs(object):
    """Partial stand-in for the ``os`` module used by daemonization tests.

    Functions named in ``pass_funcs`` become silent no-ops, those in
    ``called_funcs`` record their invocation in ``self.called_funcs``, and
    those in ``raise_funcs`` record it and then raise ``OSError``.  Any
    other attribute is delegated to the real ``os`` module.
    """
    def __init__(self, pass_funcs=None, called_funcs=None, raise_funcs=None):
        if pass_funcs is None:
            pass_funcs = []
        if called_funcs is None:
            called_funcs = []
        if raise_funcs is None:
            raise_funcs = []
        self.closed_fds = []
        for func in pass_funcs:
            setattr(self, func, self.pass_func)
        self.called_funcs = {}
        for func in called_funcs:
            # partial() binds the function name so called_func can log it.
            c_func = partial(self.called_func, func)
            setattr(self, func, c_func)
        for func in raise_funcs:
            r_func = partial(self.raise_func, func)
            setattr(self, func, r_func)
    def pass_func(self, *args, **kwargs):
        """Accept anything and do nothing."""
        pass
    # These os functions are harmless no-ops unless overridden per-test.
    setgroups = chdir = setsid = setgid = setuid = umask = pass_func
    def called_func(self, name, *args, **kwargs):
        """Record that *name* was invoked."""
        self.called_funcs[name] = True
    def raise_func(self, name, *args, **kwargs):
        """Record the call, then fail with OSError."""
        self.called_funcs[name] = True
        raise OSError()
    def dup2(self, source, target):
        # Track which fds the code under test redirects over.
        self.closed_fds.append(target)
    def geteuid(self):
        '''Pretend we are running as root.'''
        return 0
    def __getattr__(self, name):
        # I only over-ride portions of the os module
        try:
            # NOTE(review): object has no __getattr__, so this lookup always
            # raises AttributeError and we fall through to the real os module.
            return object.__getattr__(self, name)
        except AttributeError:
            return getattr(os, name)
class MockUdpSocket(object):
    """Fake UDP socket that records payloads instead of sending them."""

    def __init__(self, sendto_errno=None):
        # When set, every sendto() raises socket.error with this errno.
        self.sendto_errno = sendto_errno
        self.sent = []

    def sendto(self, data, target):
        failing_errno = self.sendto_errno
        if failing_errno:
            raise socket.error(failing_errno,
                               'test errno %s' % failing_errno)
        self.sent.append((data, target))

    def close(self):
        pass
class MockSys(object):
    """Fake ``sys`` module exposing temp-file stdio streams for tests."""

    def __init__(self):
        self.stdin = TemporaryFile('w')
        self.stdout = TemporaryFile('r')
        self.stderr = TemporaryFile('r')
        self.__stderr__ = self.stderr
        # File descriptors of the three stdio streams, in stdio order.
        self.stdio_fds = [stream.fileno()
                          for stream in (self.stdin, self.stdout, self.stderr)]
def reset_loggers():
    """Remove handlers installed by utils.get_logger and reset its state."""
    handler_map = getattr(utils.get_logger, 'handler4logger', None)
    if handler_map is not None:
        for logger, handler in handler_map.items():
            logger.removeHandler(handler)
        delattr(utils.get_logger, 'handler4logger')
    console_map = getattr(utils.get_logger, 'console_handler4logger', None)
    if console_map is not None:
        for logger, console_handler in console_map.items():
            logger.removeHandler(console_handler)
        delattr(utils.get_logger, 'console_handler4logger')
    # Reset the LogAdapter class thread local state. Use get_logger() here
    # to fetch a LogAdapter instance because the items from
    # get_logger.handler4logger above are the underlying logger instances,
    # not the LogAdapter.
    utils.get_logger(None).thread_locals = (None, None)
def reset_logger_state(f):
    """Decorator: run *f* between logger resets so tests stay isolated."""
    @functools.wraps(f)
    def wrapper(self, *args, **kwargs):
        reset_loggers()
        try:
            result = f(self, *args, **kwargs)
        finally:
            # Always clean up, even when the wrapped test raises.
            reset_loggers()
        return result
    return wrapper
class TestTimestamp(unittest.TestCase):
    """Tests for swift.common.utils.Timestamp"""
    def test_invalid_input(self):
        """Negative offsets are rejected at construction time."""
        self.assertRaises(ValueError, utils.Timestamp, time.time(), offset=-1)
    def test_invalid_string_conversion(self):
        """str() on a Timestamp raises; callers must pick .normal/.internal."""
        t = utils.Timestamp(time.time())
        self.assertRaises(TypeError, str, t)
    def test_offset_limit(self):
        """Offsets are capped at MAX_OFFSET (16 f's in the internal form)."""
        t = 1417462430.78693
        # can't have a offset above MAX_OFFSET
        self.assertRaises(ValueError, utils.Timestamp, t,
                          offset=utils.MAX_OFFSET + 1)
        # exactly max offset is fine
        ts = utils.Timestamp(t, offset=utils.MAX_OFFSET)
        self.assertEqual(ts.internal, '1417462430.78693_ffffffffffffffff')
        # but you can't offset it further
        self.assertRaises(ValueError, utils.Timestamp, ts.internal, offset=1)
        # unless you start below it
        ts = utils.Timestamp(t, offset=utils.MAX_OFFSET - 1)
        self.assertEqual(utils.Timestamp(ts.internal, offset=1),
                         '1417462430.78693_ffffffffffffffff')
    def test_normal_format_no_offset(self):
        """Many input spellings all normalize to the same .normal form."""
        expected = '1402436408.91203'
        test_values = (
            '1402436408.91203',
            '1402436408.91203_00000000',
            '1402436408.912030000',
            '1402436408.912030000_0000000000000',
            '000001402436408.912030000',
            '000001402436408.912030000_0000000000',
            1402436408.91203,
            1402436408.912029,
            1402436408.9120300000000000,
            1402436408.91202999999999999,
            utils.Timestamp(1402436408.91203),
            utils.Timestamp(1402436408.91203, offset=0),
            utils.Timestamp(1402436408.912029),
            utils.Timestamp(1402436408.912029, offset=0),
            utils.Timestamp('1402436408.91203'),
            utils.Timestamp('1402436408.91203', offset=0),
            utils.Timestamp('1402436408.91203_00000000'),
            utils.Timestamp('1402436408.91203_00000000', offset=0),
        )
        for value in test_values:
            timestamp = utils.Timestamp(value)
            self.assertEqual(timestamp.normal, expected)
            # timestamp instance can also compare to string or float
            self.assertEqual(timestamp, expected)
            self.assertEqual(timestamp, float(expected))
            self.assertEqual(timestamp, utils.normalize_timestamp(expected))
    def test_isoformat(self):
        """.isoformat renders an ISO-8601 datetime string, ignoring offsets."""
        expected = '2014-06-10T22:47:32.054580'
        test_values = (
            '1402440452.05458',
            '1402440452.054579',
            '1402440452.05458_00000000',
            '1402440452.054579_00000000',
            '1402440452.054580000',
            '1402440452.054579999',
            '1402440452.054580000_0000000000000',
            '1402440452.054579999_0000ff00',
            '000001402440452.054580000',
            '000001402440452.0545799',
            '000001402440452.054580000_0000000000',
            '000001402440452.054579999999_00000fffff',
            1402440452.05458,
            1402440452.054579,
            1402440452.0545800000000000,
            1402440452.054579999,
            utils.Timestamp(1402440452.05458),
            utils.Timestamp(1402440452.0545799),
            utils.Timestamp(1402440452.05458, offset=0),
            utils.Timestamp(1402440452.05457999999, offset=0),
            utils.Timestamp(1402440452.05458, offset=100),
            utils.Timestamp(1402440452.054579, offset=100),
            utils.Timestamp('1402440452.05458'),
            utils.Timestamp('1402440452.054579999'),
            utils.Timestamp('1402440452.05458', offset=0),
            utils.Timestamp('1402440452.054579', offset=0),
            utils.Timestamp('1402440452.05458', offset=300),
            utils.Timestamp('1402440452.05457999', offset=300),
            utils.Timestamp('1402440452.05458_00000000'),
            utils.Timestamp('1402440452.05457999_00000000'),
            utils.Timestamp('1402440452.05458_00000000', offset=0),
            utils.Timestamp('1402440452.05457999_00000aaa', offset=0),
            utils.Timestamp('1402440452.05458_00000000', offset=400),
            utils.Timestamp('1402440452.054579_0a', offset=400),
        )
        for value in test_values:
            self.assertEqual(utils.Timestamp(value).isoformat, expected)
        expected = '1970-01-01T00:00:00.000000'
        test_values = (
            '0',
            '0000000000.00000',
            '0000000000.00000_ffffffffffff',
            0,
            0.0,
        )
        for value in test_values:
            self.assertEqual(utils.Timestamp(value).isoformat, expected)
    def test_not_equal(self):
        """!= considers both the time portion and the offset portion."""
        ts = '1402436408.91203_0000000000000001'
        test_values = (
            utils.Timestamp('1402436408.91203_0000000000000002'),
            utils.Timestamp('1402436408.91203'),
            utils.Timestamp(1402436408.91203),
            utils.Timestamp(1402436408.91204),
            utils.Timestamp(1402436408.91203, offset=0),
            utils.Timestamp(1402436408.91203, offset=2),
        )
        for value in test_values:
            self.assertTrue(value != ts)
        self.assertIs(True, utils.Timestamp(ts) == ts)  # sanity
        self.assertIs(False, utils.Timestamp(ts) != utils.Timestamp(ts))
        self.assertIs(False, utils.Timestamp(ts) != ts)
        self.assertIs(False, utils.Timestamp(ts) is None)
        self.assertIs(True, utils.Timestamp(ts) is not None)
    def test_no_force_internal_no_offset(self):
        """Test that internal is the same as normal with no offset"""
        with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False):
            self.assertEqual(utils.Timestamp(0).internal, '0000000000.00000')
            self.assertEqual(utils.Timestamp(1402437380.58186).internal,
                             '1402437380.58186')
            self.assertEqual(utils.Timestamp(1402437380.581859).internal,
                             '1402437380.58186')
            self.assertEqual(utils.Timestamp(0).internal,
                             utils.normalize_timestamp(0))
    def test_no_force_internal_with_offset(self):
        """Test that internal always includes the offset if significant"""
        with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False):
            self.assertEqual(utils.Timestamp(0, offset=1).internal,
                             '0000000000.00000_0000000000000001')
            self.assertEqual(
                utils.Timestamp(1402437380.58186, offset=16).internal,
                '1402437380.58186_0000000000000010')
            self.assertEqual(
                utils.Timestamp(1402437380.581859, offset=240).internal,
                '1402437380.58186_00000000000000f0')
            self.assertEqual(
                utils.Timestamp('1402437380.581859_00000001',
                                offset=240).internal,
                '1402437380.58186_00000000000000f1')
    def test_force_internal(self):
        """Test that internal always includes the offset if forced"""
        with mock.patch('swift.common.utils.FORCE_INTERNAL', new=True):
            self.assertEqual(utils.Timestamp(0).internal,
                             '0000000000.00000_0000000000000000')
            self.assertEqual(utils.Timestamp(1402437380.58186).internal,
                             '1402437380.58186_0000000000000000')
            self.assertEqual(utils.Timestamp(1402437380.581859).internal,
                             '1402437380.58186_0000000000000000')
            self.assertEqual(utils.Timestamp(0, offset=1).internal,
                             '0000000000.00000_0000000000000001')
            self.assertEqual(
                utils.Timestamp(1402437380.58186, offset=16).internal,
                '1402437380.58186_0000000000000010')
            self.assertEqual(
                utils.Timestamp(1402437380.581859, offset=16).internal,
                '1402437380.58186_0000000000000010')
    def test_internal_format_no_offset(self):
        """Internal form matches normal form when no offset (unless forced)."""
        expected = '1402436408.91203_0000000000000000'
        test_values = (
            '1402436408.91203',
            '1402436408.91203_00000000',
            '1402436408.912030000',
            '1402436408.912030000_0000000000000',
            '000001402436408.912030000',
            '000001402436408.912030000_0000000000',
            1402436408.91203,
            1402436408.9120300000000000,
            1402436408.912029,
            1402436408.912029999999999999,
            utils.Timestamp(1402436408.91203),
            utils.Timestamp(1402436408.91203, offset=0),
            utils.Timestamp(1402436408.912029),
            utils.Timestamp(1402436408.91202999999999999, offset=0),
            utils.Timestamp('1402436408.91203'),
            utils.Timestamp('1402436408.91203', offset=0),
            utils.Timestamp('1402436408.912029'),
            utils.Timestamp('1402436408.912029', offset=0),
            utils.Timestamp('1402436408.912029999999999'),
            utils.Timestamp('1402436408.912029999999999', offset=0),
        )
        for value in test_values:
            # timestamp instance is always equivalent
            self.assertEqual(utils.Timestamp(value), expected)
            if utils.FORCE_INTERNAL:
                # the FORCE_INTERNAL flag makes the internal format always
                # include the offset portion of the timestamp even when it's
                # not significant and would be bad during upgrades
                self.assertEqual(utils.Timestamp(value).internal, expected)
            else:
                # unless we FORCE_INTERNAL, when there's no offset the
                # internal format is equivalent to the normalized format
                self.assertEqual(utils.Timestamp(value).internal,
                                 '1402436408.91203')
    def test_internal_format_with_offset(self):
        """Offsets render as a 16-hex-digit suffix in the internal form."""
        expected = '1402436408.91203_00000000000000f0'
        test_values = (
            '1402436408.91203_000000f0',
            '1402436408.912030000_0000000000f0',
            '1402436408.912029_000000f0',
            '1402436408.91202999999_0000000000f0',
            '000001402436408.912030000_000000000f0',
            '000001402436408.9120299999_000000000f0',
            utils.Timestamp(1402436408.91203, offset=240),
            utils.Timestamp(1402436408.912029, offset=240),
            utils.Timestamp('1402436408.91203', offset=240),
            utils.Timestamp('1402436408.91203_00000000', offset=240),
            utils.Timestamp('1402436408.91203_0000000f', offset=225),
            utils.Timestamp('1402436408.9120299999', offset=240),
            utils.Timestamp('1402436408.9120299999_00000000', offset=240),
            utils.Timestamp('1402436408.9120299999_00000010', offset=224),
        )
        for value in test_values:
            timestamp = utils.Timestamp(value)
            self.assertEqual(timestamp.internal, expected)
            # can compare with offset if the string is internalized
            self.assertEqual(timestamp, expected)
            # if comparison value only includes the normalized portion and the
            # timestamp includes an offset, it is considered greater
            normal = utils.Timestamp(expected).normal
            self.assertTrue(timestamp > normal,
                            '%r is not bigger than %r given %r' % (
                                timestamp, normal, value))
            self.assertTrue(timestamp > float(normal),
                            '%r is not bigger than %f given %r' % (
                                timestamp, float(normal), value))
    def test_short_format_with_offset(self):
        """.short renders the offset without zero padding (or not at all)."""
        expected = '1402436408.91203_f0'
        timestamp = utils.Timestamp(1402436408.91203, 0xf0)
        self.assertEqual(expected, timestamp.short)
        expected = '1402436408.91203'
        timestamp = utils.Timestamp(1402436408.91203)
        self.assertEqual(expected, timestamp.short)
    def test_raw(self):
        """.raw is int(timestamp * 100000), excluding any offset."""
        expected = 140243640891203
        timestamp = utils.Timestamp(1402436408.91203)
        self.assertEqual(expected, timestamp.raw)
        # 'raw' does not include offset
        timestamp = utils.Timestamp(1402436408.91203, 0xf0)
        self.assertEqual(expected, timestamp.raw)
    def test_delta(self):
        """delta shifts raw; independent of offset and may not go negative."""
        def _assertWithinBounds(expected, timestamp):
            tolerance = 0.00001
            minimum = expected - tolerance
            maximum = expected + tolerance
            self.assertTrue(float(timestamp) > minimum)
            self.assertTrue(float(timestamp) < maximum)
        timestamp = utils.Timestamp(1402436408.91203, delta=100)
        _assertWithinBounds(1402436408.91303, timestamp)
        self.assertEqual(140243640891303, timestamp.raw)
        timestamp = utils.Timestamp(1402436408.91203, delta=-100)
        _assertWithinBounds(1402436408.91103, timestamp)
        self.assertEqual(140243640891103, timestamp.raw)
        timestamp = utils.Timestamp(1402436408.91203, delta=0)
        _assertWithinBounds(1402436408.91203, timestamp)
        self.assertEqual(140243640891203, timestamp.raw)
        # delta is independent of offset
        timestamp = utils.Timestamp(1402436408.91203, offset=42, delta=100)
        self.assertEqual(140243640891303, timestamp.raw)
        self.assertEqual(42, timestamp.offset)
        # cannot go negative
        self.assertRaises(ValueError, utils.Timestamp, 1402436408.91203,
                          delta=-140243640891203)
    def test_int(self):
        """int() truncates to whole seconds; timestamp still compares greater."""
        expected = 1402437965
        test_values = (
            '1402437965.91203',
            '1402437965.91203_00000000',
            '1402437965.912030000',
            '1402437965.912030000_0000000000000',
            '000001402437965.912030000',
            '000001402437965.912030000_0000000000',
            1402437965.91203,
            1402437965.9120300000000000,
            1402437965.912029,
            1402437965.912029999999999999,
            utils.Timestamp(1402437965.91203),
            utils.Timestamp(1402437965.91203, offset=0),
            utils.Timestamp(1402437965.91203, offset=500),
            utils.Timestamp(1402437965.912029),
            utils.Timestamp(1402437965.91202999999999999, offset=0),
            utils.Timestamp(1402437965.91202999999999999, offset=300),
            utils.Timestamp('1402437965.91203'),
            utils.Timestamp('1402437965.91203', offset=0),
            utils.Timestamp('1402437965.91203', offset=400),
            utils.Timestamp('1402437965.912029'),
            utils.Timestamp('1402437965.912029', offset=0),
            utils.Timestamp('1402437965.912029', offset=200),
            utils.Timestamp('1402437965.912029999999999'),
            utils.Timestamp('1402437965.912029999999999', offset=0),
            utils.Timestamp('1402437965.912029999999999', offset=100),
        )
        for value in test_values:
            timestamp = utils.Timestamp(value)
            self.assertEqual(int(timestamp), expected)
            self.assertTrue(timestamp > expected)
    def test_float(self):
        """float() is within tolerance; comparisons work against numbers
        and strings alike."""
        expected = 1402438115.91203
        test_values = (
            '1402438115.91203',
            '1402438115.91203_00000000',
            '1402438115.912030000',
            '1402438115.912030000_0000000000000',
            '000001402438115.912030000',
            '000001402438115.912030000_0000000000',
            1402438115.91203,
            1402438115.9120300000000000,
            1402438115.912029,
            1402438115.912029999999999999,
            utils.Timestamp(1402438115.91203),
            utils.Timestamp(1402438115.91203, offset=0),
            utils.Timestamp(1402438115.91203, offset=500),
            utils.Timestamp(1402438115.912029),
            utils.Timestamp(1402438115.91202999999999999, offset=0),
            utils.Timestamp(1402438115.91202999999999999, offset=300),
            utils.Timestamp('1402438115.91203'),
            utils.Timestamp('1402438115.91203', offset=0),
            utils.Timestamp('1402438115.91203', offset=400),
            utils.Timestamp('1402438115.912029'),
            utils.Timestamp('1402438115.912029', offset=0),
            utils.Timestamp('1402438115.912029', offset=200),
            utils.Timestamp('1402438115.912029999999999'),
            utils.Timestamp('1402438115.912029999999999', offset=0),
            utils.Timestamp('1402438115.912029999999999', offset=100),
        )
        tolerance = 0.00001
        minimum = expected - tolerance
        maximum = expected + tolerance
        for value in test_values:
            timestamp = utils.Timestamp(value)
            self.assertTrue(float(timestamp) > minimum,
                            '%f is not bigger than %f given %r' % (
                                timestamp, minimum, value))
            self.assertTrue(float(timestamp) < maximum,
                            '%f is not smaller than %f given %r' % (
                                timestamp, maximum, value))
            # direct comparison of timestamp works too
            self.assertTrue(timestamp > minimum,
                            '%s is not bigger than %f given %r' % (
                                timestamp.normal, minimum, value))
            self.assertTrue(timestamp < maximum,
                            '%s is not smaller than %f given %r' % (
                                timestamp.normal, maximum, value))
            # ... even against strings
            self.assertTrue(timestamp > '%f' % minimum,
                            '%s is not bigger than %s given %r' % (
                                timestamp.normal, minimum, value))
            self.assertTrue(timestamp < '%f' % maximum,
                            '%s is not smaller than %s given %r' % (
                                timestamp.normal, maximum, value))
    def test_false(self):
        """A zero timestamp with zero offset is falsy."""
        self.assertFalse(utils.Timestamp(0))
        self.assertFalse(utils.Timestamp(0, offset=0))
        self.assertFalse(utils.Timestamp('0'))
        self.assertFalse(utils.Timestamp('0', offset=0))
        self.assertFalse(utils.Timestamp(0.0))
        self.assertFalse(utils.Timestamp(0.0, offset=0))
        self.assertFalse(utils.Timestamp('0.0'))
        self.assertFalse(utils.Timestamp('0.0', offset=0))
        self.assertFalse(utils.Timestamp(00000000.00000000))
        self.assertFalse(utils.Timestamp(00000000.00000000, offset=0))
        self.assertFalse(utils.Timestamp('00000000.00000000'))
        self.assertFalse(utils.Timestamp('00000000.00000000', offset=0))
    def test_true(self):
        """Any non-zero time or non-zero offset makes the timestamp truthy."""
        self.assertTrue(utils.Timestamp(1))
        self.assertTrue(utils.Timestamp(1, offset=1))
        self.assertTrue(utils.Timestamp(0, offset=1))
        self.assertTrue(utils.Timestamp('1'))
        self.assertTrue(utils.Timestamp('1', offset=1))
        self.assertTrue(utils.Timestamp('0', offset=1))
        self.assertTrue(utils.Timestamp(1.1))
        self.assertTrue(utils.Timestamp(1.1, offset=1))
        self.assertTrue(utils.Timestamp(0.0, offset=1))
        self.assertTrue(utils.Timestamp('1.1'))
        self.assertTrue(utils.Timestamp('1.1', offset=1))
        self.assertTrue(utils.Timestamp('0.0', offset=1))
        self.assertTrue(utils.Timestamp(11111111.11111111))
        self.assertTrue(utils.Timestamp(11111111.11111111, offset=1))
        self.assertTrue(utils.Timestamp(00000000.00000000, offset=1))
        self.assertTrue(utils.Timestamp('11111111.11111111'))
        self.assertTrue(utils.Timestamp('11111111.11111111', offset=1))
        self.assertTrue(utils.Timestamp('00000000.00000000', offset=1))
    def test_greater_no_offset(self):
        """A newer timestamp compares greater than every older spelling."""
        now = time.time()
        older = now - 1
        timestamp = utils.Timestamp(now)
        test_values = (
            0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
            1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
            1402443112.213252, '1402443112.213252', '1402443112.213252_ffff',
            older, '%f' % older, '%f_0000ffff' % older,
        )
        for value in test_values:
            other = utils.Timestamp(value)
            self.assertNotEqual(timestamp, other)  # sanity
            self.assertTrue(timestamp > value,
                            '%r is not greater than %r given %r' % (
                                timestamp, value, value))
            self.assertTrue(timestamp > other,
                            '%r is not greater than %r given %r' % (
                                timestamp, other, value))
            self.assertTrue(timestamp > other.normal,
                            '%r is not greater than %r given %r' % (
                                timestamp, other.normal, value))
            self.assertTrue(timestamp > other.internal,
                            '%r is not greater than %r given %r' % (
                                timestamp, other.internal, value))
            self.assertTrue(timestamp > float(other),
                            '%r is not greater than %r given %r' % (
                                timestamp, float(other), value))
            self.assertTrue(timestamp > int(other),
                            '%r is not greater than %r given %r' % (
                                timestamp, int(other), value))
    def test_greater_with_offset(self):
        """Offsets break ties: offset > 0 beats the same time with none."""
        now = time.time()
        older = now - 1
        test_values = (
            0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
            1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
            1402443346.935174, '1402443346.93517', '1402443346.935169_ffff',
            older, '%f' % older, '%f_0000ffff' % older,
            now, '%f' % now, '%f_00000000' % now,
        )
        for offset in range(1, 1000, 100):
            timestamp = utils.Timestamp(now, offset=offset)
            for value in test_values:
                other = utils.Timestamp(value)
                self.assertNotEqual(timestamp, other)  # sanity
                self.assertTrue(timestamp > value,
                                '%r is not greater than %r given %r' % (
                                    timestamp, value, value))
                self.assertTrue(timestamp > other,
                                '%r is not greater than %r given %r' % (
                                    timestamp, other, value))
                self.assertTrue(timestamp > other.normal,
                                '%r is not greater than %r given %r' % (
                                    timestamp, other.normal, value))
                self.assertTrue(timestamp > other.internal,
                                '%r is not greater than %r given %r' % (
                                    timestamp, other.internal, value))
                self.assertTrue(timestamp > float(other),
                                '%r is not greater than %r given %r' % (
                                    timestamp, float(other), value))
                self.assertTrue(timestamp > int(other),
                                '%r is not greater than %r given %r' % (
                                    timestamp, int(other), value))
    def test_smaller_no_offset(self):
        """An older timestamp compares smaller than every newer spelling."""
        now = time.time()
        newer = now + 1
        timestamp = utils.Timestamp(now)
        test_values = (
            9999999999.99999, '9999999999.99999', '9999999999.99999_ffff',
            newer, '%f' % newer, '%f_0000ffff' % newer,
        )
        for value in test_values:
            other = utils.Timestamp(value)
            self.assertNotEqual(timestamp, other)  # sanity
            self.assertTrue(timestamp < value,
                            '%r is not smaller than %r given %r' % (
                                timestamp, value, value))
            self.assertTrue(timestamp < other,
                            '%r is not smaller than %r given %r' % (
                                timestamp, other, value))
            self.assertTrue(timestamp < other.normal,
                            '%r is not smaller than %r given %r' % (
                                timestamp, other.normal, value))
            self.assertTrue(timestamp < other.internal,
                            '%r is not smaller than %r given %r' % (
                                timestamp, other.internal, value))
            self.assertTrue(timestamp < float(other),
                            '%r is not smaller than %r given %r' % (
                                timestamp, float(other), value))
            self.assertTrue(timestamp < int(other),
                            '%r is not smaller than %r given %r' % (
                                timestamp, int(other), value))
    def test_smaller_with_offset(self):
        """Even with an offset, an older time stays smaller than newer ones."""
        now = time.time()
        newer = now + 1
        test_values = (
            9999999999.99999, '9999999999.99999', '9999999999.99999_ffff',
            newer, '%f' % newer, '%f_0000ffff' % newer,
        )
        for offset in range(1, 1000, 100):
            timestamp = utils.Timestamp(now, offset=offset)
            for value in test_values:
                other = utils.Timestamp(value)
                self.assertNotEqual(timestamp, other)  # sanity
                self.assertTrue(timestamp < value,
                                '%r is not smaller than %r given %r' % (
                                    timestamp, value, value))
                self.assertTrue(timestamp < other,
                                '%r is not smaller than %r given %r' % (
                                    timestamp, other, value))
                self.assertTrue(timestamp < other.normal,
                                '%r is not smaller than %r given %r' % (
                                    timestamp, other.normal, value))
                self.assertTrue(timestamp < other.internal,
                                '%r is not smaller than %r given %r' % (
                                    timestamp, other.internal, value))
                self.assertTrue(timestamp < float(other),
                                '%r is not smaller than %r given %r' % (
                                    timestamp, float(other), value))
                self.assertTrue(timestamp < int(other),
                                '%r is not smaller than %r given %r' % (
                                    timestamp, int(other), value))
    def test_cmp_with_none(self):
        """Any timestamp, even zero, compares greater than None."""
        self.assertGreater(utils.Timestamp(0), None)
        self.assertGreater(utils.Timestamp(1.0), None)
        self.assertGreater(utils.Timestamp(1.0, 42), None)
    def test_ordering(self):
        """Sorting Timestamp objects and their internal strings agree."""
        given = [
            '1402444820.62590_000000000000000a',
            '1402444820.62589_0000000000000001',
            '1402444821.52589_0000000000000004',
            '1402444920.62589_0000000000000004',
            '1402444821.62589_000000000000000a',
            '1402444821.72589_000000000000000a',
            '1402444920.62589_0000000000000002',
            '1402444820.62589_0000000000000002',
            '1402444820.62589_000000000000000a',
            '1402444820.62590_0000000000000004',
            '1402444920.62589_000000000000000a',
            '1402444820.62590_0000000000000002',
            '1402444821.52589_0000000000000002',
            '1402444821.52589_0000000000000000',
            '1402444920.62589',
            '1402444821.62589_0000000000000004',
            '1402444821.72589_0000000000000001',
            '1402444820.62590',
            '1402444820.62590_0000000000000001',
            '1402444820.62589_0000000000000004',
            '1402444821.72589_0000000000000000',
            '1402444821.52589_000000000000000a',
            '1402444821.72589_0000000000000004',
            '1402444821.62589',
            '1402444821.52589_0000000000000001',
            '1402444821.62589_0000000000000001',
            '1402444821.62589_0000000000000002',
            '1402444821.72589_0000000000000002',
            '1402444820.62589',
            '1402444920.62589_0000000000000001']
        expected = [
            '1402444820.62589',
            '1402444820.62589_0000000000000001',
            '1402444820.62589_0000000000000002',
            '1402444820.62589_0000000000000004',
            '1402444820.62589_000000000000000a',
            '1402444820.62590',
            '1402444820.62590_0000000000000001',
            '1402444820.62590_0000000000000002',
            '1402444820.62590_0000000000000004',
            '1402444820.62590_000000000000000a',
            '1402444821.52589',
            '1402444821.52589_0000000000000001',
            '1402444821.52589_0000000000000002',
            '1402444821.52589_0000000000000004',
            '1402444821.52589_000000000000000a',
            '1402444821.62589',
            '1402444821.62589_0000000000000001',
            '1402444821.62589_0000000000000002',
            '1402444821.62589_0000000000000004',
            '1402444821.62589_000000000000000a',
            '1402444821.72589',
            '1402444821.72589_0000000000000001',
            '1402444821.72589_0000000000000002',
            '1402444821.72589_0000000000000004',
            '1402444821.72589_000000000000000a',
            '1402444920.62589',
            '1402444920.62589_0000000000000001',
            '1402444920.62589_0000000000000002',
            '1402444920.62589_0000000000000004',
            '1402444920.62589_000000000000000a',
        ]
        # less visual version
        """
        now = time.time()
        given = [
            utils.Timestamp(now + i, offset=offset).internal
            for i in (0, 0.00001, 0.9, 1.0, 1.1, 100.0)
            for offset in (0, 1, 2, 4, 10)
        ]
        expected = [t for t in given]
        random.shuffle(given)
        """
        self.assertEqual(len(given), len(expected))  # sanity
        timestamps = [utils.Timestamp(t) for t in given]
        # our expected values don't include insignificant offsets
        with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False):
            self.assertEqual(
                [t.internal for t in sorted(timestamps)], expected)
            # string sorting works as well
            self.assertEqual(
                sorted([t.internal for t in timestamps]), expected)
    def test_hashable(self):
        """Equal timestamps hash equal, so they interchange as dict keys."""
        ts_0 = utils.Timestamp('1402444821.72589')
        ts_0_also = utils.Timestamp('1402444821.72589')
        self.assertEqual(ts_0, ts_0_also)  # sanity
        self.assertEqual(hash(ts_0), hash(ts_0_also))
        d = {ts_0: 'whatever'}
        self.assertIn(ts_0, d)  # sanity
        self.assertIn(ts_0_also, d)
class TestTimestampEncoding(unittest.TestCase):
    """Tests for utils.encode_timestamps / utils.decode_timestamps.

    Three timestamps are packed into one string: the first in internal
    form, the second and third as signed hex deltas (``+``/``-`` suffixes)
    from the previous value.
    """
    def setUp(self):
        # Fixture timestamps used throughout; t3 carries offset 24 (0x18).
        t0 = utils.Timestamp(0.0)
        t1 = utils.Timestamp(997.9996)
        t2 = utils.Timestamp(999)
        t3 = utils.Timestamp(1000, 24)
        t4 = utils.Timestamp(1001)
        t5 = utils.Timestamp(1002.00040)
        # encodings that are expected when explicit = False
        self.non_explicit_encodings = (
            ('0000001000.00000_18', (t3, t3, t3)),
            ('0000001000.00000_18', (t3, t3, None)),
        )
        # mappings that are expected when explicit = True
        self.explicit_encodings = (
            ('0000001000.00000_18+0+0', (t3, t3, t3)),
            ('0000001000.00000_18+0', (t3, t3, None)),
        )
        # mappings that are expected when explicit = True or False
        self.encodings = (
            ('0000001000.00000_18+0+186a0', (t3, t3, t4)),
            ('0000001000.00000_18+186a0+186c8', (t3, t4, t5)),
            ('0000001000.00000_18-186a0+0', (t3, t2, t2)),
            ('0000001000.00000_18+0-186a0', (t3, t3, t2)),
            ('0000001000.00000_18-186a0-186c8', (t3, t2, t1)),
            ('0000001000.00000_18', (t3, None, None)),
            ('0000001000.00000_18+186a0', (t3, t4, None)),
            ('0000001000.00000_18-186a0', (t3, t2, None)),
            ('0000001000.00000_18', (t3, None, t1)),
            ('0000001000.00000_18-5f5e100', (t3, t0, None)),
            ('0000001000.00000_18+0-5f5e100', (t3, t3, t0)),
            ('0000001000.00000_18-5f5e100+5f45a60', (t3, t0, t2)),
        )
        # decodings that are expected when explicit = False
        self.non_explicit_decodings = (
            ('0000001000.00000_18', (t3, t3, t3)),
            ('0000001000.00000_18+186a0', (t3, t4, t4)),
            ('0000001000.00000_18-186a0', (t3, t2, t2)),
            ('0000001000.00000_18+186a0', (t3, t4, t4)),
            ('0000001000.00000_18-186a0', (t3, t2, t2)),
            ('0000001000.00000_18-5f5e100', (t3, t0, t0)),
        )
        # decodings that are expected when explicit = True
        self.explicit_decodings = (
            ('0000001000.00000_18+0+0', (t3, t3, t3)),
            ('0000001000.00000_18+0', (t3, t3, None)),
            ('0000001000.00000_18', (t3, None, None)),
            ('0000001000.00000_18+186a0', (t3, t4, None)),
            ('0000001000.00000_18-186a0', (t3, t2, None)),
            ('0000001000.00000_18-5f5e100', (t3, t0, None)),
        )
        # decodings that are expected when explicit = True or False
        self.decodings = (
            ('0000001000.00000_18+0+186a0', (t3, t3, t4)),
            ('0000001000.00000_18+186a0+186c8', (t3, t4, t5)),
            ('0000001000.00000_18-186a0+0', (t3, t2, t2)),
            ('0000001000.00000_18+0-186a0', (t3, t3, t2)),
            ('0000001000.00000_18-186a0-186c8', (t3, t2, t1)),
            ('0000001000.00000_18-5f5e100+5f45a60', (t3, t0, t2)),
        )
    def _assertEqual(self, expected, actual, test):
        """assertEqual with a message that names the offending fixture."""
        self.assertEqual(expected, actual,
                         'Got %s but expected %s for parameters %s'
                         % (actual, expected, test))
    def test_encoding(self):
        """encode_timestamps produces the expected packed strings."""
        for test in self.explicit_encodings:
            actual = utils.encode_timestamps(test[1][0], test[1][1],
                                             test[1][2], True)
            self._assertEqual(test[0], actual, test[1])
        for test in self.non_explicit_encodings:
            actual = utils.encode_timestamps(test[1][0], test[1][1],
                                             test[1][2], False)
            self._assertEqual(test[0], actual, test[1])
        for explicit in (True, False):
            for test in self.encodings:
                actual = utils.encode_timestamps(test[1][0], test[1][1],
                                                 test[1][2], explicit)
                self._assertEqual(test[0], actual, test[1])
    def test_decoding(self):
        """decode_timestamps recovers the expected timestamp triples."""
        for test in self.explicit_decodings:
            actual = utils.decode_timestamps(test[0], True)
            self._assertEqual(test[1], actual, test[0])
        for test in self.non_explicit_decodings:
            actual = utils.decode_timestamps(test[0], False)
            self._assertEqual(test[1], actual, test[0])
        for explicit in (True, False):
            for test in self.decodings:
                actual = utils.decode_timestamps(test[0], explicit)
                self._assertEqual(test[1], actual, test[0])
class TestUtils(unittest.TestCase):
"""Tests for swift.common.utils """
    def setUp(self):
        # Pin the hash path suffix/prefix so hash-dependent tests are stable.
        utils.HASH_PATH_SUFFIX = 'endcap'
        utils.HASH_PATH_PREFIX = 'startcap'
    def test_lock_path(self):
        """A second lock_path on the same dir times out while one is held."""
        tmpdir = mkdtemp()
        try:
            with utils.lock_path(tmpdir, 0.1):
                exc = None
                success = False
                try:
                    # Nested acquisition must raise LockTimeout, not succeed.
                    with utils.lock_path(tmpdir, 0.1):
                        success = True
                except LockTimeout as err:
                    exc = err
                self.assertTrue(exc is not None)
                self.assertTrue(not success)
        finally:
            shutil.rmtree(tmpdir)
    def test_lock_path_num_sleeps(self):
        """lock_path polls with constant 0.01s sleeps while waiting."""
        tmpdir = mkdtemp()
        num_short_calls = [0]
        exception_raised = [False]
        def my_sleep(to_sleep):
            # Count the expected 0.01s polls; any other interval is a bug.
            if to_sleep == 0.01:
                num_short_calls[0] += 1
            else:
                raise Exception('sleep time changed: %s' % to_sleep)
        try:
            with mock.patch('swift.common.utils.sleep', my_sleep):
                with utils.lock_path(tmpdir):
                    with utils.lock_path(tmpdir):
                        pass
        except Exception as e:
            exception_raised[0] = True
            self.assertTrue('sleep time changed' in str(e))
        finally:
            shutil.rmtree(tmpdir)
        # 11 short polls happen before the sleep interval changes.
        self.assertEqual(num_short_calls[0], 11)
        self.assertTrue(exception_raised[0])
    def test_lock_path_class(self):
        """lock_path raises the caller-supplied timeout class when one is
        given, and plain LockTimeout otherwise."""
        tmpdir = mkdtemp()
        try:
            with utils.lock_path(tmpdir, 0.1, ReplicationLockTimeout):
                exc = None
                exc2 = None
                success = False
                try:
                    with utils.lock_path(tmpdir, 0.1, ReplicationLockTimeout):
                        success = True
                except ReplicationLockTimeout as err:
                    exc = err
                except LockTimeout as err:
                    exc2 = err
                # custom class requested -> custom class raised
                self.assertTrue(exc is not None)
                self.assertTrue(exc2 is None)
                self.assertTrue(not success)
                exc = None
                exc2 = None
                success = False
                try:
                    with utils.lock_path(tmpdir, 0.1):
                        success = True
                except ReplicationLockTimeout as err:
                    exc = err
                except LockTimeout as err:
                    exc2 = err
                # no class given -> default LockTimeout raised
                self.assertTrue(exc is None)
                self.assertTrue(exc2 is not None)
                self.assertTrue(not success)
        finally:
            shutil.rmtree(tmpdir)
def test_normalize_timestamp(self):
# Test swift.common.utils.normalize_timestamp
self.assertEqual(utils.normalize_timestamp('1253327593.48174'),
"1253327593.48174")
self.assertEqual(utils.normalize_timestamp(1253327593.48174),
"1253327593.48174")
self.assertEqual(utils.normalize_timestamp('1253327593.48'),
"1253327593.48000")
self.assertEqual(utils.normalize_timestamp(1253327593.48),
"1253327593.48000")
self.assertEqual(utils.normalize_timestamp('253327593.48'),
"0253327593.48000")
self.assertEqual(utils.normalize_timestamp(253327593.48),
"0253327593.48000")
self.assertEqual(utils.normalize_timestamp('1253327593'),
"1253327593.00000")
self.assertEqual(utils.normalize_timestamp(1253327593),
"1253327593.00000")
self.assertRaises(ValueError, utils.normalize_timestamp, '')
self.assertRaises(ValueError, utils.normalize_timestamp, 'abc')
def test_normalize_delete_at_timestamp(self):
self.assertEqual(
utils.normalize_delete_at_timestamp(1253327593),
'1253327593')
self.assertEqual(
utils.normalize_delete_at_timestamp(1253327593.67890),
'1253327593')
self.assertEqual(
utils.normalize_delete_at_timestamp('1253327593'),
'1253327593')
self.assertEqual(
utils.normalize_delete_at_timestamp('1253327593.67890'),
'1253327593')
self.assertEqual(
utils.normalize_delete_at_timestamp(-1253327593),
'0000000000')
self.assertEqual(
utils.normalize_delete_at_timestamp(-1253327593.67890),
'0000000000')
self.assertEqual(
utils.normalize_delete_at_timestamp('-1253327593'),
'0000000000')
self.assertEqual(
utils.normalize_delete_at_timestamp('-1253327593.67890'),
'0000000000')
self.assertEqual(
utils.normalize_delete_at_timestamp(71253327593),
'9999999999')
self.assertEqual(
utils.normalize_delete_at_timestamp(71253327593.67890),
'9999999999')
self.assertEqual(
utils.normalize_delete_at_timestamp('71253327593'),
'9999999999')
self.assertEqual(
utils.normalize_delete_at_timestamp('71253327593.67890'),
'9999999999')
self.assertRaises(ValueError, utils.normalize_timestamp, '')
self.assertRaises(ValueError, utils.normalize_timestamp, 'abc')
def test_last_modified_date_to_timestamp(self):
expectations = {
'1970-01-01T00:00:00.000000': 0.0,
'2014-02-28T23:22:36.698390': 1393629756.698390,
'2011-03-19T04:03:00.604554': 1300507380.604554,
}
for last_modified, ts in expectations.items():
real = utils.last_modified_date_to_timestamp(last_modified)
self.assertEqual(real, ts, "failed for %s" % last_modified)
def test_last_modified_date_to_timestamp_when_system_not_UTC(self):
try:
old_tz = os.environ.get('TZ')
# Western Argentina Summer Time. Found in glibc manual; this
# timezone always has a non-zero offset from UTC, so this test is
# always meaningful.
os.environ['TZ'] = 'WART4WARST,J1/0,J365/25'
self.assertEqual(utils.last_modified_date_to_timestamp(
'1970-01-01T00:00:00.000000'),
0.0)
finally:
if old_tz is not None:
os.environ['TZ'] = old_tz
else:
os.environ.pop('TZ')
    def test_backwards(self):
        """utils.backward must yield a file's lines in reverse order
        across every block-boundary alignment, and yield nothing for an
        empty file."""
        # Test swift.common.utils.backward
        # The lines are designed so that the function would encounter
        # all of the boundary conditions and typical conditions.
        # Block boundaries are marked with '<>' characters
        blocksize = 25
        lines = [b'123456789x12345678><123456789\n', # block larger than rest
                 b'123456789x123>\n', # block ends just before \n character
                 b'123423456789\n',
                 b'123456789x\n', # block ends at the end of line
                 b'<123456789x123456789x123\n',
                 b'<6789x123\n', # block ends at the beginning of the line
                 b'6789x1234\n',
                 b'1234><234\n', # block ends typically in the middle of line
                 b'123456789x123456789\n']
        with TemporaryFile() as f:
            for line in lines:
                f.write(line)
            count = len(lines) - 1
            # walk backward through the file, comparing each yielded line
            # (newline stripped) against the forward list from the end
            for line in utils.backward(f, blocksize):
                self.assertEqual(line, lines[count].split(b'\n')[0])
                count -= 1
        # Empty file case
        with TemporaryFile('r') as f:
            self.assertEqual([], list(utils.backward(f)))
    def test_mkdirs(self):
        """mkdirs creates nested directories, is idempotent, and raises
        OSError when a path component is a regular file."""
        testdir_base = mkdtemp()
        testroot = os.path.join(testdir_base, 'mkdirs')
        try:
            self.assertTrue(not os.path.exists(testroot))
            utils.mkdirs(testroot)
            self.assertTrue(os.path.exists(testroot))
            # second call on an existing directory must not raise
            utils.mkdirs(testroot)
            self.assertTrue(os.path.exists(testroot))
            rmtree(testroot, ignore_errors=1)
            testdir = os.path.join(testroot, 'one/two/three')
            self.assertTrue(not os.path.exists(testdir))
            utils.mkdirs(testdir)
            self.assertTrue(os.path.exists(testdir))
            utils.mkdirs(testdir)
            self.assertTrue(os.path.exists(testdir))
            rmtree(testroot, ignore_errors=1)
            # plant a regular file where the directory root should go
            open(testroot, 'wb').close()
            self.assertTrue(not os.path.exists(testdir))
            self.assertRaises(OSError, utils.mkdirs, testdir)
            os.unlink(testroot)
        finally:
            rmtree(testdir_base)
def test_split_path(self):
# Test swift.common.utils.split_account_path
self.assertRaises(ValueError, utils.split_path, '')
self.assertRaises(ValueError, utils.split_path, '/')
self.assertRaises(ValueError, utils.split_path, '//')
self.assertEqual(utils.split_path('/a'), ['a'])
self.assertRaises(ValueError, utils.split_path, '//a')
self.assertEqual(utils.split_path('/a/'), ['a'])
self.assertRaises(ValueError, utils.split_path, '/a/c')
self.assertRaises(ValueError, utils.split_path, '//c')
self.assertRaises(ValueError, utils.split_path, '/a/c/')
self.assertRaises(ValueError, utils.split_path, '/a//')
self.assertRaises(ValueError, utils.split_path, '/a', 2)
self.assertRaises(ValueError, utils.split_path, '/a', 2, 3)
self.assertRaises(ValueError, utils.split_path, '/a', 2, 3, True)
self.assertEqual(utils.split_path('/a/c', 2), ['a', 'c'])
self.assertEqual(utils.split_path('/a/c/o', 3), ['a', 'c', 'o'])
self.assertRaises(ValueError, utils.split_path, '/a/c/o/r', 3, 3)
self.assertEqual(utils.split_path('/a/c/o/r', 3, 3, True),
['a', 'c', 'o/r'])
self.assertEqual(utils.split_path('/a/c', 2, 3, True),
['a', 'c', None])
self.assertRaises(ValueError, utils.split_path, '/a', 5, 4)
self.assertEqual(utils.split_path('/a/c/', 2), ['a', 'c'])
self.assertEqual(utils.split_path('/a/c/', 2, 3), ['a', 'c', ''])
try:
utils.split_path('o\nn e', 2)
except ValueError as err:
self.assertEqual(str(err), 'Invalid path: o%0An%20e')
try:
utils.split_path('o\nn e', 2, 3, True)
except ValueError as err:
self.assertEqual(str(err), 'Invalid path: o%0An%20e')
def test_validate_device_partition(self):
# Test swift.common.utils.validate_device_partition
utils.validate_device_partition('foo', 'bar')
self.assertRaises(ValueError,
utils.validate_device_partition, '', '')
self.assertRaises(ValueError,
utils.validate_device_partition, '', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', '')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo/bar', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', 'foo/bar')
self.assertRaises(ValueError,
utils.validate_device_partition, '.', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, '..', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', '.')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', '..')
try:
utils.validate_device_partition('o\nn e', 'foo')
except ValueError as err:
self.assertEqual(str(err), 'Invalid device: o%0An%20e')
try:
utils.validate_device_partition('foo', 'o\nn e')
except ValueError as err:
self.assertEqual(str(err), 'Invalid partition: o%0An%20e')
    def test_NullLogger(self):
        """NullLogger.write must swallow output without raising."""
        # Test swift.common.utils.NullLogger
        # NOTE(review): sio is never attached to the NullLogger, so the
        # getvalue() assertion below is vacuously true -- it documents
        # intent ("nothing gets written anywhere") rather than proving it.
        sio = StringIO()
        nl = utils.NullLogger()
        nl.write('test')
        self.assertEqual(sio.getvalue(), '')
def test_LoggerFileObject(self):
orig_stdout = sys.stdout
orig_stderr = sys.stderr
sio = StringIO()
handler = logging.StreamHandler(sio)
logger = logging.getLogger()
logger.addHandler(handler)
lfo_stdout = utils.LoggerFileObject(logger)
lfo_stderr = utils.LoggerFileObject(logger)
lfo_stderr = utils.LoggerFileObject(logger, 'STDERR')
print('test1')
self.assertEqual(sio.getvalue(), '')
sys.stdout = lfo_stdout
print('test2')
self.assertEqual(sio.getvalue(), 'STDOUT: test2\n')
sys.stderr = lfo_stderr
print('test4', file=sys.stderr)
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n')
sys.stdout = orig_stdout
print('test5')
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n')
print('test6', file=sys.stderr)
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\n')
sys.stderr = orig_stderr
print('test8')
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\n')
lfo_stdout.writelines(['a', 'b', 'c'])
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\nSTDOUT: a#012b#012c\n')
lfo_stdout.close()
lfo_stderr.close()
lfo_stdout.write('d')
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\nSTDOUT: a#012b#012c\nSTDOUT: d\n')
lfo_stdout.flush()
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\nSTDOUT: a#012b#012c\nSTDOUT: d\n')
for lfo in (lfo_stdout, lfo_stderr):
got_exc = False
try:
for line in lfo:
pass
except Exception:
got_exc = True
self.assertTrue(got_exc)
got_exc = False
try:
for line in lfo:
pass
except Exception:
got_exc = True
self.assertTrue(got_exc)
self.assertRaises(IOError, lfo.read)
self.assertRaises(IOError, lfo.read, 1024)
self.assertRaises(IOError, lfo.readline)
self.assertRaises(IOError, lfo.readline, 1024)
lfo.tell()
def test_LoggerFileObject_recursion(self):
crashy_calls = [0]
class CrashyLogger(logging.Handler):
def emit(self, record):
crashy_calls[0] += 1
try:
# Pretend to be trying to send to syslog, but syslogd is
# dead. We need the raise here to set sys.exc_info.
raise socket.error(errno.ENOTCONN, "This is an ex-syslog")
except socket.error:
self.handleError(record)
logger = logging.getLogger()
logger.addHandler(CrashyLogger())
# Set up some real file descriptors for stdio. If you run
# nosetests with "-s", you already have real files there, but
# otherwise they're StringIO objects.
#
# In any case, since capture_stdio() closes sys.stdin and friends,
# we'd want to set up some sacrificial files so as to not goof up
# the testrunner.
new_stdin = open(os.devnull, 'r+b')
new_stdout = open(os.devnull, 'w+b')
new_stderr = open(os.devnull, 'w+b')
with contextlib.closing(new_stdin), contextlib.closing(new_stdout), \
contextlib.closing(new_stderr):
# logging.raiseExceptions is set to False in test/__init__.py, but
# is True in Swift daemons, and the error doesn't manifest without
# it.
with mock.patch('sys.stdin', new_stdin), \
mock.patch('sys.stdout', new_stdout), \
mock.patch('sys.stderr', new_stderr), \
mock.patch.object(logging, 'raiseExceptions', True):
# Note: since stdio is hooked up to /dev/null in here, using
# pdb is basically impossible. Sorry about that.
utils.capture_stdio(logger)
logger.info("I like ham")
self.assertTrue(crashy_calls[0], 1)
    def test_parse_options(self):
        """parse_options returns (conf_path, options) applying defaults,
        flag parsing, bare-word options, and positional extra args."""
        # Get a file that is definitely on disk
        with NamedTemporaryFile() as f:
            conf_file = f.name
            conf, options = utils.parse_options(test_args=[conf_file])
            self.assertEqual(conf, conf_file)
            # assert defaults
            self.assertEqual(options['verbose'], False)
            self.assertTrue('once' not in options)
            # assert verbose as option
            conf, options = utils.parse_options(test_args=[conf_file, '-v'])
            self.assertEqual(options['verbose'], True)
            # check once option
            conf, options = utils.parse_options(test_args=[conf_file],
                                                once=True)
            self.assertEqual(options['once'], False)
            test_args = [conf_file, '--once']
            conf, options = utils.parse_options(test_args=test_args, once=True)
            self.assertEqual(options['once'], True)
            # check options as arg parsing
            test_args = [conf_file, 'once', 'plugin_name', 'verbose']
            conf, options = utils.parse_options(test_args=test_args, once=True)
            self.assertEqual(options['verbose'], True)
            self.assertEqual(options['once'], True)
            self.assertEqual(options['extra_args'], ['plugin_name'])
def test_parse_options_errors(self):
orig_stdout = sys.stdout
orig_stderr = sys.stderr
stdo = StringIO()
stde = StringIO()
utils.sys.stdout = stdo
utils.sys.stderr = stde
self.assertRaises(SystemExit, utils.parse_options, once=True,
test_args=[])
self.assertTrue('missing config' in stdo.getvalue())
# verify conf file must exist, context manager will delete temp file
with NamedTemporaryFile() as f:
conf_file = f.name
self.assertRaises(SystemExit, utils.parse_options, once=True,
test_args=[conf_file])
self.assertTrue('unable to locate' in stdo.getvalue())
# reset stdio
utils.sys.stdout = orig_stdout
utils.sys.stderr = orig_stderr
def test_dump_recon_cache(self):
testdir_base = mkdtemp()
testcache_file = os.path.join(testdir_base, 'cache.recon')
logger = utils.get_logger(None, 'server', log_route='server')
try:
submit_dict = {'key1': {'value1': 1, 'value2': 2}}
utils.dump_recon_cache(submit_dict, testcache_file, logger)
fd = open(testcache_file)
file_dict = json.loads(fd.readline())
fd.close()
self.assertEqual(submit_dict, file_dict)
# Use a nested entry
submit_dict = {'key1': {'key2': {'value1': 1, 'value2': 2}}}
result_dict = {'key1': {'key2': {'value1': 1, 'value2': 2},
'value1': 1, 'value2': 2}}
utils.dump_recon_cache(submit_dict, testcache_file, logger)
fd = open(testcache_file)
file_dict = json.loads(fd.readline())
fd.close()
self.assertEqual(result_dict, file_dict)
finally:
rmtree(testdir_base)
    def test_dump_recon_cache_permission_denied(self):
        """A permission error while writing the cache file is reported
        via logger.exception instead of being raised to the caller."""
        testdir_base = mkdtemp()
        testcache_file = os.path.join(testdir_base, 'cache.recon')
        class MockLogger(object):
            # captures the active exception each time exception() is called
            def __init__(self):
                self._excs = []
            def exception(self, message):
                _junk, exc, _junk = sys.exc_info()
                self._excs.append(exc)
        logger = MockLogger()
        try:
            submit_dict = {'key1': {'value1': 1, 'value2': 2}}
            # make the temp-file creation fail like a read-only filesystem
            with mock.patch(
                    'swift.common.utils.NamedTemporaryFile',
                    side_effect=IOError(13, 'Permission Denied')):
                utils.dump_recon_cache(submit_dict, testcache_file, logger)
            self.assertIsInstance(logger._excs[0], IOError)
        finally:
            rmtree(testdir_base)
    def test_get_logger(self):
        """get_logger honours the log_level conf option (default above
        DEBUG) and exposes the extra notice() level."""
        sio = StringIO()
        logger = logging.getLogger('server')
        logger.addHandler(logging.StreamHandler(sio))
        logger = utils.get_logger(None, 'server', log_route='server')
        logger.warning('test1')
        self.assertEqual(sio.getvalue(), 'test1\n')
        # default level is above DEBUG, so this line is dropped
        logger.debug('test2')
        self.assertEqual(sio.getvalue(), 'test1\n')
        logger = utils.get_logger({'log_level': 'DEBUG'}, 'server',
                                  log_route='server')
        logger.debug('test3')
        self.assertEqual(sio.getvalue(), 'test1\ntest3\n')
        # Doesn't really test that the log facility is truly being used all the
        # way to syslog; but exercises the code.
        logger = utils.get_logger({'log_facility': 'LOG_LOCAL3'}, 'server',
                                  log_route='server')
        logger.warning('test4')
        self.assertEqual(sio.getvalue(),
                         'test1\ntest3\ntest4\n')
        # make sure debug doesn't log by default
        logger.debug('test5')
        self.assertEqual(sio.getvalue(),
                         'test1\ntest3\ntest4\n')
        # make sure notice lvl logs by default
        logger.notice('test6')
        self.assertEqual(sio.getvalue(),
                         'test1\ntest3\ntest4\ntest6\n')
    def test_get_logger_sysloghandler_plumbing(self):
        """get_logger must construct SysLogHandler with the right
        address/facility combinations, including the UDP fallbacks when
        the UNIX-socket address is unusable."""
        orig_sysloghandler = utils.SysLogHandler
        syslog_handler_args = []
        def syslog_handler_catcher(*args, **kwargs):
            # record the constructor args, then build the real handler
            syslog_handler_args.append((args, kwargs))
            return orig_sysloghandler(*args, **kwargs)
        syslog_handler_catcher.LOG_LOCAL0 = orig_sysloghandler.LOG_LOCAL0
        syslog_handler_catcher.LOG_LOCAL3 = orig_sysloghandler.LOG_LOCAL3
        try:
            utils.SysLogHandler = syslog_handler_catcher
            utils.get_logger({
                'log_facility': 'LOG_LOCAL3',
            }, 'server', log_route='server')
            expected_args = [((), {'address': '/dev/log',
                                   'facility': orig_sysloghandler.LOG_LOCAL3})]
            if not os.path.exists('/dev/log') or \
                    os.path.isfile('/dev/log') or \
                    os.path.isdir('/dev/log'):
                # Since socket on OSX is in /var/run/syslog, there will be
                # a fallback to UDP.
                expected_args.append(
                    ((), {'facility': orig_sysloghandler.LOG_LOCAL3}))
            self.assertEqual(expected_args, syslog_handler_args)
            syslog_handler_args = []
            utils.get_logger({
                'log_facility': 'LOG_LOCAL3',
                'log_address': '/foo/bar',
            }, 'server', log_route='server')
            self.assertEqual([
                ((), {'address': '/foo/bar',
                      'facility': orig_sysloghandler.LOG_LOCAL3}),
                # Second call is because /foo/bar didn't exist (and wasn't a
                # UNIX domain socket).
                ((), {'facility': orig_sysloghandler.LOG_LOCAL3})],
                syslog_handler_args)
            # Using UDP with default port
            syslog_handler_args = []
            utils.get_logger({
                'log_udp_host': 'syslog.funtimes.com',
            }, 'server', log_route='server')
            self.assertEqual([
                ((), {'address': ('syslog.funtimes.com',
                                  logging.handlers.SYSLOG_UDP_PORT),
                      'facility': orig_sysloghandler.LOG_LOCAL0})],
                syslog_handler_args)
            # Using UDP with non-default port
            syslog_handler_args = []
            utils.get_logger({
                'log_udp_host': 'syslog.funtimes.com',
                'log_udp_port': '2123',
            }, 'server', log_route='server')
            self.assertEqual([
                ((), {'address': ('syslog.funtimes.com', 2123),
                      'facility': orig_sysloghandler.LOG_LOCAL0})],
                syslog_handler_args)
        finally:
            utils.SysLogHandler = orig_sysloghandler
    @reset_logger_state
    def test_clean_logger_exception(self):
        """logger.exception must collapse well-known OS/socket/timeout
        errors into traceback-free one-liners while keeping tracebacks
        for everything else."""
        # setup stream logging
        sio = StringIO()
        logger = utils.get_logger(None)
        handler = logging.StreamHandler(sio)
        logger.logger.addHandler(handler)
        def strip_value(sio):
            # read-and-reset helper so each assertion only sees output
            # produced since the previous check
            sio.seek(0)
            v = sio.getvalue()
            sio.truncate(0)
            return v
        def log_exception(exc):
            try:
                raise exc
            except (Exception, Timeout):
                logger.exception('blah')
        try:
            # establish base case
            self.assertEqual(strip_value(sio), '')
            logger.info('test')
            self.assertEqual(strip_value(sio), 'test\n')
            self.assertEqual(strip_value(sio), '')
            logger.info('test')
            logger.info('test')
            self.assertEqual(strip_value(sio), 'test\ntest\n')
            self.assertEqual(strip_value(sio), '')
            # test OSError
            for en in (errno.EIO, errno.ENOSPC):
                log_exception(OSError(en, 'my %s error message' % en))
                log_msg = strip_value(sio)
                self.assertTrue('Traceback' not in log_msg)
                self.assertTrue('my %s error message' % en in log_msg)
            # unfiltered
            log_exception(OSError())
            self.assertTrue('Traceback' in strip_value(sio))
            # test socket.error
            log_exception(socket.error(errno.ECONNREFUSED,
                                       'my error message'))
            log_msg = strip_value(sio)
            self.assertTrue('Traceback' not in log_msg)
            self.assertTrue('errno.ECONNREFUSED message test' not in log_msg)
            self.assertTrue('Connection refused' in log_msg)
            log_exception(socket.error(errno.EHOSTUNREACH,
                                       'my error message'))
            log_msg = strip_value(sio)
            self.assertTrue('Traceback' not in log_msg)
            self.assertTrue('my error message' not in log_msg)
            self.assertTrue('Host unreachable' in log_msg)
            log_exception(socket.error(errno.ETIMEDOUT, 'my error message'))
            log_msg = strip_value(sio)
            self.assertTrue('Traceback' not in log_msg)
            self.assertTrue('my error message' not in log_msg)
            self.assertTrue('Connection timeout' in log_msg)
            # unfiltered
            log_exception(socket.error(0, 'my error message'))
            log_msg = strip_value(sio)
            self.assertTrue('Traceback' in log_msg)
            self.assertTrue('my error message' in log_msg)
            # test eventlet.Timeout
            connection_timeout = ConnectionTimeout(42, 'my error message')
            log_exception(connection_timeout)
            log_msg = strip_value(sio)
            self.assertTrue('Traceback' not in log_msg)
            self.assertTrue('ConnectionTimeout' in log_msg)
            self.assertTrue('(42s)' in log_msg)
            self.assertTrue('my error message' not in log_msg)
            connection_timeout.cancel()
            message_timeout = MessageTimeout(42, 'my error message')
            log_exception(message_timeout)
            log_msg = strip_value(sio)
            self.assertTrue('Traceback' not in log_msg)
            self.assertTrue('MessageTimeout' in log_msg)
            self.assertTrue('(42s)' in log_msg)
            self.assertTrue('my error message' in log_msg)
            message_timeout.cancel()
            # test unhandled
            log_exception(Exception('my error message'))
            log_msg = strip_value(sio)
            self.assertTrue('Traceback' in log_msg)
            self.assertTrue('my error message' in log_msg)
        finally:
            logger.logger.removeHandler(handler)
@reset_logger_state
def test_swift_log_formatter_max_line_length(self):
# setup stream logging
sio = StringIO()
logger = utils.get_logger(None)
handler = logging.StreamHandler(sio)
formatter = utils.SwiftLogFormatter(max_line_length=10)
handler.setFormatter(formatter)
logger.logger.addHandler(handler)
def strip_value(sio):
sio.seek(0)
v = sio.getvalue()
sio.truncate(0)
return v
try:
logger.info('12345')
self.assertEqual(strip_value(sio), '12345\n')
logger.info('1234567890')
self.assertEqual(strip_value(sio), '1234567890\n')
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '12 ... de\n')
formatter.max_line_length = 11
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '123 ... cde\n')
formatter.max_line_length = 0
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1234567890abcde\n')
formatter.max_line_length = 1
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1\n')
formatter.max_line_length = 2
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '12\n')
formatter.max_line_length = 3
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '123\n')
formatter.max_line_length = 4
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1234\n')
formatter.max_line_length = 5
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '12345\n')
formatter.max_line_length = 6
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '123456\n')
formatter.max_line_length = 7
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1 ... e\n')
formatter.max_line_length = -10
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1234567890abcde\n')
finally:
logger.logger.removeHandler(handler)
    @reset_logger_state
    def test_swift_log_formatter(self):
        """SwiftLogFormatter appends txn/client_ip suffixes to error
        records, suppresses them when the value already appears in the
        message, and collapses newlines to #012."""
        # setup stream logging
        sio = StringIO()
        logger = utils.get_logger(None)
        handler = logging.StreamHandler(sio)
        handler.setFormatter(utils.SwiftLogFormatter())
        logger.logger.addHandler(handler)
        def strip_value(sio):
            # read-and-reset helper so each assertion only sees output
            # produced since the previous check
            sio.seek(0)
            v = sio.getvalue()
            sio.truncate(0)
            return v
        try:
            self.assertFalse(logger.txn_id)
            logger.error('my error message')
            log_msg = strip_value(sio)
            self.assertTrue('my error message' in log_msg)
            self.assertTrue('txn' not in log_msg)
            logger.txn_id = '12345'
            logger.error('test')
            log_msg = strip_value(sio)
            self.assertTrue('txn' in log_msg)
            self.assertTrue('12345' in log_msg)
            # test txn in info message
            self.assertEqual(logger.txn_id, '12345')
            logger.info('test')
            log_msg = strip_value(sio)
            self.assertTrue('txn' in log_msg)
            self.assertTrue('12345' in log_msg)
            # test txn already in message
            self.assertEqual(logger.txn_id, '12345')
            logger.warning('test 12345 test')
            self.assertEqual(strip_value(sio), 'test 12345 test\n')
            # Test multi line collapsing
            logger.error('my\nerror\nmessage')
            log_msg = strip_value(sio)
            self.assertTrue('my#012error#012message' in log_msg)
            # test client_ip
            self.assertFalse(logger.client_ip)
            logger.error('my error message')
            log_msg = strip_value(sio)
            self.assertTrue('my error message' in log_msg)
            self.assertTrue('client_ip' not in log_msg)
            logger.client_ip = '1.2.3.4'
            logger.error('test')
            log_msg = strip_value(sio)
            self.assertTrue('client_ip' in log_msg)
            self.assertTrue('1.2.3.4' in log_msg)
            # test no client_ip on info message
            self.assertEqual(logger.client_ip, '1.2.3.4')
            logger.info('test')
            log_msg = strip_value(sio)
            self.assertTrue('client_ip' not in log_msg)
            self.assertTrue('1.2.3.4' not in log_msg)
            # test client_ip (and txn) already in message
            self.assertEqual(logger.client_ip, '1.2.3.4')
            logger.warning('test 1.2.3.4 test 12345')
            self.assertEqual(strip_value(sio), 'test 1.2.3.4 test 12345\n')
        finally:
            logger.logger.removeHandler(handler)
def test_storage_directory(self):
self.assertEqual(utils.storage_directory('objects', '1', 'ABCDEF'),
'objects/1/DEF/ABCDEF')
def test_is_valid_ip(self):
self.assertTrue(is_valid_ip("127.0.0.1"))
self.assertTrue(is_valid_ip("10.0.0.1"))
ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80:0:0:0:204:61ff:fe9d:f156"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80::204:61ff:fe9d:f156"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80:0000:0000:0000:0204:61ff:254.157.241.86"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80:0:0:0:0204:61ff:254.157.241.86"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80::204:61ff:254.157.241.86"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "fe80::"
self.assertTrue(is_valid_ip(ipv6))
ipv6 = "::1"
self.assertTrue(is_valid_ip(ipv6))
not_ipv6 = "3ffe:0b00:0000:0001:0000:0000:000a"
self.assertFalse(is_valid_ip(not_ipv6))
not_ipv6 = "1:2:3:4:5:6::7:8"
self.assertFalse(is_valid_ip(not_ipv6))
def test_is_valid_ipv4(self):
self.assertTrue(is_valid_ipv4("127.0.0.1"))
self.assertTrue(is_valid_ipv4("10.0.0.1"))
ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80:0:0:0:204:61ff:fe9d:f156"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80::204:61ff:fe9d:f156"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80:0000:0000:0000:0204:61ff:254.157.241.86"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80:0:0:0:0204:61ff:254.157.241.86"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80::204:61ff:254.157.241.86"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "fe80::"
self.assertFalse(is_valid_ipv4(ipv6))
ipv6 = "::1"
self.assertFalse(is_valid_ipv4(ipv6))
not_ipv6 = "3ffe:0b00:0000:0001:0000:0000:000a"
self.assertFalse(is_valid_ipv4(not_ipv6))
not_ipv6 = "1:2:3:4:5:6::7:8"
self.assertFalse(is_valid_ipv4(not_ipv6))
def test_is_valid_ipv6(self):
self.assertFalse(is_valid_ipv6("127.0.0.1"))
self.assertFalse(is_valid_ipv6("10.0.0.1"))
ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80:0:0:0:204:61ff:fe9d:f156"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80::204:61ff:fe9d:f156"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80:0000:0000:0000:0204:61ff:254.157.241.86"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80:0:0:0:0204:61ff:254.157.241.86"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80::204:61ff:254.157.241.86"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "fe80::"
self.assertTrue(is_valid_ipv6(ipv6))
ipv6 = "::1"
self.assertTrue(is_valid_ipv6(ipv6))
not_ipv6 = "3ffe:0b00:0000:0001:0000:0000:000a"
self.assertFalse(is_valid_ipv6(not_ipv6))
not_ipv6 = "1:2:3:4:5:6::7:8"
self.assertFalse(is_valid_ipv6(not_ipv6))
def test_expand_ipv6(self):
expanded_ipv6 = "fe80::204:61ff:fe9d:f156"
upper_ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156"
self.assertEqual(expanded_ipv6, utils.expand_ipv6(upper_ipv6))
omit_ipv6 = "fe80:0000:0000::0204:61ff:fe9d:f156"
self.assertEqual(expanded_ipv6, utils.expand_ipv6(omit_ipv6))
less_num_ipv6 = "fe80:0:00:000:0204:61ff:fe9d:f156"
self.assertEqual(expanded_ipv6, utils.expand_ipv6(less_num_ipv6))
def test_whataremyips(self):
myips = utils.whataremyips()
self.assertTrue(len(myips) > 1)
self.assertTrue('127.0.0.1' in myips)
def test_whataremyips_bind_to_all(self):
for any_addr in ('0.0.0.0', '0000:0000:0000:0000:0000:0000:0000:0000',
'::0', '::0000', '::',
# Wacky parse-error input produces all IPs
'I am a bear'):
myips = utils.whataremyips(any_addr)
self.assertTrue(len(myips) > 1)
self.assertTrue('127.0.0.1' in myips)
def test_whataremyips_bind_ip_specific(self):
self.assertEqual(['1.2.3.4'], utils.whataremyips('1.2.3.4'))
    def test_whataremyips_error(self):
        """If netifaces.ifaddresses blows up for an interface,
        whataremyips returns an empty list instead of propagating."""
        def my_interfaces():
            return ['eth0']
        def my_ifaddress_error(interface):
            raise ValueError
        with patch('netifaces.interfaces', my_interfaces), \
                patch('netifaces.ifaddresses', my_ifaddress_error):
            self.assertEqual(utils.whataremyips(), [])
    def test_whataremyips_ipv6(self):
        """IPv6 addresses are returned with the '%interface' scope-id
        suffix stripped."""
        test_ipv6_address = '2001:6b0:dead:beef:2::32'
        test_interface = 'eth0'
        def my_ipv6_interfaces():
            return ['eth0']
        def my_ipv6_ifaddresses(interface):
            # netifaces reports link-scoped addresses as 'addr%interface'
            return {AF_INET6:
                    [{'netmask': 'ffff:ffff:ffff:ffff::',
                      'addr': '%s%%%s' % (test_ipv6_address, test_interface)}]}
        with patch('netifaces.interfaces', my_ipv6_interfaces), \
                patch('netifaces.ifaddresses', my_ipv6_ifaddresses):
            myips = utils.whataremyips()
            self.assertEqual(len(myips), 1)
            self.assertEqual(myips[0], test_ipv6_address)
    def test_hash_path(self):
        """hash_path output is pinned to exact digests so any accidental
        change to the on-disk hashing scheme is caught immediately."""
        # Yes, these tests are deliberately very fragile. We want to make sure
        # that if someones changes the results hash_path produces, they know it
        with mock.patch('swift.common.utils.HASH_PATH_PREFIX', ''):
            self.assertEqual(utils.hash_path('a'),
                             '1c84525acb02107ea475dcd3d09c2c58')
            self.assertEqual(utils.hash_path('a', 'c'),
                             '33379ecb053aa5c9e356c68997cbb59e')
            self.assertEqual(utils.hash_path('a', 'c', 'o'),
                             '06fbf0b514e5199dfc4e00f42eb5ea83')
            self.assertEqual(utils.hash_path('a', 'c', 'o', raw_digest=False),
                             '06fbf0b514e5199dfc4e00f42eb5ea83')
            self.assertEqual(utils.hash_path('a', 'c', 'o', raw_digest=True),
                             '\x06\xfb\xf0\xb5\x14\xe5\x19\x9d\xfcN'
                             '\x00\xf4.\xb5\xea\x83')
            # an object without a container is invalid
            self.assertRaises(ValueError, utils.hash_path, 'a', object='o')
            # a different prefix must change the digest (restored by the
            # surrounding mock.patch on exit)
            utils.HASH_PATH_PREFIX = 'abcdef'
            self.assertEqual(utils.hash_path('a', 'c', 'o', raw_digest=False),
                             '363f9b535bfb7d17a43a46a358afca0e')
    def test_validate_hash_conf(self):
        """validate_hash_conf requires a 'swift-hash' section containing
        at least one of the prefix/suffix options."""
        # no section causes InvalidHashPathConfigError
        self._test_validate_hash_conf([], [], True)
        # 'swift-hash' section is there but no options causes
        # InvalidHashPathConfigError
        self._test_validate_hash_conf(['swift-hash'], [], True)
        # if we have the section and either of prefix or suffix,
        # InvalidHashPathConfigError doesn't occur
        self._test_validate_hash_conf(
            ['swift-hash'], ['swift_hash_path_prefix'], False)
        self._test_validate_hash_conf(
            ['swift-hash'], ['swift_hash_path_suffix'], False)
        # definitely, we have the section and both of them,
        # InvalidHashPathConfigError doesn't occur
        self._test_validate_hash_conf(
            ['swift-hash'],
            ['swift_hash_path_suffix', 'swift_hash_path_prefix'], False)
        # But invalid section name should make an error even if valid
        # options are there
        self._test_validate_hash_conf(
            ['swift-hash-xxx'],
            ['swift_hash_path_suffix', 'swift_hash_path_prefix'], True)
    def _test_validate_hash_conf(self, sections, options, should_raise_error):
        """Drive validate_hash_conf against a fake ConfigParser that only
        exposes the given sections/options, asserting that it raises (or
        not) exactly as expected."""
        class FakeConfigParser(object):
            def read(self, conf_path):
                # pretend the conf file exists and parsed successfully
                return True
            def get(self, section, option):
                if section not in sections:
                    raise NoSectionError('section error')
                elif option not in options:
                    raise NoOptionError('option error', 'this option')
                else:
                    return 'some_option_value'
        with mock.patch('swift.common.utils.HASH_PATH_PREFIX', ''), \
                mock.patch('swift.common.utils.HASH_PATH_SUFFIX', ''), \
                mock.patch('swift.common.utils.ConfigParser',
                           FakeConfigParser):
            try:
                utils.validate_hash_conf()
            except utils.InvalidHashPathConfigError:
                if not should_raise_error:
                    self.fail('validate_hash_conf should not raise an error')
            else:
                if should_raise_error:
                    self.fail('validate_hash_conf should raise an error')
def test_load_libc_function(self):
self.assertTrue(callable(
utils.load_libc_function('printf')))
self.assertTrue(callable(
utils.load_libc_function('some_not_real_function')))
self.assertRaises(AttributeError,
utils.load_libc_function, 'some_not_real_function',
fail_if_missing=True)
def test_readconf(self):
conf = '''[section1]
foo = bar
[section2]
log_name = yarr'''
# setup a real file
fd, temppath = tempfile.mkstemp(dir='/tmp')
with os.fdopen(fd, 'wb') as f:
f.write(conf)
make_filename = lambda: temppath
# setup a file stream
make_fp = lambda: StringIO(conf)
for conf_object_maker in (make_filename, make_fp):
conffile = conf_object_maker()
result = utils.readconf(conffile)
expected = {'__file__': conffile,
'log_name': None,
'section1': {'foo': 'bar'},
'section2': {'log_name': 'yarr'}}
self.assertEqual(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile, 'section1')
expected = {'__file__': conffile, 'log_name': 'section1',
'foo': 'bar'}
self.assertEqual(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile,
'section2').get('log_name')
expected = 'yarr'
self.assertEqual(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile, 'section1',
log_name='foo').get('log_name')
expected = 'foo'
self.assertEqual(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile, 'section1',
defaults={'bar': 'baz'})
expected = {'__file__': conffile, 'log_name': 'section1',
'foo': 'bar', 'bar': 'baz'}
self.assertEqual(result, expected)
self.assertRaises(SystemExit, utils.readconf, temppath, 'section3')
os.unlink(temppath)
self.assertRaises(SystemExit, utils.readconf, temppath)
    def test_readconf_raw(self):
        """With raw=True, readconf must leave %(...)s interpolation
        markers untouched instead of expanding them."""
        conf = '''[section1]
foo = bar
[section2]
log_name = %(yarr)s'''
        # setup a real file
        # NOTE(review): writing a str to a 'wb' handle assumes Python 2 —
        # confirm before running under Python 3.
        fd, temppath = tempfile.mkstemp(dir='/tmp')
        with os.fdopen(fd, 'wb') as f:
            f.write(conf)
        make_filename = lambda: temppath
        # setup a file stream
        make_fp = lambda: StringIO(conf)
        for conf_object_maker in (make_filename, make_fp):
            conffile = conf_object_maker()
            result = utils.readconf(conffile, raw=True)
            # the %(yarr)s value comes back verbatim
            expected = {'__file__': conffile,
                        'log_name': None,
                        'section1': {'foo': 'bar'},
                        'section2': {'log_name': '%(yarr)s'}}
            self.assertEqual(result, expected)
        os.unlink(temppath)
        # missing file is fatal
        self.assertRaises(SystemExit, utils.readconf, temppath)
    def test_readconf_dir(self):
        """readconf on a .conf.d directory merges all its files; DEFAULT
        values from later files override earlier ones, and files from
        sibling directories are ignored."""
        config_dir = {
            'server.conf.d/01.conf': """
            [DEFAULT]
            port = 8080
            foo = bar

            [section1]
            name=section1
            """,
            'server.conf.d/section2.conf': """
            [DEFAULT]
            port = 8081
            bar = baz

            [section2]
            name=section2
            """,
            'other-server.conf.d/01.conf': """
            [DEFAULT]
            port = 8082

            [section3]
            name=section3
            """
        }
        # strip indent from test config contents
        config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items())
        with temptree(*zip(*config_dir.items())) as path:
            conf_dir = os.path.join(path, 'server.conf.d')
            conf = utils.readconf(conf_dir)
            # both sections see the merged DEFAULTs; port is 8081 because
            # section2.conf is read after 01.conf and its DEFAULT wins
            expected = {
                '__file__': os.path.join(path, 'server.conf.d'),
                'log_name': None,
                'section1': {
                    'port': '8081',
                    'foo': 'bar',
                    'bar': 'baz',
                    'name': 'section1',
                },
                'section2': {
                    'port': '8081',
                    'foo': 'bar',
                    'bar': 'baz',
                    'name': 'section2',
                },
            }
            self.assertEqual(conf, expected)
    def test_readconf_dir_ignores_hidden_and_nondotconf_files(self):
        """Only *.conf files in the directory are parsed; dot-files and
        other extensions (editor swap/backup files) must be skipped."""
        config_dir = {
            'server.conf.d/01.conf': """
            [section1]
            port = 8080
            """,
            'server.conf.d/.01.conf.swp': """
            [section]
            port = 8081
            """,
            'server.conf.d/01.conf-bak': """
            [section]
            port = 8082
            """,
        }
        # strip indent from test config contents
        config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items())
        with temptree(*zip(*config_dir.items())) as path:
            conf_dir = os.path.join(path, 'server.conf.d')
            conf = utils.readconf(conf_dir)
            # only 01.conf contributed; the swap and backup files did not
            expected = {
                '__file__': os.path.join(path, 'server.conf.d'),
                'log_name': None,
                'section1': {
                    'port': '8080',
                },
            }
            self.assertEqual(conf, expected)
def test_drop_privileges(self):
user = getuser()
# over-ride os with mock
required_func_calls = ('setgroups', 'setgid', 'setuid', 'setsid',
'chdir', 'umask')
utils.os = MockOs(called_funcs=required_func_calls)
# exercise the code
utils.drop_privileges(user)
for func in required_func_calls:
self.assertTrue(utils.os.called_funcs[func])
import pwd
self.assertEqual(pwd.getpwnam(user)[5], utils.os.environ['HOME'])
groups = [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]
groups.append(pwd.getpwnam(user).pw_gid)
self.assertEqual(set(groups), set(os.getgroups()))
# reset; test same args, OSError trying to get session leader
utils.os = MockOs(called_funcs=required_func_calls,
raise_funcs=('setsid',))
for func in required_func_calls:
self.assertFalse(utils.os.called_funcs.get(func, False))
utils.drop_privileges(user)
for func in required_func_calls:
self.assertTrue(utils.os.called_funcs[func])
def test_drop_privileges_no_call_setsid(self):
user = getuser()
# over-ride os with mock
required_func_calls = ('setgroups', 'setgid', 'setuid', 'chdir',
'umask')
bad_func_calls = ('setsid',)
utils.os = MockOs(called_funcs=required_func_calls,
raise_funcs=bad_func_calls)
# exercise the code
utils.drop_privileges(user, call_setsid=False)
for func in required_func_calls:
self.assertTrue(utils.os.called_funcs[func])
for func in bad_func_calls:
self.assertTrue(func not in utils.os.called_funcs)
    @reset_logger_state
    def test_capture_stdio(self):
        """capture_stdio must install an excepthook, close the stdio fds,
        and wrap stdout/stderr in LoggerFileObject — unless closing fails
        or console logging / capture flags say otherwise."""
        # stubs
        logger = utils.get_logger(None, 'dummy')
        # mock utils system modules; restored in the finally below
        _orig_sys = utils.sys
        _orig_os = utils.os
        try:
            utils.sys = MockSys()
            utils.os = MockOs()
            # basic test
            utils.capture_stdio(logger)
            self.assertTrue(utils.sys.excepthook is not None)
            # all stdio fds were dup2'd away (recorded as closed)
            self.assertEqual(utils.os.closed_fds, utils.sys.stdio_fds)
            self.assertTrue(
                isinstance(utils.sys.stdout, utils.LoggerFileObject))
            self.assertTrue(
                isinstance(utils.sys.stderr, utils.LoggerFileObject))
            # reset; test same args, but exc when trying to close stdio
            utils.os = MockOs(raise_funcs=('dup2',))
            utils.sys = MockSys()
            # test unable to close stdio
            utils.capture_stdio(logger)
            self.assertTrue(utils.sys.excepthook is not None)
            # nothing closed, but stdout/stderr still get wrapped
            self.assertEqual(utils.os.closed_fds, [])
            self.assertTrue(
                isinstance(utils.sys.stdout, utils.LoggerFileObject))
            self.assertTrue(
                isinstance(utils.sys.stderr, utils.LoggerFileObject))
            # reset; test some other args
            utils.os = MockOs()
            utils.sys = MockSys()
            logger = utils.get_logger(None, log_to_console=True)
            # test console log
            utils.capture_stdio(logger, capture_stdout=False,
                                capture_stderr=False)
            self.assertTrue(utils.sys.excepthook is not None)
            # when logging to console, stderr remains open
            self.assertEqual(utils.os.closed_fds, utils.sys.stdio_fds[:2])
            reset_loggers()
            # stdio not captured
            self.assertFalse(isinstance(utils.sys.stdout,
                                        utils.LoggerFileObject))
            self.assertFalse(isinstance(utils.sys.stderr,
                                        utils.LoggerFileObject))
        finally:
            # always restore the real sys/os modules
            utils.sys = _orig_sys
            utils.os = _orig_os
@reset_logger_state
def test_get_logger_console(self):
logger = utils.get_logger(None)
console_handlers = [h for h in logger.logger.handlers if
isinstance(h, logging.StreamHandler)]
self.assertFalse(console_handlers)
logger = utils.get_logger(None, log_to_console=True)
console_handlers = [h for h in logger.logger.handlers if
isinstance(h, logging.StreamHandler)]
self.assertTrue(console_handlers)
# make sure you can't have two console handlers
self.assertEqual(len(console_handlers), 1)
old_handler = console_handlers[0]
logger = utils.get_logger(None, log_to_console=True)
console_handlers = [h for h in logger.logger.handlers if
isinstance(h, logging.StreamHandler)]
self.assertEqual(len(console_handlers), 1)
new_handler = console_handlers[0]
self.assertNotEqual(new_handler, old_handler)
    def verify_under_pseudo_time(
            self, func, target_runtime_ms=1, *args, **kwargs):
        """Run *func* under a fake clock and assert its simulated runtime
        is within 100ms of *target_runtime_ms*.

        time.time advances 1ms per call; time.sleep/eventlet.sleep advance
        the fake clock by the requested duration (plus the 1ms call cost),
        so ratelimit-style code can be timed deterministically.
        """
        # single-element list so the closures below can mutate it
        # (pre-`nonlocal` idiom)
        curr_time = [42.0]

        def my_time():
            curr_time[0] += 0.001
            return curr_time[0]

        def my_sleep(duration):
            curr_time[0] += 0.001
            curr_time[0] += duration

        with patch('time.time', my_time), \
                patch('time.sleep', my_sleep), \
                patch('eventlet.sleep', my_sleep):
            start = time.time()
            func(*args, **kwargs)

            # make sure it's accurate to 10th of a second, converting the time
            # difference to milliseconds, 100 milliseconds is 1/10 of a second
            diff_from_target_ms = abs(
                target_runtime_ms - ((time.time() - start) * 1000))
            self.assertTrue(diff_from_target_ms < 100,
                            "Expected %d < 100" % diff_from_target_ms)
    def test_ratelimit_sleep(self):
        """ratelimit_sleep must not sleep for non-positive rates, and must
        pace iterations to match a positive max_rate."""
        # negative rate: no throttling at all
        def testfunc():
            running_time = 0
            for i in range(100):
                running_time = utils.ratelimit_sleep(running_time, -5)

        self.verify_under_pseudo_time(testfunc, target_runtime_ms=1)

        # zero rate: also no throttling
        def testfunc():
            running_time = 0
            for i in range(100):
                running_time = utils.ratelimit_sleep(running_time, 0)

        self.verify_under_pseudo_time(testfunc, target_runtime_ms=1)

        # 50 calls at 200/s should take ~250ms of simulated time
        def testfunc():
            running_time = 0
            for i in range(50):
                running_time = utils.ratelimit_sleep(running_time, 200)

        self.verify_under_pseudo_time(testfunc, target_runtime_ms=250)
    def test_ratelimit_sleep_with_incr(self):
        """incr_by lets a single call count as several units against the
        rate: 248 units at 500/s should pace to ~500ms simulated time."""
        def testfunc():
            running_time = 0
            vals = [5, 17, 0, 3, 11, 30,
                    40, 4, 13, 2, -1] * 2  # adds up to 248
            total = 0
            for i in vals:
                running_time = utils.ratelimit_sleep(running_time,
                                                     500, incr_by=i)
                total += i
            # sanity-check the fixture itself
            self.assertEqual(248, total)

        self.verify_under_pseudo_time(testfunc, target_runtime_ms=500)
    def test_ratelimit_sleep_with_sleep(self):
        """Time already spent sleeping between calls must be credited
        against the rate limit (within rate_buffer)."""
        def testfunc():
            running_time = 0
            # 40 calls at 40/s is ~1s, but 600ms is externally slept,
            # which the limiter should absorb
            sleeps = [0] * 7 + [.2] * 3 + [0] * 30
            for i in sleeps:
                running_time = utils.ratelimit_sleep(running_time, 40,
                                                     rate_buffer=1)
                time.sleep(i)

        self.verify_under_pseudo_time(testfunc, target_runtime_ms=900)
def test_urlparse(self):
parsed = utils.urlparse('http://127.0.0.1/')
self.assertEqual(parsed.scheme, 'http')
self.assertEqual(parsed.hostname, '127.0.0.1')
self.assertEqual(parsed.path, '/')
parsed = utils.urlparse('http://127.0.0.1:8080/')
self.assertEqual(parsed.port, 8080)
parsed = utils.urlparse('https://127.0.0.1/')
self.assertEqual(parsed.scheme, 'https')
parsed = utils.urlparse('http://[::1]/')
self.assertEqual(parsed.hostname, '::1')
parsed = utils.urlparse('http://[::1]:8080/')
self.assertEqual(parsed.hostname, '::1')
self.assertEqual(parsed.port, 8080)
parsed = utils.urlparse('www.example.com')
self.assertEqual(parsed.hostname, '')
def test_search_tree(self):
# file match & ext miss
with temptree(['asdf.conf', 'blarg.conf', 'asdf.cfg']) as t:
asdf = utils.search_tree(t, 'a*', '.conf')
self.assertEqual(len(asdf), 1)
self.assertEqual(asdf[0],
os.path.join(t, 'asdf.conf'))
# multi-file match & glob miss & sort
with temptree(['application.bin', 'apple.bin', 'apropos.bin']) as t:
app_bins = utils.search_tree(t, 'app*', 'bin')
self.assertEqual(len(app_bins), 2)
self.assertEqual(app_bins[0],
os.path.join(t, 'apple.bin'))
self.assertEqual(app_bins[1],
os.path.join(t, 'application.bin'))
# test file in folder & ext miss & glob miss
files = (
'sub/file1.ini',
'sub/file2.conf',
'sub.bin',
'bus.ini',
'bus/file3.ini',
)
with temptree(files) as t:
sub_ini = utils.search_tree(t, 'sub*', '.ini')
self.assertEqual(len(sub_ini), 1)
self.assertEqual(sub_ini[0],
os.path.join(t, 'sub/file1.ini'))
# test multi-file in folder & sub-folder & ext miss & glob miss
files = (
'folder_file.txt',
'folder/1.txt',
'folder/sub/2.txt',
'folder2/3.txt',
'Folder3/4.txt'
'folder.rc',
)
with temptree(files) as t:
folder_texts = utils.search_tree(t, 'folder*', '.txt')
self.assertEqual(len(folder_texts), 4)
f1 = os.path.join(t, 'folder_file.txt')
f2 = os.path.join(t, 'folder/1.txt')
f3 = os.path.join(t, 'folder/sub/2.txt')
f4 = os.path.join(t, 'folder2/3.txt')
for f in [f1, f2, f3, f4]:
self.assertTrue(f in folder_texts)
    def test_search_tree_with_directory_ext_match(self):
        """With dir_ext set, search_tree must return matching *.conf.d
        directories themselves rather than the files inside them."""
        files = (
            'object-server/object-server.conf-base',
            'object-server/1.conf.d/base.conf',
            'object-server/1.conf.d/1.conf',
            'object-server/2.conf.d/base.conf',
            'object-server/2.conf.d/2.conf',
            'object-server/3.conf.d/base.conf',
            'object-server/3.conf.d/3.conf',
            'object-server/4.conf.d/base.conf',
            'object-server/4.conf.d/4.conf',
        )
        with temptree(files) as t:
            conf_dirs = utils.search_tree(t, 'object-server', '.conf',
                                          dir_ext='conf.d')
        # all four N.conf.d directories found; the *.conf-base file is not
        self.assertEqual(len(conf_dirs), 4)
        for i in range(4):
            conf_dir = os.path.join(t, 'object-server/%d.conf.d' % (i + 1))
            self.assertTrue(conf_dir in conf_dirs)
    def test_search_tree_conf_dir_with_named_conf_match(self):
        """A specific conf name ('noauth.conf') with a matching dir_ext
        must select only the proxy-noauth.conf.d directory."""
        files = (
            'proxy-server/proxy-server.conf.d/base.conf',
            'proxy-server/proxy-server.conf.d/pipeline.conf',
            'proxy-server/proxy-noauth.conf.d/base.conf',
            'proxy-server/proxy-noauth.conf.d/pipeline.conf',
        )
        with temptree(files) as t:
            conf_dirs = utils.search_tree(t, 'proxy-server', 'noauth.conf',
                                          dir_ext='noauth.conf.d')
        self.assertEqual(len(conf_dirs), 1)
        conf_dir = conf_dirs[0]
        expected = os.path.join(t, 'proxy-server/proxy-noauth.conf.d')
        self.assertEqual(conf_dir, expected)
    def test_search_tree_conf_dir_pid_with_named_conf_match(self):
        """The exts list must match a .pid.d directory by its full
        extension, selecting only the noauth variant."""
        files = (
            'proxy-server/proxy-server.pid.d',
            'proxy-server/proxy-noauth.pid.d',
        )
        with temptree(files) as t:
            pid_files = utils.search_tree(t, 'proxy-server',
                                          exts=['noauth.pid', 'noauth.pid.d'])
        self.assertEqual(len(pid_files), 1)
        pid_file = pid_files[0]
        expected = os.path.join(t, 'proxy-server/proxy-noauth.pid.d')
        self.assertEqual(pid_file, expected)
    def test_write_file(self):
        """write_file writes contents, creating intermediate directories
        as needed, but raises IOError when a path component is a file."""
        with temptree([]) as t:
            file_name = os.path.join(t, 'test')
            utils.write_file(file_name, 'test')
            with open(file_name, 'r') as f:
                contents = f.read()
            self.assertEqual(contents, 'test')
            # and also subdirs
            file_name = os.path.join(t, 'subdir/test2')
            utils.write_file(file_name, 'test2')
            with open(file_name, 'r') as f:
                contents = f.read()
            self.assertEqual(contents, 'test2')
            # but can't over-write files
            # (subdir/test2 is a file, so it can't serve as a directory)
            file_name = os.path.join(t, 'subdir/test2/test3')
            self.assertRaises(IOError, utils.write_file, file_name,
                              'test3')
def test_remove_file(self):
with temptree([]) as t:
file_name = os.path.join(t, 'blah.pid')
# assert no raise
self.assertEqual(os.path.exists(file_name), False)
self.assertEqual(utils.remove_file(file_name), None)
with open(file_name, 'w') as f:
f.write('1')
self.assertTrue(os.path.exists(file_name))
self.assertEqual(utils.remove_file(file_name), None)
self.assertFalse(os.path.exists(file_name))
def test_human_readable(self):
self.assertEqual(utils.human_readable(0), '0')
self.assertEqual(utils.human_readable(1), '1')
self.assertEqual(utils.human_readable(10), '10')
self.assertEqual(utils.human_readable(100), '100')
self.assertEqual(utils.human_readable(999), '999')
self.assertEqual(utils.human_readable(1024), '1Ki')
self.assertEqual(utils.human_readable(1535), '1Ki')
self.assertEqual(utils.human_readable(1536), '2Ki')
self.assertEqual(utils.human_readable(1047552), '1023Ki')
self.assertEqual(utils.human_readable(1048063), '1023Ki')
self.assertEqual(utils.human_readable(1048064), '1Mi')
self.assertEqual(utils.human_readable(1048576), '1Mi')
self.assertEqual(utils.human_readable(1073741824), '1Gi')
self.assertEqual(utils.human_readable(1099511627776), '1Ti')
self.assertEqual(utils.human_readable(1125899906842624), '1Pi')
self.assertEqual(utils.human_readable(1152921504606846976), '1Ei')
self.assertEqual(utils.human_readable(1180591620717411303424), '1Zi')
self.assertEqual(utils.human_readable(1208925819614629174706176),
'1Yi')
self.assertEqual(utils.human_readable(1237940039285380274899124224),
'1024Yi')
    def test_validate_sync_to(self):
        """validate_sync_to must accept whitelisted http(s) hosts and
        //realm/cluster paths resolvable via ContainerSyncRealms, and
        reject bad schemes, hosts, params and malformed realm paths.

        Each case is checked both with a realms config and with
        realms_conf=None, where every //-style value degrades to the
        all-None tuple.
        """
        fname = 'container-sync-realms.conf'
        fcontents = '''
[US]
key = 9ff3b71c849749dbaec4ccdd3cbab62b
cluster_dfw1 = http://dfw1.host/v1/
'''
        with temptree([fname], [fcontents]) as tempdir:
            logger = FakeLogger()
            fpath = os.path.join(tempdir, fname)
            csr = ContainerSyncRealms(fpath, logger)
            for realms_conf in (None, csr):
                # (url, expected (error, validated_url, realm, key)) pairs
                for goodurl, result in (
                        ('http://1.1.1.1/v1/a/c',
                         (None, 'http://1.1.1.1/v1/a/c', None, None)),
                        ('http://1.1.1.1:8080/a/c',
                         (None, 'http://1.1.1.1:8080/a/c', None, None)),
                        ('http://2.2.2.2/a/c',
                         (None, 'http://2.2.2.2/a/c', None, None)),
                        ('https://1.1.1.1/v1/a/c',
                         (None, 'https://1.1.1.1/v1/a/c', None, None)),
                        # realm/cluster lookups are case-insensitive
                        ('//US/DFW1/a/c',
                         (None, 'http://dfw1.host/v1/a/c', 'US',
                          '9ff3b71c849749dbaec4ccdd3cbab62b')),
                        ('//us/DFW1/a/c',
                         (None, 'http://dfw1.host/v1/a/c', 'US',
                          '9ff3b71c849749dbaec4ccdd3cbab62b')),
                        ('//us/dfw1/a/c',
                         (None, 'http://dfw1.host/v1/a/c', 'US',
                          '9ff3b71c849749dbaec4ccdd3cbab62b')),
                        ('//',
                         (None, None, None, None)),
                        ('',
                         (None, None, None, None))):
                    if goodurl.startswith('//') and not realms_conf:
                        # no realms config: //-urls resolve to nothing
                        self.assertEqual(
                            utils.validate_sync_to(
                                goodurl, ['1.1.1.1', '2.2.2.2'], realms_conf),
                            (None, None, None, None))
                    else:
                        self.assertEqual(
                            utils.validate_sync_to(
                                goodurl, ['1.1.1.1', '2.2.2.2'], realms_conf),
                            result)
                # bad urls: first tuple element carries the error message
                for badurl, result in (
                        ('http://1.1.1.1',
                         ('Path required in X-Container-Sync-To', None, None,
                          None)),
                        ('httpq://1.1.1.1/v1/a/c',
                         ('Invalid scheme \'httpq\' in X-Container-Sync-To, '
                          'must be "//", "http", or "https".', None, None,
                          None)),
                        ('http://1.1.1.1/v1/a/c?query',
                         ('Params, queries, and fragments not allowed in '
                          'X-Container-Sync-To', None, None, None)),
                        ('http://1.1.1.1/v1/a/c#frag',
                         ('Params, queries, and fragments not allowed in '
                          'X-Container-Sync-To', None, None, None)),
                        ('http://1.1.1.1/v1/a/c?query#frag',
                         ('Params, queries, and fragments not allowed in '
                          'X-Container-Sync-To', None, None, None)),
                        ('http://1.1.1.1/v1/a/c?query=param',
                         ('Params, queries, and fragments not allowed in '
                          'X-Container-Sync-To', None, None, None)),
                        ('http://1.1.1.1/v1/a/c?query=param#frag',
                         ('Params, queries, and fragments not allowed in '
                          'X-Container-Sync-To', None, None, None)),
                        ('http://1.1.1.2/v1/a/c',
                         ("Invalid host '1.1.1.2' in X-Container-Sync-To",
                          None, None, None)),
                        ('//us/invalid/a/c',
                         ("No cluster endpoint for 'us' 'invalid'", None,
                          None, None)),
                        ('//invalid/dfw1/a/c',
                         ("No realm key for 'invalid'", None, None, None)),
                        ('//us/invalid1/a/',
                         ("Invalid X-Container-Sync-To format "
                          "'//us/invalid1/a/'", None, None, None)),
                        ('//us/invalid1/a',
                         ("Invalid X-Container-Sync-To format "
                          "'//us/invalid1/a'", None, None, None)),
                        ('//us/invalid1/',
                         ("Invalid X-Container-Sync-To format "
                          "'//us/invalid1/'", None, None, None)),
                        ('//us/invalid1',
                         ("Invalid X-Container-Sync-To format "
                          "'//us/invalid1'", None, None, None)),
                        ('//us/',
                         ("Invalid X-Container-Sync-To format "
                          "'//us/'", None, None, None)),
                        ('//us',
                         ("Invalid X-Container-Sync-To format "
                          "'//us'", None, None, None))):
                    if badurl.startswith('//') and not realms_conf:
                        self.assertEqual(
                            utils.validate_sync_to(
                                badurl, ['1.1.1.1', '2.2.2.2'], realms_conf),
                            (None, None, None, None))
                    else:
                        self.assertEqual(
                            utils.validate_sync_to(
                                badurl, ['1.1.1.1', '2.2.2.2'], realms_conf),
                            result)
def test_TRUE_VALUES(self):
for v in utils.TRUE_VALUES:
self.assertEqual(v, v.lower())
def test_config_true_value(self):
orig_trues = utils.TRUE_VALUES
try:
utils.TRUE_VALUES = 'hello world'.split()
for val in 'hello world HELLO WORLD'.split():
self.assertTrue(utils.config_true_value(val) is True)
self.assertTrue(utils.config_true_value(True) is True)
self.assertTrue(utils.config_true_value('foo') is False)
self.assertTrue(utils.config_true_value(False) is False)
finally:
utils.TRUE_VALUES = orig_trues
    def test_config_auto_int_value(self):
        """config_auto_int_value parses ints, maps 'auto' (any case) and
        None to the default, and raises ValueError on anything else."""
        expectations = {
            # (value, default) : expected,
            ('1', 0): 1,
            (1, 0): 1,
            ('asdf', 0): ValueError,
            ('auto', 1): 1,
            ('AutO', 1): 1,
            ('Aut0', 1): ValueError,  # zero, not the letter 'o'
            (None, 1): 1,
        }
        for (value, default), expected in expectations.items():
            try:
                rv = utils.config_auto_int_value(value, default)
            except Exception as e:
                # only the expected exception class is tolerated
                if e.__class__ is not expected:
                    raise
            else:
                self.assertEqual(expected, rv)
def test_streq_const_time(self):
self.assertTrue(utils.streq_const_time('abc123', 'abc123'))
self.assertFalse(utils.streq_const_time('a', 'aaaaa'))
self.assertFalse(utils.streq_const_time('ABC123', 'abc123'))
def test_quorum_size(self):
expected_sizes = {1: 1,
2: 1,
3: 2,
4: 2,
5: 3}
got_sizes = dict([(n, utils.quorum_size(n))
for n in expected_sizes])
self.assertEqual(expected_sizes, got_sizes)
def test_majority_size(self):
expected_sizes = {1: 1,
2: 2,
3: 2,
4: 3,
5: 3}
got_sizes = dict([(n, utils.majority_size(n))
for n in expected_sizes])
self.assertEqual(expected_sizes, got_sizes)
    def test_rsync_ip_ipv4_localhost(self):
        """IPv4 addresses pass through rsync_ip unchanged."""
        self.assertEqual(utils.rsync_ip('127.0.0.1'), '127.0.0.1')
    def test_rsync_ip_ipv6_random_ip(self):
        """IPv6 addresses are wrapped in brackets for rsync URLs."""
        self.assertEqual(
            utils.rsync_ip('fe80:0000:0000:0000:0202:b3ff:fe1e:8329'),
            '[fe80:0000:0000:0000:0202:b3ff:fe1e:8329]')
    def test_rsync_ip_ipv6_ipv4_compatible(self):
        """IPv4-mapped IPv6 addresses also get bracketed."""
        self.assertEqual(
            utils.rsync_ip('::ffff:192.0.2.128'), '[::ffff:192.0.2.128]')
def test_rsync_module_interpolation(self):
fake_device = {'ip': '127.0.0.1', 'port': 11,
'replication_ip': '127.0.0.2', 'replication_port': 12,
'region': '1', 'zone': '2', 'device': 'sda1',
'meta': 'just_a_string'}
self.assertEqual(
utils.rsync_module_interpolation('{ip}', fake_device),
'127.0.0.1')
self.assertEqual(
utils.rsync_module_interpolation('{port}', fake_device),
'11')
self.assertEqual(
utils.rsync_module_interpolation('{replication_ip}', fake_device),
'127.0.0.2')
self.assertEqual(
utils.rsync_module_interpolation('{replication_port}',
fake_device),
'12')
self.assertEqual(
utils.rsync_module_interpolation('{region}', fake_device),
'1')
self.assertEqual(
utils.rsync_module_interpolation('{zone}', fake_device),
'2')
self.assertEqual(
utils.rsync_module_interpolation('{device}', fake_device),
'sda1')
self.assertEqual(
utils.rsync_module_interpolation('{meta}', fake_device),
'just_a_string')
self.assertEqual(
utils.rsync_module_interpolation('{replication_ip}::object',
fake_device),
'127.0.0.2::object')
self.assertEqual(
utils.rsync_module_interpolation('{ip}::container{port}',
fake_device),
'127.0.0.1::container11')
self.assertEqual(
utils.rsync_module_interpolation(
'{replication_ip}::object_{device}', fake_device),
'127.0.0.2::object_sda1')
self.assertEqual(
utils.rsync_module_interpolation(
'127.0.0.3::object_{replication_port}', fake_device),
'127.0.0.3::object_12')
self.assertRaises(ValueError, utils.rsync_module_interpolation,
'{replication_ip}::object_{deivce}', fake_device)
    def test_fallocate_reserve(self):
        """FallocateWrapper must refuse allocations (ENOSPC) that would
        leave less free space than FALLOCATE_RESERVE, whether the reserve
        is configured in bytes or as a percent of the volume.

        os.fstatvfs is stubbed so free space is exactly
        f_frsize * f_bavail and total space f_frsize * f_blocks.
        """
        class StatVFS(object):
            # fake filesystem geometry, mutated per sub-case below
            f_frsize = 1024
            f_bavail = 1
            f_blocks = 100

        def fstatvfs(fd):
            return StatVFS()

        orig_FALLOCATE_RESERVE = utils.FALLOCATE_RESERVE
        orig_fstatvfs = utils.os.fstatvfs
        try:
            fallocate = utils.FallocateWrapper(noop=True)
            utils.os.fstatvfs = fstatvfs
            # Make sure setting noop, which disables fallocate, also stops the
            # fallocate_reserve check.
            # Set the fallocate_reserve to 99% and request an object that is
            # about 50% the size. With fallocate_reserve off this will succeed.
            utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
                utils.config_fallocate_value('99%')
            self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(500)), 0)
            # Setting noop to False after the constructor allows us to use
            # a noop fallocate syscall and still test fallocate_reserve.
            fallocate.noop = False
            # Want 1023 reserved, have 1024 * 1 free, so succeeds
            utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
                utils.config_fallocate_value('1023')
            StatVFS.f_frsize = 1024
            StatVFS.f_bavail = 1
            self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(0)), 0)
            # Want 1023 reserved, have 512 * 2 free, so succeeds
            utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
                utils.config_fallocate_value('1023')
            StatVFS.f_frsize = 512
            StatVFS.f_bavail = 2
            self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(0)), 0)
            # Want 1024 reserved, have 1024 * 1 free, so fails
            utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
                utils.config_fallocate_value('1024')
            StatVFS.f_frsize = 1024
            StatVFS.f_bavail = 1
            with self.assertRaises(OSError) as catcher:
                fallocate(0, 1, 0, ctypes.c_uint64(0))
            self.assertEqual(
                str(catcher.exception),
                '[Errno %d] FALLOCATE_RESERVE fail 1024 <= 1024'
                % errno.ENOSPC)
            self.assertEqual(catcher.exception.errno, errno.ENOSPC)
            # Want 1024 reserved, have 512 * 2 free, so fails
            utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
                utils.config_fallocate_value('1024')
            StatVFS.f_frsize = 512
            StatVFS.f_bavail = 2
            with self.assertRaises(OSError) as catcher:
                fallocate(0, 1, 0, ctypes.c_uint64(0))
            self.assertEqual(
                str(catcher.exception),
                '[Errno %d] FALLOCATE_RESERVE fail 1024 <= 1024'
                % errno.ENOSPC)
            self.assertEqual(catcher.exception.errno, errno.ENOSPC)
            # Want 2048 reserved, have 1024 * 1 free, so fails
            utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
                utils.config_fallocate_value('2048')
            StatVFS.f_frsize = 1024
            StatVFS.f_bavail = 1
            with self.assertRaises(OSError) as catcher:
                fallocate(0, 1, 0, ctypes.c_uint64(0))
            self.assertEqual(
                str(catcher.exception),
                '[Errno %d] FALLOCATE_RESERVE fail 1024 <= 2048'
                % errno.ENOSPC)
            self.assertEqual(catcher.exception.errno, errno.ENOSPC)
            # Want 2048 reserved, have 512 * 2 free, so fails
            utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
                utils.config_fallocate_value('2048')
            StatVFS.f_frsize = 512
            StatVFS.f_bavail = 2
            with self.assertRaises(OSError) as catcher:
                fallocate(0, 1, 0, ctypes.c_uint64(0))
            self.assertEqual(
                str(catcher.exception),
                '[Errno %d] FALLOCATE_RESERVE fail 1024 <= 2048'
                % errno.ENOSPC)
            self.assertEqual(catcher.exception.errno, errno.ENOSPC)
            # Want 1023 reserved, have 1024 * 1 free, but file size is 1, so
            # fails
            utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
                utils.config_fallocate_value('1023')
            StatVFS.f_frsize = 1024
            StatVFS.f_bavail = 1
            with self.assertRaises(OSError) as catcher:
                fallocate(0, 1, 0, ctypes.c_uint64(1))
            self.assertEqual(
                str(catcher.exception),
                '[Errno %d] FALLOCATE_RESERVE fail 1023 <= 1023'
                % errno.ENOSPC)
            self.assertEqual(catcher.exception.errno, errno.ENOSPC)
            # Want 1022 reserved, have 1024 * 1 free, and file size is 1, so
            # succeeds
            utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
                utils.config_fallocate_value('1022')
            StatVFS.f_frsize = 1024
            StatVFS.f_bavail = 1
            self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(1)), 0)
            # Want 1% reserved, have 100 bytes * 2/100 free, and file size is
            # 99, so succeeds
            utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
                utils.config_fallocate_value('1%')
            StatVFS.f_frsize = 100
            StatVFS.f_bavail = 2
            StatVFS.f_blocks = 100
            self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(99)), 0)
            # Want 2% reserved, have 50 bytes * 2/50 free, and file size is 49,
            # so succeeds
            utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
                utils.config_fallocate_value('2%')
            StatVFS.f_frsize = 50
            StatVFS.f_bavail = 2
            StatVFS.f_blocks = 50
            self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(49)), 0)
            # Want 100% reserved, have 100 * 100/100 free, and file size is 0,
            # so fails.
            utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
                utils.config_fallocate_value('100%')
            StatVFS.f_frsize = 100
            StatVFS.f_bavail = 100
            StatVFS.f_blocks = 100
            with self.assertRaises(OSError) as catcher:
                fallocate(0, 1, 0, ctypes.c_uint64(0))
            self.assertEqual(
                str(catcher.exception),
                '[Errno %d] FALLOCATE_RESERVE fail 100.0 <= 100.0'
                % errno.ENOSPC)
            self.assertEqual(catcher.exception.errno, errno.ENOSPC)
            # Want 1% reserved, have 100 * 2/100 free, and file size is 101,
            # so fails.
            utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
                utils.config_fallocate_value('1%')
            StatVFS.f_frsize = 100
            StatVFS.f_bavail = 2
            StatVFS.f_blocks = 100
            with self.assertRaises(OSError) as catcher:
                fallocate(0, 1, 0, ctypes.c_uint64(101))
            self.assertEqual(
                str(catcher.exception),
                '[Errno %d] FALLOCATE_RESERVE fail 0.99 <= 1.0'
                % errno.ENOSPC)
            self.assertEqual(catcher.exception.errno, errno.ENOSPC)
            # Want 98% reserved, have 100 bytes * 99/100 free, but file size
            # is 100, so fails
            utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
                utils.config_fallocate_value('98%')
            StatVFS.f_frsize = 100
            StatVFS.f_bavail = 99
            StatVFS.f_blocks = 100
            with self.assertRaises(OSError) as catcher:
                fallocate(0, 1, 0, ctypes.c_uint64(100))
            self.assertEqual(
                str(catcher.exception),
                '[Errno %d] FALLOCATE_RESERVE fail 98.0 <= 98.0'
                % errno.ENOSPC)
            self.assertEqual(catcher.exception.errno, errno.ENOSPC)
            # Want 2% reserved, have 1000 bytes * 21/1000 free, and file size
            # is 999, so succeeds.
            utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
                utils.config_fallocate_value('2%')
            StatVFS.f_frsize = 1000
            StatVFS.f_bavail = 21
            StatVFS.f_blocks = 1000
            self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(999)), 0)
            # Want 2% resereved, have 1000 bytes * 21/1000 free, and file size
            # is 1000, so fails.
            utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
                utils.config_fallocate_value('2%')
            StatVFS.f_frsize = 1000
            StatVFS.f_bavail = 21
            StatVFS.f_blocks = 1000
            with self.assertRaises(OSError) as catcher:
                fallocate(0, 1, 0, ctypes.c_uint64(1000))
            self.assertEqual(
                str(catcher.exception),
                '[Errno %d] FALLOCATE_RESERVE fail 2.0 <= 2.0'
                % errno.ENOSPC)
            self.assertEqual(catcher.exception.errno, errno.ENOSPC)
        finally:
            # restore mutated module globals
            utils.FALLOCATE_RESERVE = orig_FALLOCATE_RESERVE
            utils.os.fstatvfs = orig_fstatvfs
    def test_fallocate_func(self):
        """utils.fallocate must always invoke _sys_fallocate with
        (fd, mode=1, offset=0, length), clamping negative lengths to 0."""
        class FallocateWrapper(object):
            # records the arguments of the most recent call

            def __init__(self):
                self.last_call = None

            def __call__(self, *args):
                self.last_call = list(args)
                # unwrap the ctypes value for easy comparison
                self.last_call[-1] = self.last_call[-1].value
                return 0

        orig__sys_fallocate = utils._sys_fallocate
        try:
            utils._sys_fallocate = FallocateWrapper()
            # Ensure fallocate calls _sys_fallocate even with 0 bytes
            utils._sys_fallocate.last_call = None
            utils.fallocate(1234, 0)
            self.assertEqual(utils._sys_fallocate.last_call,
                             [1234, 1, 0, 0])
            # Ensure fallocate calls _sys_fallocate even with negative bytes
            utils._sys_fallocate.last_call = None
            utils.fallocate(1234, -5678)
            self.assertEqual(utils._sys_fallocate.last_call,
                             [1234, 1, 0, 0])
            # Ensure fallocate calls _sys_fallocate properly with positive
            # bytes
            utils._sys_fallocate.last_call = None
            utils.fallocate(1234, 1)
            self.assertEqual(utils._sys_fallocate.last_call,
                             [1234, 1, 0, 1])
            utils._sys_fallocate.last_call = None
            utils.fallocate(1234, 10 * 1024 * 1024 * 1024)
            self.assertEqual(utils._sys_fallocate.last_call,
                             [1234, 1, 0, 10 * 1024 * 1024 * 1024])
        finally:
            utils._sys_fallocate = orig__sys_fallocate
    def test_generate_trans_id(self):
        """generate_trans_id produces 'tx' + random hex + '-' + zero-padded
        hex timestamp, with any suffix appended after the timestamp."""
        fake_time = 1366428370.5163341
        with patch.object(utils.time, 'time', return_value=fake_time):
            trans_id = utils.generate_trans_id('')
            self.assertEqual(len(trans_id), 34)
            self.assertEqual(trans_id[:2], 'tx')
            self.assertEqual(trans_id[23], '-')
            # chars after the dash are the epoch time in hex
            self.assertEqual(int(trans_id[24:], 16), int(fake_time))
        with patch.object(utils.time, 'time', return_value=fake_time):
            trans_id = utils.generate_trans_id('-suffix')
            self.assertEqual(len(trans_id), 41)
            self.assertEqual(trans_id[:2], 'tx')
            # suffix is appended verbatim after the 10-hex-digit timestamp
            self.assertEqual(trans_id[34:], '-suffix')
            self.assertEqual(trans_id[23], '-')
            self.assertEqual(int(trans_id[24:34], 16), int(fake_time))
def test_get_trans_id_time(self):
ts = utils.get_trans_id_time('tx8c8bc884cdaf499bb29429aa9c46946e')
self.assertEqual(ts, None)
ts = utils.get_trans_id_time('tx1df4ff4f55ea45f7b2ec2-0051720c06')
self.assertEqual(ts, 1366428678)
self.assertEqual(
time.asctime(time.gmtime(ts)) + ' UTC',
'Sat Apr 20 03:31:18 2013 UTC')
ts = utils.get_trans_id_time(
'tx1df4ff4f55ea45f7b2ec2-0051720c06-suffix')
self.assertEqual(ts, 1366428678)
self.assertEqual(
time.asctime(time.gmtime(ts)) + ' UTC',
'Sat Apr 20 03:31:18 2013 UTC')
ts = utils.get_trans_id_time('')
self.assertEqual(ts, None)
ts = utils.get_trans_id_time('garbage')
self.assertEqual(ts, None)
ts = utils.get_trans_id_time('tx1df4ff4f55ea45f7b2ec2-almostright')
self.assertEqual(ts, None)
def test_config_fallocate_value(self):
fallocate_value, is_percent = utils.config_fallocate_value('10%')
self.assertEqual(fallocate_value, 10)
self.assertTrue(is_percent)
fallocate_value, is_percent = utils.config_fallocate_value('10')
self.assertEqual(fallocate_value, 10)
self.assertFalse(is_percent)
try:
fallocate_value, is_percent = utils.config_fallocate_value('ab%')
except ValueError as err:
exc = err
self.assertEqual(str(exc), 'Error: ab% is an invalid value for '
'fallocate_reserve.')
try:
fallocate_value, is_percent = utils.config_fallocate_value('ab')
except ValueError as err:
exc = err
self.assertEqual(str(exc), 'Error: ab is an invalid value for '
'fallocate_reserve.')
try:
fallocate_value, is_percent = utils.config_fallocate_value('1%%')
except ValueError as err:
exc = err
self.assertEqual(str(exc), 'Error: 1%% is an invalid value for '
'fallocate_reserve.')
try:
fallocate_value, is_percent = utils.config_fallocate_value('10.0')
except ValueError as err:
exc = err
self.assertEqual(str(exc), 'Error: 10.0 is an invalid value for '
'fallocate_reserve.')
fallocate_value, is_percent = utils.config_fallocate_value('10.5%')
self.assertEqual(fallocate_value, 10.5)
self.assertTrue(is_percent)
fallocate_value, is_percent = utils.config_fallocate_value('10.000%')
self.assertEqual(fallocate_value, 10.000)
self.assertTrue(is_percent)
    def test_tpool_reraise(self):
        """tpool_reraise returns the function's result and re-raises both
        Exception and BaseException from the tpool worker in the caller."""
        # run the callable inline instead of on the real threadpool
        with patch.object(utils.tpool, 'execute', lambda f: f()):
            self.assertTrue(
                utils.tpool_reraise(MagicMock(return_value='test1')), 'test1')
            self.assertRaises(
                Exception,
                utils.tpool_reraise, MagicMock(side_effect=Exception('test2')))
            self.assertRaises(
                BaseException,
                utils.tpool_reraise,
                MagicMock(side_effect=BaseException('test3')))
def test_lock_file(self):
flags = os.O_CREAT | os.O_RDWR
with NamedTemporaryFile(delete=False) as nt:
nt.write("test string")
nt.flush()
nt.close()
with utils.lock_file(nt.name, unlink=False) as f:
self.assertEqual(f.read(), "test string")
# we have a lock, now let's try to get a newer one
fd = os.open(nt.name, flags)
self.assertRaises(IOError, fcntl.flock, fd,
fcntl.LOCK_EX | fcntl.LOCK_NB)
with utils.lock_file(nt.name, unlink=False, append=True) as f:
f.seek(0)
self.assertEqual(f.read(), "test string")
f.seek(0)
f.write("\nanother string")
f.flush()
f.seek(0)
self.assertEqual(f.read(), "test string\nanother string")
# we have a lock, now let's try to get a newer one
fd = os.open(nt.name, flags)
self.assertRaises(IOError, fcntl.flock, fd,
fcntl.LOCK_EX | fcntl.LOCK_NB)
with utils.lock_file(nt.name, timeout=3, unlink=False) as f:
try:
with utils.lock_file(
nt.name, timeout=1, unlink=False) as f:
self.assertTrue(
False, "Expected LockTimeout exception")
except LockTimeout:
pass
with utils.lock_file(nt.name, unlink=True) as f:
self.assertEqual(f.read(), "test string\nanother string")
# we have a lock, now let's try to get a newer one
fd = os.open(nt.name, flags)
self.assertRaises(
IOError, fcntl.flock, fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
self.assertRaises(OSError, os.remove, nt.name)
    def test_lock_file_unlinked_after_open(self):
        """If the path is unlinked (or unlinked and recreated) between open
        and flock, lock_file re-opens and ends up holding a different inode
        than the original file."""
        os_open = os.open
        first_pass = [True]

        def deleting_open(filename, flags):
            # unlink the file after it's opened. once.
            fd = os_open(filename, flags)
            if first_pass[0]:
                os.unlink(filename)
                first_pass[0] = False
            return fd
        with NamedTemporaryFile(delete=False) as nt:
            with mock.patch('os.open', deleting_open):
                with utils.lock_file(nt.name, unlink=True) as f:
                    # the locked handle is a fresh inode, not the deleted one
                    self.assertNotEqual(os.fstat(nt.fileno()).st_ino,
                                        os.fstat(f.fileno()).st_ino)
        first_pass = [True]

        def recreating_open(filename, flags):
            # unlink and recreate the file after it's opened
            fd = os_open(filename, flags)
            if first_pass[0]:
                os.unlink(filename)
                os.close(os_open(filename, os.O_CREAT | os.O_RDWR))
                first_pass[0] = False
            return fd
        with NamedTemporaryFile(delete=False) as nt:
            with mock.patch('os.open', recreating_open):
                with utils.lock_file(nt.name, unlink=True) as f:
                    self.assertNotEqual(os.fstat(nt.fileno()).st_ino,
                                        os.fstat(f.fileno()).st_ino)
    def test_lock_file_held_on_unlink(self):
        """The flock must still be held at the moment lock_file unlinks the
        file (verified by a patched os.unlink that probes the lock)."""
        os_unlink = os.unlink

        def flocking_unlink(filename):
            # make sure the lock is held when we unlink
            fd = os.open(filename, os.O_RDWR)
            self.assertRaises(
                IOError, fcntl.flock, fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            os.close(fd)
            os_unlink(filename)
        with NamedTemporaryFile(delete=False) as nt:
            with mock.patch('os.unlink', flocking_unlink):
                with utils.lock_file(nt.name, unlink=True):
                    pass
    def test_lock_file_no_unlink_if_fail(self):
        """A timed-out lock_file must not unlink the file it failed to
        lock."""
        os_open = os.open
        with NamedTemporaryFile(delete=True) as nt:
            def lock_on_open(filename, flags):
                # lock the file on another fd after it's opened.
                # NOTE(review): fd2 is deliberately left open so the
                # competing lock stays held for the rest of the test.
                fd = os_open(filename, flags)
                fd2 = os_open(filename, flags)
                fcntl.flock(fd2, fcntl.LOCK_EX | fcntl.LOCK_NB)
                return fd
            try:
                timedout = False
                with mock.patch('os.open', lock_on_open):
                    with utils.lock_file(nt.name, unlink=False, timeout=0.01):
                        pass
            except LockTimeout:
                timedout = True
            self.assertTrue(timedout)
            # file survives the failed lock attempt
            self.assertTrue(os.path.exists(nt.name))
def test_ismount_path_does_not_exist(self):
tmpdir = mkdtemp()
try:
self.assertFalse(utils.ismount(os.path.join(tmpdir, 'bar')))
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_not_mount(self):
tmpdir = mkdtemp()
try:
self.assertFalse(utils.ismount(tmpdir))
finally:
shutil.rmtree(tmpdir)
    def test_ismount_path_error(self):
        """ismount swallows lstat errors on the path itself (it must not
        raise) -- contrast with ismount_raw in the next test."""
        def _mock_os_lstat(path):
            raise OSError(13, "foo")
        tmpdir = mkdtemp()
        try:
            with patch("os.lstat", _mock_os_lstat):
                # Raises exception with _raw -- see next test.
                utils.ismount(tmpdir)
        finally:
            shutil.rmtree(tmpdir)
    def test_ismount_raw_path_error(self):
        """ismount_raw propagates lstat errors on the path."""
        def _mock_os_lstat(path):
            raise OSError(13, "foo")
        tmpdir = mkdtemp()
        try:
            with patch("os.lstat", _mock_os_lstat):
                self.assertRaises(OSError, utils.ismount_raw, tmpdir)
        finally:
            shutil.rmtree(tmpdir)
def test_ismount_path_is_symlink(self):
tmpdir = mkdtemp()
try:
link = os.path.join(tmpdir, "tmp")
os.symlink("/tmp", link)
self.assertFalse(utils.ismount(link))
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_is_root(self):
self.assertTrue(utils.ismount('/'))
    def test_ismount_parent_path_error(self):
        """ismount swallows lstat errors on the *parent* path ('..') --
        contrast with ismount_raw in the next test."""
        _os_lstat = os.lstat

        def _mock_os_lstat(path):
            # only the parent lookup fails; the path itself stats fine
            if path.endswith(".."):
                raise OSError(13, "foo")
            else:
                return _os_lstat(path)
        tmpdir = mkdtemp()
        try:
            with patch("os.lstat", _mock_os_lstat):
                # Raises exception with _raw -- see next test.
                utils.ismount(tmpdir)
        finally:
            shutil.rmtree(tmpdir)
    def test_ismount_raw_parent_path_error(self):
        """ismount_raw propagates lstat errors on the parent path."""
        _os_lstat = os.lstat

        def _mock_os_lstat(path):
            if path.endswith(".."):
                raise OSError(13, "foo")
            else:
                return _os_lstat(path)
        tmpdir = mkdtemp()
        try:
            with patch("os.lstat", _mock_os_lstat):
                self.assertRaises(OSError, utils.ismount_raw, tmpdir)
        finally:
            shutil.rmtree(tmpdir)
    def test_ismount_successes_dev(self):
        """A path whose device number differs from its parent's is a mount
        point."""
        _os_lstat = os.lstat

        class MockStat(object):
            def __init__(self, mode, dev, ino):
                self.st_mode = mode
                self.st_dev = dev
                self.st_ino = ino

        def _mock_os_lstat(path):
            if path.endswith(".."):
                # fake the parent as being on a different device
                parent = _os_lstat(path)
                return MockStat(parent.st_mode, parent.st_dev + 1,
                                parent.st_ino)
            else:
                return _os_lstat(path)
        tmpdir = mkdtemp()
        try:
            with patch("os.lstat", _mock_os_lstat):
                self.assertTrue(utils.ismount(tmpdir))
        finally:
            shutil.rmtree(tmpdir)
    def test_ismount_successes_ino(self):
        """A path whose inode matches its parent's (as for the root of a
        mount) is a mount point."""
        _os_lstat = os.lstat

        class MockStat(object):
            def __init__(self, mode, dev, ino):
                self.st_mode = mode
                self.st_dev = dev
                self.st_ino = ino

        def _mock_os_lstat(path):
            if path.endswith(".."):
                return _os_lstat(path)
            else:
                # NOTE(review): the parent's st_ino is passed as the child's
                # st_dev (and vice versa) -- looks intentional, to trigger
                # the inode-match branch, but worth confirming upstream.
                parent_path = os.path.join(path, "..")
                child = _os_lstat(path)
                parent = _os_lstat(parent_path)
                return MockStat(child.st_mode, parent.st_ino,
                                child.st_dev)
        tmpdir = mkdtemp()
        try:
            with patch("os.lstat", _mock_os_lstat):
                self.assertTrue(utils.ismount(tmpdir))
        finally:
            shutil.rmtree(tmpdir)
    def test_parse_content_type(self):
        """parse_content_type splits a Content-Type header into the media
        type and a list of (key, value) parameter tuples, preserving quoted
        values and tolerating valueless parameters."""
        self.assertEqual(utils.parse_content_type('text/plain'),
                         ('text/plain', []))
        self.assertEqual(utils.parse_content_type('text/plain;charset=utf-8'),
                         ('text/plain', [('charset', 'utf-8')]))
        self.assertEqual(
            utils.parse_content_type('text/plain;hello="world";charset=utf-8'),
            ('text/plain', [('hello', '"world"'), ('charset', 'utf-8')]))
        self.assertEqual(
            utils.parse_content_type('text/plain; hello="world"; a=b'),
            ('text/plain', [('hello', '"world"'), ('a', 'b')]))
        # escaped quote inside a quoted value survives parsing
        self.assertEqual(
            utils.parse_content_type(r'text/plain; x="\""; a=b'),
            ('text/plain', [('x', r'"\""'), ('a', 'b')]))
        # a parameter with no '=' yields an empty value
        self.assertEqual(
            utils.parse_content_type(r'text/plain; x; a=b'),
            ('text/plain', [('x', ''), ('a', 'b')]))
        self.assertEqual(
            utils.parse_content_type(r'text/plain; x="\""; a'),
            ('text/plain', [('x', r'"\""'), ('a', '')]))
    def test_override_bytes_from_content_type(self):
        """A valid swift_bytes parameter overrides 'bytes' and is stripped
        from the content type; an unparsable one is ignored but still
        stripped."""
        listing_dict = {
            'bytes': 1234, 'hash': 'asdf', 'name': 'zxcv',
            'content_type': 'text/plain; hello="world"; swift_bytes=15'}
        utils.override_bytes_from_content_type(listing_dict,
                                               logger=FakeLogger())
        self.assertEqual(listing_dict['bytes'], 15)
        self.assertEqual(listing_dict['content_type'],
                         'text/plain;hello="world"')
        listing_dict = {
            'bytes': 1234, 'hash': 'asdf', 'name': 'zxcv',
            'content_type': 'text/plain; hello="world"; swift_bytes=hey'}
        utils.override_bytes_from_content_type(listing_dict,
                                               logger=FakeLogger())
        # non-numeric swift_bytes leaves 'bytes' untouched
        self.assertEqual(listing_dict['bytes'], 1234)
        self.assertEqual(listing_dict['content_type'],
                         'text/plain;hello="world"')
    def test_extract_swift_bytes(self):
        """extract_swift_bytes removes every swift_bytes parameter and
        returns the last one's value (or None if absent)."""
        scenarios = {
            # maps input value -> expected returned tuple
            '': ('', None),
            'text/plain': ('text/plain', None),
            'text/plain; other=thing': ('text/plain;other=thing', None),
            'text/plain; swift_bytes=123': ('text/plain', '123'),
            'text/plain; other=thing;swift_bytes=123':
                ('text/plain;other=thing', '123'),
            'text/plain; swift_bytes=123; other=thing':
                ('text/plain;other=thing', '123'),
            'text/plain; swift_bytes=123; swift_bytes=456':
                ('text/plain', '456'),
            'text/plain; swift_bytes=123; other=thing;swift_bytes=456':
                ('text/plain;other=thing', '456')}
        for test_value, expected in scenarios.items():
            self.assertEqual(expected, utils.extract_swift_bytes(test_value))
    def test_clean_content_type(self):
        """clean_content_type strips a trailing swift_bytes parameter from a
        Content-Type value, leaving everything else untouched."""
        subtests = {
            '': '', 'text/plain': 'text/plain',
            'text/plain; someother=thing': 'text/plain; someother=thing',
            'text/plain; swift_bytes=123': 'text/plain',
            'text/plain; someother=thing; swift_bytes=123':
                'text/plain; someother=thing',
            # Since Swift always tacks on the swift_bytes, clean_content_type()
            # only strips swift_bytes if it's last. The next item simply shows
            # that if for some other odd reason it's not last,
            # clean_content_type() will not remove it from the header.
            'text/plain; swift_bytes=123; someother=thing':
                'text/plain; swift_bytes=123; someother=thing'}
        for before, after in subtests.items():
            self.assertEqual(utils.clean_content_type(before), after)
def test_quote(self):
res = utils.quote('/v1/a/c3/subdirx/')
assert res == '/v1/a/c3/subdirx/'
res = utils.quote('/v1/a&b/c3/subdirx/')
assert res == '/v1/a%26b/c3/subdirx/'
res = utils.quote('/v1/a&b/c3/subdirx/', safe='&')
assert res == '%2Fv1%2Fa&b%2Fc3%2Fsubdirx%2F'
unicode_sample = u'\uc77c\uc601'
account = 'abc_' + unicode_sample
valid_utf8_str = utils.get_valid_utf8_str(account)
account = 'abc_' + unicode_sample.encode('utf-8')[::-1]
invalid_utf8_str = utils.get_valid_utf8_str(account)
self.assertEqual('abc_%EC%9D%BC%EC%98%81',
utils.quote(valid_utf8_str))
self.assertEqual('abc_%EF%BF%BD%EF%BF%BD%EC%BC%9D%EF%BF%BD',
utils.quote(invalid_utf8_str))
def test_get_hmac(self):
self.assertEqual(
utils.get_hmac('GET', '/path', 1, 'abc'),
'b17f6ff8da0e251737aa9e3ee69a881e3e092e2f')
    def test_get_policy_index(self):
        """get_policy_index prefers the backend policy header from either
        the response (container GET) or the request (object PUT)."""
        # Account has no information about a policy
        req = Request.blank(
            '/sda1/p/a',
            environ={'REQUEST_METHOD': 'GET'})
        res = Response()
        self.assertIsNone(utils.get_policy_index(req.headers,
                                                 res.headers))
        # The policy of a container can be specified by the response header
        req = Request.blank(
            '/sda1/p/a/c',
            environ={'REQUEST_METHOD': 'GET'})
        res = Response(headers={'X-Backend-Storage-Policy-Index': '1'})
        self.assertEqual('1', utils.get_policy_index(req.headers,
                                                     res.headers))
        # The policy of an object to be created can be specified by the request
        # header
        req = Request.blank(
            '/sda1/p/a/c/o',
            environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Backend-Storage-Policy-Index': '2'})
        res = Response()
        self.assertEqual('2', utils.get_policy_index(req.headers,
                                                     res.headers))
    def test_get_log_line(self):
        """get_log_line formats an Apache-style access-log line; time and
        pid are pinned via mocks so the expected string is exact."""
        req = Request.blank(
            '/sda1/p/a/c/o',
            environ={'REQUEST_METHOD': 'HEAD', 'REMOTE_ADDR': '1.2.3.4'})
        res = Response()
        trans_time = 1.2
        additional_info = 'some information'
        server_pid = 1234
        exp_line = '1.2.3.4 - - [01/Jan/1970:02:46:41 +0000] "HEAD ' \
            '/sda1/p/a/c/o" 200 - "-" "-" "-" 1.2000 "some information" 1234 -'
        with mock.patch(
                'time.gmtime',
                mock.MagicMock(side_effect=[time.gmtime(10001.0)])):
            with mock.patch(
                    'os.getpid', mock.MagicMock(return_value=server_pid)):
                self.assertEqual(
                    exp_line,
                    utils.get_log_line(req, res, trans_time, additional_info))
    def test_cache_from_env(self):
        """cache_from_env returns env['swift.cache'] when present; when it
        is missing, an error is logged unless allow_none=True."""
        # should never get logging when swift.cache is found
        env = {'swift.cache': 42}
        logger = FakeLogger()
        with mock.patch('swift.common.utils.logging', logger):
            self.assertEqual(42, utils.cache_from_env(env))
        self.assertEqual(0, len(logger.get_lines_for_level('error')))
        logger = FakeLogger()
        with mock.patch('swift.common.utils.logging', logger):
            self.assertEqual(42, utils.cache_from_env(env, False))
        self.assertEqual(0, len(logger.get_lines_for_level('error')))
        logger = FakeLogger()
        with mock.patch('swift.common.utils.logging', logger):
            self.assertEqual(42, utils.cache_from_env(env, True))
        self.assertEqual(0, len(logger.get_lines_for_level('error')))
        # check allow_none controls logging when swift.cache is not found
        err_msg = 'ERROR: swift.cache could not be found in env!'
        env = {}
        logger = FakeLogger()
        with mock.patch('swift.common.utils.logging', logger):
            self.assertIsNone(utils.cache_from_env(env))
        self.assertTrue(err_msg in logger.get_lines_for_level('error'))
        logger = FakeLogger()
        with mock.patch('swift.common.utils.logging', logger):
            self.assertIsNone(utils.cache_from_env(env, False))
        self.assertTrue(err_msg in logger.get_lines_for_level('error'))
        logger = FakeLogger()
        with mock.patch('swift.common.utils.logging', logger):
            self.assertIsNone(utils.cache_from_env(env, True))
        self.assertEqual(0, len(logger.get_lines_for_level('error')))
    def test_fsync_dir(self):
        """fsync_dir opens the directory, fsyncs that fd, closes it; a
        non-directory raises OSError and an EBADF from fsync only logs a
        warning."""
        tempdir = None
        fd = None
        try:
            tempdir = mkdtemp(dir='/tmp')
            fd, temppath = tempfile.mkstemp(dir=tempdir)
            _mock_fsync = mock.Mock()
            _mock_close = mock.Mock()
            with patch('swift.common.utils.fsync', _mock_fsync):
                with patch('os.close', _mock_close):
                    utils.fsync_dir(tempdir)
            self.assertTrue(_mock_fsync.called)
            self.assertTrue(_mock_close.called)
            self.assertTrue(isinstance(_mock_fsync.call_args[0][0], int))
            # the very fd that was fsynced is the one that gets closed
            self.assertEqual(_mock_fsync.call_args[0][0],
                             _mock_close.call_args[0][0])
            # Not a directory - arg is file path
            self.assertRaises(OSError, utils.fsync_dir, temppath)
            logger = FakeLogger()

            def _mock_fsync(fd):
                raise OSError(errno.EBADF, os.strerror(errno.EBADF))
            with patch('swift.common.utils.fsync', _mock_fsync):
                with mock.patch('swift.common.utils.logging', logger):
                    utils.fsync_dir(tempdir)
            self.assertEqual(1, len(logger.get_lines_for_level('warning')))
        finally:
            if fd is not None:
                os.close(fd)
                os.unlink(temppath)
            if tempdir:
                os.rmdir(tempdir)
    def test_renamer_with_fsync_dir(self):
        """renamer fsyncs the parent of every directory it had to create,
        plus the leaf dir; when the target dir exists, only the leaf."""
        tempdir = None
        try:
            tempdir = mkdtemp(dir='/tmp')
            # Simulate part of object path already existing
            part_dir = os.path.join(tempdir, 'objects/1234/')
            os.makedirs(part_dir)
            obj_dir = os.path.join(part_dir, 'aaa', 'a' * 32)
            obj_path = os.path.join(obj_dir, '1425276031.12345.data')
            # Object dir had to be created
            _m_os_rename = mock.Mock()
            _m_fsync_dir = mock.Mock()
            with patch('os.rename', _m_os_rename):
                with patch('swift.common.utils.fsync_dir', _m_fsync_dir):
                    utils.renamer("fake_path", obj_path)
            _m_os_rename.assert_called_once_with('fake_path', obj_path)
            # fsync_dir on parents of all newly create dirs
            self.assertEqual(_m_fsync_dir.call_count, 3)
            # Object dir existed
            _m_os_rename.reset_mock()
            _m_fsync_dir.reset_mock()
            with patch('os.rename', _m_os_rename):
                with patch('swift.common.utils.fsync_dir', _m_fsync_dir):
                    utils.renamer("fake_path", obj_path)
            _m_os_rename.assert_called_once_with('fake_path', obj_path)
            # fsync_dir only on the leaf dir
            self.assertEqual(_m_fsync_dir.call_count, 1)
        finally:
            if tempdir:
                shutil.rmtree(tempdir)
    def test_renamer_when_fsync_is_false(self):
        """With fsync=False, renamer still makedirs and renames but never
        calls fsync_dir."""
        _m_os_rename = mock.Mock()
        _m_fsync_dir = mock.Mock()
        _m_makedirs_count = mock.Mock(return_value=2)
        with patch('os.rename', _m_os_rename):
            with patch('swift.common.utils.fsync_dir', _m_fsync_dir):
                with patch('swift.common.utils.makedirs_count',
                           _m_makedirs_count):
                    utils.renamer("fake_path", "/a/b/c.data", fsync=False)
        _m_makedirs_count.assert_called_once_with("/a/b")
        _m_os_rename.assert_called_once_with('fake_path', "/a/b/c.data")
        self.assertFalse(_m_fsync_dir.called)
    def test_makedirs_count(self):
        """makedirs_count returns how many directories it newly created;
        an existing file at the path raises OSError."""
        tempdir = None
        fd = None  # NOTE(review): vestigial -- fd is closed inline below
        try:
            tempdir = mkdtemp(dir='/tmp')
            os.makedirs(os.path.join(tempdir, 'a/b'))
            # 4 new dirs created
            dirpath = os.path.join(tempdir, 'a/b/1/2/3/4')
            ret = utils.makedirs_count(dirpath)
            self.assertEqual(ret, 4)
            # no new dirs created - dir already exists
            ret = utils.makedirs_count(dirpath)
            self.assertEqual(ret, 0)
            # path exists and is a file
            fd, temppath = tempfile.mkstemp(dir=dirpath)
            os.close(fd)
            self.assertRaises(OSError, utils.makedirs_count, temppath)
        finally:
            if tempdir:
                shutil.rmtree(tempdir)
    def test_modify_priority(self):
        """modify_priority maps nice_priority to setpriority() and
        ionice_class/ionice_priority to the ioprio_set syscall (nr 251 on
        x86_64); unknown classes and a bare ionice_priority are ignored."""
        pid = os.getpid()
        logger = debug_logger()
        called = {}

        def _fake_setpriority(*args):
            called['setpriority'] = args

        def _fake_syscall(*args):
            called['syscall'] = args
        with patch('swift.common.utils._libc_setpriority',
                   _fake_setpriority), \
                patch('swift.common.utils._posix_syscall', _fake_syscall):
            called = {}
            # not set / default
            utils.modify_priority({}, logger)
            self.assertEqual(called, {})
            called = {}
            # just nice
            utils.modify_priority({'nice_priority': '1'}, logger)
            self.assertEqual(called, {'setpriority': (0, pid, 1)})
            called = {}
            # just ionice class uses default priority 0
            utils.modify_priority({'ionice_class': 'IOPRIO_CLASS_RT'}, logger)
            self.assertEqual(called, {'syscall': (251, 1, pid, 1 << 13)})
            called = {}
            # just ionice priority is ignored
            utils.modify_priority({'ionice_priority': '4'}, logger)
            self.assertEqual(called, {})
            called = {}
            # bad ionice class
            utils.modify_priority({'ionice_class': 'class_foo'}, logger)
            self.assertEqual(called, {})
            called = {}
            # ionice class & priority
            utils.modify_priority({
                'ionice_class': 'IOPRIO_CLASS_BE',
                'ionice_priority': '4',
            }, logger)
            # ioprio value packs the class in the top bits: class << 13 | prio
            self.assertEqual(called, {'syscall': (251, 1, pid, 2 << 13 | 4)})
            called = {}
            # all
            utils.modify_priority({
                'nice_priority': '-15',
                'ionice_class': 'IOPRIO_CLASS_IDLE',
                'ionice_priority': '6',
            }, logger)
            self.assertEqual(called, {
                'setpriority': (0, pid, -15),
                'syscall': (251, 1, pid, 3 << 13 | 6),
            })
    def test__NR_ioprio_set(self):
        """NR_ioprio_set returns the syscall number only for supported
        64-bit architectures and raises OSError otherwise."""
        with patch('os.uname', return_value=('', '', '', '', 'x86_64')), \
                patch('platform.architecture', return_value=('64bit', '')):
            self.assertEqual(251, utils.NR_ioprio_set())

        with patch('os.uname', return_value=('', '', '', '', 'x86_64')), \
                patch('platform.architecture', return_value=('32bit', '')):
            self.assertRaises(OSError, utils.NR_ioprio_set)

        with patch('os.uname', return_value=('', '', '', '', 'alpha')), \
                patch('platform.architecture', return_value=('64bit', '')):
            self.assertRaises(OSError, utils.NR_ioprio_set)
    @requires_o_tmpfile_support
    def test_link_fd_to_path_linkat_success(self):
        """An anonymous O_TMPFILE fd linked into place becomes a readable
        file with the written contents; fsync_dir is called twice."""
        tempdir = mkdtemp(dir='/tmp')
        fd = os.open(tempdir, utils.O_TMPFILE | os.O_WRONLY)
        data = "I'm whatever Gotham needs me to be"
        _m_fsync_dir = mock.Mock()
        try:
            os.write(fd, data)
            # fd is O_WRONLY
            self.assertRaises(OSError, os.read, fd, 1)
            file_path = os.path.join(tempdir, uuid4().hex)
            with mock.patch('swift.common.utils.fsync_dir', _m_fsync_dir):
                utils.link_fd_to_path(fd, file_path, 1)
            with open(file_path, 'r') as f:
                self.assertEqual(f.read(), data)
            self.assertEqual(_m_fsync_dir.call_count, 2)
        finally:
            os.close(fd)
            shutil.rmtree(tempdir)
    @requires_o_tmpfile_support
    def test_link_fd_to_path_target_exists(self):
        """Linking over an existing file replaces its contents."""
        tempdir = mkdtemp(dir='/tmp')
        # Create and write to a file
        fd, path = tempfile.mkstemp(dir=tempdir)
        os.write(fd, "hello world")
        os.fsync(fd)
        os.close(fd)
        self.assertTrue(os.path.exists(path))
        fd = os.open(tempdir, utils.O_TMPFILE | os.O_WRONLY)
        try:
            os.write(fd, "bye world")
            os.fsync(fd)
            utils.link_fd_to_path(fd, path, 0, fsync=False)
            # Original file now should have been over-written
            with open(path, 'r') as f:
                self.assertEqual(f.read(), "bye world")
        finally:
            os.close(fd)
            shutil.rmtree(tempdir)
@requires_o_tmpfile_support
def test_link_fd_to_path_errno_not_EEXIST_or_ENOENT(self):
_m_linkat = mock.Mock(
side_effect=IOError(errno.EACCES, os.strerror(errno.EACCES)))
with mock.patch('swift.common.utils.linkat', _m_linkat):
try:
utils.link_fd_to_path(0, '/path', 1)
except IOError as err:
self.assertEqual(err.errno, errno.EACCES)
else:
self.fail("Expecting IOError exception")
self.assertTrue(_m_linkat.called)
    @requires_o_tmpfile_support
    def test_linkat_race_dir_not_exists(self):
        """If the target directory vanishes between open and link,
        link_fd_to_path recreates it and still links the file."""
        tempdir = mkdtemp(dir='/tmp')
        target_dir = os.path.join(tempdir, uuid4().hex)
        target_path = os.path.join(target_dir, uuid4().hex)
        os.mkdir(target_dir)
        fd = os.open(target_dir, utils.O_TMPFILE | os.O_WRONLY)
        # Simulating directory deletion by other backend process
        os.rmdir(target_dir)
        self.assertFalse(os.path.exists(target_dir))
        try:
            utils.link_fd_to_path(fd, target_path, 1)
            self.assertTrue(os.path.exists(target_dir))
            self.assertTrue(os.path.exists(target_path))
        finally:
            os.close(fd)
            shutil.rmtree(tempdir)
    def test_safe_json_loads(self):
        """safe_json_loads never raises: valid JSON parses, everything else
        (None, numbers, malformed strings) returns None."""
        expectations = {
            None: None,
            '': None,
            0: None,
            1: None,
            '"asdf"': 'asdf',
            '[]': [],
            '{}': {},
            "{'foo': 'bar'}": None,
            '{"foo": "bar"}': {'foo': 'bar'},
        }
        failures = []
        for value, expected in expectations.items():
            try:
                result = utils.safe_json_loads(value)
            except Exception as e:
                # it's called safe, if it blows up the test blows up
                self.fail('%r caused safe method to throw %r!' % (
                    value, e))
            try:
                self.assertEqual(expected, result)
            except AssertionError:
                # collect all mismatches so one failure doesn't hide others
                failures.append('%r => %r (expected %r)' % (
                    value, result, expected))
        if failures:
            self.fail('Invalid results from pure function:\n%s' %
                      '\n'.join(failures))
class ResellerConfReader(unittest.TestCase):
    """Tests for utils.config_read_reseller_options: parsing of
    reseller_prefix lists and their per-prefix role/group options."""

    def setUp(self):
        # defaults applied to any prefix with no explicit options
        self.default_rules = {'operator_roles': ['admin', 'swiftoperator'],
                              'service_roles': [],
                              'require_group': ''}

    def test_defaults(self):
        """No config yields the single 'AUTH_' prefix with defaults."""
        conf = {}
        prefixes, options = utils.config_read_reseller_options(
            conf, self.default_rules)
        self.assertEqual(prefixes, ['AUTH_'])
        self.assertEqual(options['AUTH_'], self.default_rules)

    def test_same_as_default(self):
        """Explicitly configuring the defaults changes nothing."""
        conf = {'reseller_prefix': 'AUTH',
                'operator_roles': 'admin, swiftoperator'}
        prefixes, options = utils.config_read_reseller_options(
            conf, self.default_rules)
        self.assertEqual(prefixes, ['AUTH_'])
        self.assertEqual(options['AUTH_'], self.default_rules)

    def test_single_blank_reseller(self):
        """An empty reseller_prefix means the '' (no-prefix) reseller."""
        conf = {'reseller_prefix': ''}
        prefixes, options = utils.config_read_reseller_options(
            conf, self.default_rules)
        self.assertEqual(prefixes, [''])
        self.assertEqual(options[''], self.default_rules)

    def test_single_blank_reseller_with_conf(self):
        """Options for the blank prefix are keyed with a literal '' pair."""
        conf = {'reseller_prefix': '',
                "''operator_roles": 'role1, role2'}
        prefixes, options = utils.config_read_reseller_options(
            conf, self.default_rules)
        self.assertEqual(prefixes, [''])
        self.assertEqual(options[''].get('operator_roles'),
                         ['role1', 'role2'])
        self.assertEqual(options[''].get('service_roles'),
                         self.default_rules.get('service_roles'))
        self.assertEqual(options[''].get('require_group'),
                         self.default_rules.get('require_group'))

    def test_multiple_same_resellers(self):
        """Duplicate prefixes are de-duplicated, order preserved."""
        conf = {'reseller_prefix': " '' , '' "}
        prefixes, options = utils.config_read_reseller_options(
            conf, self.default_rules)
        self.assertEqual(prefixes, [''])

        conf = {'reseller_prefix': '_, _'}
        prefixes, options = utils.config_read_reseller_options(
            conf, self.default_rules)
        self.assertEqual(prefixes, ['_'])

        conf = {'reseller_prefix': 'AUTH, PRE2, AUTH, PRE2'}
        prefixes, options = utils.config_read_reseller_options(
            conf, self.default_rules)
        self.assertEqual(prefixes, ['AUTH_', 'PRE2_'])

    def test_several_resellers_with_conf(self):
        """Each prefix gets its own PREFIX_-keyed options; unset options
        fall back to the defaults."""
        conf = {'reseller_prefix': 'PRE1, PRE2',
                'PRE1_operator_roles': 'role1, role2',
                'PRE1_service_roles': 'role3, role4',
                'PRE2_operator_roles': 'role5',
                'PRE2_service_roles': 'role6',
                'PRE2_require_group': 'pre2_group'}
        prefixes, options = utils.config_read_reseller_options(
            conf, self.default_rules)
        self.assertEqual(prefixes, ['PRE1_', 'PRE2_'])
        self.assertEqual(set(['role1', 'role2']),
                         set(options['PRE1_'].get('operator_roles')))
        self.assertEqual(['role5'],
                         options['PRE2_'].get('operator_roles'))
        self.assertEqual(set(['role3', 'role4']),
                         set(options['PRE1_'].get('service_roles')))
        self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
        self.assertEqual('', options['PRE1_'].get('require_group'))
        self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))

    def test_several_resellers_first_blank(self):
        """The blank prefix may come first in a multi-prefix list."""
        conf = {'reseller_prefix': " '' , PRE2",
                "''operator_roles": 'role1, role2',
                "''service_roles": 'role3, role4',
                'PRE2_operator_roles': 'role5',
                'PRE2_service_roles': 'role6',
                'PRE2_require_group': 'pre2_group'}
        prefixes, options = utils.config_read_reseller_options(
            conf, self.default_rules)
        self.assertEqual(prefixes, ['', 'PRE2_'])
        self.assertEqual(set(['role1', 'role2']),
                         set(options[''].get('operator_roles')))
        self.assertEqual(['role5'],
                         options['PRE2_'].get('operator_roles'))
        self.assertEqual(set(['role3', 'role4']),
                         set(options[''].get('service_roles')))
        self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
        self.assertEqual('', options[''].get('require_group'))
        self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))

    def test_several_resellers_with_blank_comma(self):
        """A blank prefix mixed between named ones keeps all three."""
        conf = {'reseller_prefix': "AUTH , '', PRE2",
                "''operator_roles": 'role1, role2',
                "''service_roles": 'role3, role4',
                'PRE2_operator_roles': 'role5',
                'PRE2_service_roles': 'role6',
                'PRE2_require_group': 'pre2_group'}
        prefixes, options = utils.config_read_reseller_options(
            conf, self.default_rules)
        self.assertEqual(prefixes, ['AUTH_', '', 'PRE2_'])
        self.assertEqual(set(['admin', 'swiftoperator']),
                         set(options['AUTH_'].get('operator_roles')))
        self.assertEqual(set(['role1', 'role2']),
                         set(options[''].get('operator_roles')))
        self.assertEqual(['role5'],
                         options['PRE2_'].get('operator_roles'))
        self.assertEqual([],
                         options['AUTH_'].get('service_roles'))
        self.assertEqual(set(['role3', 'role4']),
                         set(options[''].get('service_roles')))
        self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
        self.assertEqual('', options['AUTH_'].get('require_group'))
        self.assertEqual('', options[''].get('require_group'))
        self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))

    def test_stray_comma(self):
        """An empty element from a doubled comma is dropped (it is not the
        quoted-blank prefix)."""
        conf = {'reseller_prefix': "AUTH ,, PRE2",
                "''operator_roles": 'role1, role2',
                "''service_roles": 'role3, role4',
                'PRE2_operator_roles': 'role5',
                'PRE2_service_roles': 'role6',
                'PRE2_require_group': 'pre2_group'}
        prefixes, options = utils.config_read_reseller_options(
            conf, self.default_rules)
        self.assertEqual(prefixes, ['AUTH_', 'PRE2_'])
        self.assertEqual(set(['admin', 'swiftoperator']),
                         set(options['AUTH_'].get('operator_roles')))
        self.assertEqual(['role5'],
                         options['PRE2_'].get('operator_roles'))
        self.assertEqual([],
                         options['AUTH_'].get('service_roles'))
        self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
        self.assertEqual('', options['AUTH_'].get('require_group'))
        self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))

    def test_multiple_stray_commas_resellers(self):
        """A value of only commas/spaces collapses to the blank prefix."""
        conf = {'reseller_prefix': ' , , ,'}
        prefixes, options = utils.config_read_reseller_options(
            conf, self.default_rules)
        self.assertEqual(prefixes, [''])
        self.assertEqual(options[''], self.default_rules)

    def test_unprefixed_options(self):
        """Unprefixed role/group options apply to the first prefix and the
        blank prefix, but not to later named prefixes with their own."""
        conf = {'reseller_prefix': "AUTH , '', PRE2",
                "operator_roles": 'role1, role2',
                "service_roles": 'role3, role4',
                'require_group': 'auth_blank_group',
                'PRE2_operator_roles': 'role5',
                'PRE2_service_roles': 'role6',
                'PRE2_require_group': 'pre2_group'}
        prefixes, options = utils.config_read_reseller_options(
            conf, self.default_rules)
        self.assertEqual(prefixes, ['AUTH_', '', 'PRE2_'])
        self.assertEqual(set(['role1', 'role2']),
                         set(options['AUTH_'].get('operator_roles')))
        self.assertEqual(set(['role1', 'role2']),
                         set(options[''].get('operator_roles')))
        self.assertEqual(['role5'],
                         options['PRE2_'].get('operator_roles'))
        self.assertEqual(set(['role3', 'role4']),
                         set(options['AUTH_'].get('service_roles')))
        self.assertEqual(set(['role3', 'role4']),
                         set(options[''].get('service_roles')))
        self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
        self.assertEqual('auth_blank_group',
                         options['AUTH_'].get('require_group'))
        self.assertEqual('auth_blank_group', options[''].get('require_group'))
        self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
class TestUnlinkOlder(unittest.TestCase):
    """Tests for utils.unlink_older_than / unlink_paths_older_than.

    Fixes in ``touch``: the file handle returned by ``open(fpath, 'w')``
    was leaked (never closed), and ``mtime or time.time()`` silently
    replaced an explicit ``mtime=0`` with the current time.
    """

    def setUp(self):
        self.tempdir = mkdtemp()
        # maps path -> the mtime we want os.path.getmtime to report
        self.mtime = {}

    def tearDown(self):
        rmtree(self.tempdir, ignore_errors=True)

    def touch(self, fpath, mtime=None):
        """Create an empty file and record its (possibly faked) mtime."""
        # Only None means "use now", so an explicit mtime of 0 is honored.
        self.mtime[fpath] = time.time() if mtime is None else mtime
        # Close the handle instead of leaking it.
        with open(fpath, 'w'):
            pass

    @contextlib.contextmanager
    def high_resolution_getmtime(self):
        """Patch os.path.getmtime to return the recorded mtimes, avoiding
        coarse filesystem timestamp granularity."""
        orig_getmtime = os.path.getmtime

        def mock_getmtime(fpath):
            mtime = self.mtime.get(fpath)
            if mtime is None:
                mtime = orig_getmtime(fpath)
            return mtime

        with mock.patch('os.path.getmtime', mock_getmtime):
            yield

    def test_unlink_older_than_path_not_exists(self):
        path = os.path.join(self.tempdir, 'does-not-exist')
        # just make sure it doesn't blow up
        utils.unlink_older_than(path, time.time())

    def test_unlink_older_than_file(self):
        """Passing a file instead of a directory raises ENOTDIR."""
        path = os.path.join(self.tempdir, 'some-file')
        self.touch(path)
        with self.assertRaises(OSError) as ctx:
            utils.unlink_older_than(path, time.time())
        self.assertEqual(ctx.exception.errno, errno.ENOTDIR)

    def test_unlink_older_than_now(self):
        """Everything older than 'now' is removed."""
        self.touch(os.path.join(self.tempdir, 'test'))
        with self.high_resolution_getmtime():
            utils.unlink_older_than(self.tempdir, time.time())
        self.assertEqual([], os.listdir(self.tempdir))

    def test_unlink_not_old_enough(self):
        """Files newer than the cutoff survive."""
        start = time.time()
        self.touch(os.path.join(self.tempdir, 'test'))
        with self.high_resolution_getmtime():
            utils.unlink_older_than(self.tempdir, start)
        self.assertEqual(['test'], os.listdir(self.tempdir))

    def test_unlink_mixed(self):
        """Only entries older than the cutoff are removed."""
        self.touch(os.path.join(self.tempdir, 'first'))
        cutoff = time.time()
        self.touch(os.path.join(self.tempdir, 'second'))
        with self.high_resolution_getmtime():
            utils.unlink_older_than(self.tempdir, cutoff)
        self.assertEqual(['second'], os.listdir(self.tempdir))

    def test_unlink_paths(self):
        """unlink_paths_older_than only touches the paths it is given."""
        paths = []
        for item in ('first', 'second', 'third'):
            path = os.path.join(self.tempdir, item)
            self.touch(path)
            paths.append(path)
        # don't unlink everyone
        with self.high_resolution_getmtime():
            utils.unlink_paths_older_than(paths[:2], time.time())
        self.assertEqual(['third'], os.listdir(self.tempdir))

    def test_unlink_empty_paths(self):
        # just make sure it doesn't blow up
        utils.unlink_paths_older_than([], time.time())

    def test_unlink_not_exists_paths(self):
        path = os.path.join(self.tempdir, 'does-not-exist')
        # just make sure it doesn't blow up
        utils.unlink_paths_older_than([path], time.time())
class TestSwiftInfo(unittest.TestCase):
    def tearDown(self):
        # reset the module-level registries so tests don't leak into
        # each other
        utils._swift_info = {}
        utils._swift_admin_info = {}
    def test_register_swift_info(self):
        """register_swift_info accumulates options under 'swift' (default)
        or a named capability section; 'admin', 'disallowed_sections' and
        dotted keys are rejected."""
        utils.register_swift_info(foo='bar')
        utils.register_swift_info(lorem='ipsum')
        utils.register_swift_info('cap1', cap1_foo='cap1_bar')
        utils.register_swift_info('cap1', cap1_lorem='cap1_ipsum')

        self.assertTrue('swift' in utils._swift_info)
        self.assertTrue('foo' in utils._swift_info['swift'])
        self.assertEqual(utils._swift_info['swift']['foo'], 'bar')
        self.assertTrue('lorem' in utils._swift_info['swift'])
        self.assertEqual(utils._swift_info['swift']['lorem'], 'ipsum')

        self.assertTrue('cap1' in utils._swift_info)
        self.assertTrue('cap1_foo' in utils._swift_info['cap1'])
        self.assertEqual(utils._swift_info['cap1']['cap1_foo'], 'cap1_bar')
        self.assertTrue('cap1_lorem' in utils._swift_info['cap1'])
        self.assertEqual(utils._swift_info['cap1']['cap1_lorem'], 'cap1_ipsum')

        # reserved section names are refused
        self.assertRaises(ValueError,
                          utils.register_swift_info, 'admin', foo='bar')

        self.assertRaises(ValueError,
                          utils.register_swift_info, 'disallowed_sections',
                          disallowed_sections=None)

        utils.register_swift_info('goodkey', foo='5.6')
        # '.' in a key is rejected whether passed directly or via **kwargs
        self.assertRaises(ValueError,
                          utils.register_swift_info, 'bad.key', foo='5.6')
        data = {'bad.key': '5.6'}
        self.assertRaises(ValueError,
                          utils.register_swift_info, 'goodkey', **data)
    def test_get_swift_info(self):
        """Without admin=True, get_swift_info returns only the public
        registry and no 'admin' section."""
        utils._swift_info = {'swift': {'foo': 'bar'},
                             'cap1': {'cap1_foo': 'cap1_bar'}}
        utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}

        info = utils.get_swift_info()

        self.assertTrue('admin' not in info)

        self.assertTrue('swift' in info)
        self.assertTrue('foo' in info['swift'])
        self.assertEqual(utils._swift_info['swift']['foo'], 'bar')

        self.assertTrue('cap1' in info)
        self.assertTrue('cap1_foo' in info['cap1'])
        self.assertEqual(utils._swift_info['cap1']['cap1_foo'], 'cap1_bar')
    def test_get_swift_info_with_disallowed_sections(self):
        """disallowed_sections filters those capability sections out of the
        public info."""
        utils._swift_info = {'swift': {'foo': 'bar'},
                             'cap1': {'cap1_foo': 'cap1_bar'},
                             'cap2': {'cap2_foo': 'cap2_bar'},
                             'cap3': {'cap3_foo': 'cap3_bar'}}
        utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}

        info = utils.get_swift_info(disallowed_sections=['cap1', 'cap3'])

        self.assertTrue('admin' not in info)

        self.assertTrue('swift' in info)
        self.assertTrue('foo' in info['swift'])
        self.assertEqual(info['swift']['foo'], 'bar')

        self.assertTrue('cap1' not in info)

        self.assertTrue('cap2' in info)
        self.assertTrue('cap2_foo' in info['cap2'])
        self.assertEqual(info['cap2']['cap2_foo'], 'cap2_bar')

        self.assertTrue('cap3' not in info)
    def test_register_swift_admin_info(self):
        """admin=True routes registrations into the admin registry and
        keeps them out of the public one."""
        utils.register_swift_info(admin=True, admin_foo='admin_bar')
        utils.register_swift_info(admin=True, admin_lorem='admin_ipsum')
        utils.register_swift_info('cap1', admin=True, ac1_foo='ac1_bar')
        utils.register_swift_info('cap1', admin=True, ac1_lorem='ac1_ipsum')

        self.assertTrue('swift' in utils._swift_admin_info)
        self.assertTrue('admin_foo' in utils._swift_admin_info['swift'])
        self.assertEqual(
            utils._swift_admin_info['swift']['admin_foo'], 'admin_bar')
        self.assertTrue('admin_lorem' in utils._swift_admin_info['swift'])
        self.assertEqual(
            utils._swift_admin_info['swift']['admin_lorem'], 'admin_ipsum')

        self.assertTrue('cap1' in utils._swift_admin_info)
        self.assertTrue('ac1_foo' in utils._swift_admin_info['cap1'])
        self.assertEqual(
            utils._swift_admin_info['cap1']['ac1_foo'], 'ac1_bar')
        self.assertTrue('ac1_lorem' in utils._swift_admin_info['cap1'])
        self.assertEqual(
            utils._swift_admin_info['cap1']['ac1_lorem'], 'ac1_ipsum')

        # nothing leaked into the public registry
        self.assertTrue('swift' not in utils._swift_info)
        self.assertTrue('cap1' not in utils._swift_info)
def test_get_swift_admin_info(self):
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar'}}
utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = utils.get_swift_info(admin=True)
self.assertTrue('admin' in info)
self.assertTrue('admin_cap1' in info['admin'])
self.assertTrue('ac1_foo' in info['admin']['admin_cap1'])
self.assertEqual(info['admin']['admin_cap1']['ac1_foo'], 'ac1_bar')
self.assertTrue('swift' in info)
self.assertTrue('foo' in info['swift'])
self.assertEqual(utils._swift_info['swift']['foo'], 'bar')
self.assertTrue('cap1' in info)
self.assertTrue('cap1_foo' in info['cap1'])
self.assertEqual(utils._swift_info['cap1']['cap1_foo'], 'cap1_bar')
def test_get_swift_admin_info_with_disallowed_sections(self):
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar'},
'cap2': {'cap2_foo': 'cap2_bar'},
'cap3': {'cap3_foo': 'cap3_bar'}}
utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = utils.get_swift_info(
admin=True, disallowed_sections=['cap1', 'cap3'])
self.assertTrue('admin' in info)
self.assertTrue('admin_cap1' in info['admin'])
self.assertTrue('ac1_foo' in info['admin']['admin_cap1'])
self.assertEqual(info['admin']['admin_cap1']['ac1_foo'], 'ac1_bar')
self.assertTrue('disallowed_sections' in info['admin'])
self.assertTrue('cap1' in info['admin']['disallowed_sections'])
self.assertTrue('cap2' not in info['admin']['disallowed_sections'])
self.assertTrue('cap3' in info['admin']['disallowed_sections'])
self.assertTrue('swift' in info)
self.assertTrue('foo' in info['swift'])
self.assertEqual(info['swift']['foo'], 'bar')
self.assertTrue('cap1' not in info)
self.assertTrue('cap2' in info)
self.assertTrue('cap2_foo' in info['cap2'])
self.assertEqual(info['cap2']['cap2_foo'], 'cap2_bar')
self.assertTrue('cap3' not in info)
def test_get_swift_admin_info_with_disallowed_sub_sections(self):
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar',
'cap1_moo': 'cap1_baa'},
'cap2': {'cap2_foo': 'cap2_bar'},
'cap3': {'cap2_foo': 'cap2_bar'},
'cap4': {'a': {'b': {'c': 'c'},
'b.c': 'b.c'}}}
utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = utils.get_swift_info(
admin=True, disallowed_sections=['cap1.cap1_foo', 'cap3',
'cap4.a.b.c'])
self.assertTrue('cap3' not in info)
self.assertEqual(info['cap1']['cap1_moo'], 'cap1_baa')
self.assertTrue('cap1_foo' not in info['cap1'])
self.assertTrue('c' not in info['cap4']['a']['b'])
self.assertEqual(info['cap4']['a']['b.c'], 'b.c')
    def test_get_swift_info_with_unmatched_disallowed_sections(self):
        # Disallowed paths that don't match any registered section, or
        # that descend past a leaf value, must be silently ignored.
        cap1 = {'cap1_foo': 'cap1_bar',
                'cap1_moo': 'cap1_baa'}
        utils._swift_info = {'swift': {'foo': 'bar'},
                             'cap1': cap1}
        # expect no exceptions
        info = utils.get_swift_info(
            disallowed_sections=['cap2.cap1_foo', 'cap1.no_match',
                                 'cap1.cap1_foo.no_match.no_match'])
        self.assertEqual(info['cap1'], cap1)
class TestFileLikeIter(unittest.TestCase):
    """Tests for utils.FileLikeIter: a file-like object wrapping an
    iterable of byte chunks (iteration, read, readline(s), close)."""
    def test_iter_file_iter(self):
        # Iterating the wrapper yields the underlying chunks unchanged.
        in_iter = [b'abc', b'de', b'fghijk', b'l']
        chunks = []
        for chunk in utils.FileLikeIter(in_iter):
            chunks.append(chunk)
        self.assertEqual(chunks, in_iter)
    def test_next(self):
        # next() yields chunk-by-chunk until StopIteration.
        in_iter = [b'abc', b'de', b'fghijk', b'l']
        chunks = []
        iter_file = utils.FileLikeIter(in_iter)
        while True:
            try:
                chunk = next(iter_file)
            except StopIteration:
                break
            chunks.append(chunk)
        self.assertEqual(chunks, in_iter)
    def test_read(self):
        # read() with no size returns the whole concatenated stream.
        in_iter = [b'abc', b'de', b'fghijk', b'l']
        iter_file = utils.FileLikeIter(in_iter)
        self.assertEqual(iter_file.read(), b''.join(in_iter))
    def test_read_with_size(self):
        # read(n) returns at most n bytes regardless of chunk layout,
        # and the reassembled stream is byte-identical.
        in_iter = [b'abc', b'de', b'fghijk', b'l']
        chunks = []
        iter_file = utils.FileLikeIter(in_iter)
        while True:
            chunk = iter_file.read(2)
            if not chunk:
                break
            self.assertTrue(len(chunk) <= 2)
            chunks.append(chunk)
        self.assertEqual(b''.join(chunks), b''.join(in_iter))
    def test_read_with_size_zero(self):
        # makes little sense, but file supports it, so...
        self.assertEqual(utils.FileLikeIter(b'abc').read(0), b'')
    def test_readline(self):
        # readline() must honor newlines even when they straddle chunk
        # boundaries; the expected list is the newline-split stream with
        # '\n' re-appended to every line except the trailing fragment.
        in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n',
                   b'trailing.']
        lines = []
        iter_file = utils.FileLikeIter(in_iter)
        while True:
            line = iter_file.readline()
            if not line:
                break
            lines.append(line)
        self.assertEqual(
            lines,
            [v if v == b'trailing.' else v + b'\n'
             for v in b''.join(in_iter).split(b'\n')])
    def test_readline2(self):
        # readline(size) stops early at the size limit.
        self.assertEqual(
            utils.FileLikeIter([b'abc', b'def\n']).readline(4),
            b'abcd')
    def test_readline3(self):
        # A line longer than any single chunk is still returned whole.
        self.assertEqual(
            utils.FileLikeIter([b'a' * 1111, b'bc\ndef']).readline(),
            (b'a' * 1111) + b'bc\n')
    def test_readline_with_size(self):
        # readline(2) splits long lines but never merges across '\n'.
        in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n',
                   b'trailing.']
        lines = []
        iter_file = utils.FileLikeIter(in_iter)
        while True:
            line = iter_file.readline(2)
            if not line:
                break
            lines.append(line)
        self.assertEqual(
            lines,
            [b'ab', b'c\n', b'd\n', b'ef', b'g\n', b'h\n', b'ij', b'\n', b'\n',
             b'k\n', b'tr', b'ai', b'li', b'ng', b'.'])
    def test_readlines(self):
        # readlines() with no hint returns every line at once.
        in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n',
                   b'trailing.']
        lines = utils.FileLikeIter(in_iter).readlines()
        self.assertEqual(
            lines,
            [v if v == b'trailing.' else v + b'\n'
             for v in b''.join(in_iter).split(b'\n')])
    def test_readlines_with_size(self):
        # readlines(hint) returns batches of lines bounded by the hint.
        in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n',
                   b'trailing.']
        iter_file = utils.FileLikeIter(in_iter)
        lists_of_lines = []
        while True:
            lines = iter_file.readlines(2)
            if not lines:
                break
            lists_of_lines.append(lines)
        self.assertEqual(
            lists_of_lines,
            [[b'ab'], [b'c\n'], [b'd\n'], [b'ef'], [b'g\n'], [b'h\n'], [b'ij'],
             [b'\n', b'\n'], [b'k\n'], [b'tr'], [b'ai'], [b'li'], [b'ng'],
             [b'.']])
    def test_close(self):
        # After close(), every read operation raises ValueError.
        iter_file = utils.FileLikeIter([b'a', b'b', b'c'])
        self.assertEqual(next(iter_file), b'a')
        iter_file.close()
        self.assertTrue(iter_file.closed)
        self.assertRaises(ValueError, iter_file.next)
        self.assertRaises(ValueError, iter_file.read)
        self.assertRaises(ValueError, iter_file.readline)
        self.assertRaises(ValueError, iter_file.readlines)
        # Just make sure repeated close calls don't raise an Exception
        iter_file.close()
        self.assertTrue(iter_file.closed)
class TestStatsdLogging(unittest.TestCase):
    """White-box tests for the StatsdClient that utils.get_logger wires
    up from the log_statsd_* configuration options."""

    def setUp(self):
        def fake_getaddrinfo(host, port, *args):
            # this is what a real getaddrinfo('localhost', port,
            # socket.AF_INET) returned once
            return [(socket.AF_INET,       # address family
                     socket.SOCK_STREAM,   # socket type
                     socket.IPPROTO_TCP,   # socket protocol
                     '',                   # canonical name,
                     ('127.0.0.1', port)),  # socket address
                    (socket.AF_INET,
                     socket.SOCK_DGRAM,
                     socket.IPPROTO_UDP,
                     '',
                     ('127.0.0.1', port))]

        self.real_getaddrinfo = utils.socket.getaddrinfo
        self.getaddrinfo_patcher = mock.patch.object(
            utils.socket, 'getaddrinfo', fake_getaddrinfo)
        self.mock_getaddrinfo = self.getaddrinfo_patcher.start()
        self.addCleanup(self.getaddrinfo_patcher.stop)

    def test_get_logger_statsd_client_not_specified(self):
        logger = utils.get_logger({}, 'some-name', log_route='some-route')
        # white-box construction validation
        self.assertIsNone(logger.logger.statsd_client)

    def test_get_logger_statsd_client_defaults(self):
        logger = utils.get_logger({'log_statsd_host': 'some.host.com'},
                                  'some-name', log_route='some-route')
        # white-box construction validation; assertIsInstance reports the
        # actual type on failure, unlike assertTrue(isinstance(...)).
        self.assertIsInstance(logger.logger.statsd_client,
                              utils.StatsdClient)
        self.assertEqual(logger.logger.statsd_client._host, 'some.host.com')
        self.assertEqual(logger.logger.statsd_client._port, 8125)
        self.assertEqual(logger.logger.statsd_client._prefix, 'some-name.')
        self.assertEqual(logger.logger.statsd_client._default_sample_rate, 1)

        logger.set_statsd_prefix('some-name.more-specific')
        self.assertEqual(logger.logger.statsd_client._prefix,
                         'some-name.more-specific.')
        logger.set_statsd_prefix('')
        self.assertEqual(logger.logger.statsd_client._prefix, '')

    def test_get_logger_statsd_client_non_defaults(self):
        logger = utils.get_logger({
            'log_statsd_host': 'another.host.com',
            'log_statsd_port': '9876',
            'log_statsd_default_sample_rate': '0.75',
            'log_statsd_sample_rate_factor': '0.81',
            'log_statsd_metric_prefix': 'tomato.sauce',
        }, 'some-name', log_route='some-route')
        # The metric prefix always precedes the (mutable) statsd prefix.
        self.assertEqual(logger.logger.statsd_client._prefix,
                         'tomato.sauce.some-name.')
        logger.set_statsd_prefix('some-name.more-specific')
        self.assertEqual(logger.logger.statsd_client._prefix,
                         'tomato.sauce.some-name.more-specific.')
        logger.set_statsd_prefix('')
        self.assertEqual(logger.logger.statsd_client._prefix, 'tomato.sauce.')
        self.assertEqual(logger.logger.statsd_client._host, 'another.host.com')
        self.assertEqual(logger.logger.statsd_client._port, 9876)
        self.assertEqual(logger.logger.statsd_client._default_sample_rate,
                         0.75)
        self.assertEqual(logger.logger.statsd_client._sample_rate_factor,
                         0.81)

    def test_ipv4_or_ipv6_hostname_defaults_to_ipv4(self):
        def stub_getaddrinfo_both_ipv4_and_ipv6(host, port, family, *rest):
            if family == socket.AF_INET:
                return [(socket.AF_INET, 'blah', 'blah', 'blah',
                        ('127.0.0.1', int(port)))]
            elif family == socket.AF_INET6:
                # Implemented so an incorrectly ordered implementation (IPv6
                # then IPv4) would realistically fail.
                return [(socket.AF_INET6, 'blah', 'blah', 'blah',
                        ('::1', int(port), 0, 0))]

        with mock.patch.object(utils.socket, 'getaddrinfo',
                               new=stub_getaddrinfo_both_ipv4_and_ipv6):
            logger = utils.get_logger({
                'log_statsd_host': 'localhost',
                'log_statsd_port': '9876',
            }, 'some-name', log_route='some-route')
        statsd_client = logger.logger.statsd_client

        self.assertEqual(statsd_client._sock_family, socket.AF_INET)
        self.assertEqual(statsd_client._target, ('localhost', 9876))

        got_sock = statsd_client._open_socket()
        self.assertEqual(got_sock.family, socket.AF_INET)

    def test_ipv4_instantiation_and_socket_creation(self):
        logger = utils.get_logger({
            'log_statsd_host': '127.0.0.1',
            'log_statsd_port': '9876',
        }, 'some-name', log_route='some-route')
        statsd_client = logger.logger.statsd_client

        self.assertEqual(statsd_client._sock_family, socket.AF_INET)
        self.assertEqual(statsd_client._target, ('127.0.0.1', 9876))

        got_sock = statsd_client._open_socket()
        self.assertEqual(got_sock.family, socket.AF_INET)

    def test_ipv6_instantiation_and_socket_creation(self):
        # We have to check the given hostname or IP for IPv4/IPv6 on logger
        # instantiation so we don't call getaddrinfo() too often and don't have
        # to call bind() on our socket to detect IPv4/IPv6 on every send.
        #
        # This test uses the real getaddrinfo, so we patch over the mock to
        # put the real one back. If we just stop the mock, then
        # unittest.exit() blows up, but stacking real-fake-real works okay.
        with mock.patch.object(utils.socket, 'getaddrinfo',
                               self.real_getaddrinfo):
            logger = utils.get_logger({
                'log_statsd_host': '::1',
                'log_statsd_port': '9876',
            }, 'some-name', log_route='some-route')
        statsd_client = logger.logger.statsd_client

        self.assertEqual(statsd_client._sock_family, socket.AF_INET6)
        self.assertEqual(statsd_client._target, ('::1', 9876, 0, 0))

        got_sock = statsd_client._open_socket()
        self.assertEqual(got_sock.family, socket.AF_INET6)

    def test_bad_hostname_instantiation(self):
        # An unresolvable hostname must not raise at construction; the
        # client falls back to IPv4 and lets individual sends fail.
        with mock.patch.object(utils.socket, 'getaddrinfo',
                               side_effect=utils.socket.gaierror("whoops")):
            logger = utils.get_logger({
                'log_statsd_host': 'i-am-not-a-hostname-or-ip',
                'log_statsd_port': '9876',
            }, 'some-name', log_route='some-route')
        statsd_client = logger.logger.statsd_client

        self.assertEqual(statsd_client._sock_family, socket.AF_INET)
        self.assertEqual(statsd_client._target,
                         ('i-am-not-a-hostname-or-ip', 9876))

        got_sock = statsd_client._open_socket()
        self.assertEqual(got_sock.family, socket.AF_INET)
        # Maybe the DNS server gets fixed in a bit and it starts working... or
        # maybe the DNS record hadn't propagated yet. In any case, failed
        # statsd sends will warn in the logs until the DNS failure or invalid
        # IP address in the configuration is fixed.

    def test_sending_ipv6(self):
        def fake_getaddrinfo(host, port, *args):
            # this is what a real getaddrinfo('::1', port,
            # socket.AF_INET6) returned once
            return [(socket.AF_INET6,
                     socket.SOCK_STREAM,
                     socket.IPPROTO_TCP,
                     '', ('::1', port, 0, 0)),
                    (socket.AF_INET6,
                     socket.SOCK_DGRAM,
                     socket.IPPROTO_UDP,
                     '',
                     ('::1', port, 0, 0))]

        with mock.patch.object(utils.socket, 'getaddrinfo', fake_getaddrinfo):
            logger = utils.get_logger({
                'log_statsd_host': '::1',
                'log_statsd_port': '9876',
            }, 'some-name', log_route='some-route')
        statsd_client = logger.logger.statsd_client

        fl = FakeLogger()
        statsd_client.logger = fl
        mock_socket = MockUdpSocket()
        statsd_client._open_socket = lambda *_: mock_socket

        logger.increment('tunafish')
        self.assertEqual(fl.get_lines_for_level('warning'), [])
        self.assertEqual(mock_socket.sent,
                         [(b'some-name.tunafish:1|c', ('::1', 9876, 0, 0))])

    def test_no_exception_when_cant_send_udp_packet(self):
        # A failed send must only warn, never propagate to the caller.
        logger = utils.get_logger({'log_statsd_host': 'some.host.com'})
        statsd_client = logger.logger.statsd_client
        fl = FakeLogger()
        statsd_client.logger = fl
        mock_socket = MockUdpSocket(sendto_errno=errno.EPERM)
        statsd_client._open_socket = lambda *_: mock_socket
        logger.increment('tunafish')
        expected = ["Error sending UDP message to ('some.host.com', 8125): "
                    "[Errno 1] test errno 1"]
        self.assertEqual(fl.get_lines_for_level('warning'), expected)

    def test_sample_rates(self):
        logger = utils.get_logger({'log_statsd_host': 'some.host.com'})

        mock_socket = MockUdpSocket()
        # encapsulation? what's that?
        statsd_client = logger.logger.statsd_client
        self.assertIs(statsd_client.random, random.random)

        statsd_client._open_socket = lambda *_: mock_socket

        # random() just above the sample rate: packet is dropped.
        statsd_client.random = lambda: 0.50001
        logger.increment('tribbles', sample_rate=0.5)
        self.assertEqual(len(mock_socket.sent), 0)

        # random() just below: packet is sent, tagged with the rate.
        statsd_client.random = lambda: 0.49999
        logger.increment('tribbles', sample_rate=0.5)
        self.assertEqual(len(mock_socket.sent), 1)

        payload = mock_socket.sent[0][0]
        self.assertTrue(payload.endswith(b"|@0.5"))

    def test_sample_rates_with_sample_rate_factor(self):
        logger = utils.get_logger({
            'log_statsd_host': 'some.host.com',
            'log_statsd_default_sample_rate': '0.82',
            'log_statsd_sample_rate_factor': '0.91',
        })
        # The effective rate is default_sample_rate * sample_rate_factor.
        effective_sample_rate = 0.82 * 0.91

        mock_socket = MockUdpSocket()
        # encapsulation? what's that?
        statsd_client = logger.logger.statsd_client
        self.assertIs(statsd_client.random, random.random)

        statsd_client._open_socket = lambda *_: mock_socket

        statsd_client.random = lambda: effective_sample_rate + 0.001
        logger.increment('tribbles')
        self.assertEqual(len(mock_socket.sent), 0)

        statsd_client.random = lambda: effective_sample_rate - 0.001
        logger.increment('tribbles')
        self.assertEqual(len(mock_socket.sent), 1)

        payload = mock_socket.sent[0][0]
        suffix = "|@%s" % effective_sample_rate
        if six.PY3:
            suffix = suffix.encode('utf-8')
        self.assertTrue(payload.endswith(suffix), payload)

        # An explicit per-call sample_rate is also scaled by the factor.
        effective_sample_rate = 0.587 * 0.91
        statsd_client.random = lambda: effective_sample_rate - 0.001
        logger.increment('tribbles', sample_rate=0.587)
        self.assertEqual(len(mock_socket.sent), 2)

        payload = mock_socket.sent[1][0]
        suffix = "|@%s" % effective_sample_rate
        if six.PY3:
            suffix = suffix.encode('utf-8')
        self.assertTrue(payload.endswith(suffix), payload)

    def test_timing_stats(self):
        class MockController(object):
            def __init__(self, status):
                self.status = status
                self.logger = self
                self.args = ()
                self.called = 'UNKNOWN'

            def timing_since(self, *args):
                self.called = 'timing'
                self.args = args

        @utils.timing_stats()
        def METHOD(controller):
            return Response(status=controller.status)

        # 200 and the client-caused 404/412/416 statuses are all recorded
        # under the plain METHOD.timing metric (collapsing the original
        # four copy-pasted, identical assertion stanzas into one loop).
        for status in (200, 404, 412, 416):
            mock_controller = MockController(status)
            METHOD(mock_controller)
            self.assertEqual(mock_controller.called, 'timing')
            self.assertEqual(len(mock_controller.args), 2)
            self.assertEqual(mock_controller.args[0], 'METHOD.timing')
            self.assertTrue(mock_controller.args[1] > 0)

        # Other error statuses (here 401) are recorded under
        # METHOD.errors.timing instead.
        mock_controller = MockController(401)
        METHOD(mock_controller)
        self.assertEqual(len(mock_controller.args), 2)
        self.assertEqual(mock_controller.called, 'timing')
        self.assertEqual(mock_controller.args[0], 'METHOD.errors.timing')
        self.assertTrue(mock_controller.args[1] > 0)
class UnsafeXrange(object):
    """
    Like xrange(limit), but with extra context switching to screw things up.
    """
    def __init__(self, upper_bound):
        # current: next value to yield.
        # concurrent_calls: number of greenthreads currently inside next().
        # concurrent_call: latched True once overlapping next() calls are
        # observed — the tests use it to prove (non-)serialized access.
        self.current = 0
        self.concurrent_calls = 0
        self.upper_bound = upper_bound
        self.concurrent_call = False
    def __iter__(self):
        return self
    def next(self):
        # If another greenthread is already inside next(), remember it.
        if self.concurrent_calls > 0:
            self.concurrent_call = True
        self.concurrent_calls += 1
        try:
            if self.current >= self.upper_bound:
                raise StopIteration
            else:
                val = self.current
                self.current += 1
                eventlet.sleep()   # yield control
                return val
        finally:
            # Always decrement, even on StopIteration.
            self.concurrent_calls -= 1
    __next__ = next
class TestAffinityKeyFunction(unittest.TestCase):
    """Behavior of utils.affinity_key_function: nodes matching the
    affinity spec sort first, in priority order; all other nodes keep
    their original relative order."""

    def setUp(self):
        placements = [(1, 1), (1, 2), (2, 1), (2, 2),
                      (3, 1), (3, 2), (4, 0), (4, 1)]
        self.nodes = [dict(id=node_id, region=region, zone=zone)
                      for node_id, (region, zone) in enumerate(placements)]

    def _ids_sorted_by(self, spec):
        # Build the key function for `spec`, sort the fixture nodes with
        # it, and return just their ids for easy comparison.
        key_fn = utils.affinity_key_function(spec)
        return [node['id'] for node in sorted(self.nodes, key=key_fn)]

    def test_single_region(self):
        self.assertEqual([4, 5, 0, 1, 2, 3, 6, 7],
                         self._ids_sorted_by("r3=1"))

    def test_bogus_value(self):
        for bad_spec in ("r3", "r3=elephant"):
            self.assertRaises(ValueError,
                              utils.affinity_key_function, bad_spec)

    def test_empty_value(self):
        # Empty's okay, it just means no preference
        key_fn = utils.affinity_key_function("")
        self.assertTrue(callable(key_fn))
        self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7],
                         [node['id']
                          for node in sorted(self.nodes, key=key_fn)])

    def test_all_whitespace_value(self):
        # Empty's okay, it just means no preference
        key_fn = utils.affinity_key_function(" \n")
        self.assertTrue(callable(key_fn))
        self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7],
                         [node['id']
                          for node in sorted(self.nodes, key=key_fn)])

    def test_with_zone_zero(self):
        self.assertEqual([6, 0, 1, 2, 3, 4, 5, 7],
                         self._ids_sorted_by("r4z0=1"))

    def test_multiple(self):
        self.assertEqual([4, 0, 1, 6, 7, 2, 3, 5],
                         self._ids_sorted_by("r1=100, r4=200, r3z1=1"))

    def test_more_specific_after_less_specific(self):
        self.assertEqual([3, 2, 0, 1, 4, 5, 6, 7],
                         self._ids_sorted_by("r2=100, r2z2=50"))
class TestAffinityLocalityPredicate(unittest.TestCase):
    """Behavior of utils.affinity_locality_predicate: builds a node
    filter from a locality spec string, or returns None for an empty
    spec."""

    def setUp(self):
        placements = [(1, 1), (1, 2), (2, 1), (2, 2),
                      (3, 1), (3, 2), (4, 0), (4, 1)]
        self.nodes = [dict(id=node_id, region=region, zone=zone)
                      for node_id, (region, zone) in enumerate(placements)]

    def _matching_ids(self, spec):
        # Assert a callable predicate was produced for `spec` and return
        # the ids of the fixture nodes it accepts.
        pred = utils.affinity_locality_predicate(spec)
        self.assertTrue(callable(pred))
        return [node['id'] for node in self.nodes if pred(node)]

    def test_empty(self):
        # No spec means no predicate at all.
        self.assertTrue(utils.affinity_locality_predicate('') is None)

    def test_region(self):
        self.assertEqual([0, 1], self._matching_ids('r1'))

    def test_zone(self):
        self.assertEqual([0], self._matching_ids('r1z1'))

    def test_multiple(self):
        self.assertEqual([0, 1, 4, 5, 6], self._matching_ids('r1, r3, r4z0'))

    def test_invalid(self):
        for bad_spec in ('falafel', 'r8zQ', 'r2d2', 'r1z1=1'):
            self.assertRaises(ValueError,
                              utils.affinity_locality_predicate, bad_spec)
class TestRateLimitedIterator(unittest.TestCase):
    """Tests for utils.RateLimitedIterator, driven on a patched clock so
    no real wall-clock time is consumed."""
    def run_under_pseudo_time(
            self, func, *args, **kwargs):
        # One-element list so the closures below can mutate the clock
        # (py2-compatible alternative to nonlocal).
        curr_time = [42.0]
        def my_time():
            # Every call advances time slightly so busy-wait loops that
            # poll time.time() still make progress.
            curr_time[0] += 0.001
            return curr_time[0]
        def my_sleep(duration):
            curr_time[0] += 0.001
            curr_time[0] += duration
        with patch('time.time', my_time), \
                patch('eventlet.sleep', my_sleep):
            return func(*args, **kwargs)
    def test_rate_limiting(self):
        def testfunc():
            limited_iterator = utils.RateLimitedIterator(range(9999), 100)
            got = []
            started_at = time.time()
            try:
                # 0.1s of pseudo-time at 100 items/sec.
                while time.time() - started_at < 0.1:
                    got.append(next(limited_iterator))
            except StopIteration:
                pass
            return got
        got = self.run_under_pseudo_time(testfunc)
        # it's 11, not 10, because ratelimiting doesn't apply to the very
        # first element.
        self.assertEqual(len(got), 11)
    def test_rate_limiting_sometimes(self):
        def testfunc():
            limited_iterator = utils.RateLimitedIterator(
                range(9999), 100,
                ratelimit_if=lambda item: item % 23 != 0)
            got = []
            started_at = time.time()
            try:
                while time.time() - started_at < 0.5:
                    got.append(next(limited_iterator))
            except StopIteration:
                pass
            return got
        got = self.run_under_pseudo_time(testfunc)
        # we'd get 51 without the ratelimit_if, but because 0, 23 and 46
        # weren't subject to ratelimiting, we get 54 instead
        self.assertEqual(len(got), 54)
    def test_limit_after(self):
        def testfunc():
            # The first limit_after items are exempt from ratelimiting.
            limited_iterator = utils.RateLimitedIterator(
                range(9999), 100, limit_after=5)
            got = []
            started_at = time.time()
            try:
                while time.time() - started_at < 0.1:
                    got.append(next(limited_iterator))
            except StopIteration:
                pass
            return got
        got = self.run_under_pseudo_time(testfunc)
        # it's 16, not 15, because ratelimiting doesn't apply to the very
        # first element.
        self.assertEqual(len(got), 16)
class TestGreenthreadSafeIterator(unittest.TestCase):
    """GreenthreadSafeIterator must serialize access to an iterator that
    is unsafe to advance from multiple greenthreads concurrently."""

    def increment(self, iterable):
        """Consume `iterable` completely, returning each value plus one."""
        return [value + 1 for value in iterable]

    def test_setup_works(self):
        # it should work without concurrent access
        self.assertEqual([0, 1, 2, 3], list(UnsafeXrange(4)))

        iterable = UnsafeXrange(10)
        pile = eventlet.GreenPile(2)
        for _ in range(2):
            pile.spawn(self.increment, iterable)
        # Drain the pile so both greenthreads finish. (The original code
        # sorted the responses and discarded the result — an obscure way
        # of doing just this.)
        list(pile)
        self.assertTrue(
            iterable.concurrent_call, 'test setup is insufficiently crazy')

    def test_access_is_serialized(self):
        pile = eventlet.GreenPile(2)
        unsafe_iterable = UnsafeXrange(10)
        iterable = utils.GreenthreadSafeIterator(unsafe_iterable)
        for _ in range(2):
            pile.spawn(self.increment, iterable)
        # Flatten both greenthreads' partial results; together they must
        # cover every incremented value exactly once.
        response = sorted(sum([resp for resp in pile], []))
        self.assertEqual(list(range(1, 11)), response)
        self.assertFalse(
            unsafe_iterable.concurrent_call, 'concurrent call occurred')
class TestStatsdLoggingDelegation(unittest.TestCase):
def setUp(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.bind(('localhost', 0))
self.port = self.sock.getsockname()[1]
self.queue = Queue()
self.reader_thread = threading.Thread(target=self.statsd_reader)
self.reader_thread.setDaemon(1)
self.reader_thread.start()
def tearDown(self):
# The "no-op when disabled" test doesn't set up a real logger, so
# create one here so we can tell the reader thread to stop.
if not getattr(self, 'logger', None):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
}, 'some-name')
self.logger.increment('STOP')
self.reader_thread.join(timeout=4)
self.sock.close()
del self.logger
def statsd_reader(self):
while True:
try:
payload = self.sock.recv(4096)
if payload and b'STOP' in payload:
return 42
self.queue.put(payload)
except Exception as e:
sys.stderr.write('statsd_reader thread: %r' % (e,))
break
def _send_and_get(self, sender_fn, *args, **kwargs):
"""
Because the client library may not actually send a packet with
sample_rate < 1, we keep trying until we get one through.
"""
got = None
while not got:
sender_fn(*args, **kwargs)
try:
got = self.queue.get(timeout=0.5)
except Empty:
pass
return got
def assertStat(self, expected, sender_fn, *args, **kwargs):
got = self._send_and_get(sender_fn, *args, **kwargs)
if six.PY3:
got = got.decode('utf-8')
return self.assertEqual(expected, got)
def assertStatMatches(self, expected_regexp, sender_fn, *args, **kwargs):
got = self._send_and_get(sender_fn, *args, **kwargs)
if six.PY3:
got = got.decode('utf-8')
return self.assertTrue(re.search(expected_regexp, got),
[got, expected_regexp])
def test_methods_are_no_ops_when_not_enabled(self):
logger = utils.get_logger({
# No "log_statsd_host" means "disabled"
'log_statsd_port': str(self.port),
}, 'some-name')
# Delegate methods are no-ops
self.assertIsNone(logger.update_stats('foo', 88))
self.assertIsNone(logger.update_stats('foo', 88, 0.57))
self.assertIsNone(logger.update_stats('foo', 88,
sample_rate=0.61))
self.assertIsNone(logger.increment('foo'))
self.assertIsNone(logger.increment('foo', 0.57))
self.assertIsNone(logger.increment('foo', sample_rate=0.61))
self.assertIsNone(logger.decrement('foo'))
self.assertIsNone(logger.decrement('foo', 0.57))
self.assertIsNone(logger.decrement('foo', sample_rate=0.61))
self.assertIsNone(logger.timing('foo', 88.048))
self.assertIsNone(logger.timing('foo', 88.57, 0.34))
self.assertIsNone(logger.timing('foo', 88.998, sample_rate=0.82))
self.assertIsNone(logger.timing_since('foo', 8938))
self.assertIsNone(logger.timing_since('foo', 8948, 0.57))
self.assertIsNone(logger.timing_since('foo', 849398,
sample_rate=0.61))
# Now, the queue should be empty (no UDP packets sent)
self.assertRaises(Empty, self.queue.get_nowait)
def test_delegate_methods_with_no_default_sample_rate(self):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
}, 'some-name')
self.assertStat('some-name.some.counter:1|c', self.logger.increment,
'some.counter')
self.assertStat('some-name.some.counter:-1|c', self.logger.decrement,
'some.counter')
self.assertStat('some-name.some.operation:4900.0|ms',
self.logger.timing, 'some.operation', 4.9 * 1000)
self.assertStatMatches('some-name\.another\.operation:\d+\.\d+\|ms',
self.logger.timing_since, 'another.operation',
time.time())
self.assertStat('some-name.another.counter:42|c',
self.logger.update_stats, 'another.counter', 42)
# Each call can override the sample_rate (also, bonus prefix test)
self.logger.set_statsd_prefix('pfx')
self.assertStat('pfx.some.counter:1|c|@0.972', self.logger.increment,
'some.counter', sample_rate=0.972)
self.assertStat('pfx.some.counter:-1|c|@0.972', self.logger.decrement,
'some.counter', sample_rate=0.972)
self.assertStat('pfx.some.operation:4900.0|ms|@0.972',
self.logger.timing, 'some.operation', 4.9 * 1000,
sample_rate=0.972)
self.assertStatMatches('pfx\.another\.op:\d+\.\d+\|ms|@0.972',
self.logger.timing_since, 'another.op',
time.time(), sample_rate=0.972)
self.assertStat('pfx.another.counter:3|c|@0.972',
self.logger.update_stats, 'another.counter', 3,
sample_rate=0.972)
# Can override sample_rate with non-keyword arg
self.logger.set_statsd_prefix('')
self.assertStat('some.counter:1|c|@0.939', self.logger.increment,
'some.counter', 0.939)
self.assertStat('some.counter:-1|c|@0.939', self.logger.decrement,
'some.counter', 0.939)
self.assertStat('some.operation:4900.0|ms|@0.939',
self.logger.timing, 'some.operation',
4.9 * 1000, 0.939)
self.assertStatMatches('another\.op:\d+\.\d+\|ms|@0.939',
self.logger.timing_since, 'another.op',
time.time(), 0.939)
self.assertStat('another.counter:3|c|@0.939',
self.logger.update_stats, 'another.counter', 3, 0.939)
def test_delegate_methods_with_default_sample_rate(self):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
'log_statsd_default_sample_rate': '0.93',
}, 'pfx')
self.assertStat('pfx.some.counter:1|c|@0.93', self.logger.increment,
'some.counter')
self.assertStat('pfx.some.counter:-1|c|@0.93', self.logger.decrement,
'some.counter')
self.assertStat('pfx.some.operation:4760.0|ms|@0.93',
self.logger.timing, 'some.operation', 4.76 * 1000)
self.assertStatMatches('pfx\.another\.op:\d+\.\d+\|ms|@0.93',
self.logger.timing_since, 'another.op',
time.time())
self.assertStat('pfx.another.counter:3|c|@0.93',
self.logger.update_stats, 'another.counter', 3)
# Each call can override the sample_rate
self.assertStat('pfx.some.counter:1|c|@0.9912', self.logger.increment,
'some.counter', sample_rate=0.9912)
self.assertStat('pfx.some.counter:-1|c|@0.9912', self.logger.decrement,
'some.counter', sample_rate=0.9912)
self.assertStat('pfx.some.operation:4900.0|ms|@0.9912',
self.logger.timing, 'some.operation', 4.9 * 1000,
sample_rate=0.9912)
self.assertStatMatches('pfx\.another\.op:\d+\.\d+\|ms|@0.9912',
self.logger.timing_since, 'another.op',
time.time(), sample_rate=0.9912)
self.assertStat('pfx.another.counter:3|c|@0.9912',
self.logger.update_stats, 'another.counter', 3,
sample_rate=0.9912)
# Can override sample_rate with non-keyword arg
self.logger.set_statsd_prefix('')
self.assertStat('some.counter:1|c|@0.987654', self.logger.increment,
'some.counter', 0.987654)
self.assertStat('some.counter:-1|c|@0.987654', self.logger.decrement,
'some.counter', 0.987654)
self.assertStat('some.operation:4900.0|ms|@0.987654',
self.logger.timing, 'some.operation',
4.9 * 1000, 0.987654)
self.assertStatMatches('another\.op:\d+\.\d+\|ms|@0.987654',
self.logger.timing_since, 'another.op',
time.time(), 0.987654)
self.assertStat('another.counter:3|c|@0.987654',
self.logger.update_stats, 'another.counter',
3, 0.987654)
def test_delegate_methods_with_metric_prefix(self):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
'log_statsd_metric_prefix': 'alpha.beta',
}, 'pfx')
self.assertStat('alpha.beta.pfx.some.counter:1|c',
self.logger.increment, 'some.counter')
self.assertStat('alpha.beta.pfx.some.counter:-1|c',
self.logger.decrement, 'some.counter')
self.assertStat('alpha.beta.pfx.some.operation:4760.0|ms',
self.logger.timing, 'some.operation', 4.76 * 1000)
self.assertStatMatches(
'alpha\.beta\.pfx\.another\.op:\d+\.\d+\|ms',
self.logger.timing_since, 'another.op', time.time())
self.assertStat('alpha.beta.pfx.another.counter:3|c',
self.logger.update_stats, 'another.counter', 3)
self.logger.set_statsd_prefix('')
self.assertStat('alpha.beta.some.counter:1|c|@0.9912',
self.logger.increment, 'some.counter',
sample_rate=0.9912)
self.assertStat('alpha.beta.some.counter:-1|c|@0.9912',
self.logger.decrement, 'some.counter', 0.9912)
self.assertStat('alpha.beta.some.operation:4900.0|ms|@0.9912',
self.logger.timing, 'some.operation', 4.9 * 1000,
sample_rate=0.9912)
self.assertStatMatches('alpha\.beta\.another\.op:\d+\.\d+\|ms|@0.9912',
self.logger.timing_since, 'another.op',
time.time(), sample_rate=0.9912)
self.assertStat('alpha.beta.another.counter:3|c|@0.9912',
self.logger.update_stats, 'another.counter', 3,
sample_rate=0.9912)
def test_get_valid_utf8_str(self):
unicode_sample = u'\uc77c\uc601'
valid_utf8_str = unicode_sample.encode('utf-8')
invalid_utf8_str = unicode_sample.encode('utf-8')[::-1]
self.assertEqual(valid_utf8_str,
utils.get_valid_utf8_str(valid_utf8_str))
self.assertEqual(valid_utf8_str,
utils.get_valid_utf8_str(unicode_sample))
self.assertEqual(b'\xef\xbf\xbd\xef\xbf\xbd\xec\xbc\x9d\xef\xbf\xbd',
utils.get_valid_utf8_str(invalid_utf8_str))
@reset_logger_state
def test_thread_locals(self):
logger = utils.get_logger(None)
# test the setter
logger.thread_locals = ('id', 'ip')
self.assertEqual(logger.thread_locals, ('id', 'ip'))
# reset
logger.thread_locals = (None, None)
self.assertEqual(logger.thread_locals, (None, None))
logger.txn_id = '1234'
logger.client_ip = '1.2.3.4'
self.assertEqual(logger.thread_locals, ('1234', '1.2.3.4'))
logger.txn_id = '5678'
logger.client_ip = '5.6.7.8'
self.assertEqual(logger.thread_locals, ('5678', '5.6.7.8'))
def test_no_fdatasync(self):
    """fdatasync() falls back to fsync() when os lacks fdatasync."""
    calls = []

    class NoFdatasync(object):
        # Deliberately provides no fdatasync attribute.
        pass

    def fake_fsync(fd):
        calls.append(fd)

    with patch('swift.common.utils.os', NoFdatasync()):
        with patch('swift.common.utils.fsync', fake_fsync):
            utils.fdatasync(12345)
    self.assertEqual([12345], calls)
def test_yes_fdatasync(self):
    """fdatasync() delegates to os.fdatasync when it exists."""
    calls = []

    class YesFdatasync(object):
        def fdatasync(self, fd):
            calls.append(fd)

    with patch('swift.common.utils.os', YesFdatasync()):
        utils.fdatasync(12345)
    self.assertEqual([12345], calls)
def test_fsync_bad_fullsync(self):
    """fsync() surfaces an OSError when the F_FULLSYNC fcntl call fails."""
    class FailingFcntl(object):
        F_FULLSYNC = 123

        def fcntl(self, fd, op):
            raise IOError(18)

    with patch('swift.common.utils.fcntl', FailingFcntl()):
        self.assertRaises(OSError, utils.fsync, 12345)
def test_fsync_f_fullsync(self):
    """fsync() uses fcntl F_FULLSYNC when the platform exposes it."""
    recorded = []

    class RecordingFcntl(object):
        F_FULLSYNC = 123

        def fcntl(self, fd, op):
            recorded[:] = [fd, op]
            return 0

    with patch('swift.common.utils.fcntl', RecordingFcntl()):
        utils.fsync(12345)
    # The fd and the F_FULLSYNC opcode must both have been passed through.
    self.assertEqual([12345, 123], recorded)
def test_fsync_no_fullsync(self):
    """fsync() falls back to os.fsync when fcntl lacks F_FULLSYNC."""
    calls = []

    class PlainFcntl(object):
        # No F_FULLSYNC attribute here.
        pass

    def fake_fsync(fd):
        calls.append(fd)

    with patch('swift.common.utils.fcntl', PlainFcntl()), \
            patch('os.fsync', fake_fsync):
        utils.fsync(12345)
    self.assertEqual([12345], calls)
class TestAuditLocationGenerator(unittest.TestCase):
    """Tests for utils.audit_location_generator's directory-tree walking."""

    def test_drive_tree_access(self):
        """Permission errors at any tree level propagate as OSError."""
        orig_listdir = utils.listdir

        def _mock_utils_listdir(path):
            # Simulate EACCES for the specially named entries and defer to
            # the real listdir everywhere else.
            if 'bad_part' in path:
                raise OSError(errno.EACCES)
            elif 'bad_suffix' in path:
                raise OSError(errno.EACCES)
            elif 'bad_hash' in path:
                raise OSError(errno.EACCES)
            else:
                return orig_listdir(path)

        # Check Raise on Bad partition
        tmpdir = mkdtemp()
        data = os.path.join(tmpdir, "drive", "data")
        os.makedirs(data)
        # A plain file where a partition directory is expected.
        obj_path = os.path.join(data, "bad_part")
        with open(obj_path, "w"):
            pass
        part1 = os.path.join(data, "partition1")
        os.makedirs(part1)
        part2 = os.path.join(data, "partition2")
        os.makedirs(part2)
        with patch('swift.common.utils.listdir', _mock_utils_listdir):
            audit = lambda: list(utils.audit_location_generator(
                tmpdir, "data", mount_check=False))
            self.assertRaises(OSError, audit)
        rmtree(tmpdir)

        # Check Raise on Bad Suffix
        tmpdir = mkdtemp()
        data = os.path.join(tmpdir, "drive", "data")
        os.makedirs(data)
        part1 = os.path.join(data, "partition1")
        os.makedirs(part1)
        part2 = os.path.join(data, "partition2")
        os.makedirs(part2)
        obj_path = os.path.join(part1, "bad_suffix")
        with open(obj_path, 'w'):
            pass
        suffix = os.path.join(part2, "suffix")
        os.makedirs(suffix)
        with patch('swift.common.utils.listdir', _mock_utils_listdir):
            audit = lambda: list(utils.audit_location_generator(
                tmpdir, "data", mount_check=False))
            self.assertRaises(OSError, audit)
        rmtree(tmpdir)

        # Check Raise on Bad Hash
        tmpdir = mkdtemp()
        data = os.path.join(tmpdir, "drive", "data")
        os.makedirs(data)
        part1 = os.path.join(data, "partition1")
        os.makedirs(part1)
        suffix = os.path.join(part1, "suffix")
        os.makedirs(suffix)
        hash1 = os.path.join(suffix, "hash1")
        os.makedirs(hash1)
        obj_path = os.path.join(suffix, "bad_hash")
        with open(obj_path, 'w'):
            pass
        with patch('swift.common.utils.listdir', _mock_utils_listdir):
            audit = lambda: list(utils.audit_location_generator(
                tmpdir, "data", mount_check=False))
            self.assertRaises(OSError, audit)
        rmtree(tmpdir)

    def test_non_dir_drive(self):
        """A non-directory entry at drive level is skipped with one warning."""
        with temptree([]) as tmpdir:
            logger = FakeLogger()
            data = os.path.join(tmpdir, "drive", "data")
            os.makedirs(data)
            # Create a file, that represents a non-dir drive
            open(os.path.join(tmpdir, 'asdf'), 'w')
            locations = utils.audit_location_generator(
                tmpdir, "data", mount_check=False, logger=logger
            )
            self.assertEqual(list(locations), [])
            self.assertEqual(1, len(logger.get_lines_for_level('warning')))
            # Test without the logger
            locations = utils.audit_location_generator(
                tmpdir, "data", mount_check=False
            )
            self.assertEqual(list(locations), [])

    def test_mount_check_drive(self):
        """mount_check=True adds an unmounted-drive warning (2 total)."""
        with temptree([]) as tmpdir:
            logger = FakeLogger()
            data = os.path.join(tmpdir, "drive", "data")
            os.makedirs(data)
            # Create a file, that represents a non-dir drive
            open(os.path.join(tmpdir, 'asdf'), 'w')
            locations = utils.audit_location_generator(
                tmpdir, "data", mount_check=True, logger=logger
            )
            self.assertEqual(list(locations), [])
            self.assertEqual(2, len(logger.get_lines_for_level('warning')))
            # Test without the logger
            locations = utils.audit_location_generator(
                tmpdir, "data", mount_check=True
            )
            self.assertEqual(list(locations), [])

    def test_non_dir_contents(self):
        """Files sitting where directories belong yield no locations."""
        with temptree([]) as tmpdir:
            logger = FakeLogger()
            data = os.path.join(tmpdir, "drive", "data")
            os.makedirs(data)
            with open(os.path.join(data, "partition1"), "w"):
                pass
            partition = os.path.join(data, "partition2")
            os.makedirs(partition)
            with open(os.path.join(partition, "suffix1"), "w"):
                pass
            suffix = os.path.join(partition, "suffix2")
            os.makedirs(suffix)
            with open(os.path.join(suffix, "hash1"), "w"):
                pass
            locations = utils.audit_location_generator(
                tmpdir, "data", mount_check=False, logger=logger
            )
            self.assertEqual(list(locations), [])

    def test_find_objects(self):
        """Object files are yielded as (path, device, partition) tuples."""
        with temptree([]) as tmpdir:
            expected_objs = list()
            logger = FakeLogger()
            data = os.path.join(tmpdir, "drive", "data")
            os.makedirs(data)
            # Create a file, that represents a non-dir drive
            open(os.path.join(tmpdir, 'asdf'), 'w')
            partition = os.path.join(data, "partition1")
            os.makedirs(partition)
            suffix = os.path.join(partition, "suffix")
            os.makedirs(suffix)
            hash_path = os.path.join(suffix, "hash")
            os.makedirs(hash_path)
            obj_path = os.path.join(hash_path, "obj1.db")
            with open(obj_path, "w"):
                pass
            expected_objs.append((obj_path, 'drive', 'partition1'))
            partition = os.path.join(data, "partition2")
            os.makedirs(partition)
            suffix = os.path.join(partition, "suffix2")
            os.makedirs(suffix)
            hash_path = os.path.join(suffix, "hash2")
            os.makedirs(hash_path)
            obj_path = os.path.join(hash_path, "obj2.db")
            with open(obj_path, "w"):
                pass
            expected_objs.append((obj_path, 'drive', 'partition2'))
            locations = utils.audit_location_generator(
                tmpdir, "data", mount_check=False, logger=logger
            )
            got_objs = list(locations)
            self.assertEqual(len(got_objs), len(expected_objs))
            self.assertEqual(sorted(got_objs), sorted(expected_objs))
            # One warning for the non-dir 'asdf' drive entry.
            self.assertEqual(1, len(logger.get_lines_for_level('warning')))

    def test_ignore_metadata(self):
        """Only files with the requested suffix (.dat) are yielded."""
        with temptree([]) as tmpdir:
            logger = FakeLogger()
            data = os.path.join(tmpdir, "drive", "data")
            os.makedirs(data)
            partition = os.path.join(data, "partition2")
            os.makedirs(partition)
            suffix = os.path.join(partition, "suffix2")
            os.makedirs(suffix)
            hash_path = os.path.join(suffix, "hash2")
            os.makedirs(hash_path)
            obj_path = os.path.join(hash_path, "obj1.dat")
            with open(obj_path, "w"):
                pass
            meta_path = os.path.join(hash_path, "obj1.meta")
            with open(meta_path, "w"):
                pass
            locations = utils.audit_location_generator(
                tmpdir, "data", ".dat", mount_check=False, logger=logger
            )
            self.assertEqual(list(locations),
                             [(obj_path, "drive", "partition2")])
class TestGreenAsyncPile(unittest.TestCase):
    """Tests for the utils.GreenAsyncPile greenthread pool/iterator."""

    def test_runs_everything(self):
        """Every spawned job runs and yields its result."""
        def run_test():
            tests_ran[0] += 1
            return tests_ran[0]
        tests_ran = [0]
        pile = utils.GreenAsyncPile(3)
        for x in range(3):
            pile.spawn(run_test)
        self.assertEqual(sorted(x for x in pile), [1, 2, 3])

    def test_is_asynchronous(self):
        """Results come back in completion order, not spawn order."""
        def run_test(index):
            events[index].wait()
            return index
        pile = utils.GreenAsyncPile(3)
        for order in ((1, 2, 0), (0, 1, 2), (2, 1, 0), (0, 2, 1)):
            events = [eventlet.event.Event(), eventlet.event.Event(),
                      eventlet.event.Event()]
            for x in range(3):
                pile.spawn(run_test, x)
            # Releasing the events in a chosen order dictates result order.
            for x in order:
                events[x].send()
                self.assertEqual(next(pile), x)

    def test_next_when_empty(self):
        """A job returning None yields None; exhaustion raises StopIteration."""
        def run_test():
            pass
        pile = utils.GreenAsyncPile(3)
        pile.spawn(run_test)
        self.assertEqual(next(pile), None)
        self.assertRaises(StopIteration, lambda: next(pile))

    def test_waitall_timeout_timesout(self):
        """waitall() returns only results that finished before the timeout."""
        def run_test(sleep_duration):
            eventlet.sleep(sleep_duration)
            completed[0] += 1
            return sleep_duration
        completed = [0]
        pile = utils.GreenAsyncPile(3)
        pile.spawn(run_test, 0.1)
        pile.spawn(run_test, 1.0)
        self.assertEqual(pile.waitall(0.5), [0.1])
        self.assertEqual(completed[0], 1)

    def test_waitall_timeout_completes(self):
        """waitall() returns all results when every job beats the timeout."""
        def run_test(sleep_duration):
            eventlet.sleep(sleep_duration)
            completed[0] += 1
            return sleep_duration
        completed = [0]
        pile = utils.GreenAsyncPile(3)
        pile.spawn(run_test, 0.1)
        pile.spawn(run_test, 0.1)
        self.assertEqual(pile.waitall(0.5), [0.1, 0.1])
        self.assertEqual(completed[0], 2)

    def test_waitfirst_only_returns_first(self):
        """waitfirst() hands back just the first completed result."""
        def run_test(name):
            eventlet.sleep(0)
            completed.append(name)
            return name
        completed = []
        pile = utils.GreenAsyncPile(3)
        pile.spawn(run_test, 'first')
        pile.spawn(run_test, 'second')
        pile.spawn(run_test, 'third')
        self.assertEqual(pile.waitfirst(0.5), completed[0])
        # 3 still completed, but only the first was returned.
        self.assertEqual(3, len(completed))

    def test_wait_with_firstn(self):
        """_wait(timeout, first_n) returns the first n results (or all)."""
        def run_test(name):
            eventlet.sleep(0)
            completed.append(name)
            return name
        for first_n in [None] + list(range(6)):
            completed = []
            pile = utils.GreenAsyncPile(10)
            for i in range(10):
                pile.spawn(run_test, i)
            actual = pile._wait(1, first_n)
            # first_n of 0 or None is falsy and means "no limit".
            expected_n = first_n if first_n else 10
            self.assertEqual(completed[:expected_n], actual)
            self.assertEqual(10, len(completed))

    def test_pending(self):
        """_pending tracks spawned-but-unconsumed results."""
        pile = utils.GreenAsyncPile(3)
        self.assertEqual(0, pile._pending)
        for repeats in range(2):
            # repeat to verify that pending will go again up after going down
            for i in range(4):
                pile.spawn(lambda: i)
            self.assertEqual(4, pile._pending)
            for i in range(3, -1, -1):
                next(pile)
                self.assertEqual(i, pile._pending)
        # sanity check - the pile is empty
        # NOTE(review): py2-style pile.next; assumes GreenAsyncPile aliases
        # next = __next__ on py3 -- confirm against swift.common.utils.
        self.assertRaises(StopIteration, pile.next)
        # pending remains 0
        self.assertEqual(0, pile._pending)
class TestLRUCache(unittest.TestCase):
    """Tests for the utils.LRUCache decorator (size- and time-bounded)."""

    def test_maxsize(self):
        """The cache holds at most maxsize entries and evicts LRU-first."""
        @utils.LRUCache(maxsize=10)
        def f(*args):
            return math.sqrt(*args)
        _orig_math_sqrt = math.sqrt
        # setup cache [0-10)
        for i in range(10):
            self.assertEqual(math.sqrt(i), f(i))
        self.assertEqual(f.size(), 10)
        # validate cache [0-10)
        # (math.sqrt is replaced by a MagicMock, so a cache hit is the only
        # way to still get the real value back)
        with patch('math.sqrt'):
            for i in range(10):
                self.assertEqual(_orig_math_sqrt(i), f(i))
        self.assertEqual(f.size(), 10)
        # update cache [10-20)
        for i in range(10, 20):
            self.assertEqual(math.sqrt(i), f(i))
        # cache size is fixed
        self.assertEqual(f.size(), 10)
        # validate cache [10-20)
        with patch('math.sqrt'):
            for i in range(10, 20):
                self.assertEqual(_orig_math_sqrt(i), f(i))
        # validate un-cached [0-10)
        # (math.sqrt replaced by None: an actual call raises TypeError)
        with patch('math.sqrt', new=None):
            for i in range(10):
                self.assertRaises(TypeError, f, i)
        # cache unchanged
        self.assertEqual(f.size(), 10)
        with patch('math.sqrt'):
            for i in range(10, 20):
                self.assertEqual(_orig_math_sqrt(i), f(i))
        self.assertEqual(f.size(), 10)

    def test_maxtime(self):
        """Entries expire after maxtime seconds and are recomputed."""
        @utils.LRUCache(maxtime=30)
        def f(*args):
            return math.sqrt(*args)
        self.assertEqual(30, f.maxtime)
        _orig_math_sqrt = math.sqrt
        now = time.time()
        the_future = now + 31
        # setup cache [0-10)
        with patch('time.time', lambda: now):
            for i in range(10):
                self.assertEqual(math.sqrt(i), f(i))
            self.assertEqual(f.size(), 10)
            # validate cache [0-10)
            with patch('math.sqrt'):
                for i in range(10):
                    self.assertEqual(_orig_math_sqrt(i), f(i))
            self.assertEqual(f.size(), 10)
        # validate expired [0-10)
        with patch('math.sqrt', new=None):
            with patch('time.time', lambda: the_future):
                for i in range(10):
                    self.assertRaises(TypeError, f, i)
        # validate repopulates [0-10)
        with patch('time.time', lambda: the_future):
            for i in range(10):
                self.assertEqual(math.sqrt(i), f(i))
        # reuses cache space
        self.assertEqual(f.size(), 10)

    def test_set_maxtime(self):
        """maxtime is writable; a negative value expires everything."""
        @utils.LRUCache(maxtime=30)
        def f(*args):
            return math.sqrt(*args)
        self.assertEqual(30, f.maxtime)
        self.assertEqual(2, f(4))
        self.assertEqual(1, f.size())
        # expire everything
        f.maxtime = -1
        # validate un-cached [0-10)
        with patch('math.sqrt', new=None):
            self.assertRaises(TypeError, f, 4)

    def test_set_maxsize(self):
        """Shrinking maxsize takes effect on subsequent calls."""
        @utils.LRUCache(maxsize=10)
        def f(*args):
            return math.sqrt(*args)
        for i in range(12):
            f(i)
        self.assertEqual(f.size(), 10)
        f.maxsize = 4
        for i in range(12):
            f(i)
        self.assertEqual(f.size(), 4)
class TestParseContentRange(unittest.TestCase):
    """Tests for utils.parse_content_range."""

    def test_good(self):
        # A well-formed header parses into (start, end, total).
        parsed = utils.parse_content_range("bytes 100-200/300")
        self.assertEqual((100, 200, 300), parsed)

    def test_bad(self):
        # Missing unit, non-numeric total, and non-numeric range all fail.
        for bad_header in ("100-300/500",
                           "bytes 100-200/aardvark",
                           "bytes bulbous-bouffant/4994801"):
            self.assertRaises(ValueError,
                              utils.parse_content_range, bad_header)
class TestParseContentDisposition(unittest.TestCase):
    """Tests for utils.parse_content_disposition."""

    def test_basic_content_type(self):
        # No parameters: just the bare value and an empty attr dict.
        self.assertEqual(('text/plain', {}),
                         utils.parse_content_disposition('text/plain'))

    def test_content_type_with_charset(self):
        self.assertEqual(('text/plain', {'charset': 'UTF8'}),
                         utils.parse_content_disposition(
                             'text/plain; charset=UTF8'))

    def test_content_disposition(self):
        name, attrs = utils.parse_content_disposition(
            'form-data; name="somefile"; filename="test.html"')
        self.assertEqual('form-data', name)
        self.assertEqual({'name': 'somefile', 'filename': 'test.html'}, attrs)

    def test_content_disposition_without_white_space(self):
        # Parameters separated only by ';' (no spaces) parse identically.
        name, attrs = utils.parse_content_disposition(
            'form-data;name="somefile";filename="test.html"')
        self.assertEqual('form-data', name)
        self.assertEqual({'name': 'somefile', 'filename': 'test.html'}, attrs)
class TestIterMultipartMimeDocuments(unittest.TestCase):
    """Tests for utils.iter_multipart_mime_documents boundary splitting."""

    def test_bad_start(self):
        """Input not beginning with the boundary raises MimeInvalid."""
        it = utils.iter_multipart_mime_documents(StringIO('blah'), 'unique')
        exc = None
        try:
            next(it)
        except MimeInvalid as err:
            exc = err
        self.assertTrue('invalid starting boundary' in str(exc))
        self.assertTrue('--unique' in str(exc))

    def test_empty(self):
        """A lone boundary yields a single empty document."""
        it = utils.iter_multipart_mime_documents(StringIO('--unique'),
                                                 'unique')
        fp = next(it)
        self.assertEqual(fp.read(), '')
        exc = None
        try:
            next(it)
        except StopIteration as err:
            exc = err
        self.assertTrue(exc is not None)

    def test_basic(self):
        """One document delimited by the closing boundary."""
        it = utils.iter_multipart_mime_documents(
            StringIO('--unique\r\nabcdefg\r\n--unique--'), 'unique')
        fp = next(it)
        self.assertEqual(fp.read(), 'abcdefg')
        exc = None
        try:
            next(it)
        except StopIteration as err:
            exc = err
        self.assertTrue(exc is not None)

    def test_basic2(self):
        """Two documents separated by a middle boundary."""
        it = utils.iter_multipart_mime_documents(
            StringIO('--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
            'unique')
        fp = next(it)
        self.assertEqual(fp.read(), 'abcdefg')
        fp = next(it)
        self.assertEqual(fp.read(), 'hijkl')
        exc = None
        try:
            next(it)
        except StopIteration as err:
            exc = err
        self.assertTrue(exc is not None)

    def test_tiny_reads(self):
        """Small read(n) calls stop cleanly at the part boundary."""
        it = utils.iter_multipart_mime_documents(
            StringIO('--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
            'unique')
        fp = next(it)
        self.assertEqual(fp.read(2), 'ab')
        self.assertEqual(fp.read(2), 'cd')
        self.assertEqual(fp.read(2), 'ef')
        self.assertEqual(fp.read(2), 'g')
        self.assertEqual(fp.read(2), '')
        fp = next(it)
        self.assertEqual(fp.read(), 'hijkl')
        exc = None
        try:
            next(it)
        except StopIteration as err:
            exc = err
        self.assertTrue(exc is not None)

    def test_big_reads(self):
        """An over-large read(n) still returns only the current part."""
        it = utils.iter_multipart_mime_documents(
            StringIO('--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
            'unique')
        fp = next(it)
        self.assertEqual(fp.read(65536), 'abcdefg')
        self.assertEqual(fp.read(), '')
        fp = next(it)
        self.assertEqual(fp.read(), 'hijkl')
        exc = None
        try:
            next(it)
        except StopIteration as err:
            exc = err
        self.assertTrue(exc is not None)

    def test_leading_crlfs(self):
        """Stray CRLFs before the first boundary are tolerated."""
        it = utils.iter_multipart_mime_documents(
            StringIO('\r\n\r\n\r\n--unique\r\nabcdefg\r\n'
                     '--unique\r\nhijkl\r\n--unique--'),
            'unique')
        fp = next(it)
        self.assertEqual(fp.read(65536), 'abcdefg')
        self.assertEqual(fp.read(), '')
        fp = next(it)
        self.assertEqual(fp.read(), 'hijkl')
        # NOTE(review): py2-style it.next; assumes the iterator also exposes
        # a next() method on py3 -- confirm.
        self.assertRaises(StopIteration, it.next)

    def test_broken_mid_stream(self):
        # We go ahead and accept whatever is sent instead of rejecting the
        # whole request, in case the partial form is still useful.
        it = utils.iter_multipart_mime_documents(
            StringIO('--unique\r\nabc'), 'unique')
        fp = next(it)
        self.assertEqual(fp.read(), 'abc')
        exc = None
        try:
            next(it)
        except StopIteration as err:
            exc = err
        self.assertTrue(exc is not None)

    def test_readline(self):
        """readline() honors CRLF line endings but not bare CR."""
        it = utils.iter_multipart_mime_documents(
            StringIO('--unique\r\nab\r\ncd\ref\ng\r\n--unique\r\nhi\r\n\r\n'
                     'jkl\r\n\r\n--unique--'), 'unique')
        fp = next(it)
        self.assertEqual(fp.readline(), 'ab\r\n')
        # A lone \r is not a line terminator here.
        self.assertEqual(fp.readline(), 'cd\ref\ng')
        self.assertEqual(fp.readline(), '')
        fp = next(it)
        self.assertEqual(fp.readline(), 'hi\r\n')
        self.assertEqual(fp.readline(), '\r\n')
        self.assertEqual(fp.readline(), 'jkl\r\n')
        exc = None
        try:
            next(it)
        except StopIteration as err:
            exc = err
        self.assertTrue(exc is not None)

    def test_readline_with_tiny_chunks(self):
        """readline() behaves identically with a 2-byte read chunk size."""
        it = utils.iter_multipart_mime_documents(
            StringIO('--unique\r\nab\r\ncd\ref\ng\r\n--unique\r\nhi\r\n'
                     '\r\njkl\r\n\r\n--unique--'),
            'unique',
            read_chunk_size=2)
        fp = next(it)
        self.assertEqual(fp.readline(), 'ab\r\n')
        self.assertEqual(fp.readline(), 'cd\ref\ng')
        self.assertEqual(fp.readline(), '')
        fp = next(it)
        self.assertEqual(fp.readline(), 'hi\r\n')
        self.assertEqual(fp.readline(), '\r\n')
        self.assertEqual(fp.readline(), 'jkl\r\n')
        exc = None
        try:
            next(it)
        except StopIteration as err:
            exc = err
        self.assertTrue(exc is not None)
class TestParseMimeHeaders(unittest.TestCase):
    """Tests for utils.parse_mime_headers."""

    def test_parse_mime_headers(self):
        # The empty line terminates the header block (RFC 2822 style);
        # everything after it must remain unread in the file object so the
        # caller can stream the body. Without this separator the body line
        # would be consumed as a (malformed) header and the final read()
        # assertion could not hold.
        doc_file = BytesIO(b"""Content-Disposition: form-data; name="file_size"
Foo: Bar
NOT-title-cAsED: quux
Connexion: =?iso8859-1?q?r=E9initialis=E9e_par_l=27homologue?=
Status: =?utf-8?b?5byA5aeL6YCa6L+H5a+56LGh5aSN5Yi2?=
Latin-1: Resincronizaci\xf3n realizada con \xe9xito
Utf-8: \xd0\xba\xd0\xbe\xd0\xbd\xd1\x82\xd0\xb5\xd0\xb9\xd0\xbd\xd0\xb5\xd1\x80

This is the body
""")
        headers = utils.parse_mime_headers(doc_file)
        utf8 = u'\u043a\u043e\u043d\u0442\u0435\u0439\u043d\u0435\u0440'
        if six.PY2:
            utf8 = utf8.encode('utf-8')
        expected_headers = {
            'Content-Disposition': 'form-data; name="file_size"',
            # Header names are canonicalized to Title-Case.
            'Foo': "Bar",
            'Not-Title-Cased': "quux",
            # Encoded-word or non-ASCII values are treated just like any other
            # bytestring (at least for now)
            'Connexion': "=?iso8859-1?q?r=E9initialis=E9e_par_l=27homologue?=",
            'Status': "=?utf-8?b?5byA5aeL6YCa6L+H5a+56LGh5aSN5Yi2?=",
            'Latin-1': "Resincronizaci\xf3n realizada con \xe9xito",
            'Utf-8': utf8,
        }
        self.assertEqual(expected_headers, headers)
        # The body must still be waiting in the file object.
        self.assertEqual(b"This is the body\n", doc_file.read())
class FakeResponse(object):
    """Minimal httplib-response stand-in backed by an in-memory body."""

    def __init__(self, status, headers, body):
        self.status = status
        self.headers = HeaderKeyDict(headers)
        self.body = StringIO(body)

    def getheader(self, header_name):
        # Absent headers read as the empty string, like httplib does.
        value = self.headers.get(header_name, '')
        return str(value)

    def getheaders(self):
        return self.headers.items()

    def read(self, length=None):
        return self.body.read(length)

    def readline(self, length=None):
        return self.body.readline(length)
class TestDocumentItersToHTTPResponseBody(unittest.TestCase):
    """Tests for utils.document_iters_to_http_response_body."""

    def test_no_parts(self):
        """No parts produces an empty body."""
        body = utils.document_iters_to_http_response_body(
            iter([]), 'dontcare',
            multipart=False, logger=FakeLogger())
        self.assertEqual(body, '')

    def test_single_part(self):
        """A single part passes through verbatim (no MIME framing)."""
        body = "time flies like an arrow; fruit flies like a banana"
        doc_iters = [{'part_iter': iter(StringIO(body).read, '')}]
        resp_body = ''.join(
            utils.document_iters_to_http_response_body(
                iter(doc_iters), 'dontcare',
                multipart=False, logger=FakeLogger()))
        self.assertEqual(resp_body, body)

    def test_multiple_parts(self):
        """Multiple parts are framed as multipart/byteranges documents."""
        part1 = "two peanuts were walking down a railroad track"
        part2 = "and one was a salted. ... peanut."
        doc_iters = [{
            'start_byte': 88,
            'end_byte': 133,
            'content_type': 'application/peanut',
            'entity_length': 1024,
            'part_iter': iter(StringIO(part1).read, ''),
        }, {
            'start_byte': 500,
            'end_byte': 532,
            'content_type': 'application/salted',
            'entity_length': 1024,
            'part_iter': iter(StringIO(part2).read, ''),
        }]
        resp_body = ''.join(
            utils.document_iters_to_http_response_body(
                iter(doc_iters), 'boundaryboundary',
                multipart=True, logger=FakeLogger()))
        self.assertEqual(resp_body, (
            "--boundaryboundary\r\n" +
            # This is a little too strict; we don't actually care that the
            # headers are in this order, but the test is much more legible
            # this way.
            "Content-Type: application/peanut\r\n" +
            "Content-Range: bytes 88-133/1024\r\n" +
            "\r\n" +
            part1 + "\r\n" +
            "--boundaryboundary\r\n"
            "Content-Type: application/salted\r\n" +
            "Content-Range: bytes 500-532/1024\r\n" +
            "\r\n" +
            part2 + "\r\n" +
            "--boundaryboundary--"))

    def test_closed_part_iterator(self):
        """Part iterators get close()d; a missing close() is tolerated."""
        # (A stray debugging print() was removed from the start of this test.)
        useful_iter_mock = mock.MagicMock()
        useful_iter_mock.__iter__.return_value = ['']
        body_iter = utils.document_iters_to_http_response_body(
            iter([{'part_iter': useful_iter_mock}]), 'dontcare',
            multipart=False, logger=FakeLogger())
        body = ''
        for s in body_iter:
            body += s
        self.assertEqual(body, '')
        useful_iter_mock.close.assert_called_once_with()

        # Calling "close" on the mock will now raise an AttributeError
        del useful_iter_mock.close
        body_iter = utils.document_iters_to_http_response_body(
            iter([{'part_iter': useful_iter_mock}]), 'dontcare',
            multipart=False, logger=FakeLogger())
        # Consuming the iterator must not blow up on the missing close().
        body = ''
        for s in body_iter:
            body += s
class TestPairs(unittest.TestCase):
    """Tests for utils.pairs."""

    def test_pairs(self):
        items = [10, 20, 30, 40, 50, 60]
        # Build the expected set: every unordered pair of distinct items.
        expected = set()
        for i, left in enumerate(items):
            for right in items[i + 1:]:
                expected.add((left, right))
        self.assertEqual(expected, set(utils.pairs(items)))
class TestSocketStringParser(unittest.TestCase):
    """Tests for utils.parse_socket_string."""

    def test_socket_string_parser(self):
        default = 1337
        # (input, expected_host, expected_port); a None host means the
        # input is malformed and must raise ValueError.
        cases = [
            ('1.2.3.4', '1.2.3.4', default),
            ('1.2.3.4:5000', '1.2.3.4', 5000),
            ('[dead:beef::1]', 'dead:beef::1', default),
            ('[dead:beef::1]:5000', 'dead:beef::1', 5000),
            ('example.com', 'example.com', default),
            ('example.com:5000', 'example.com', 5000),
            ('foo.1-2-3.bar.com:5000', 'foo.1-2-3.bar.com', 5000),
            ('1.2.3.4:10:20', None, None),
            ('dead:beef::1:5000', None, None),
        ]
        for addr, want_host, want_port in cases:
            if want_host:
                host, port = utils.parse_socket_string(addr, default)
                self.assertEqual(want_host, host)
                self.assertEqual(want_port, int(port))
            else:
                with self.assertRaises(ValueError):
                    utils.parse_socket_string(addr, default)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
cronjob.py | from pythontools.core import logger
from threading import Thread
import time, traceback, re, sys
from datetime import datetime, timedelta
# Singleton manager thread; started lazily by the first register_cron_job().
_MANAGER = None
# All registered CronJob instances, polled by the manager loop.
_CRONJOBS = []
# Seconds between manager wake-ups; shrunk to match the finest job interval.
_UPDATE_INTERVAL = 60
class CronJob:
    """A named job that runs a callable on a recurring schedule.

    ``interval`` may be:
      * an ``int`` -- seconds between runs,
      * an ``"HH[:MM[:SS]]"``-style string -- run daily at that time of day,
      * a weekday name (e.g. ``"monday"``) -- run weekly on that day.
    Anything else makes the job never execute.
    """

    def __init__(self, name, interval, function, wait_for_last_job=True):
        self.name = name
        self.interval = interval
        self.function = function
        # When True, a new run is skipped while the previous one is active.
        self.wait_for_last_job = wait_for_last_job
        self._current_thread = None
        # Epoch timestamps of the last and next scheduled run.
        self._last_run = 0
        self._next_run = 0
        self._calc_next_run()

    def run(self):
        """Start the job in a worker thread unless one is still running."""
        if self._current_thread is None:
            def _function(self):
                try:
                    self.function()
                except Exception as e:
                    logger.log(f"ยงcCronJob '{self.name}' throw exception: {e}")
                    traceback.print_exc()
                # Only clear the busy marker here when overlapping runs are
                # forbidden; otherwise it was cleared right after start.
                if self.wait_for_last_job is True:
                    self._current_thread = None
            self._current_thread = Thread(target=_function, args=[self])
            self._current_thread.start()
            self._last_run = time.time()
            if self.wait_for_last_job is False:
                self._current_thread = None
            self._calc_next_run()

    def _calc_next_run(self):
        """Compute self._next_run (epoch seconds) from self.interval."""
        if type(self.interval) is int:
            # Fixed period: schedule relative to the previous run.
            self._next_run = self._last_run + self.interval
            return
        if type(self.interval) is str:
            if ":" in self.interval:
                regex = re.compile(r'((?P<hours>\d+?):)?((?P<minutes>\d+?):)?((?P<seconds>\d+?):)?')
                # The appended ":" lets every optional group match uniformly.
                parts = regex.match(self.interval + ":")
                if parts:
                    parts = parts.groupdict()
                    global _UPDATE_INTERVAL
                    # Tighten the manager poll rate to this job's resolution.
                    if parts["minutes"] and _UPDATE_INTERVAL > 60:
                        _UPDATE_INTERVAL = 60
                    if parts["seconds"] and _UPDATE_INTERVAL > 1:
                        _UPDATE_INTERVAL = 1
                    for name, val in parts.items():
                        parts[name] = int(val) if val else 0
                    today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
                    delta = timedelta(**parts)
                    self._next_run = (today + delta).timestamp()
                    # If today's slot already passed, schedule for tomorrow.
                    if time.time() > self._next_run:
                        self._next_run = (today + timedelta(days=1) + delta).timestamp()
                    return
            else:
                if self.interval.lower() in ["monday", "tuesday", "wednesday", "thursday", "friday", "saturday", "sunday"]:
                    # strptime %A resolves the weekday name to 0..6 (Mon..Sun).
                    wday_ofs = time.strptime(self.interval, "%A").tm_wday
                    today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
                    # Midnight of that weekday in the current week...
                    self._next_run = (today - timedelta(days=today.weekday()) + timedelta(days=wday_ofs)).timestamp()
                    # ...or the same weekday next week if it already passed.
                    if time.time() > self._next_run:
                        self._next_run = (today - timedelta(days=today.weekday()) + timedelta(weeks=1, days=wday_ofs)).timestamp()
                    return
        # Unrecognized interval: push the schedule out of reach and warn.
        self._next_run = sys.maxsize
        logger.log(f"ยงcCronJob '{self.name}' will never execute!")
        return
def register_cron_job(cronjob: CronJob):
    """Register *cronjob* and lazily start the global manager thread.

    Integer intervals shorter than the current poll rate shrink the rate so
    the job is checked at least as often as it recurs.
    """
    global _CRONJOBS, _MANAGER, _UPDATE_INTERVAL
    if type(cronjob.interval) is int and cronjob.interval < _UPDATE_INTERVAL:
        _UPDATE_INTERVAL = cronjob.interval
    _CRONJOBS.append(cronjob)
    if _MANAGER is None:
        def _manager():
            # Poll forever; each due job spawns its own worker thread.
            while True:
                for job in _CRONJOBS:
                    if time.time() > job._next_run:
                        job.run()
                time.sleep(_UPDATE_INTERVAL)
        # NOTE(review): the manager thread is non-daemon, so it keeps the
        # process alive after the main thread exits -- confirm intended.
        _MANAGER = Thread(target=_manager)
        _MANAGER.start()
# --- deprecated --- #
# Backwards-compatible camelCase alias; prefer register_cron_job.
registerCronJob = register_cron_job
# --- deprecated --- #
|
server.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""An example Flight Python server."""
import argparse
import ast
import threading
import time
import pyarrow
import pyarrow.flight
class FlightServer(pyarrow.flight.FlightServerBase):
    """In-memory Flight server storing uploaded tables keyed by descriptor."""

    def __init__(self):
        super(FlightServer, self).__init__()
        # Maps descriptor key tuples (see descriptor_to_key) to tables.
        self.flights = {}

    @classmethod
    def descriptor_to_key(cls, descriptor):
        """Reduce a FlightDescriptor to a hashable tuple usable as dict key."""
        # (first parameter renamed self -> cls: this is a classmethod)
        return (descriptor.descriptor_type.value, descriptor.command,
                tuple(descriptor.path or tuple()))

    def _make_flight_info(self, key, descriptor, table):
        """Build a FlightInfo advertising this server as the sole endpoint."""
        endpoints = [
            pyarrow.flight.FlightEndpoint(repr(key),
                                          [('localhost', 5005)]),
        ]
        return pyarrow.flight.FlightInfo(table.schema,
                                         descriptor, endpoints,
                                         table.num_rows, 0)

    def list_flights(self, context, criteria):
        """Yield a FlightInfo for every stored table."""
        for key, table in self.flights.items():
            # Rebuild the descriptor from the key: command takes precedence,
            # otherwise the stored path components are used.
            if key[1] is not None:
                descriptor = \
                    pyarrow.flight.FlightDescriptor.for_command(key[1])
            else:
                descriptor = pyarrow.flight.FlightDescriptor.for_path(*key[2])
            yield self._make_flight_info(key, descriptor, table)

    def get_flight_info(self, context, descriptor):
        """Return the FlightInfo for one descriptor or raise KeyError."""
        key = FlightServer.descriptor_to_key(descriptor)
        if key in self.flights:
            return self._make_flight_info(key, descriptor, self.flights[key])
        raise KeyError('Flight not found.')

    def do_put(self, context, descriptor, reader):
        """Store an uploaded table under its descriptor key."""
        key = FlightServer.descriptor_to_key(descriptor)
        print(key)
        self.flights[key] = reader.read_all()
        print(self.flights[key])

    def do_get(self, context, ticket):
        """Stream back the table named by the ticket (the repr'd key)."""
        key = ast.literal_eval(ticket.ticket.decode())
        if key not in self.flights:
            return None
        return pyarrow.flight.RecordBatchStream(self.flights[key])

    def list_actions(self, context):
        return [
            ("clear", "Clear the stored flights."),
            ("shutdown", "Shut down this server."),
        ]

    def do_action(self, context, action):
        """Dispatch custom actions; 'clear' is advertised but unimplemented."""
        if action.type == "clear":
            raise NotImplementedError(
                "{} is not implemented.".format(action.type))
        elif action.type == "healthcheck":
            # No-op: a successful round trip is the health signal.
            pass
        elif action.type == "shutdown":
            yield pyarrow.flight.Result(pyarrow.py_buffer(b'Shutdown!'))
            # Shut down on background thread to avoid blocking current
            # request
            threading.Thread(target=self._shutdown).start()
        else:
            raise KeyError(f"Unknown action {action.type!r}")

    def _shutdown(self):
        """Shut down after a delay."""
        print("Server is shutting down...")
        time.sleep(2)
        self.shutdown()
def main():
    """Parse CLI arguments and serve Flight until shutdown."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--port", type=int, default=5005)
    # --tls takes two file paths: certificate chain, then private key.
    parser.add_argument("--tls", nargs=2, default=None)
    args = parser.parse_args()
    server = FlightServer()
    kwargs = {}
    scheme = "grpc+tcp"
    if args.tls:
        scheme = "grpc+tls"
        with open(args.tls[0], "rb") as cert_file:
            kwargs["tls_cert_chain"] = cert_file.read()
        with open(args.tls[1], "rb") as key_file:
            kwargs["tls_private_key"] = key_file.read()
    location = "{}://0.0.0.0:{}".format(scheme, args.port)
    # NOTE(review): uses the two-step init()/run() Flight server API rather
    # than passing the location to the constructor -- confirm the installed
    # pyarrow version still supports this older interface.
    server.init(location, **kwargs)
    print("Serving on", location)
    server.run()
# Script entry point.
if __name__ == '__main__':
    main()
|
dag_processing.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import importlib
import logging
import multiprocessing
import os
import re
import signal
import sys
import time
import zipfile
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from collections import namedtuple
from importlib import import_module
import enum
from typing import Optional
import psutil
from setproctitle import setproctitle
from tabulate import tabulate
# To avoid circular imports
import airflow.models
from airflow import configuration as conf
from airflow.dag.base_dag import BaseDag, BaseDagBag
from airflow.exceptions import AirflowException
from airflow.models import errors
from airflow.stats import Stats
from airflow.utils import timezone
from airflow.utils.helpers import reap_process_group
from airflow.utils.db import provide_session
from airflow.utils.log.logging_mixin import LoggingMixin
class SimpleDag(BaseDag):
    """
    A simplified representation of a DAG that contains all attributes
    required for instantiating and scheduling its associated tasks.
    """

    def __init__(self, dag, pickle_id=None):
        """
        :param dag: the DAG
        :type dag: airflow.models.DAG
        :param pickle_id: ID associated with the pickled version of this DAG.
        :type pickle_id: unicode
        """
        self._dag_id = dag.dag_id
        self._task_ids = [task.task_id for task in dag.tasks]
        self._full_filepath = dag.full_filepath
        self._is_paused = dag.is_paused
        self._concurrency = dag.concurrency
        self._pickle_id = pickle_id
        # Per-task overrides keyed by task_id; only populated for tasks
        # that actually set one of the special arguments.
        self._task_special_args = {}
        for task in dag.tasks:
            special_args = {}
            if task.task_concurrency is not None:
                special_args['task_concurrency'] = task.task_concurrency
            if special_args:
                self._task_special_args[task.task_id] = special_args

    @property
    def dag_id(self):
        """
        :return: the DAG ID
        :rtype: unicode
        """
        return self._dag_id

    @property
    def task_ids(self):
        """
        :return: A list of task IDs that are in this DAG
        :rtype: list[unicode]
        """
        return self._task_ids

    @property
    def full_filepath(self):
        """
        :return: The absolute path to the file that contains this DAG's definition
        :rtype: unicode
        """
        return self._full_filepath

    @property
    def concurrency(self):
        """
        :return: maximum number of tasks that can run simultaneously from this DAG
        :rtype: int
        """
        return self._concurrency

    @property
    def is_paused(self):
        """
        :return: whether this DAG is paused or not
        :rtype: bool
        """
        return self._is_paused

    @property
    def pickle_id(self):
        """
        :return: The pickle ID for this DAG, if it has one. Otherwise None.
        :rtype: unicode
        """
        return self._pickle_id

    @property
    def task_special_args(self):
        """Mapping of task_id to its dict of special-argument overrides."""
        return self._task_special_args

    def get_task_special_arg(self, task_id, special_arg_name):
        """Return the named special argument for *task_id*, or None."""
        # Chained .get() collapses the original nested membership checks;
        # missing task or missing argument both yield None as before.
        return self._task_special_args.get(task_id, {}).get(special_arg_name)
class SimpleTaskInstance:
    """Lightweight, picklable snapshot of a TaskInstance's scheduling fields."""

    def __init__(self, ti):
        self._dag_id = ti.dag_id
        self._task_id = ti.task_id
        self._execution_date = ti.execution_date
        self._start_date = ti.start_date
        self._end_date = ti.end_date
        self._try_number = ti.try_number
        self._state = ti.state
        self._executor_config = ti.executor_config
        # The following attributes may be absent on some TaskInstance
        # variants, so fall back to None instead of raising.
        if hasattr(ti, 'run_as_user'):
            self._run_as_user = ti.run_as_user
        else:
            self._run_as_user = None
        if hasattr(ti, 'pool'):
            self._pool = ti.pool
        else:
            self._pool = None
        if hasattr(ti, 'priority_weight'):
            self._priority_weight = ti.priority_weight
        else:
            self._priority_weight = None
        self._queue = ti.queue
        self._key = ti.key

    # Read-only accessors for the captured fields.
    @property
    def dag_id(self):
        return self._dag_id

    @property
    def task_id(self):
        return self._task_id

    @property
    def execution_date(self):
        return self._execution_date

    @property
    def start_date(self):
        return self._start_date

    @property
    def end_date(self):
        return self._end_date

    @property
    def try_number(self):
        return self._try_number

    @property
    def state(self):
        return self._state

    @property
    def pool(self):
        return self._pool

    @property
    def priority_weight(self):
        return self._priority_weight

    @property
    def queue(self):
        return self._queue

    @property
    def key(self):
        return self._key

    @property
    def executor_config(self):
        return self._executor_config

    @provide_session
    def construct_task_instance(self, session=None, lock_for_update=False):
        """
        Construct a TaskInstance from the database based on the primary key

        :param session: DB session.
        :param lock_for_update: if True, indicates that the database should
            lock the TaskInstance (issuing a FOR UPDATE clause) until the
            session is committed.
        """
        TI = airflow.models.TaskInstance
        # (dag_id, task_id, execution_date) is the TaskInstance primary key.
        qry = session.query(TI).filter(
            TI.dag_id == self._dag_id,
            TI.task_id == self._task_id,
            TI.execution_date == self._execution_date)
        if lock_for_update:
            ti = qry.with_for_update().first()
        else:
            ti = qry.first()
        return ti
class SimpleDagBag(BaseDagBag):
    """
    A collection of SimpleDag objects with some convenience methods.
    """

    def __init__(self, simple_dags):
        """
        Constructor.
        :param simple_dags: SimpleDag objects that should be in this
        :type list(airflow.utils.dag_processing.SimpleDagBag)
        """
        self.simple_dags = simple_dags
        # Index by DAG ID for O(1) lookups in get_dag().
        self.dag_id_to_simple_dag = {sd.dag_id: sd for sd in simple_dags}

    @property
    def dag_ids(self):
        """
        :return: IDs of all the DAGs in this
        :rtype: list[unicode]
        """
        return self.dag_id_to_simple_dag.keys()

    def get_dag(self, dag_id):
        """
        Look up the SimpleDag for *dag_id*.

        :param dag_id: DAG ID
        :type dag_id: unicode
        :return: the SimpleDag with the given ID
        :raises AirflowException: if the DAG ID is not in the bag
        :rtype: airflow.utils.dag_processing.SimpleDag
        """
        if dag_id not in self.dag_id_to_simple_dag:
            raise AirflowException("Unknown DAG ID {}".format(dag_id))
        return self.dag_id_to_simple_dag[dag_id]
def correct_maybe_zipped(fileloc):
    """
    If the path contains a folder with a .zip suffix, then
    the folder is treated as a zip archive and path to zip is returned.
    """
    # Greedily capture everything up to the last ".zip<sep>" component;
    # group(2) is the candidate archive path (None when no ".zip" folder).
    match = re.search(r'((.*\.zip){})?(.*)'.format(re.escape(os.sep)), fileloc)
    archive = match.group(2)
    if archive and zipfile.is_zipfile(archive):
        return archive
    return fileloc
# Matches trailing "# ..." comments on .airflowignore lines so they can be
# stripped before the remainder is compiled as a regex pattern.
COMMENT_PATTERN = re.compile(r"\s*#.*")
def list_py_file_paths(directory, safe_mode=conf.getboolean('core', 'DAG_DISCOVERY_SAFE_MODE', fallback=True),
                       include_examples=None):
    """
    Traverse a directory and look for Python files.
    :param directory: the directory to traverse
    :type directory: unicode
    :param safe_mode: whether to use a heuristic to determine whether a file
        contains Airflow DAG definitions. If not provided, use the
        core.DAG_DISCOVERY_SAFE_MODE configuration setting. If not set, default
        to safe.
    :param include_examples: whether to append Airflow's bundled example DAGs;
        if None, falls back to the core.LOAD_EXAMPLES configuration setting.
    :type include_examples: bool
    :return: a list of paths to Python files in the specified directory
    :rtype: list[unicode]
    """
    # NOTE(review): the safe_mode default is evaluated once at import time
    # (when this module is first loaded), not per call.
    if include_examples is None:
        include_examples = conf.getboolean('core', 'LOAD_EXAMPLES')
    file_paths = []
    if directory is None:
        return []
    elif os.path.isfile(directory):
        # A single file was passed in; return it unfiltered.
        return [directory]
    elif os.path.isdir(directory):
        # Maps a directory to the .airflowignore patterns that apply to it
        # (its own plus all inherited from ancestor directories).
        patterns_by_dir = {}
        for root, dirs, files in os.walk(directory, followlinks=True):
            patterns = patterns_by_dir.get(root, [])
            ignore_file = os.path.join(root, '.airflowignore')
            if os.path.isfile(ignore_file):
                with open(ignore_file, 'r') as file:
                    # If we have new patterns create a copy so we don't change
                    # the previous list (which would affect other subdirs)
                    lines_no_comments = [COMMENT_PATTERN.sub("", line) for line in file.read().split("\n")]
                    patterns += [re.compile(line) for line in lines_no_comments if line]
            # If we can ignore any subdirs entirely we should - fewer paths
            # to walk is better. We have to modify the ``dirs`` array in
            # place for this to affect os.walk
            dirs[:] = [
                d
                for d in dirs
                if not any(p.search(os.path.join(root, d)) for p in patterns)
            ]
            # We want patterns defined in a parent folder's .airflowignore to
            # apply to subdirs too
            for d in dirs:
                patterns_by_dir[os.path.join(root, d)] = patterns
            for f in files:
                try:
                    file_path = os.path.join(root, f)
                    if not os.path.isfile(file_path):
                        continue
                    # Only consider .py files and zip archives (packaged DAGs).
                    _, file_ext = os.path.splitext(os.path.split(file_path)[-1])
                    if file_ext != '.py' and not zipfile.is_zipfile(file_path):
                        continue
                    # Skip files matching any inherited .airflowignore pattern.
                    if any([re.findall(p, file_path) for p in patterns]):
                        continue
                    # Heuristic that guesses whether a Python file contains an
                    # Airflow DAG definition.
                    might_contain_dag = True
                    if safe_mode and not zipfile.is_zipfile(file_path):
                        with open(file_path, 'rb') as fp:
                            content = fp.read()
                            might_contain_dag = all(
                                [s in content for s in (b'DAG', b'airflow')])
                    if not might_contain_dag:
                        continue
                    file_paths.append(file_path)
                except Exception:
                    # Best-effort: a single unreadable file should not abort
                    # discovery for the rest of the directory tree.
                    log = LoggingMixin().log
                    log.exception("Error while examining %s", f)
    if include_examples:
        import airflow.example_dags
        example_dag_folder = airflow.example_dags.__path__[0]
        # Examples are trusted, so bypass safe-mode filtering for them.
        file_paths.extend(list_py_file_paths(example_dag_folder, safe_mode, False))
    return file_paths
class AbstractDagFileProcessor(metaclass=ABCMeta):
    """
    Interface for a worker that processes a single DAG definition file.
    See SchedulerJob.process_file() for more details.
    """

    @abstractmethod
    def start(self):
        """Launch the process to process the file."""
        raise NotImplementedError()

    @abstractmethod
    def terminate(self, sigkill=False):
        """Terminate (and then kill) the process launched to process the file."""
        raise NotImplementedError()

    @property
    @abstractmethod
    def pid(self):
        """PID of the process launched to process the given file."""
        raise NotImplementedError()

    @property
    @abstractmethod
    def exit_code(self):
        """Exit code of the process, available once it has finished.

        :rtype: int
        """
        raise NotImplementedError()

    @property
    @abstractmethod
    def done(self):
        """Whether the process launched to process this file has finished.

        :rtype: bool
        """
        raise NotImplementedError()

    @property
    @abstractmethod
    def result(self):
        """Result of running SchedulerJob.process_file().

        :rtype: list[airflow.utils.dag_processing.SimpleDag]
        """
        raise NotImplementedError()

    @property
    @abstractmethod
    def start_time(self):
        """When this processor started working on the file.

        :rtype: datetime
        """
        raise NotImplementedError()

    @property
    @abstractmethod
    def file_path(self):
        """Path to the file that this processor is processing.

        :rtype: unicode
        """
        raise NotImplementedError()
DagParsingStat = namedtuple('DagParsingStat', ['file_paths', 'done', 'all_files_processed'])
class DagParsingSignal(enum.Enum):
    """Control signals exchanged between the agent and the manager process."""

    AGENT_HEARTBEAT = 'agent_heartbeat'
    TERMINATE_MANAGER = 'terminate_manager'
    END_MANAGER = 'end_manager'
class DagFileProcessorAgent(LoggingMixin):
    """
    Agent for DAG file processing. It is responsible for all DAG parsing
    related jobs in scheduler process. Mainly it can spin up DagFileProcessorManager
    in a subprocess, collect DAG parsing results from it and communicate
    signal/DAG parsing stat with it.
    This class runs in the main `airflow scheduler` process.
    """
    def __init__(self,
                 dag_directory,
                 file_paths,
                 max_runs,
                 processor_factory,
                 processor_timeout,
                 async_mode):
        """
        :param dag_directory: Directory where DAG definitions are kept. All
        files in file_paths should be under this directory
        :type dag_directory: unicode
        :param file_paths: list of file paths that contain DAG definitions
        :type file_paths: list[unicode]
        :param max_runs: The number of times to parse and schedule each file. -1
        for unlimited.
        :type max_runs: int
        :param processor_factory: function that creates processors for DAG
        definition files. Arguments are (dag_definition_path, log_file_path)
        :type processor_factory: (unicode, unicode, list) -> (AbstractDagFileProcessor)
        :param processor_timeout: How long to wait before timing out a DAG file processor
        :type processor_timeout: timedelta
        :param async_mode: Whether to start agent in async mode
        :type async_mode: bool
        """
        self._file_paths = file_paths
        self._file_path_queue = []
        self._dag_directory = dag_directory
        self._max_runs = max_runs
        self._processor_factory = processor_factory
        self._processor_timeout = processor_timeout
        self._async_mode = async_mode
        # NOTE(review): the four maps below mirror DagFileProcessorManager's
        # bookkeeping but are never read within this class — possibly
        # vestigial; confirm before removing.
        # Map from file path to the processor
        self._processors = {}
        # Map from file path to the last runtime
        self._last_runtime = {}
        # Map from file path to the last finish time
        self._last_finish_time = {}
        # Map from file path to the number of runs
        self._run_count = defaultdict(int)
        # Pipe for communicating signals
        self._process = None
        self._done = False
        # Initialized as true so we do not deactivate w/o any actual DAG parsing.
        self._all_files_processed = True
        self._parent_signal_conn = None
        # SimpleDags received from the manager, buffered until the scheduler
        # collects them via harvest_simple_dags().
        self._collected_dag_buffer = []
    def start(self):
        """
        Launch DagFileProcessorManager processor and start DAG parsing loop in manager.
        """
        # The pipe must be created before the fork so both ends exist in the
        # child; the agent keeps the parent end, the manager gets the child end.
        self._parent_signal_conn, child_signal_conn = multiprocessing.Pipe()
        self._process = multiprocessing.Process(
            target=type(self)._run_processor_manager,
            args=(
                self._dag_directory,
                self._file_paths,
                self._max_runs,
                self._processor_factory,
                self._processor_timeout,
                child_signal_conn,
                self._async_mode,
            )
        )
        self._process.start()
        self.log.info("Launched DagFileProcessorManager with pid: %s", self._process.pid)
    def heartbeat(self):
        """
        Should only be used when launched DAG file processor manager in sync mode.
        Send agent heartbeat signal to the manager, requesting that it runs one
        processing "loop".
        Call wait_until_finished to ensure that any launched processors have
        finished before continuing
        """
        if not self._process.is_alive():
            return
        try:
            self._parent_signal_conn.send(DagParsingSignal.AGENT_HEARTBEAT)
        except ConnectionError:
            # If the manager died because of an error, we will notice and
            # restart it when harvest_simple_dags calls _heartbeat_manager.
            pass
    def wait_until_finished(self):
        # Drain the pipe until the manager's end-of-iteration DagParsingStat
        # arrives (in sync mode it is only sent once all processors finish).
        while self._parent_signal_conn.poll():
            try:
                result = self._parent_signal_conn.recv()
            except EOFError:
                break
            self._process_message(result)
            if isinstance(result, DagParsingStat):
                # In sync mode we don't send this message from the Manager
                # until all the running processors have finished
                return
    @staticmethod
    def _run_processor_manager(dag_directory,
                               file_paths,
                               max_runs,
                               processor_factory,
                               processor_timeout,
                               signal_conn,
                               async_mode):
        # Entry point executed inside the forked manager process.
        # Make this process start as a new process group - that makes it easy
        # to kill all sub-process of this at the OS-level, rather than having
        # to iterate the child processes
        os.setpgid(0, 0)
        setproctitle("airflow scheduler -- DagFileProcessorManager")
        # Reload configurations and settings to avoid collision with parent process.
        # Because this process may need custom configurations that cannot be shared,
        # e.g. RotatingFileHandler. And it can cause connection corruption if we
        # do not recreate the SQLA connection pool.
        os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER'] = 'True'
        os.environ['AIRFLOW__CORE__COLORED_CONSOLE_LOG'] = 'False'
        # Replicating the behavior of how logging module was loaded
        # in logging_config.py
        importlib.reload(import_module(airflow.settings.LOGGING_CLASS_PATH.rsplit('.', 1)[0]))
        importlib.reload(airflow.settings)
        airflow.settings.initialize()
        del os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER']
        processor_manager = DagFileProcessorManager(dag_directory,
                                                    file_paths,
                                                    max_runs,
                                                    processor_factory,
                                                    processor_timeout,
                                                    signal_conn,
                                                    async_mode)
        # Blocks until the manager's parsing loop exits.
        processor_manager.start()
    def harvest_simple_dags(self):
        """
        Harvest DAG parsing results from result queue and sync metadata from stat queue.
        :return: List of parsing result in SimpleDag format.
        """
        # Receive any pending messages before checking if the process has exited.
        while self._parent_signal_conn.poll():
            try:
                result = self._parent_signal_conn.recv()
            except (EOFError, ConnectionError):
                break
            self._process_message(result)
        # Hand over everything buffered so far and reset the buffer.
        simple_dags = self._collected_dag_buffer
        self._collected_dag_buffer = []
        # If it died unexpectedly restart the manager process
        self._heartbeat_manager()
        return simple_dags
    def _process_message(self, message):
        # Messages are either DagParsingStat (metadata) or a parsing result
        # (buffered until harvested).
        self.log.debug("Received message of type %s", type(message).__name__)
        if isinstance(message, DagParsingStat):
            self._sync_metadata(message)
        else:
            self._collected_dag_buffer.append(message)
    def _heartbeat_manager(self):
        """
        Heartbeat DAG file processor and restart it if we are not done.
        """
        if self._process and not self._process.is_alive():
            # join(timeout=0) reaps the dead child without blocking.
            self._process.join(timeout=0)
            if not self.done:
                self.log.warning(
                    "DagFileProcessorManager (PID=%d) exited with exit code %d - re-launching",
                    self._process.pid, self._process.exitcode
                )
                self.start()
    def _sync_metadata(self, stat):
        """
        Sync metadata from stat queue and only keep the latest stat.
        """
        self._file_paths = stat.file_paths
        self._done = stat.done
        self._all_files_processed = stat.all_files_processed
    @property
    def file_paths(self):
        # File paths as last reported by the manager.
        return self._file_paths
    @property
    def done(self):
        # True once the manager reports it has reached max_runs.
        return self._done
    @property
    def all_files_processed(self):
        # True once every known file has been parsed at least once.
        return self._all_files_processed
    def terminate(self):
        """
        Send termination signal to DAG parsing processor manager
        and expect it to terminate all DAG file processors.
        """
        if self._process and self._process.is_alive():
            self.log.info("Sending termination message to manager.")
            self._parent_signal_conn.send(DagParsingSignal.TERMINATE_MANAGER)
    def end(self):
        """
        Terminate (and then kill) the manager process launched.
        :return:
        """
        if not self._process:
            self.log.warning('Ending without manager process.')
            return
        # Kills the whole process group the manager created via os.setpgid.
        reap_process_group(self._process.pid, log=self.log)
        self._parent_signal_conn.close()
class DagFileProcessorManager(LoggingMixin):
"""
Given a list of DAG definition files, this kicks off several processors
in parallel to process them and put the results to a multiprocessing.Queue
for DagFileProcessorAgent to harvest. The parallelism is limited and as the
processors finish, more are launched. The files are processed over and
over again, but no more often than the specified interval.
:type _file_path_queue: list[unicode]
:type _processors: dict[unicode, AbstractDagFileProcessor]
:type _last_runtime: dict[unicode, float]
:type _last_finish_time: dict[unicode, datetime.datetime]
"""
def __init__(self,
dag_directory,
file_paths,
max_runs,
processor_factory,
processor_timeout,
signal_conn,
async_mode=True):
"""
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:type dag_directory: unicode
:param file_paths: list of file paths that contain DAG definitions
:type file_paths: list[unicode]
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:type max_runs: int
:param processor_factory: function that creates processors for DAG
definition files. Arguments are (dag_definition_path)
:type processor_factory: (unicode, unicode, list) -> (AbstractDagFileProcessor)
:param processor_timeout: How long to wait before timing out a DAG file processor
:type processor_timeout: timedelta
:param signal_conn: connection to communicate signal with processor agent.
:type signal_conn: airflow.models.connection.Connection
:param async_mode: whether to start the manager in async mode
:type async_mode: bool
"""
self._file_paths = file_paths
self._file_path_queue = []
self._dag_directory = dag_directory
self._max_runs = max_runs
self._processor_factory = processor_factory
self._signal_conn = signal_conn
self._async_mode = async_mode
self._parallelism = conf.getint('scheduler', 'max_threads')
if 'sqlite' in conf.get('core', 'sql_alchemy_conn') and self._parallelism > 1:
self.log.error("Cannot use more than 1 thread when using sqlite. "
"Setting parallelism to 1")
self._parallelism = 1
# Parse and schedule each file no faster than this interval.
self._file_process_interval = conf.getint('scheduler',
'min_file_process_interval')
# How often to print out DAG file processing stats to the log. Default to
# 30 seconds.
self.print_stats_interval = conf.getint('scheduler',
'print_stats_interval')
# Map from file path to the processor
self._processors = {}
# Map from file path to the last runtime
self._last_runtime = {}
# Map from file path to the last finish time
self._last_finish_time = {}
#self._last_zombie_query_time = timezone.utcnow()
# Last time that the DAG dir was traversed to look for files
self.last_dag_dir_refresh_time = timezone.utcnow()
# Last time stats were printed
self.last_stat_print_time = timezone.datetime(2000, 1, 1)
# TODO: Remove magic number
self._zombie_query_interval = 10
# Map from file path to the number of runs
self._run_count = defaultdict(int)
# Manager heartbeat key.
self._heart_beat_key = 'heart-beat'
# How long to wait before timing out a process to parse a DAG file
self._processor_timeout = processor_timeout
# How often to scan the DAGs directory for new files. Default to 5 minutes.
self.dag_dir_list_interval = conf.getint('scheduler',
'dag_dir_list_interval')
self._log = logging.getLogger('airflow.processor_manager')
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
def _exit_gracefully(self, signum, frame):
"""
Helper method to clean up DAG file processors to avoid leaving orphan processes.
"""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
self.terminate()
self.end()
self.log.debug("Finished terminating DAG processors.")
sys.exit(os.EX_OK)
def start(self):
"""
Use multiple processes to parse and generate tasks for the
DAGs in parallel. By processing them in separate processes,
we can get parallelism and isolation from potentially harmful
user code.
"""
self.log.info("Processing files using up to %s processes at a time ", self._parallelism)
self.log.info("Process each file at most once every %s seconds", self._file_process_interval)
self.log.info(
"Checking for new files in %s every %s seconds", self._dag_directory, self.dag_dir_list_interval
)
# In sync mode we want timeout=None -- wait forever until a message is received
poll_time = None # type: Optional[float]
if self._async_mode:
poll_time = 0.0
self.log.debug("Starting DagFileProcessorManager in async mode")
else:
poll_time = None
self.log.debug("Starting DagFileProcessorManager in sync mode")
while True:
loop_start_time = time.time()
if self._signal_conn.poll(poll_time):
agent_signal = self._signal_conn.recv()
self.log.debug("Recived %s singal from DagFileProcessorAgent", agent_signal)
if agent_signal == DagParsingSignal.TERMINATE_MANAGER:
self.terminate()
break
elif agent_signal == DagParsingSignal.END_MANAGER:
self.end()
sys.exit(os.EX_OK)
elif agent_signal == DagParsingSignal.AGENT_HEARTBEAT:
# continue the loop to parse dags
pass
elif not self._async_mode:
# In "sync" mode we don't want to parse the DAGs until we
# are told to (as that would open another connection to the
# SQLite DB which isn't a good practice
continue
self._refresh_dag_dir()
simple_dags = self.heartbeat()
for simple_dag in simple_dags:
self._signal_conn.send(simple_dag)
if not self._async_mode:
self.log.debug(
"Waiting for processors to finish since we're using sqlite")
# Wait until the running DAG processors are finished before
# sending a DagParsingStat message back. This means the Agent
# can tell we've got to the end of this iteration when it sees
# this type of message
self.wait_until_finished()
# Collect anything else that has finished, but don't kick off any more processors
simple_dags = self.collect_results()
for simple_dag in simple_dags:
self._signal_conn.send(simple_dag)
self._print_stat()
all_files_processed = all(self.get_last_finish_time(x) is not None
for x in self.file_paths)
max_runs_reached = self.max_runs_reached()
dag_parsing_stat = DagParsingStat(self._file_paths,
max_runs_reached,
all_files_processed,
)
self._signal_conn.send(dag_parsing_stat)
if max_runs_reached:
self.log.info("Exiting dag parsing loop as all files "
"have been processed %s times", self._max_runs)
break
if self._async_mode:
loop_duration = time.time() - loop_start_time
if loop_duration < 1:
poll_time = 1 - loop_duration
else:
poll_time = 0.0
def _refresh_dag_dir(self):
"""
Refresh file paths from dag dir if we haven't done it for too long.
"""
elapsed_time_since_refresh = (timezone.utcnow() -
self.last_dag_dir_refresh_time).total_seconds()
if elapsed_time_since_refresh > self.dag_dir_list_interval:
# Build up a list of Python files that could contain DAGs
self.log.info("Searching for files in %s", self._dag_directory)
self._file_paths = list_py_file_paths(self._dag_directory)
self.last_dag_dir_refresh_time = timezone.utcnow()
self.log.info("There are %s files in %s", len(self._file_paths), self._dag_directory)
self.set_file_paths(self._file_paths)
try:
self.log.debug("Removing old import errors")
self.clear_nonexistent_import_errors()
except Exception:
self.log.exception("Error removing old import errors")
def _print_stat(self):
"""
Occasionally print out stats about how fast the files are getting processed
"""
if ((timezone.utcnow() - self.last_stat_print_time).total_seconds() >
self.print_stats_interval):
if len(self._file_paths) > 0:
self._log_file_processing_stats(self._file_paths)
self.last_stat_print_time = timezone.utcnow()
@provide_session
def clear_nonexistent_import_errors(self, session):
"""
Clears import errors for files that no longer exist.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
"""
query = session.query(errors.ImportError)
if self._file_paths:
query = query.filter(
~errors.ImportError.filename.in_(self._file_paths)
)
query.delete(synchronize_session='fetch')
session.commit()
def _log_file_processing_stats(self, known_file_paths):
"""
Print out stats about how files are getting processed.
:param known_file_paths: a list of file paths that may contain Airflow
DAG definitions
:type known_file_paths: list[unicode]
:return: None
"""
# File Path: Path to the file containing the DAG definition
# PID: PID associated with the process that's processing the file. May
# be empty.
# Runtime: If the process is currently running, how long it's been
# running for in seconds.
# Last Runtime: If the process ran before, how long did it take to
# finish in seconds
# Last Run: When the file finished processing in the previous run.
headers = ["File Path",
"PID",
"Runtime",
"Last Runtime",
"Last Run"]
rows = []
for file_path in known_file_paths:
last_runtime = self.get_last_runtime(file_path)
file_name = os.path.basename(file_path)
file_name = os.path.splitext(file_name)[0].replace(os.sep, '.')
if last_runtime:
Stats.gauge(
'dag_processing.last_runtime.{}'.format(file_name),
last_runtime
)
processor_pid = self.get_pid(file_path)
processor_start_time = self.get_start_time(file_path)
runtime = ((timezone.utcnow() - processor_start_time).total_seconds()
if processor_start_time else None)
last_run = self.get_last_finish_time(file_path)
if last_run:
seconds_ago = (timezone.utcnow() - last_run).total_seconds()
Stats.gauge(
'dag_processing.last_run.seconds_ago.{}'.format(file_name),
seconds_ago
)
rows.append((file_path,
processor_pid,
runtime,
last_runtime,
last_run))
# Sort by longest last runtime. (Can't sort None values in python3)
rows = sorted(rows, key=lambda x: x[3] or 0.0)
formatted_rows = []
for file_path, pid, runtime, last_runtime, last_run in rows:
formatted_rows.append((file_path,
pid,
"{:.2f}s".format(runtime)
if runtime else None,
"{:.2f}s".format(last_runtime)
if last_runtime else None,
last_run.strftime("%Y-%m-%dT%H:%M:%S")
if last_run else None))
log_str = ("\n" +
"=" * 80 +
"\n" +
"DAG File Processing Stats\n\n" +
tabulate(formatted_rows, headers=headers) +
"\n" +
"=" * 80)
self.log.info(log_str)
@property
def file_paths(self):
return self._file_paths
def get_pid(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the PID of the process processing the given file or None if
the specified file is not being processed
:rtype: int
"""
if file_path in self._processors:
return self._processors[file_path].pid
return None
def get_all_pids(self):
"""
:return: a list of the PIDs for the processors that are running
:rtype: List[int]
"""
return [x.pid for x in self._processors.values()]
def get_runtime(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the current runtime (in seconds) of the process that's
processing the specified file or None if the file is not currently
being processed
"""
if file_path in self._processors:
return (timezone.utcnow() - self._processors[file_path].start_time)\
.total_seconds()
return None
def get_last_runtime(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the runtime (in seconds) of the process of the last run, or
None if the file was never processed.
:rtype: float
"""
return self._last_runtime.get(file_path)
def get_last_finish_time(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the finish time of the process of the last run, or None if the
file was never processed.
:rtype: datetime
"""
return self._last_finish_time.get(file_path)
def get_start_time(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the start time of the process that's processing the
specified file or None if the file is not currently being processed
:rtype: datetime
"""
if file_path in self._processors:
return self._processors[file_path].start_time
return None
def set_file_paths(self, new_file_paths):
"""
Update this with a new set of paths to DAG definition files.
:param new_file_paths: list of paths to DAG definition files
:type new_file_paths: list[unicode]
:return: None
"""
self._file_paths = new_file_paths
self._file_path_queue = [x for x in self._file_path_queue
if x in new_file_paths]
# Stop processors that are working on deleted files
filtered_processors = {}
for file_path, processor in self._processors.items():
if file_path in new_file_paths:
filtered_processors[file_path] = processor
else:
self.log.warning("Stopping processor for %s", file_path)
processor.terminate()
self._processors = filtered_processors
def wait_until_finished(self):
"""
Sleeps until all the processors are done.
"""
for processor in self._processors.values():
while not processor.done:
time.sleep(0.1)
def collect_results(self):
"""
Collect the result from any finished DAG processors
:return: a list of SimpleDags that were produced by processors that
have finished since the last time this was called
:rtype: list[airflow.utils.dag_processing.SimpleDag]
"""
self._kill_timed_out_processors()
finished_processors = {}
""":type : dict[unicode, AbstractDagFileProcessor]"""
running_processors = {}
""":type : dict[unicode, AbstractDagFileProcessor]"""
for file_path, processor in self._processors.items():
if processor.done:
self.log.debug("Processor for %s finished", file_path)
now = timezone.utcnow()
finished_processors[file_path] = processor
self._last_runtime[file_path] = (now -
processor.start_time).total_seconds()
self._last_finish_time[file_path] = now
self._run_count[file_path] += 1
else:
running_processors[file_path] = processor
self._processors = running_processors
self.log.debug("%s/%s DAG parsing processes running",
len(self._processors), self._parallelism)
self.log.debug("%s file paths queued for processing",
len(self._file_path_queue))
# Collect all the DAGs that were found in the processed files
simple_dags = []
for file_path, processor in finished_processors.items():
if processor.result is None:
self.log.warning(
"Processor for %s exited with return code %s.",
processor.file_path, processor.exit_code
)
else:
for simple_dag in processor.result:
simple_dags.append(simple_dag)
return simple_dags
def heartbeat(self):
"""
This should be periodically called by the manager loop. This method will
kick off new processes to process DAG definition files and read the
results from the finished processors.
:return: a list of SimpleDags that were produced by processors that
have finished since the last time this was called
:rtype: list[airflow.utils.dag_processing.SimpleDag]
"""
simple_dags = self.collect_results()
# Generate more file paths to process if we processed all the files
# already.
if len(self._file_path_queue) == 0:
# If the file path is already being processed, or if a file was
# processed recently, wait until the next batch
file_paths_in_progress = self._processors.keys()
now = timezone.utcnow()
file_paths_recently_processed = []
for file_path in self._file_paths:
last_finish_time = self.get_last_finish_time(file_path)
if (last_finish_time is not None and
(now - last_finish_time).total_seconds() <
self._file_process_interval):
file_paths_recently_processed.append(file_path)
files_paths_at_run_limit = [file_path
for file_path, num_runs in self._run_count.items()
if num_runs == self._max_runs]
files_paths_to_queue = list(set(self._file_paths) -
set(file_paths_in_progress) -
set(file_paths_recently_processed) -
set(files_paths_at_run_limit))
for file_path, processor in self._processors.items():
self.log.debug(
"File path %s is still being processed (started: %s)",
processor.file_path, processor.start_time.isoformat()
)
self.log.debug(
"Queuing the following files for processing:\n\t%s",
"\n\t".join(files_paths_to_queue)
)
self._file_path_queue.extend(files_paths_to_queue)
# Start more processors if we have enough slots and files to process
while (self._parallelism - len(self._processors) > 0 and
len(self._file_path_queue) > 0):
file_path = self._file_path_queue.pop(0)
processor = self._processor_factory(file_path)
processor.start()
self.log.debug(
"Started a process (PID: %s) to generate tasks for %s",
processor.pid, file_path
)
self._processors[file_path] = processor
# Update heartbeat count.
self._run_count[self._heart_beat_key] += 1
return simple_dags
def _kill_timed_out_processors(self):
"""
Kill any file processors that timeout to defend against process hangs.
"""
now = timezone.utcnow()
for file_path, processor in self._processors.items():
duration = now - processor.start_time
if duration > self._processor_timeout:
self.log.info(
"Processor for %s with PID %s started at %s has timed out, "
"killing it.",
processor.file_path, processor.pid, processor.start_time.isoformat())
Stats.incr('dag_file_processor_timeouts', 1, 1)
processor.kill()
def max_runs_reached(self):
"""
:return: whether all file paths have been processed max_runs times
"""
if self._max_runs == -1: # Unlimited runs.
return False
for file_path in self._file_paths:
if self._run_count[file_path] < self._max_runs:
return False
if self._run_count[self._heart_beat_key] < self._max_runs:
return False
return True
def terminate(self):
"""
Stops all running processors
:return: None
"""
for processor in self._processors.values():
processor.terminate()
def end(self):
    """
    Kill all child processes on exit since we don't want to leave
    them as orphaned.
    """
    pids_to_kill = self.get_all_pids()
    if len(pids_to_kill) > 0:
        # First try SIGTERM
        this_process = psutil.Process(os.getpid())
        # Only check child processes to ensure that we don't have a case
        # where we kill the wrong process because a child process died
        # but the PID got reused.
        child_processes = [x for x in this_process.children(recursive=True)
                           if x.is_running() and x.pid in pids_to_kill]
        for child in child_processes:
            self.log.info("Terminating child PID: %s", child.pid)
            child.terminate()
        # TODO: Remove magic number
        timeout = 5
        self.log.info("Waiting up to %s seconds for processes to exit...", timeout)
        try:
            psutil.wait_procs(
                child_processes, timeout=timeout,
                callback=lambda x: self.log.info('Terminated PID %s', x.pid))
        except psutil.TimeoutExpired:
            self.log.debug("Ran out of time while waiting for processes to exit")
        # Then SIGKILL any survivors. Re-enumerate children, since some may
        # have exited during the SIGTERM grace period above.
        child_processes = [x for x in this_process.children(recursive=True)
                           if x.is_running() and x.pid in pids_to_kill]
        if len(child_processes) > 0:
            self.log.info("SIGKILL processes that did not terminate gracefully")
            for child in child_processes:
                self.log.info("Killing child PID: %s", child.pid)
                child.kill()
                # Reap the killed child so it does not linger as a zombie.
                child.wait()
|
sauce.py | import csv
import os
import subprocess
import threading
# Gather the packages to test.
PREFIX = './packages/node_modules/'
CISCOSPARK = os.path.join(PREFIX, '@ciscospark')
WEBEX = os.path.join(PREFIX, '@webex')

# Environment variables for runs against the production services.
PROD_ENV_VARS = {
    # 'ACL_SERVICE_URL': 'https://acl-a.wbx2.com/acl/api/v1', ?
    'ATLAS_SERVICE_URL': 'https://atlas-a.wbx2.com/admin/api/v1',
    'CONVERSATION_SERVICE': 'https://conv-a.wbx2.com/conversation/api/v1',
    'ENCRYPTION_SERVICE_URL': 'https://encryption-a.wbx2.com',
    'IDBROKER_BASE_URL': 'https://idbroker.webex.com',
    'IDENTITY_BASE_URL': 'https://identity.webex.com',
    'WDM_SERVICE_URL': 'https://wdm-a.wbx2.com/wdm/api/v1',
    # Logging
    'ENABLE_VERBOSE_NETWORK_LOGGING': 'true',
    # Enable CI for Sauce Labs
    'CI': 'true'
}

# Environment variables for runs against the integration services.
INT_ENV_VARS = {
    # Environments
    'ACL_SERVICE_URL': 'https://acl-intb.ciscospark.com/acl/api/v1',
    'ATLAS_SERVICE_URL': 'https://atlas-intb.ciscospark.com/admin/api/v1',
    'CONVERSATION_SERVICE': 'https://conversation-intb.ciscospark.com/conversation/api/v1',
    'ENCRYPTION_SERVICE_URL': 'https://encryption-intb.ciscospark.com/encryption/api/v1',
    # Do not use 'https://hydra-intb.ciscospark.com/v1' for Hydra. CI expects 'apialpha'.
    'HYDRA_SERVICE_URL': 'https://apialpha.ciscospark.com/v1/',
    'IDBROKER_BASE_URL': 'https://idbrokerbts.webex.com',
    'IDENTITY_BASE_URL': 'https://identitybts.webex.com',
    'WDM_SERVICE_URL': 'https://wdm-intb.ciscospark.com/wdm/api/v1',
    'WHISTLER_API_SERVICE_URL': 'https://whistler.onint.ciscospark.com/api/v1',
    # Logging
    'ENABLE_VERBOSE_NETWORK_LOGGING': 'true',
    # Enable CI for Sauce Labs
    'CI': 'true'
}

OUTPUT_DIR = 'output'
OUTPUT_FILE_PATH = os.path.join(OUTPUT_DIR, 'test-comparison.csv')
TEST_COMMAND = 'npm run sauce:run -- npm test -- --packages %s'

# Packages with no tests to run.
# BUG FIX: the original list was missing the comma after the first entry,
# so Python implicitly concatenated the first two strings into one bogus
# package name and the list had only 2 elements instead of 3.
SKIP_PACKAGES = [
    '@webex/test-helper-server',  # no tests
    '@webex/internal-plugin-calendar',  # no tests
    '@webex/plugin-webhooks'  # no tests
]
def should_include_package(path_name, name):
    """Return True if *name* under *path_name* is a package directory we test."""
    candidate = os.path.join(path_name, name)
    scoped = os.path.join(os.path.basename(path_name), name)
    return os.path.isdir(candidate) and scoped not in SKIP_PACKAGES
def get_package_names(path_name):
    """List scoped package names (e.g. '@webex/foo') found under *path_name*."""
    scope = path_name.replace(PREFIX, '')
    names = []
    for entry in os.listdir(path_name):
        if should_include_package(path_name, entry):
            names.append(os.path.join(scope, entry))
    return names
def run_subprocess(bash_command, env_vars):
    """Run *bash_command* with the current env extended by *env_vars*.

    Returns the child's exit code (0 on success); stdout is captured and
    discarded.
    """
    child_env = dict(os.environ)
    child_env.update(env_vars)
    proc = subprocess.Popen(bash_command.split(), stdout=subprocess.PIPE, env=child_env)
    proc.communicate()
    return proc.returncode  # , output, error
class bcolors:
    """ANSI escape sequences used to colorize terminal output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def print_result(return_code, prefix='Tests are a...'):
    """Print a colored pass/fail line; *return_code* 0 means success."""
    succeeded = return_code == 0
    color = bcolors.OKGREEN if succeeded else bcolors.FAIL
    outcome = 'success.' if succeeded else 'failure.'
    print(color + prefix + outcome + bcolors.ENDC)
def run_test(package, environment):
    """Run the test suite for *package* against *environment*.

    Args:
        package: scoped package name, e.g. '@webex/foo'.
        environment: 'integration' or 'production'.

    Returns:
        The subprocess exit code (0 on success).
    """
    # BUG FIX: the original used `environment is 'integration'`, which
    # compares object identity, not value; string-literal identity is an
    # implementation detail (and a SyntaxWarning on modern CPython).
    env_vars = INT_ENV_VARS if environment == 'integration' else PROD_ENV_VARS
    print(bcolors.OKBLUE + 'Testing `%s` on %s...' % (package, environment) + bcolors.ENDC)
    bash_command = TEST_COMMAND % package
    return_code = run_subprocess(bash_command, env_vars)
    print_result(return_code, prefix='Testing `%s` on %s...' % (package, environment))
    return return_code
def run_env_tests(package, writer, csv_file):
    """Test *package* on both environments and record the exit codes as a CSV row."""
    prod_code = run_test(package, 'production')
    int_code = run_test(package, 'integration')
    writer.writerow([package, prod_code, int_code])
    # Flush so partial results survive an aborted run.
    csv_file.flush()
def run_tests_in_sequence(packages, writer, csv_file):
    """Run the per-package environment tests one package at a time."""
    for pkg in packages:
        run_env_tests(pkg, writer, csv_file)
def run_tests_in_parallel(packages, writer, csv_file):
    """Run the per-package environment tests concurrently, one thread per package."""
    workers = [threading.Thread(target=run_env_tests, args=(pkg, writer, csv_file))
               for pkg in packages]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
def main():
    """Discover packages, run their suites, and write a result-comparison CSV."""
    ciscospark_packages = get_package_names(CISCOSPARK)
    webex_packages = get_package_names(WEBEX)
    packages = ciscospark_packages + webex_packages
    print('Skipping %d packages: %s' % (len(SKIP_PACKAGES), ', '.join(SKIP_PACKAGES)))
    print('Testing %d packages...' % len(packages))
    try:
        os.mkdir(OUTPUT_DIR)
    except OSError:
        # Output directory already exists; reuse it.
        pass
    # BUG FIX: the original opened the CSV in binary mode ('wb'). On
    # Python 3 (this file uses print() as a function) csv.writer writes
    # str, so every writerow() raised TypeError. The csv module requires
    # text mode with newline=''.
    with open(OUTPUT_FILE_PATH, 'w', newline='') as csv_file:
        writer = csv.writer(csv_file, quoting=csv.QUOTE_MINIMAL)
        writer.writerow(['Package', 'Production exit code', 'Integration exit code'])
        run_tests_in_sequence(packages, writer, csv_file)
    print('Wrote output to: %s' % OUTPUT_FILE_PATH)
    print('Done.')


if __name__ == "__main__":
    main()
|
brownian.py | # Brownian motion -- an example of a multi-threaded Tkinter program.
from tkinter import *
import random
import threading
import time
import sys
WIDTH = 400    # canvas width in pixels
HEIGHT = 300   # canvas height in pixels
SIGMA = 10     # std-dev of a particle's initial position around the center
BUZZ = 2       # std-dev of each random step
RADIUS = 2     # particle radius in pixels
LAMBDA = 10    # rate parameter of the exponential inter-step delay
FILL = 'red'   # particle color
stop = 0                                # Set when main loop exits
def particle(canvas):
    """Animate one Brownian particle on *canvas* until the global stop flag is set."""
    radius = RADIUS
    start_x = random.gauss(WIDTH/2.0, SIGMA)
    start_y = random.gauss(HEIGHT/2.0, SIGMA)
    oval = canvas.create_oval(start_x-radius, start_y-radius,
                              start_x+radius, start_y+radius, fill=FILL)
    while not stop:
        step_x = random.gauss(0, BUZZ)
        step_y = random.gauss(0, BUZZ)
        delay = random.expovariate(LAMBDA)
        try:
            canvas.move(oval, step_x, step_y)
        except TclError:
            # The window was destroyed; end this particle's thread.
            break
        time.sleep(delay)
def main():
    """Create the window, spawn one thread per particle, and run the Tk main loop."""
    global stop
    root = Tk()
    canvas = Canvas(root, width=WIDTH, height=HEIGHT)
    canvas.pack(fill='both', expand=1)
    # Number of particles; may be overridden by the first CLI argument.
    np = 30
    if sys.argv[1:]:
        np = int(sys.argv[1])
    for i in range(np):
        t = threading.Thread(target=particle, args=(canvas,))
        t.start()
    try:
        root.mainloop()
    finally:
        # Signal all particle threads to exit once the GUI loop ends.
        stop = 1
main()
|
pc2pc.py | from enum import Enum
from os import system
from threading import Thread
import numpy as np
import pygame, time
from pygame.locals import *
pygame.init()
class Mode(Enum):
    """Phase of a turn: pick a cell, rotate a quadrant, or let the solver move."""
    Select = 1
    Rotate = 2
    Solve = 3
class User(Enum):
    """Owner of a board cell: empty, machine player 1, or machine player 2."""
    Null = 0
    PC1 = 1
    PC2 = 2

# Path to the external solver binary invoked by solve().
SOLVER = "solvers/hashsolver.o"
def draw():
    """Redraw the background, the quadrant divider lines, and every board cell."""
    disp.fill((0,150,255))
    pygame.draw.line(disp, (0,0,0), (W//2,0), (W//2,W), 5)
    pygame.draw.line(disp, (0,0,0), (0,W//2), (W,W//2), 5)
    for col in range(6):
        for row in range(6):
            owner = board[col, row]
            pygame.draw.circle(disp, cmap[owner], (ax[col], ay[row]), r)
def rotdraw():
    """Overlay the left/right rotation-arrow images beside each of the 4 quadrants."""
    img_w, img_h = rimg.get_size()
    for qx in range(2):
        for qy in range(2):
            left_x = rx[qx*2]
            right_x = rx[qx*2+1]
            mid_y = ry[qy]
            disp.blit(limg, (left_x - img_w//2, mid_y - img_h//2))
            disp.blit(rimg, (right_x - img_w//2, mid_y - img_h//2))
def getcell(xm, ym):
    """Return (col, row) of the empty cell under mouse position (*xm*, *ym*), else None."""
    sq_dx = (ax - xm)**2
    sq_dy = (ay - ym)**2
    for col in range(6):
        for row in range(6):
            inside_circle = sq_dx[col] + sq_dy[row] <= r**2
            if inside_circle and board[col, row] == User.Null:
                return col, row
    return None
def getrot(xm, ym):
    """Map a click at (*xm*, *ym*) onto a rotation control.

    Returns (quadrant_x, quadrant_y, direction) where direction is +1 for
    the first arrow position and -1 for the second, or None when the click
    hits no arrow.
    """
    img_w, img_h = rimg.get_size()
    for qx in range(2):
        for qy in range(2):
            first_x = rx[qx*2]
            second_x = rx[qx*2+1]
            mid_y = ry[qy]
            if not (mid_y - img_h//2 <= ym <= mid_y + img_h//2):
                continue
            if first_x - img_w//2 <= xm <= first_x + img_w//2:
                return qx, qy, +1
            if second_x - img_w//2 <= xm <= second_x + img_w//2:
                return qx, qy, -1
    return None
def solve(cur):
    """Run the external board solver for player *cur* and apply its move.

    Serializes the board to log/in.txt ('X' = current player, 'O' = opponent,
    '_' = empty), shells out to SOLVER, then reads the points triple and the
    updated board back from log/out.txt.
    """
    global board, points, last
    cp = np.zeros((6, 6), dtype=str)
    cp[board==User.Null] = '_'
    # The solver always plays 'X'; map the enum players accordingly.
    if cur == User.PC1:
        cp[board==User.PC1] = 'X'
        cp[board==User.PC2] = 'O'
    else:
        cp[board==User.PC2] = 'X'
        cp[board==User.PC1] = 'O'
    np.savetxt("log/in.txt", cp, fmt = '%s', delimiter=' ')
    cmd = "./" + SOLVER + "< ./log/in.txt > ./log/out.txt"
    system(cmd)
    file = open("log/out.txt", "r")
    data = file.readlines()
    # First output line: the points/result triple; next six lines: the board.
    points = list(map(int, data[0].split()))
    for i in range(1, 7):
        cp = np.array(data[i].split(), dtype=str)
        board[i-1, cp=='_'] = User.Null
        # Translate the solver's X/O back into the enum for player *cur*.
        if cur == User.PC1:
            board[i-1, cp=='X'] = User.PC1
            board[i-1, cp=='O'] = User.PC2
        else:
            board[i-1, cp=='X'] = User.PC2
            board[i-1, cp=='O'] = User.PC1
    return
def wait():
    """Show a red 'thinking' marker at the board center and pause for one second."""
    center = (W//2, W//2)
    pygame.draw.circle(disp, (255,0,0), center, 50)
    pygame.display.update()
    time.sleep(1)
# Draw color for each cell owner.
cmap = {User.Null : (0,0,200),
        User.PC1 : (255,255,255),
        User.PC2: (0,0,0)}
W = 600   # window is W x W pixels
r = 30    # cell circle radius in pixels
rimg = pygame.image.load("img/iright.png")
limg = pygame.image.load("img/ileft.png")
disp = pygame.display.set_mode((W, W))
font = pygame.font.Font(None, 128)
clock = pygame.time.Clock()
# 6x6 board of User values, initially all empty.
board = np.zeros((6,6), dtype=User)
board[:][:] = User.Null
last = None   # board snapshot taken before the most recent move (for undo)
# Precomputed pixel centers: ax/ay for the 6 cell columns/rows,
# rx/ry for the rotation-arrow positions.
ax = np.linspace(0, W, 13, dtype=np.int32)[1:-1:2]
ay = np.linspace(0, W, 13, dtype=np.int32)[1:-1:2]
rx = np.linspace(0, W, 7, dtype=np.int32)[[1,2,4,5]]
ry = np.linspace(0, W, 5, dtype=np.int32)[[1,3]]
draw()
pygame.display.update()
now = Mode.Solve       # both players are solver-driven in this variant
current = User.PC1     # PC1 moves first
points = [0, 0, 0]     # filled in by solve(); points[2] == 1 is treated as a draw below
running = True
# Main game loop: alternate solver moves between PC1 and PC2 until the
# solver reports a result, then display it and wait for the quit event.
while running:
    if sum(points) > 0:
        # The solver reported a finished game.
        running = False
        break
    for e in pygame.event.get():
        if e.type == QUIT:
            running = False
        if e.type == MOUSEMOTION and now == Mode.Select:
            xm, ym = e.pos
            ind = getcell(xm, ym)
            if ind is not None:
                i, j = ind
                # Preview the current player's stone under the cursor.
                pygame.draw.circle(disp, cmap[current], (ax[i], ay[j]), r)
            else:
                draw()
        if e.type == MOUSEBUTTONDOWN and now == Mode.Select:
            xm, ym = e.pos
            ind = getcell(xm, ym)
            if ind is not None:
                last = board.copy()
                i, j = ind
                # BUG FIX: the original assigned User.Human, a member that
                # does not exist in the User enum and would raise
                # AttributeError; place the current player's stone instead.
                board[i, j] = current
                now = Mode.Rotate
                rotdraw()
                break
        if e.type == MOUSEBUTTONDOWN and now == Mode.Rotate:
            xm, ym = e.pos
            ind = getrot(xm, ym)
            if ind is not None:
                i, j, k = ind
                pl = i + j*2
                q = board[i*3:(i+1)*3, j*3:(j+1)*3]
                q[:] = np.rot90(q, -k)
                now = Mode.Solve
                # BUG FIX: the original assigned User.PC, which does not
                # exist in the User enum; hand the turn to the first
                # machine player after a manual rotation.
                current = User.PC1
                draw()
        if e.type == KEYDOWN:
            if e.key == K_z and last is not None:
                # Undo: restore the snapshot taken before the last move.
                board[:] = last
    pygame.display.update()
    if current != User.Null and now == Mode.Solve:
        # Run the external solver on a worker thread while a busy marker shows.
        thr = Thread(target=solve, args=(current,))
        thr.start()
        wait()
        thr.join()
        # Alternate turns between the two machine players.
        if current == User.PC1:
            current = User.PC2
        else:
            current = User.PC1
        now = Mode.Solve
        draw()
    pygame.display.update()
    clock.tick(60)
# Game over: derive the result text from the solver's point report.
text = 'null'
print(points)
if (points[0] == points[1] and points[0] != 0) or points[2] == 1:
    text = "draw"
elif points[0] > points[1]:
    text = "PC1 wins"
elif points[1] > points[0]:
    text = "PC2 wins"
else:
    text = "Press again"
box = font.render(text, True, (255,128,0))
wb, hb = box.get_size()
disp.blit(box, ((W-wb)//2, (W-hb)//2))
pygame.display.update()
print("End of Game, Press Quit Button!")
while True:
    for e in pygame.event.get():
        if e.type == QUIT:
            pygame.quit()
|
reindex.py | #!/usr/bin/env python
import time
import threading
import subprocess
import os
import logging
import sys
from assemblyline.al.common import forge, log as al_log, queue
config = forge.get_config()
# Run config
DATABASE_NUM = 3              # DB index used for the named work queues
RETRY_PRINT_THRESHOLD = 1000  # throttle for the backlog-wait log message
PROCESSES_COUNT = 50          # number of reindex worker subprocesses
COUNT_INCREMENT = 1000        # progress/backlog check granularity, in keys
LOW_THRESHOLD = 10000         # resume pushing once the queue drains below this
HIGH_THRESHOLD = 50000        # pause pushing when the queue grows past this
DEBUG = False
DO_SYS_BUCKETS = True
# Logger
al_log.init_logging('reindex')
log = logging.getLogger('assemblyline.reindex')
# Globals
ds = forge.get_datastore()
# Work queue of keys to reindex, and the completion-report queue workers feed.
reindex_queue = queue.NamedQueue('r-index', db=DATABASE_NUM)
done_queue = queue.NamedQueue("r-done", db=DATABASE_NUM)
# Names of buckets whose key streaming failed (see process_bucket).
bucket_error = []
# Mapping of CLI bucket names to their datastore collections.
bucket_map = {
    "node": ds.nodes,
    "profile": ds.profiles,
    "signature": ds.signatures,
    "user": ds.users,
    "alert": ds.alerts,
    "file": ds.files,
    "result": ds.results,
    "error": ds.errors,
    "submission": ds.submissions,
    "filescore": ds.filescores
}
def cleanup_queues():
    """Empty the reindex/done queues so the run starts from a clean slate."""
    # TODO: restart from last place instead of cleaning up and restart from start...
    log.info("Cleaning up reindex and done queues...")
    reindex_queue.delete()
    # Push poison pills so workers lingering from a previous run exit,
    # then wipe both queues again.
    for _ in xrange(PROCESSES_COUNT):
        reindex_queue.push({"is_done": True})
    time.sleep(5)
    reindex_queue.delete()
    done_queue.delete()
# noinspection PyProtectedMember,PyBroadException
def process_bucket(b_name, bucket):
    """Stream every key of *bucket* onto the reindex queue, throttling on backlog.

    Pauses pushing whenever the queue grows past HIGH_THRESHOLD and resumes
    once it drains below LOW_THRESHOLD. A failure is recorded in the global
    bucket_error list (deliberately best-effort: other buckets still run).
    """
    try:
        count = 0
        for key in ds._stream_bucket_debug_keys(bucket):
            reindex_queue.push({"bucket_name": b_name, "key": key})
            count += 1
            if count % COUNT_INCREMENT == 0:
                if reindex_queue.length() > HIGH_THRESHOLD:
                    retry = 0
                    while reindex_queue.length() > LOW_THRESHOLD:
                        if retry % RETRY_PRINT_THRESHOLD == 0:
                            log.info("Re-Index queue reached max threshold (%s). Waiting for queue size to "
                                     "reach %s before sending more keys... [%s]" % (HIGH_THRESHOLD,
                                                                                    LOW_THRESHOLD,
                                                                                    reindex_queue.length()))
                        time.sleep(0.1)
                        retry += 1
    # BUG FIX: the original bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt and logged no traceback. Catch Exception and use
    # log.exception so the failure cause is recorded.
    except Exception:
        log.exception("Error occurred while processing bucket %s." % b_name)
        bucket_error.append(b_name)
def done_thread():
    """Aggregate worker completion reports and print a final summary.

    Runs until every worker has acknowledged its poison pill, tracking
    per-bucket success counts and periodically logging throughput.
    """
    global bucket_error
    map_count = {}
    t_count = 0
    e_count = 0
    t0 = time.time()
    t_last = t0
    done_count = 0
    while True:
        _, data = queue.select(done_queue)
        if data.get("is_done", False):
            # A worker acknowledged its kill message.
            done_count += 1
        else:
            if data.get('success', False):
                t_count += 1
                bucket_name = data['bucket_name']
                if bucket_name not in map_count:
                    map_count[bucket_name] = 0
                map_count[bucket_name] += 1
                if t_count % COUNT_INCREMENT == 0:
                    # Periodic throughput report.
                    new_t = time.time()
                    log.info("%s (%s at %s keys/sec) Q:%s ==> %s" % (t_count,
                                                                     new_t-t_last,
                                                                     int(COUNT_INCREMENT/(new_t-t_last)),
                                                                     reindex_queue.length(),
                                                                     map_count))
                    t_last = new_t
            else:
                e_count += 1
        if done_count == PROCESSES_COUNT:
            break
    summary = ""
    summary += "Re-Index DONE! (%s keys re-indexed - %s errors - %s secs)\n" % (t_count, e_count, time.time()-t0)
    summary += "\n############################################\n"
    summary += "########## RE-INDEX SUMMARY ################\n"
    summary += "############################################\n\n"
    for k, v in map_count.iteritems():
        summary += "\t%15s: %s\n" % (k.upper(), v)
    if len(bucket_error) > 0:
        summary += "\nThese buckets failed to index completely: %s\n" % bucket_error
    log.info(summary)
if __name__ == "__main__":
    p_list = []
    try:
        # Keep only CLI arguments that name known buckets.
        args = sys.argv[1:]
        buckets = []
        for a in args:
            if a in bucket_map:
                buckets.append(a)
        if len(buckets) == 0:
            log.info("You need to specify buckets to re-index. reindex.py BUCKET1 BUCKET2 ... BUCKETN")
            exit()
        log.info("Full data re-indexer is starting on buckets: %s" % ", ".join(buckets))
        # Cleanup the queues before starting the workers
        cleanup_queues()
        # Start reindex workers (separate subprocesses launched via invoke.sh).
        log.info("Spawning %s Re-Indexer workers..." % PROCESSES_COUNT)
        DEVNULL = open(os.devnull, 'w')
        for _ in xrange(PROCESSES_COUNT):
            run_dir = os.path.abspath(__file__).replace("reindex.py", "")
            p = subprocess.Popen([os.path.join(run_dir, "invoke.sh"),
                                  os.path.join(run_dir, 'reindex_worker.py')],
                                 stderr=DEVNULL,
                                 stdout=DEVNULL)
            p_list.append(p)
        log.info("All Re-Indexer workers started!")
        # Start done thread (collects per-key results and prints the summary).
        t = threading.Thread(target=done_thread, name="Done thread")
        t.start()
        # Process data buckets
        log.info("Processing buckets...")
        for name in buckets:
            log.info("Processing bucket: %s" % name)
            process_bucket(name, bucket_map[name])
        # Push kill message to all workers
        log.info("All queues done. Sending kill command to workers and waiting for them to finish...")
        for _ in xrange(PROCESSES_COUNT):
            reindex_queue.push({"is_done": True})
        # Wait for workers to finish
        t.join()
    finally:
        log.info("Re-Indexer terminated.")
|
thread.py | # Copyright 2017, Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from concurrent import futures
from queue import Queue
import logging
import threading
import grpc
from google.cloud.pubsub_v1 import types
from google.cloud.pubsub_v1.subscriber import _helper_threads
from google.cloud.pubsub_v1.subscriber.policy import base
from google.cloud.pubsub_v1.subscriber.message import Message
logger = logging.getLogger(__name__)
class Policy(base.BasePolicy):
    """A consumer class based on :class:`threading.Thread`.

    This consumer handles the connection to the Pub/Sub service and all of
    the concurrency needs.
    """

    def __init__(self, client, subscription, flow_control=types.FlowControl(),
                 executor=None, queue=None):
        """Instantiate the policy.

        Args:
            client (~.pubsub_v1.subscriber.client): The subscriber client used
                to create this instance.
            subscription (str): The name of the subscription. The canonical
                format for this is
                ``projects/{project}/subscriptions/{subscription}``.
            flow_control (~google.cloud.pubsub_v1.types.FlowControl): The flow
                control settings.
            executor (~concurrent.futures.ThreadPoolExecutor): (Optional.) A
                ThreadPoolExecutor instance, or anything duck-type compatible
                with it.
            queue (~queue.Queue): (Optional.) A Queue instance, appropriate
                for crossing the concurrency boundary implemented by
                ``executor``.
        """
        # Default the callback to a no-op; it is provided by `.open`.
        self._callback = lambda message: None

        # Create a queue for keeping track of shared state.
        if queue is None:
            queue = Queue()
        self._request_queue = queue

        # Call the superclass constructor.
        super(Policy, self).__init__(
            client=client,
            flow_control=flow_control,
            subscription=subscription,
        )

        # Also maintain a request queue and an executor.
        logger.debug('Creating callback requests thread (not starting).')
        if executor is None:
            executor = futures.ThreadPoolExecutor(max_workers=10)
        self._executor = executor
        self._callback_requests = _helper_threads.QueueCallbackThread(
            self._request_queue,
            self.on_callback_request,
        )

    def close(self):
        """Close the existing connection."""
        # Close the main subscription connection.
        self._consumer.helper_threads.stop('callback requests worker')
        self._consumer.stop_consuming()

    def open(self, callback):
        """Open a streaming pull connection and begin receiving messages.

        For each message received, the ``callback`` function is fired with
        a :class:`~.pubsub_v1.subscriber.message.Message` as its only
        argument.

        Args:
            callback (Callable): The callback function.
        """
        # Start the thread to pass the requests.
        logger.debug('Starting callback requests worker.')
        self._callback = callback
        self._consumer.helper_threads.start(
            'callback requests worker',
            self._request_queue,
            self._callback_requests,
        )

        # Actually start consuming messages.
        self._consumer.start_consuming()

        # Spawn a helper thread that maintains all of the leases for
        # this policy.
        logger.debug('Spawning lease maintenance worker.')
        self._leaser = threading.Thread(target=self.maintain_leases)
        self._leaser.daemon = True
        self._leaser.start()

    def on_callback_request(self, callback_request):
        """Map the callback request to the appropriate GRPC request."""
        action, kwargs = callback_request[0], callback_request[1]
        getattr(self, action)(**kwargs)

    def on_exception(self, exception):
        """Bubble the exception.

        This will cause the stream to exit loudly.
        """
        # If this is DEADLINE_EXCEEDED, then we want to retry.
        # That entails just returning None.
        deadline_exceeded = grpc.StatusCode.DEADLINE_EXCEEDED
        if getattr(exception, 'code', lambda: None)() == deadline_exceeded:
            return

        # Raise any other exception.
        raise exception

    def on_response(self, response):
        """Process all received Pub/Sub messages.

        For each message, schedule a callback with the executor.
        """
        for msg in response.received_messages:
            logger.debug('New message received from Pub/Sub: %r', msg)
            logger.debug(self._callback)
            message = Message(msg.message, msg.ack_id, self._request_queue)
            future = self._executor.submit(self._callback, message)
            # BUG FIX: the original logged `future.result()` inline, which
            # blocks this response-handling thread until the user callback
            # finishes — serializing all message processing and defeating
            # the executor. Log completion from a done-callback instead so
            # dispatch stays concurrent.
            future.add_done_callback(
                lambda fut: logger.debug('Result: %s', fut.result()))
|
target_bigquery.py | #!/usr/bin/env python3
import argparse
import io
import sys
import json
import logging
import collections
import threading
import http.client
import urllib
import pkg_resources
from jsonschema import validate
import singer
from oauth2client import tools
from tempfile import TemporaryFile
from google.cloud import bigquery
from google.cloud.bigquery.job import SourceFormat
from google.cloud.bigquery import Dataset, WriteDisposition
from google.cloud.bigquery import SchemaField
from google.cloud.bigquery import LoadJobConfig, CopyJobConfig
from google.api_core import exceptions
# Parse CLI flags up front; oauth2client.tools contributes its standard
# auth-related arguments. NOTE(review): guarding argparse with ImportError
# looks copied from a Google sample — argparse does not raise ImportError
# here; confirm whether this guard is still wanted.
try:
    parser = argparse.ArgumentParser(parents=[tools.argparser])
    parser.add_argument('-c', '--config', help='Config file', required=True)
    flags = parser.parse_args()
except ImportError:
    flags = None
logging.getLogger('googleapiclient.discovery_cache').setLevel(logging.ERROR)
logger = singer.get_logger()
# OAuth scopes for BigQuery access and streaming inserts.
SCOPES = ['https://www.googleapis.com/auth/bigquery','https://www.googleapis.com/auth/bigquery.insertdata']
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Singer BigQuery Target'
StreamMeta = collections.namedtuple('StreamMeta', ['schema', 'key_properties', 'bookmark_properties'])
def emit_state(state):
    """Write *state* as a single JSON line to stdout; no-op when state is None."""
    if state is None:
        return
    line = json.dumps(state)
    logger.debug('Emitting state {}'.format(line))
    sys.stdout.write("{}\n".format(line))
    sys.stdout.flush()
def clear_dict_hook(items):
    """Build a dict from (key, value) pairs, replacing None values with ''."""
    cleaned = {}
    for key, value in items:
        cleaned[key] = '' if value is None else value
    return cleaned
def define_schema(field, name):
    """Translate one JSON-schema property into BigQuery SchemaField arguments.

    Returns a (name, type, mode, description, fields) tuple; *fields* holds
    nested field tuples for RECORD / repeated-RECORD columns.
    """
    mode = "NULLABLE"
    description = None
    nested = ()
    # anyOf: a 'null' alternative only marks the column nullable; the other
    # alternative supplies the concrete type definition.
    if 'type' not in field and 'anyOf' in field:
        for alternative in field['anyOf']:
            if alternative['type'] == 'null':
                mode = 'NULLABLE'
            else:
                field = alternative
    declared = field['type']
    if isinstance(declared, list):
        # A leading "null" means optional; otherwise the column is required.
        mode = 'NULLABLE' if declared[0] == "null" else 'required'
        bq_type = declared[-1]
    else:
        bq_type = declared
    if bq_type == "object":
        bq_type = "RECORD"
        nested = tuple(build_schema(field))
    if bq_type == "array":
        bq_type = field.get('items').get('type')
        mode = "REPEATED"
        if bq_type == "object":
            bq_type = "RECORD"
            nested = tuple(build_schema(field.get('items')))
    if bq_type == "string" and field.get("format") == "date-time":
        bq_type = "timestamp"
    if bq_type == 'number':
        bq_type = 'FLOAT'
    return (name, bq_type, mode, description, nested)
def build_schema(schema):
    """Convert a JSON schema's properties into a list of BigQuery SchemaFields."""
    fields = []
    for prop_name, prop in schema['properties'].items():
        if not prop:
            # Skip empty property definitions (would produce an empty record).
            continue
        fields.append(SchemaField(*define_schema(prop, prop_name)))
    return fields
def persist_lines_job(project_id, dataset_id, lines=None, truncate=False, validate_records=True):
    """Buffer singer messages into temp files, then bulk-load each stream
    into BigQuery with one load job per table.

    Args:
        project_id: GCP project of the destination dataset.
        dataset_id: BigQuery dataset receiving one table per stream.
        lines: iterable of raw singer message lines (JSON strings).
        truncate: when True, the load jobs overwrite the destination tables.
        validate_records: when True, validate each record against its schema.

    Returns:
        The last STATE message value seen, or None.
    """
    state = None
    schemas = {}
    key_properties = {}
    tables = {}
    rows = {}
    errors = {}
    bigquery_client = bigquery.Client(project=project_id)
    # try:
    #     dataset = bigquery_client.create_dataset(Dataset(dataset_ref)) or Dataset(dataset_ref)
    # except exceptions.Conflict:
    #     pass
    for line in lines:
        try:
            msg = singer.parse_message(line)
        except json.decoder.JSONDecodeError:
            logger.error("Unable to parse:\n{}".format(line))
            raise
        if isinstance(msg, singer.RecordMessage):
            if msg.stream not in schemas:
                raise Exception("A record for stream {} was encountered before a corresponding schema".format(msg.stream))
            schema = schemas[msg.stream]
            if validate_records:
                validate(msg.record, schema)
            # NEWLINE_DELIMITED_JSON expects literal JSON formatted data, with a newline character splitting each row.
            dat = bytes(json.dumps(msg.record) + '\n', 'UTF-8')
            rows[msg.stream].write(dat)
            #rows[msg.stream].write(bytes(str(msg.record) + '\n', 'UTF-8'))
            state = None
        elif isinstance(msg, singer.StateMessage):
            logger.debug('Setting state to {}'.format(msg.value))
            state = msg.value
        elif isinstance(msg, singer.SchemaMessage):
            table = msg.stream
            schemas[table] = msg.schema
            key_properties[table] = msg.key_properties
            #tables[table] = bigquery.Table(dataset.table(table), schema=build_schema(schemas[table]))
            # Buffer this stream's records in a temp file until load time.
            rows[table] = TemporaryFile(mode='w+b')
            errors[table] = None
            # try:
            #     tables[table] = bigquery_client.create_table(tables[table])
            # except exceptions.Conflict:
            #     pass
        elif isinstance(msg, singer.ActivateVersionMessage):
            # This is experimental and won't be used yet
            pass
        else:
            raise Exception("Unrecognized message {}".format(msg))
    # One load job per stream: rewind the buffer and ship it to BigQuery.
    for table in rows.keys():
        table_ref = bigquery_client.dataset(dataset_id).table(table)
        SCHEMA = build_schema(schemas[table])
        load_config = LoadJobConfig()
        load_config.schema = SCHEMA
        load_config.source_format = SourceFormat.NEWLINE_DELIMITED_JSON
        if truncate:
            load_config.write_disposition = WriteDisposition.WRITE_TRUNCATE
        rows[table].seek(0)
        logger.info("loading {} to Bigquery.\n".format(table))
        load_job = bigquery_client.load_table_from_file(
            rows[table], table_ref, job_config=load_config)
        logger.info("loading job {}".format(load_job.job_id))
        # result() blocks until the load job finishes (raises on failure).
        logger.info(load_job.result())
    # for table in errors.keys():
    #     if not errors[table]:
    #         print('Loaded {} row(s) into {}:{}'.format(rows[table], dataset_id, table), tables[table].path)
    #     else:
    #         print('Errors:', errors[table], sep=" ")
    return state
def persist_lines_stream(project_id, dataset_id, lines=None, validate_records=True):
    """Stream singer messages straight into BigQuery via the streaming API.

    Args:
        project_id: GCP project of the destination dataset.
        dataset_id: BigQuery dataset receiving one table per stream.
        lines: iterable of raw singer message lines (JSON strings).
        validate_records: when True, validate each record against its schema.

    Returns:
        The last STATE message value seen, or None.
    """
    state = None
    schemas = {}
    key_properties = {}
    tables = {}
    rows = {}
    errors = {}
    bigquery_client = bigquery.Client(project=project_id)
    dataset_ref = bigquery_client.dataset(dataset_id)
    dataset = Dataset(dataset_ref)
    try:
        dataset = bigquery_client.create_dataset(Dataset(dataset_ref)) or Dataset(dataset_ref)
    except exceptions.Conflict:
        # Dataset already exists; keep using the existing reference.
        pass
    for line in lines:
        try:
            msg = singer.parse_message(line)
        except json.decoder.JSONDecodeError:
            logger.error("Unable to parse:\n{}".format(line))
            raise
        if isinstance(msg, singer.RecordMessage):
            if msg.stream not in schemas:
                raise Exception("A record for stream {} was encountered before a corresponding schema".format(msg.stream))
            schema = schemas[msg.stream]
            if validate_records:
                validate(msg.record, schema)
            errors[msg.stream] = bigquery_client.insert_rows_json(tables[msg.stream], [msg.record])
            rows[msg.stream] += 1
            state = None
        elif isinstance(msg, singer.StateMessage):
            logger.debug('Setting state to {}'.format(msg.value))
            state = msg.value
        elif isinstance(msg, singer.SchemaMessage):
            table = msg.stream
            schemas[table] = msg.schema
            key_properties[table] = msg.key_properties
            tables[table] = bigquery.Table(dataset.table(table), schema=build_schema(schemas[table]))
            rows[table] = 0
            errors[table] = None
            # Drop and recreate so the table matches the latest schema.
            bigquery_client.delete_table(tables[table], not_found_ok=True)
            tables[table] = bigquery_client.create_table(tables[table], exists_ok=True)
        elif isinstance(msg, singer.ActivateVersionMessage):
            # This is experimental and won't be used yet
            pass
        else:
            raise Exception("Unrecognized message {}".format(msg))
    for table in errors.keys():
        if not errors[table]:
            # FIX: the original format string had three placeholders but four
            # arguments (the trailing tables[table].path was silently ignored),
            # and used the root `logging` module instead of the singer logger.
            logger.info('Loaded {} row(s) into {}:{}'.format(rows[table], dataset_id, table))
            emit_state(state)
        else:
            # BUG FIX: the original called logging.error('Errors:', x, sep=" ")
            # with print()-style arguments — `sep` is not a logging kwarg and
            # raised TypeError whenever a streaming insert reported errors.
            logger.error('Errors: %s', errors[table])
    return state
def collect():
    """Send a one-shot anonymous usage ping to singer.io (best effort)."""
    try:
        version = pkg_resources.get_distribution('target-bigquery').version
        conn = http.client.HTTPConnection('collector.singer.io', timeout=10)
        conn.connect()
        params = {
            'e': 'se',
            'aid': 'singer',
            'se_ca': 'target-bigquery',
            'se_ac': 'open',
            'se_la': version,
        }
        conn.request('GET', '/i?' + urllib.parse.urlencode(params))
        conn.getresponse()
        conn.close()
    # BUG FIX: the original bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt. The ping is deliberately best-effort, so catch
    # Exception only.
    except Exception:
        logger.debug('Collection request failed')
def main():
    """Read the config file, then stream stdin singer messages into BigQuery."""
    with open(flags.config) as config_file:
        config = json.load(config_file)
    if not config.get('disable_collection', False):
        logger.info('Sending version information to stitchdata.com. ' +
                    'To disable sending anonymous usage data, set ' +
                    'the config parameter "disable_collection" to true')
        threading.Thread(target=collect).start()
    # FULL_TABLE replication truncates the destination tables before loading.
    truncate = config.get('replication_method') == 'FULL_TABLE'
    validate_records = config.get('validate_records', True)
    # Renamed from `input`, which shadowed the builtin of the same name.
    stdin_wrapper = io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8')
    if config.get('stream_data', True):
        state = persist_lines_stream(config['project_id'], config['dataset_id'],
                                     stdin_wrapper, validate_records=validate_records)
    else:
        state = persist_lines_job(config['project_id'], config['dataset_id'],
                                  stdin_wrapper, truncate=truncate,
                                  validate_records=validate_records)
    emit_state(state)
    logger.debug("Exiting normally")


if __name__ == '__main__':
    main()
|
test_system.py | # Copyright 2016 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import math
import operator
import os
import struct
import threading
import time
import unittest
from google.api_core import exceptions
from google.cloud.spanner_v1.proto.type_pb2 import ARRAY
from google.cloud.spanner_v1.proto.type_pb2 import BOOL
from google.cloud.spanner_v1.proto.type_pb2 import BYTES
from google.cloud.spanner_v1.proto.type_pb2 import DATE
from google.cloud.spanner_v1.proto.type_pb2 import FLOAT64
from google.cloud.spanner_v1.proto.type_pb2 import INT64
from google.cloud.spanner_v1.proto.type_pb2 import STRING
from google.cloud.spanner_v1.proto.type_pb2 import TIMESTAMP
from google.cloud.spanner_v1.proto.type_pb2 import Type
from google.cloud._helpers import UTC
from google.cloud.spanner_v1._helpers import TimestampWithNanoseconds
from google.cloud.spanner import Client
from google.cloud.spanner import KeyRange
from google.cloud.spanner import KeySet
from google.cloud.spanner import BurstyPool
from test_utils.retry import RetryErrors
from test_utils.retry import RetryInstanceState
from test_utils.retry import RetryResult
from test_utils.system import unique_resource_id
from tests._fixtures import DDL_STATEMENTS
# When set, the suite provisions (and later deletes) a dedicated instance;
# otherwise an existing instance named by the environment is reused.
CREATE_INSTANCE = os.getenv(
    'GOOGLE_CLOUD_TESTS_CREATE_SPANNER_INSTANCE') is not None
if CREATE_INSTANCE:
    INSTANCE_ID = 'google-cloud' + unique_resource_id('-')
else:
    INSTANCE_ID = os.environ.get('GOOGLE_CLOUD_TESTS_SPANNER_INSTANCE',
                                 'google-cloud-python-systest')
# Instances that existed before the suite ran (populated in setUpModule).
EXISTING_INSTANCES = []
COUNTERS_TABLE = 'counters'
COUNTERS_COLUMNS = ('name', 'value')
class Config(object):
    """Run-time configuration to be modified at set-up.

    This is a mutable stand-in to allow test set-up to modify
    global state.
    """
    CLIENT = None           # spanner Client, created in setUpModule
    INSTANCE_CONFIG = None  # chosen instance config, set in setUpModule
    INSTANCE = None         # instance under test, set in setUpModule
def _has_all_ddl(database):
    # True once the database reports every expected DDL statement applied.
    return len(database.ddl_statements) == len(DDL_STATEMENTS)
def _list_instances():
    # Materialize the iterator; used as a retry target in setUpModule.
    return list(Config.CLIENT.list_instances())
def setUpModule():
    """Create the shared client and instance used by every test in the module."""
    Config.CLIENT = Client()
    retry = RetryErrors(exceptions.ServiceUnavailable)
    configs = list(retry(Config.CLIENT.list_instance_configs)())
    # Defend against back-end returning configs for regions we aren't
    # actually allowed to use.
    configs = [config for config in configs if '-us-' in config.name]
    if len(configs) < 1:
        raise ValueError('List instance configs failed in module set up.')
    Config.INSTANCE_CONFIG = configs[0]
    config_name = configs[0].name
    instances = retry(_list_instances)()
    # Remember pre-existing instances so tests can tell them apart from ours.
    EXISTING_INSTANCES[:] = instances
    if CREATE_INSTANCE:
        Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID, config_name)
        created_op = Config.INSTANCE.create()
        created_op.result(30)  # block until completion
    else:
        Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID)
        Config.INSTANCE.reload()
def tearDownModule():
    # Only delete the instance if this run created it.
    if CREATE_INSTANCE:
        Config.INSTANCE.delete()
class TestInstanceAdminAPI(unittest.TestCase):
def setUp(self):
    # Instances created by a test; deleted again in tearDown.
    self.instances_to_delete = []
def tearDown(self):
    # Clean up any instances a test provisioned.
    for instance in self.instances_to_delete:
        instance.delete()
def test_list_instances(self):
instances = list(Config.CLIENT.list_instances())
# We have added one new instance in `setUpModule`.
if CREATE_INSTANCE:
self.assertEqual(len(instances), len(EXISTING_INSTANCES) + 1)
for instance in instances:
instance_existence = (instance in EXISTING_INSTANCES or
instance == Config.INSTANCE)
self.assertTrue(instance_existence)
def test_reload_instance(self):
# Use same arguments as Config.INSTANCE (created in `setUpModule`)
# so we can use reload() on a fresh instance.
instance = Config.CLIENT.instance(
INSTANCE_ID, Config.INSTANCE_CONFIG.name)
# Make sure metadata unset before reloading.
instance.display_name = None
instance.reload()
self.assertEqual(instance.display_name, Config.INSTANCE.display_name)
@unittest.skipUnless(CREATE_INSTANCE, 'Skipping instance creation')
def test_create_instance(self):
ALT_INSTANCE_ID = 'new' + unique_resource_id('-')
instance = Config.CLIENT.instance(
ALT_INSTANCE_ID, Config.INSTANCE_CONFIG.name)
operation = instance.create()
# Make sure this instance gets deleted after the test case.
self.instances_to_delete.append(instance)
# We want to make sure the operation completes.
operation.result(30) # raises on failure / timeout.
# Create a new instance instance and make sure it is the same.
instance_alt = Config.CLIENT.instance(
ALT_INSTANCE_ID, Config.INSTANCE_CONFIG.name)
instance_alt.reload()
self.assertEqual(instance, instance_alt)
self.assertEqual(instance.display_name, instance_alt.display_name)
def test_update_instance(self):
OLD_DISPLAY_NAME = Config.INSTANCE.display_name
NEW_DISPLAY_NAME = 'Foo Bar Baz'
Config.INSTANCE.display_name = NEW_DISPLAY_NAME
operation = Config.INSTANCE.update()
# We want to make sure the operation completes.
operation.result(30) # raises on failure / timeout.
# Create a new instance instance and reload it.
instance_alt = Config.CLIENT.instance(INSTANCE_ID, None)
self.assertNotEqual(instance_alt.display_name, NEW_DISPLAY_NAME)
instance_alt.reload()
self.assertEqual(instance_alt.display_name, NEW_DISPLAY_NAME)
# Make sure to put the instance back the way it was for the
# other test cases.
Config.INSTANCE.display_name = OLD_DISPLAY_NAME
Config.INSTANCE.update()
class _TestData(object):
    """Mixin supplying sample contact data and row-comparison helpers.

    Designed to be mixed into a ``unittest.TestCase`` subclass (the
    helpers call ``self.assert*``).
    """
    TABLE = 'contacts'
    COLUMNS = ('contact_id', 'first_name', 'last_name', 'email')
    ROW_DATA = (
        (1, u'Phred', u'Phlyntstone', u'phred@example.com'),
        (2, u'Bharney', u'Rhubble', u'bharney@example.com'),
        (3, u'Wylma', u'Phlyntstone', u'wylma@example.com'),
    )
    # Key set selecting every row of a table.
    ALL = KeySet(all_=True)
    SQL = 'SELECT * FROM contacts ORDER BY contact_id'

    def _assert_timestamp(self, value, nano_value):
        """Assert *value* (naive datetime) matches *nano_value* (UTC-aware,
        possibly nanosecond-precision) field by field.
        """
        self.assertIsInstance(value, datetime.datetime)
        self.assertIsNone(value.tzinfo)
        self.assertIs(nano_value.tzinfo, UTC)
        self.assertEqual(value.year, nano_value.year)
        self.assertEqual(value.month, nano_value.month)
        self.assertEqual(value.day, nano_value.day)
        self.assertEqual(value.hour, nano_value.hour)
        self.assertEqual(value.minute, nano_value.minute)
        self.assertEqual(value.second, nano_value.second)
        self.assertEqual(value.microsecond, nano_value.microsecond)
        if isinstance(value, TimestampWithNanoseconds):
            self.assertEqual(value.nanosecond, nano_value.nanosecond)
        else:
            # Plain datetimes only carry microsecond precision.
            self.assertEqual(value.microsecond * 1000, nano_value.nanosecond)

    def _check_rows_data(self, rows_data, expected=None):
        """Compare *rows_data* row-by-row against *expected*.

        Defaults to :data:`ROW_DATA` when *expected* is ``None``.
        """
        if expected is None:
            expected = self.ROW_DATA
        self.assertEqual(len(rows_data), len(expected))
        # Fix: the original loop variable shadowed the ``expected``
        # parameter; use a distinct name for the per-row value.
        for row, expected_row in zip(rows_data, expected):
            self._check_row_data(row, expected_row)

    def _check_row_data(self, row_data, expected):
        """Compare one row cell-by-cell, handling timestamps and NaNs."""
        self.assertEqual(len(row_data), len(expected))
        for found_cell, expected_cell in zip(row_data, expected):
            if isinstance(found_cell, TimestampWithNanoseconds):
                self._assert_timestamp(expected_cell, found_cell)
            elif isinstance(found_cell, float) and math.isnan(found_cell):
                # NaN != NaN, so compare NaN-ness instead of equality.
                self.assertTrue(math.isnan(expected_cell))
            else:
                self.assertEqual(found_cell, expected_cell)
class TestDatabaseAPI(unittest.TestCase, _TestData):
    """System tests for database admin operations and basic data access."""

    DATABASE_NAME = 'test_database' + unique_resource_id('_')

    @classmethod
    def setUpClass(cls):
        # One shared database for the whole class; dropped in tearDownClass.
        pool = BurstyPool()
        cls._db = Config.INSTANCE.database(
            cls.DATABASE_NAME, ddl_statements=DDL_STATEMENTS, pool=pool)
        operation = cls._db.create()
        operation.result(30)  # raises on failure / timeout.

    @classmethod
    def tearDownClass(cls):
        cls._db.drop()

    def setUp(self):
        # Databases created by an individual test; dropped in tearDown.
        self.to_delete = []

    def tearDown(self):
        for doomed in self.to_delete:
            doomed.drop()

    def test_list_databases(self):
        """The class-level database shows up in ``list_databases``."""
        # Since `Config.INSTANCE` is newly created in `setUpModule`, the
        # database created in `setUpClass` here will be the only one.
        database_names = [
            database.name for database in Config.INSTANCE.list_databases()]
        self.assertTrue(self._db.name in database_names)

    def test_create_database(self):
        """A newly-created database is listed by its ID."""
        pool = BurstyPool()
        temp_db_id = 'temp_db' + unique_resource_id('_')
        temp_db = Config.INSTANCE.database(temp_db_id, pool=pool)
        operation = temp_db.create()
        self.to_delete.append(temp_db)
        # We want to make sure the operation completes.
        operation.result(30)  # raises on failure / timeout.
        database_ids = [
            database.database_id
            for database in Config.INSTANCE.list_databases()]
        self.assertIn(temp_db_id, database_ids)

    def test_table_not_found(self):
        """Creating an index on a non-existent table raises ``NotFound``."""
        temp_db_id = 'temp_db' + unique_resource_id('_')
        correct_table = 'MyTable'
        incorrect_table = 'NotMyTable'
        self.assertNotEqual(correct_table, incorrect_table)
        create_table = (
            'CREATE TABLE {} (\n'
            ' Id STRING(36) NOT NULL,\n'
            ' Field1 STRING(36) NOT NULL\n'
            ') PRIMARY KEY (Id)').format(correct_table)
        index = 'CREATE INDEX IDX ON {} (Field1)'.format(incorrect_table)
        temp_db = Config.INSTANCE.database(
            temp_db_id,
            ddl_statements=[
                create_table,
                index,
            ],
        )
        self.to_delete.append(temp_db)
        with self.assertRaises(exceptions.NotFound) as exc_info:
            temp_db.create()
        expected = 'Table not found: {0}'.format(incorrect_table)
        self.assertEqual(exc_info.exception.args, (expected,))

    def test_update_database_ddl(self):
        """``update_ddl`` applies every statement to an empty database."""
        pool = BurstyPool()
        temp_db_id = 'temp_db' + unique_resource_id('_')
        temp_db = Config.INSTANCE.database(temp_db_id, pool=pool)
        create_op = temp_db.create()
        self.to_delete.append(temp_db)
        # We want to make sure the operation completes.
        create_op.result(120)  # raises on failure / timeout.
        operation = temp_db.update_ddl(DDL_STATEMENTS)
        # We want to make sure the operation completes.
        operation.result(120)  # raises on failure / timeout.
        temp_db.reload()
        self.assertEqual(len(temp_db.ddl_statements), len(DDL_STATEMENTS))

    def test_db_batch_insert_then_db_snapshot_read(self):
        """Rows written via ``Database.batch`` are visible to a snapshot."""
        retry = RetryInstanceState(_has_all_ddl)
        retry(self._db.reload)()
        with self._db.batch() as batch:
            batch.delete(self.TABLE, self.ALL)
            batch.insert(self.TABLE, self.COLUMNS, self.ROW_DATA)
        with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
            from_snap = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
        self._check_rows_data(from_snap)

    def test_db_run_in_transaction_then_snapshot_execute_sql(self):
        """``run_in_transaction`` commits rows readable via SQL."""
        retry = RetryInstanceState(_has_all_ddl)
        retry(self._db.reload)()
        with self._db.batch() as batch:
            batch.delete(self.TABLE, self.ALL)

        def _unit_of_work(transaction, test):
            rows = list(transaction.read(test.TABLE, test.COLUMNS, self.ALL))
            test.assertEqual(rows, [])
            transaction.insert_or_update(
                test.TABLE, test.COLUMNS, test.ROW_DATA)

        self._db.run_in_transaction(_unit_of_work, test=self)
        with self._db.snapshot() as after:
            rows = list(after.execute_sql(self.SQL))
        self._check_rows_data(rows)

    def test_db_run_in_transaction_twice(self):
        """Running the same insert-or-update twice is idempotent."""
        retry = RetryInstanceState(_has_all_ddl)
        retry(self._db.reload)()
        with self._db.batch() as batch:
            batch.delete(self.TABLE, self.ALL)

        def _unit_of_work(transaction, test):
            transaction.insert_or_update(
                test.TABLE, test.COLUMNS, test.ROW_DATA)

        self._db.run_in_transaction(_unit_of_work, test=self)
        self._db.run_in_transaction(_unit_of_work, test=self)
        with self._db.snapshot() as after:
            rows = list(after.execute_sql(self.SQL))
        self._check_rows_data(rows)

    def test_db_run_in_transaction_twice_4181(self):
        """Regression test for issue #4181: a failed transaction does not
        poison subsequent ones."""
        retry = RetryInstanceState(_has_all_ddl)
        retry(self._db.reload)()
        with self._db.batch() as batch:
            batch.delete(COUNTERS_TABLE, self.ALL)

        def _unit_of_work(transaction, name):
            transaction.insert(COUNTERS_TABLE, COUNTERS_COLUMNS, [[name, 0]])

        self._db.run_in_transaction(_unit_of_work, name='id_1')
        with self.assertRaises(exceptions.AlreadyExists):
            # Second plain insert of the same key must fail ...
            self._db.run_in_transaction(_unit_of_work, name='id_1')
        # ... but a fresh key still succeeds afterwards.
        self._db.run_in_transaction(_unit_of_work, name='id_2')
        with self._db.snapshot() as after:
            rows = list(after.read(
                COUNTERS_TABLE, COUNTERS_COLUMNS, self.ALL))
        self.assertEqual(len(rows), 2)
class TestSessionAPI(unittest.TestCase, _TestData):
    """System tests for session-level reads, writes, and transactions."""

    DATABASE_NAME = 'test_sessions' + unique_resource_id('_')
    # Table exercising array / bool / bytes / date / float / int /
    # string / timestamp columns.
    ALL_TYPES_TABLE = 'all_types'
    ALL_TYPES_COLUMNS = (
        'list_goes_on',
        'are_you_sure',
        'raw_data',
        'hwhen',
        'approx_value',
        'eye_d',
        'description',
        'exactly_hwhen',
    )
    SOME_DATE = datetime.date(2011, 1, 17)
    SOME_TIME = datetime.datetime(1989, 1, 17, 17, 59, 12, 345612)
    NANO_TIME = TimestampWithNanoseconds(1995, 8, 31, nanosecond=987654321)
    # A NaN decoded from explicit bytes, distinct from float('nan')'s
    # bit pattern.
    OTHER_NAN, = struct.unpack('<d', b'\x01\x00\x01\x00\x00\x00\xf8\xff')
    BYTES_1 = b'Ymlu'
    BYTES_2 = b'Ym9vdHM='
    ALL_TYPES_ROWDATA = (
        ([], False, None, None, 0.0, None, None, None),
        ([1], True, BYTES_1, SOME_DATE, 0.0, 19, u'dog', SOME_TIME),
        ([5, 10], True, BYTES_1, None, 1.25, 99, u'cat', None),
        ([], False, BYTES_2, None, float('inf'), 107, u'frog', None),
        ([3, None, 9], False, None, None, float('-inf'), 207, u'bat', None),
        ([], False, None, None, float('nan'), 1207, u'owl', None),
        ([], False, None, None, OTHER_NAN, 2000, u'virus', NANO_TIME),
    )
    @classmethod
    def setUpClass(cls):
        # One shared database for the whole class; dropped in tearDownClass.
        pool = BurstyPool()
        cls._db = Config.INSTANCE.database(
            cls.DATABASE_NAME, ddl_statements=DDL_STATEMENTS, pool=pool)
        operation = cls._db.create()
        operation.result(30)  # raises on failure / timeout.

    @classmethod
    def tearDownClass(cls):
        cls._db.drop()

    def setUp(self):
        # Sessions created by an individual test; deleted in tearDown.
        self.to_delete = []

    def tearDown(self):
        for doomed in self.to_delete:
            doomed.delete()
    def test_session_crud(self):
        """Create / exists / delete round-trip for a session."""
        # exists() may lag; retry until it reports the expected state.
        retry_true = RetryResult(operator.truth)
        retry_false = RetryResult(operator.not_)
        session = self._db.session()
        self.assertFalse(session.exists())
        session.create()
        retry_true(session.exists)()
        session.delete()
        retry_false(session.exists)()

    def test_batch_insert_then_read(self):
        """Rows written in a batch are visible to a snapshot read."""
        retry = RetryInstanceState(_has_all_ddl)
        retry(self._db.reload)()
        with self._db.batch() as batch:
            batch.delete(self.TABLE, self.ALL)
            batch.insert(self.TABLE, self.COLUMNS, self.ROW_DATA)
        with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
            rows = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
        self._check_rows_data(rows)
    def test_batch_insert_then_read_string_array_of_string(self):
        """ARRAY<STRING> columns round-trip, including NULL elements."""
        TABLE = 'string_plus_array_of_string'
        COLUMNS = ['id', 'name', 'tags']
        ROWDATA = [
            (0, None, None),
            (1, 'phred', ['yabba', 'dabba', 'do']),
            (2, 'bharney', []),
            (3, 'wylma', ['oh', None, 'phred']),
        ]
        retry = RetryInstanceState(_has_all_ddl)
        retry(self._db.reload)()
        with self._db.batch() as batch:
            batch.delete(TABLE, self.ALL)
            batch.insert(TABLE, COLUMNS, ROWDATA)
        with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
            rows = list(snapshot.read(TABLE, COLUMNS, self.ALL))
        self._check_rows_data(rows, expected=ROWDATA)

    def test_batch_insert_then_read_all_datatypes(self):
        """Every supported column type round-trips through batch + read."""
        retry = RetryInstanceState(_has_all_ddl)
        retry(self._db.reload)()
        with self._db.batch() as batch:
            batch.delete(self.ALL_TYPES_TABLE, self.ALL)
            batch.insert(
                self.ALL_TYPES_TABLE,
                self.ALL_TYPES_COLUMNS,
                self.ALL_TYPES_ROWDATA)
        with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
            rows = list(snapshot.read(
                self.ALL_TYPES_TABLE, self.ALL_TYPES_COLUMNS, self.ALL))
        self._check_rows_data(rows, expected=self.ALL_TYPES_ROWDATA)

    def test_batch_insert_or_update_then_query(self):
        """insert_or_update rows are visible to an SQL query."""
        retry = RetryInstanceState(_has_all_ddl)
        retry(self._db.reload)()
        with self._db.batch() as batch:
            batch.insert_or_update(self.TABLE, self.COLUMNS, self.ROW_DATA)
        with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
            rows = list(snapshot.execute_sql(self.SQL))
        self._check_rows_data(rows)
    @RetryErrors(exception=exceptions.ServerError)
    def test_transaction_read_and_insert_then_rollback(self):
        """Rolled-back inserts never become visible."""
        retry = RetryInstanceState(_has_all_ddl)
        retry(self._db.reload)()
        session = self._db.session()
        session.create()
        self.to_delete.append(session)
        with self._db.batch() as batch:
            batch.delete(self.TABLE, self.ALL)
        transaction = session.transaction()
        transaction.begin()
        rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
        self.assertEqual(rows, [])
        transaction.insert(self.TABLE, self.COLUMNS, self.ROW_DATA)
        # Inserted rows can't be read until after commit.
        rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
        self.assertEqual(rows, [])
        transaction.rollback()
        rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
        self.assertEqual(rows, [])

    def _transaction_read_then_raise(self, transaction):
        # Unit of work for the exception test below: insert, then raise so
        # the wrapper must roll the transaction back.
        rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
        self.assertEqual(len(rows), 0)
        transaction.insert(self.TABLE, self.COLUMNS, self.ROW_DATA)
        raise CustomException()

    @RetryErrors(exception=exceptions.ServerError)
    @RetryErrors(exception=exceptions.Conflict)
    def test_transaction_read_and_insert_then_exception(self):
        """An exception in the unit of work rolls the transaction back."""
        retry = RetryInstanceState(_has_all_ddl)
        retry(self._db.reload)()
        with self._db.batch() as batch:
            batch.delete(self.TABLE, self.ALL)
        with self.assertRaises(CustomException):
            self._db.run_in_transaction(self._transaction_read_then_raise)
        # Transaction was rolled back.
        with self._db.snapshot() as snapshot:
            rows = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
        self.assertEqual(rows, [])

    @RetryErrors(exception=exceptions.ServerError)
    @RetryErrors(exception=exceptions.Conflict)
    def test_transaction_read_and_insert_or_update_then_commit(self):
        """Rows become visible only after the transaction commits."""
        retry = RetryInstanceState(_has_all_ddl)
        retry(self._db.reload)()
        session = self._db.session()
        session.create()
        self.to_delete.append(session)
        with session.batch() as batch:
            batch.delete(self.TABLE, self.ALL)
        with session.transaction() as transaction:
            rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
            self.assertEqual(rows, [])
            transaction.insert_or_update(
                self.TABLE, self.COLUMNS, self.ROW_DATA)
            # Inserted rows can't be read until after commit.
            rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
            self.assertEqual(rows, [])
        rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
        self._check_rows_data(rows)
    def _transaction_concurrency_helper(self, unit_of_work, pkey):
        """Run *unit_of_work* from several threads concurrently and verify
        that every increment of the counter row survives."""
        INITIAL_VALUE = 123
        NUM_THREADS = 3  # conforms to equivalent Java systest.
        retry = RetryInstanceState(_has_all_ddl)
        retry(self._db.reload)()
        with self._db.batch() as batch:
            batch.insert_or_update(
                COUNTERS_TABLE, COUNTERS_COLUMNS, [[pkey, INITIAL_VALUE]])
        # NOTE(review): each list entry is the database itself, not a
        # session as the name suggests; run_in_transaction presumably
        # obtains a session per call — confirm against the client library.
        txn_sessions = []
        for _ in range(NUM_THREADS):
            txn_sessions.append(self._db)
        threads = [
            threading.Thread(
                target=txn_session.run_in_transaction,
                args=(unit_of_work, pkey))
            for txn_session in txn_sessions]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
        with self._db.snapshot() as snapshot:
            keyset = KeySet(keys=[(pkey,)])
            rows = list(snapshot.read(
                COUNTERS_TABLE, COUNTERS_COLUMNS, keyset))
        self.assertEqual(len(rows), 1)
        _, value = rows[0]
        # Every thread's increment must have been applied exactly once.
        self.assertEqual(value, INITIAL_VALUE + len(threads))

    def _read_w_concurrent_update(self, transaction, pkey):
        # Unit of work: read the counter via its key, then bump it by one.
        keyset = KeySet(keys=[(pkey,)])
        rows = list(transaction.read(
            COUNTERS_TABLE, COUNTERS_COLUMNS, keyset))
        self.assertEqual(len(rows), 1)
        pkey, value = rows[0]
        transaction.update(
            COUNTERS_TABLE, COUNTERS_COLUMNS, [[pkey, value + 1]])

    def test_transaction_read_w_concurrent_updates(self):
        """Concurrent read-then-update transactions all commit."""
        PKEY = 'read_w_concurrent_updates'
        self._transaction_concurrency_helper(
            self._read_w_concurrent_update, PKEY)

    def _query_w_concurrent_update(self, transaction, pkey):
        # Unit of work: read the counter via SQL, then bump it by one.
        SQL = 'SELECT * FROM counters WHERE name = @name'
        rows = list(transaction.execute_sql(
            SQL,
            params={'name': pkey},
            param_types={'name': Type(code=STRING)},
        ))
        self.assertEqual(len(rows), 1)
        pkey, value = rows[0]
        transaction.update(
            COUNTERS_TABLE, COUNTERS_COLUMNS, [[pkey, value + 1]])

    def test_transaction_query_w_concurrent_updates(self):
        """Concurrent query-then-update transactions all commit."""
        PKEY = 'query_w_concurrent_updates'
        self._transaction_concurrency_helper(
            self._query_w_concurrent_update, PKEY)
    def test_transaction_read_w_abort(self):
        """Two deliberately colliding transactions both eventually commit.

        ``_ReadAbortTrigger`` (defined elsewhere in this module) provokes
        an ABORTED transaction in one thread and handles the retry in the
        other; both counters must end up incremented exactly once.
        """
        retry = RetryInstanceState(_has_all_ddl)
        retry(self._db.reload)()
        trigger = _ReadAbortTrigger()
        with self._db.batch() as batch:
            batch.delete(COUNTERS_TABLE, self.ALL)
            batch.insert(
                COUNTERS_TABLE,
                COUNTERS_COLUMNS,
                [[trigger.KEY1, 0], [trigger.KEY2, 0]])
        provoker = threading.Thread(
            target=trigger.provoke_abort, args=(self._db,))
        handler = threading.Thread(
            target=trigger.handle_abort, args=(self._db,))
        provoker.start()
        trigger.provoker_started.wait()
        handler.start()
        trigger.handler_done.wait()
        provoker.join()
        handler.join()
        with self._db.snapshot() as snapshot:
            rows = list(snapshot.read(
                COUNTERS_TABLE,
                COUNTERS_COLUMNS,
                self.ALL)
            )
        self._check_row_data(
            rows, expected=[[trigger.KEY1, 1], [trigger.KEY2, 1]])
@staticmethod
def _row_data(max_index):
for index in range(max_index):
yield [
index,
'First%09d' % (index,),
'Last%09d' % (max_index - index),
'test-%09d@example.com' % (index,),
]
    def _set_up_table(self, row_count, database=None):
        """(Re)populate TABLE with ``row_count`` generated rows.

        Returns the commit timestamp of the populating transaction.
        """
        if database is None:
            database = self._db
        retry = RetryInstanceState(_has_all_ddl)
        retry(database.reload)()

        def _unit_of_work(transaction, test):
            # Replace the whole table contents atomically.
            transaction.delete(test.TABLE, test.ALL)
            transaction.insert(
                test.TABLE, test.COLUMNS, test._row_data(row_count))

        committed = database.run_in_transaction(_unit_of_work, test=self)
        return committed
    def test_read_with_single_keys_index(self):
        """Read one row via the secondary index (presumably named 'name'
        in the fixture DDL — not visible here)."""
        row_count = 10
        columns = self.COLUMNS[1], self.COLUMNS[2]
        committed = self._set_up_table(row_count)
        expected = [[row[1], row[2]] for row in self._row_data(row_count)]
        row = 5
        keyset = [[expected[row][0], expected[row][1]]]
        with self._db.snapshot() as snapshot:
            results_iter = snapshot.read(
                self.TABLE,
                columns,
                KeySet(keys=keyset),
                index='name'
            )
            rows = list(results_iter)
        self.assertEqual(rows, [expected[row]])

    def test_empty_read_with_single_keys_index(self):
        """An index read for a missing key yields no rows."""
        row_count = 10
        columns = self.COLUMNS[1], self.COLUMNS[2]
        committed = self._set_up_table(row_count)
        keyset = [["Non", "Existent"]]
        with self._db.snapshot() as snapshot:
            results_iter = snapshot.read(
                self.TABLE,
                columns,
                KeySet(keys=keyset),
                index='name'
            )
            rows = list(results_iter)
        self.assertEqual(rows, [])

    def test_read_with_multiple_keys_index(self):
        """Reading every index key returns every row."""
        row_count = 10
        columns = self.COLUMNS[1], self.COLUMNS[2]
        committed = self._set_up_table(row_count)
        expected = [[row[1], row[2]] for row in self._row_data(row_count)]
        with self._db.snapshot() as snapshot:
            rows = list(snapshot.read(
                self.TABLE,
                columns,
                KeySet(keys=expected),
                index='name')
            )
        self.assertEqual(rows, expected)
    def test_snapshot_read_w_various_staleness(self):
        """All five snapshot staleness modes return the same data."""
        from datetime import datetime
        from google.cloud._helpers import UTC
        ROW_COUNT = 400
        committed = self._set_up_table(ROW_COUNT)
        all_data_rows = list(self._row_data(ROW_COUNT))
        before_reads = datetime.utcnow().replace(tzinfo=UTC)
        # Test w/ read timestamp
        with self._db.snapshot(read_timestamp=committed) as read_tx:
            rows = list(read_tx.read(self.TABLE, self.COLUMNS, self.ALL))
            self._check_row_data(rows, all_data_rows)
        # Test w/ min read timestamp
        with self._db.snapshot(min_read_timestamp=committed) as min_read_ts:
            rows = list(min_read_ts.read(self.TABLE, self.COLUMNS, self.ALL))
            self._check_row_data(rows, all_data_rows)
        staleness = datetime.utcnow().replace(tzinfo=UTC) - before_reads
        # Test w/ max staleness
        with self._db.snapshot(max_staleness=staleness) as max_staleness:
            rows = list(max_staleness.read(self.TABLE, self.COLUMNS, self.ALL))
            self._check_row_data(rows, all_data_rows)
        # Test w/ exact staleness
        with self._db.snapshot(exact_staleness=staleness) as exact_staleness:
            rows = list(exact_staleness.read(
                self.TABLE,
                self.COLUMNS,
                self.ALL)
            )
            self._check_row_data(rows, all_data_rows)
        # Test w/ strong
        with self._db.snapshot() as strong:
            rows = list(strong.read(self.TABLE, self.COLUMNS, self.ALL))
            self._check_row_data(rows, all_data_rows)
    def test_multiuse_snapshot_read_isolation_strong(self):
        """A multi-use strong snapshot keeps seeing its original data even
        after the rows are deleted underneath it."""
        ROW_COUNT = 40
        committed = self._set_up_table(ROW_COUNT)
        all_data_rows = list(self._row_data(ROW_COUNT))
        with self._db.snapshot(multi_use=True) as strong:
            before = list(strong.read(self.TABLE, self.COLUMNS, self.ALL))
            self._check_row_data(before, all_data_rows)
            # Delete everything while the snapshot is still open ...
            with self._db.batch() as batch:
                batch.delete(self.TABLE, self.ALL)
            # ... the snapshot is isolated from the delete.
            after = list(strong.read(self.TABLE, self.COLUMNS, self.ALL))
            self._check_row_data(after, all_data_rows)

    def test_multiuse_snapshot_read_isolation_read_timestamp(self):
        """Same isolation guarantee for a read-timestamp snapshot."""
        ROW_COUNT = 40
        committed = self._set_up_table(ROW_COUNT)
        all_data_rows = list(self._row_data(ROW_COUNT))
        with self._db.snapshot(
                read_timestamp=committed,
                multi_use=True) as read_ts:
            before = list(read_ts.read(self.TABLE, self.COLUMNS, self.ALL))
            self._check_row_data(before, all_data_rows)
            with self._db.batch() as batch:
                batch.delete(self.TABLE, self.ALL)
            after = list(read_ts.read(self.TABLE, self.COLUMNS, self.ALL))
            self._check_row_data(after, all_data_rows)

    def test_multiuse_snapshot_read_isolation_exact_staleness(self):
        """Same isolation guarantee for an exact-staleness snapshot."""
        ROW_COUNT = 40
        committed = self._set_up_table(ROW_COUNT)
        all_data_rows = list(self._row_data(ROW_COUNT))
        # Sleep so the stale read time still falls after the populate.
        time.sleep(1)
        delta = datetime.timedelta(microseconds=1000)
        with self._db.snapshot(exact_staleness=delta, multi_use=True) as exact:
            before = list(exact.read(self.TABLE, self.COLUMNS, self.ALL))
            self._check_row_data(before, all_data_rows)
            with self._db.batch() as batch:
                batch.delete(self.TABLE, self.ALL)
            after = list(exact.read(self.TABLE, self.COLUMNS, self.ALL))
            self._check_row_data(after, all_data_rows)
    def test_read_w_index(self):
        """Reading via a last-name index returns rows in reversed order
        (``_row_data`` numbers last names in reverse)."""
        ROW_COUNT = 2000
        # Indexed reads cannot return non-indexed columns
        MY_COLUMNS = self.COLUMNS[0], self.COLUMNS[2]
        EXTRA_DDL = [
            'CREATE INDEX contacts_by_last_name ON contacts(last_name)',
        ]
        pool = BurstyPool()
        temp_db = Config.INSTANCE.database(
            'test_read' + unique_resource_id('_'),
            ddl_statements=DDL_STATEMENTS + EXTRA_DDL,
            pool=pool)
        operation = temp_db.create()
        self.to_delete.append(_DatabaseDropper(temp_db))
        # We want to make sure the operation completes.
        operation.result(30)  # raises on failure / timeout.
        committed = self._set_up_table(ROW_COUNT, database=temp_db)
        with temp_db.snapshot(read_timestamp=committed) as snapshot:
            rows = list(snapshot.read(
                self.TABLE,
                MY_COLUMNS,
                self.ALL,
                index='contacts_by_last_name')
            )
        expected = list(reversed(
            [(row[0], row[2]) for row in self._row_data(ROW_COUNT)]))
        self._check_rows_data(rows, expected)
    def test_read_w_single_key(self):
        """Point read of the first row by primary key."""
        ROW_COUNT = 40
        committed = self._set_up_table(ROW_COUNT)
        with self._db.snapshot(read_timestamp=committed) as snapshot:
            rows = list(snapshot.read(
                self.TABLE, self.COLUMNS, KeySet(keys=[(0,)])))
        all_data_rows = list(self._row_data(ROW_COUNT))
        expected = [all_data_rows[0]]
        self._check_row_data(rows, expected)

    def test_empty_read(self):
        """A point read for a key just past the last row yields nothing."""
        ROW_COUNT = 40
        committed = self._set_up_table(ROW_COUNT)
        with self._db.snapshot() as snapshot:
            rows = list(snapshot.read(
                self.TABLE, self.COLUMNS, KeySet(keys=[(40,)])))
        self._check_row_data(rows, [])

    def test_read_w_multiple_keys(self):
        """A multi-key read returns exactly the requested rows."""
        ROW_COUNT = 40
        indices = [0, 5, 17]
        committed = self._set_up_table(ROW_COUNT)
        with self._db.snapshot(read_timestamp=committed) as snapshot:
            rows = list(snapshot.read(
                self.TABLE, self.COLUMNS,
                KeySet(keys=[(index,) for index in indices])))
        all_data_rows = list(self._row_data(ROW_COUNT))
        expected = [row for row in all_data_rows if row[0] in indices]
        self._check_row_data(rows, expected)

    def test_read_w_limit(self):
        """``limit`` truncates the result to the first N rows."""
        ROW_COUNT = 3000
        LIMIT = 100
        committed = self._set_up_table(ROW_COUNT)
        with self._db.snapshot(read_timestamp=committed) as snapshot:
            rows = list(snapshot.read(
                self.TABLE, self.COLUMNS, self.ALL, limit=LIMIT))
        all_data_rows = list(self._row_data(ROW_COUNT))
        expected = all_data_rows[:LIMIT]
        self._check_row_data(rows, expected)
    def test_read_w_ranges(self):
        """Every open/closed bound combination of a primary-key range
        selects the corresponding slice of rows."""
        ROW_COUNT = 3000
        START = 1000
        END = 2000
        committed = self._set_up_table(ROW_COUNT)
        with self._db.snapshot(
                read_timestamp=committed,
                multi_use=True) as snapshot:
            all_data_rows = list(self._row_data(ROW_COUNT))
            # Single-row range: [START, START+1)
            single_key = KeyRange(start_closed=[START], end_open=[START + 1])
            keyset = KeySet(ranges=(single_key,))
            rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
            expected = all_data_rows[START : START+1]
            self._check_rows_data(rows, expected)
            # [START, END]
            closed_closed = KeyRange(start_closed=[START], end_closed=[END])
            keyset = KeySet(ranges=(closed_closed,))
            rows = list(snapshot.read(
                self.TABLE, self.COLUMNS, keyset))
            expected = all_data_rows[START : END+1]
            self._check_row_data(rows, expected)
            # [START, END)
            closed_open = KeyRange(start_closed=[START], end_open=[END])
            keyset = KeySet(ranges=(closed_open,))
            rows = list(snapshot.read(
                self.TABLE, self.COLUMNS, keyset))
            expected = all_data_rows[START : END]
            self._check_row_data(rows, expected)
            # (START, END)
            open_open = KeyRange(start_open=[START], end_open=[END])
            keyset = KeySet(ranges=(open_open,))
            rows = list(snapshot.read(
                self.TABLE, self.COLUMNS, keyset))
            expected = all_data_rows[START+1 : END]
            self._check_row_data(rows, expected)
            # (START, END]
            open_closed = KeyRange(start_open=[START], end_closed=[END])
            keyset = KeySet(ranges=(open_closed,))
            rows = list(snapshot.read(
                self.TABLE, self.COLUMNS, keyset))
            expected = all_data_rows[START+1 : END+1]
            self._check_row_data(rows, expected)
    def test_read_partial_range_until_end(self):
        """Ranges with an empty *end* bound run to the end of the table.

        NOTE(review): the map expects empty results for ``end_open``
        bounds — this encodes the backend's semantics for an empty open
        end key; confirm against the Spanner KeyRange documentation.
        """
        row_count = 3000
        start = 1000
        committed = self._set_up_table(row_count)
        with self._db.snapshot(
                read_timestamp=committed,
                multi_use=True) as snapshot:
            all_data_rows = list(self._row_data(row_count))
            expected_map = {
                ('start_closed', 'end_closed'): all_data_rows[start:],
                ('start_closed', 'end_open'): [],
                ('start_open', 'end_closed'): all_data_rows[start+1:],
                ('start_open', 'end_open'): [],
            }
            for start_arg in ('start_closed', 'start_open'):
                for end_arg in ('end_closed', 'end_open'):
                    range_kwargs = {start_arg: [start], end_arg: []}
                    keyset = KeySet(
                        ranges=(
                            KeyRange(**range_kwargs),
                        ),
                    )
                    rows = list(snapshot.read(
                        self.TABLE, self.COLUMNS, keyset))
                    expected = expected_map[(start_arg, end_arg)]
                    self._check_row_data(rows, expected)

    def test_read_partial_range_from_beginning(self):
        """Ranges with an empty *start* bound begin at the first row."""
        row_count = 3000
        end = 2000
        committed = self._set_up_table(row_count)
        all_data_rows = list(self._row_data(row_count))
        expected_map = {
            ('start_closed', 'end_closed'): all_data_rows[:end+1],
            ('start_closed', 'end_open'): all_data_rows[:end],
            ('start_open', 'end_closed'): [],
            ('start_open', 'end_open'): [],
        }
        for start_arg in ('start_closed', 'start_open'):
            for end_arg in ('end_closed', 'end_open'):
                range_kwargs = {start_arg: [], end_arg: [end]}
                keyset = KeySet(
                    ranges=(
                        KeyRange(**range_kwargs),
                    ),
                )
                with self._db.snapshot(
                        read_timestamp=committed,
                        multi_use=True) as snapshot:
                    rows = list(snapshot.read(
                        self.TABLE, self.COLUMNS, keyset))
                    expected = expected_map[(start_arg, end_arg)]
                    self._check_row_data(rows, expected)
    def test_read_with_range_keys_index_single_key(self):
        """Index range covering exactly one (first, last) key pair."""
        row_count = 10
        columns = self.COLUMNS[1], self.COLUMNS[2]
        data = [[row[1], row[2]] for row in self._row_data(row_count)]
        self._set_up_table(row_count)
        start = 3
        krange = KeyRange(start_closed=data[start], end_open=data[start + 1])
        keyset = KeySet(ranges=(krange,))
        with self._db.snapshot() as snapshot:
            rows = list(snapshot.read(
                self.TABLE, columns, keyset, index='name'))
        self.assertEqual(rows, data[start : start+1])

    def test_read_with_range_keys_index_closed_closed(self):
        """Index range [start, end] includes both endpoints."""
        row_count = 10
        columns = self.COLUMNS[1], self.COLUMNS[2]
        data = [[row[1], row[2]] for row in self._row_data(row_count)]
        self._set_up_table(row_count)
        start, end = 3, 7
        krange = KeyRange(start_closed=data[start], end_closed=data[end])
        keyset = KeySet(ranges=(krange,))
        with self._db.snapshot() as snapshot:
            rows = list(snapshot.read(
                self.TABLE,
                columns,
                keyset,
                index='name')
            )
        self.assertEqual(rows, data[start : end+1])

    def test_read_with_range_keys_index_closed_open(self):
        """Index range [start, end) excludes the end key."""
        row_count = 10
        columns = self.COLUMNS[1], self.COLUMNS[2]
        data = [[row[1], row[2]] for row in self._row_data(row_count)]
        self._set_up_table(row_count)
        start, end = 3, 7
        krange = KeyRange(start_closed=data[start], end_open=data[end])
        keyset = KeySet(ranges=(krange,))
        with self._db.snapshot() as snapshot:
            rows = list(snapshot.read(
                self.TABLE,
                columns,
                keyset,
                index='name')
            )
        self.assertEqual(rows, data[start:end])

    def test_read_with_range_keys_index_open_closed(self):
        """Index range (start, end] excludes the start key."""
        row_count = 10
        columns = self.COLUMNS[1], self.COLUMNS[2]
        data = [[row[1], row[2]] for row in self._row_data(row_count)]
        self._set_up_table(row_count)
        start, end = 3, 7
        krange = KeyRange(start_open=data[start], end_closed=data[end])
        keyset = KeySet(ranges=(krange,))
        with self._db.snapshot() as snapshot:
            rows = list(snapshot.read(self.TABLE, columns,
                                      keyset, index='name'))
        self.assertEqual(rows, data[start+1 : end+1])

    def test_read_with_range_keys_index_open_open(self):
        """Index range (start, end) excludes both endpoints."""
        row_count = 10
        columns = self.COLUMNS[1], self.COLUMNS[2]
        data = [[row[1], row[2]] for row in self._row_data(row_count)]
        self._set_up_table(row_count)
        start, end = 3, 7
        krange = KeyRange(start_open=data[start], end_open=data[end])
        keyset = KeySet(ranges=(krange,))
        with self._db.snapshot() as snapshot:
            rows = list(snapshot.read(self.TABLE, columns,
                                      keyset, index='name'))
        self.assertEqual(rows, data[start+1 : end])
    def test_read_with_range_keys_index_limit_closed_closed(self):
        """[start, end] index range combined with a row limit."""
        row_count = 10
        columns = self.COLUMNS[1], self.COLUMNS[2]
        data = [[row[1], row[2]] for row in self._row_data(row_count)]
        self._set_up_table(row_count)
        start, end, limit = 3, 7, 2
        krange = KeyRange(start_closed=data[start], end_closed=data[end])
        keyset = KeySet(ranges=(krange,))
        with self._db.snapshot() as snapshot:
            rows = list(snapshot.read(
                self.TABLE,
                columns,
                keyset,
                index='name',
                limit=limit)
            )
        expected = data[start : end+1]
        self.assertEqual(rows, expected[:limit])

    def test_read_with_range_keys_index_limit_closed_open(self):
        """[start, end) index range combined with a row limit."""
        row_count = 10
        columns = self.COLUMNS[1], self.COLUMNS[2]
        data = [[row[1], row[2]] for row in self._row_data(row_count)]
        self._set_up_table(row_count)
        start, end, limit = 3, 7, 2
        krange = KeyRange(start_closed=data[start], end_open=data[end])
        keyset = KeySet(ranges=(krange,))
        with self._db.snapshot() as snapshot:
            rows = list(snapshot.read(
                self.TABLE,
                columns,
                keyset,
                index='name',
                limit=limit)
            )
        expected = data[start:end]
        self.assertEqual(rows, expected[:limit])

    def test_read_with_range_keys_index_limit_open_closed(self):
        """(start, end] index range combined with a row limit."""
        row_count = 10
        columns = self.COLUMNS[1], self.COLUMNS[2]
        data = [[row[1], row[2]] for row in self._row_data(row_count)]
        self._set_up_table(row_count)
        start, end, limit = 3, 7, 2
        krange = KeyRange(start_open=data[start], end_closed=data[end])
        keyset = KeySet(ranges=(krange,))
        with self._db.snapshot() as snapshot:
            rows = list(snapshot.read(
                self.TABLE,
                columns,
                keyset,
                index='name',
                limit=limit)
            )
        expected = data[start+1 : end+1]
        self.assertEqual(rows, expected[:limit])

    def test_read_with_range_keys_index_limit_open_open(self):
        """(start, end) index range combined with a row limit."""
        row_count = 10
        columns = self.COLUMNS[1], self.COLUMNS[2]
        data = [[row[1], row[2]] for row in self._row_data(row_count)]
        self._set_up_table(row_count)
        start, end, limit = 3, 7, 2
        krange = KeyRange(start_open=data[start], end_open=data[end])
        keyset = KeySet(ranges=(krange,))
        with self._db.snapshot() as snapshot:
            rows = list(snapshot.read(
                self.TABLE,
                columns,
                keyset,
                index='name',
                limit=limit))
        expected = data[start+1 : end]
        self.assertEqual(rows, expected[:limit])
def test_read_with_range_keys_and_index_closed_closed(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
committed = self._set_up_table(row_count)
data = [[row[1], row[2]] for row in self._row_data(row_count)]
keyrow, start, end = 1, 3, 7
closed_closed = KeyRange(start_closed=data[start],
end_closed=data[end])
keys = [data[keyrow],]
keyset = KeySet(keys=keys, ranges=(closed_closed,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(
self.TABLE,
columns,
keyset,
index='name')
)
expected = ([data[keyrow]] + data[start : end+1])
self.assertEqual(rows, expected)
def test_read_with_range_keys_and_index_closed_open(self):
    """Point key plus a closed/open range, read through the 'name' index."""
    row_count = 10
    keyrow, lo, hi = 1, 3, 7
    columns = self.COLUMNS[1], self.COLUMNS[2]
    self._set_up_table(row_count)
    data = [[r[1], r[2]] for r in self._row_data(row_count)]
    rng = KeyRange(start_closed=data[lo], end_open=data[hi])
    key_set = KeySet(keys=[data[keyrow]], ranges=(rng,))
    with self._db.snapshot() as snapshot:
        rows = list(
            snapshot.read(self.TABLE, columns, key_set, index='name'))
    # closed start includes data[lo]; open end excludes data[hi]
    self.assertEqual(rows, [data[keyrow]] + data[lo:hi])
def test_read_with_range_keys_and_index_open_closed(self):
    """Point key plus an open/closed range, read through the 'name' index."""
    row_count = 10
    keyrow, lo, hi = 1, 3, 7
    columns = self.COLUMNS[1], self.COLUMNS[2]
    self._set_up_table(row_count)
    data = [[r[1], r[2]] for r in self._row_data(row_count)]
    rng = KeyRange(start_open=data[lo], end_closed=data[hi])
    key_set = KeySet(keys=[data[keyrow]], ranges=(rng,))
    with self._db.snapshot() as snapshot:
        rows = list(
            snapshot.read(self.TABLE, columns, key_set, index='name'))
    # open start excludes data[lo]; closed end includes data[hi]
    self.assertEqual(rows, [data[keyrow]] + data[lo + 1:hi + 1])
def test_read_with_range_keys_and_index_open_open(self):
    """Point key plus an open/open range, read through the 'name' index."""
    row_count = 10
    keyrow, lo, hi = 1, 3, 7
    columns = self.COLUMNS[1], self.COLUMNS[2]
    self._set_up_table(row_count)
    data = [[r[1], r[2]] for r in self._row_data(row_count)]
    rng = KeyRange(start_open=data[lo], end_open=data[hi])
    key_set = KeySet(keys=[data[keyrow]], ranges=(rng,))
    with self._db.snapshot() as snapshot:
        rows = list(
            snapshot.read(self.TABLE, columns, key_set, index='name'))
    # both range endpoints excluded
    self.assertEqual(rows, [data[keyrow]] + data[lo + 1:hi])
def test_execute_sql_w_manual_consume(self):
    """Draining a streamed result set matches a plain read of all rows."""
    row_count = 3000
    committed = self._set_up_table(row_count)
    with self._db.snapshot(read_timestamp=committed) as snapshot:
        streamed = snapshot.execute_sql(self.SQL)
    with self._db.snapshot(read_timestamp=committed) as snapshot:
        expected = list(
            snapshot.read(self.TABLE, self.COLUMNS, KeySet(all_=True)))
    self.assertEqual(list(streamed), expected)
    # fully consumed: no partial row or pending chunk left behind
    self.assertEqual(streamed._current_row, [])
    self.assertEqual(streamed._pending_chunk, None)
def _check_sql_results(
        self, database, sql, params, param_types, expected, order=True):
    """Run *sql* against *database* and compare rows to *expected*.

    When *order* is true and the statement lacks an ORDER BY clause, one
    is appended so the comparison is deterministic.
    """
    if order and 'ORDER' not in sql:
        sql += ' ORDER BY eye_d'
    with database.snapshot() as snapshot:
        result = snapshot.execute_sql(
            sql, params=params, param_types=param_types)
        rows = list(result)
    self._check_rows_data(rows, expected=expected)
def test_multiuse_snapshot_execute_sql_isolation_strong(self):
    """A strong multi-use snapshot still sees rows deleted after it began."""
    row_count = 40
    sql = 'SELECT * FROM {}'.format(self.TABLE)
    self._set_up_table(row_count)
    all_rows = list(self._row_data(row_count))
    with self._db.snapshot(multi_use=True) as strong:
        self._check_row_data(list(strong.execute_sql(sql)), all_rows)
        with self._db.batch() as batch:
            batch.delete(self.TABLE, self.ALL)
        # the snapshot is isolated from the delete above
        self._check_row_data(list(strong.execute_sql(sql)), all_rows)
def test_execute_sql_returning_array_of_struct(self):
    """ARRAY(SELECT AS STRUCT ...) results come back as nested lists."""
    sql = (
        "SELECT ARRAY(SELECT AS STRUCT C1, C2 "
        "FROM (SELECT 'a' AS C1, 1 AS C2 "
        "UNION ALL SELECT 'b' AS C1, 2 AS C2) "
        "ORDER BY C1 ASC)"
    )
    self._check_sql_results(
        self._db,
        sql=sql,
        params=None,
        param_types=None,
        expected=[[[['a', 1], ['b', 2]]]],
    )
def test_invalid_type(self):
    """Inserting the wrong type for a column raises FailedPrecondition."""
    table = 'counters'
    columns = ('name', 'value')
    with self._db.batch() as batch:
        batch.delete(table, self.ALL)
        batch.insert(table, columns, (('', 0),))  # (STRING, INT64): valid
    with self.assertRaises(exceptions.FailedPrecondition) as exc_info:
        with self._db.batch() as batch:
            batch.delete(table, self.ALL)
            batch.insert(table, columns, ((0, ''),))  # types swapped
    self.assertIn(
        'Invalid value for column value in table '
        'counters: Expected INT64.',
        str(exc_info.exception))
def test_execute_sql_w_query_param(self):
    """Exercise query parameters of every scalar and array type."""
    with self._db.batch() as batch:
        batch.delete(self.ALL_TYPES_TABLE, self.ALL)
        batch.insert(
            self.ALL_TYPES_TABLE,
            self.ALL_TYPES_COLUMNS,
            self.ALL_TYPES_ROWDATA)
    # NOTE(review): this snapshot is created but never used below.
    snapshot = self._db.snapshot(
        read_timestamp=batch.committed, multi_use=True)
    # Cannot equality-test array values. See below for a test w/
    # array of IDs.
    # BOOL parameter
    self._check_sql_results(
        self._db,
        sql='SELECT eye_d FROM all_types WHERE are_you_sure = @sure',
        params={'sure': True},
        param_types={'sure': Type(code=BOOL)},
        expected=[(19,), (99,)],
    )
    # BYTES parameter
    self._check_sql_results(
        self._db,
        sql='SELECT eye_d FROM all_types WHERE raw_data = @bytes_1',
        params={'bytes_1': self.BYTES_1},
        param_types={'bytes_1': Type(code=BYTES)},
        expected=[(19,), (99,)],
    )
    # DATE parameter
    self._check_sql_results(
        self._db,
        sql='SELECT eye_d FROM all_types WHERE hwhen = @hwhen',
        params={'hwhen': self.SOME_DATE},
        param_types={'hwhen': Type(code=DATE)},
        expected=[(19,)],
    )
    # TIMESTAMP parameter
    self._check_sql_results(
        self._db,
        sql='SELECT eye_d FROM all_types WHERE exactly_hwhen = @hwhen',
        params={'hwhen': self.SOME_TIME},
        param_types={'hwhen': Type(code=TIMESTAMP)},
        expected=[(19,)],
    )
    # FLOAT64 parameters used as a half-open range
    self._check_sql_results(
        self._db,
        sql=('SELECT eye_d FROM all_types WHERE approx_value >= @lower'
             ' AND approx_value < @upper '),
        params={'lower': 0.0, 'upper': 1.0},
        param_types={
            'lower': Type(code=FLOAT64), 'upper': Type(code=FLOAT64)},
        expected=[(None,), (19,)],
    )
    # INT64 parameter
    self._check_sql_results(
        self._db,
        sql='SELECT description FROM all_types WHERE eye_d = @my_id',
        params={'my_id': 19},
        param_types={'my_id': Type(code=INT64)},
        expected=[(u'dog',)],
    )
    # NULL INT64 parameter matches nothing
    self._check_sql_results(
        self._db,
        sql='SELECT description FROM all_types WHERE eye_d = @my_id',
        params={'my_id': None},
        param_types={'my_id': Type(code=INT64)},
        expected=[],
    )
    # STRING parameter
    self._check_sql_results(
        self._db,
        sql='SELECT eye_d FROM all_types WHERE description = @description',
        params={'description': u'dog'},
        param_types={'description': Type(code=STRING)},
        expected=[(19,)],
    )
    # NOTE(review): duplicate of the TIMESTAMP check above.
    self._check_sql_results(
        self._db,
        sql='SELECT eye_d FROM all_types WHERE exactly_hwhen = @hwhen',
        params={'hwhen': self.SOME_TIME},
        param_types={'hwhen': Type(code=TIMESTAMP)},
        expected=[(19,)],
    )
    # ARRAY<INT64> parameter via UNNEST
    int_array_type = Type(code=ARRAY, array_element_type=Type(code=INT64))
    self._check_sql_results(
        self._db,
        sql=('SELECT description FROM all_types '
             'WHERE eye_d in UNNEST(@my_list)'),
        params={'my_list': [19, 99]},
        param_types={'my_list': int_array_type},
        expected=[(u'dog',), (u'cat',)],
    )
    # Empty ARRAY<STRING> parameter matches nothing
    str_array_type = Type(code=ARRAY, array_element_type=Type(code=STRING))
    self._check_sql_results(
        self._db,
        sql=('SELECT eye_d FROM all_types '
             'WHERE description in UNNEST(@my_list)'),
        params={'my_list': []},
        param_types={'my_list': str_array_type},
        expected=[],
    )
    # Populated ARRAY<STRING> parameter
    self._check_sql_results(
        self._db,
        sql=('SELECT eye_d FROM all_types '
             'WHERE description in UNNEST(@my_list)'),
        params={'my_list': [u'dog', u'cat']},
        param_types={'my_list': str_array_type},
        expected=[(19,), (99,)],
    )
    # Selecting a NULL parameter directly (no ordering possible)
    self._check_sql_results(
        self._db,
        sql='SELECT @v',
        params={'v': None},
        param_types={'v': Type(code=STRING)},
        expected=[(None,)],
        order=False,
    )
def test_execute_sql_w_query_param_transfinite(self):
    """Infinities bind as FLOAT64 params; NaN can be observed but not matched."""
    with self._db.batch() as batch:
        batch.delete(self.ALL_TYPES_TABLE, self.ALL)
        batch.insert(
            self.ALL_TYPES_TABLE,
            self.ALL_TYPES_COLUMNS,
            self.ALL_TYPES_ROWDATA)
    # -inf finds its row ...
    self._check_sql_results(
        self._db,
        sql='SELECT eye_d FROM all_types WHERE approx_value = @neg_inf',
        params={'neg_inf': float('-inf')},
        param_types={'neg_inf': Type(code=FLOAT64)},
        expected=[(207,)],
    )
    # ... and so does +inf.
    self._check_sql_results(
        self._db,
        sql='SELECT eye_d FROM all_types WHERE approx_value = @pos_inf',
        params={'pos_inf': float('+inf')},
        param_types={'pos_inf': Type(code=FLOAT64)},
        expected=[(107,)],
    )
    with self._db.snapshot(read_timestamp=batch.committed,
                           multi_use=True) as snapshot:
        result = list(snapshot.execute_sql(
            'SELECT'
            ' [CAST("-inf" AS FLOAT64),'
            ' CAST("+inf" AS FLOAT64),'
            ' CAST("NaN" AS FLOAT64)]'))
        self.assertEqual(len(result), 1)
        floats, = result[0]
        self.assertEqual(floats[0], float('-inf'))
        self.assertEqual(floats[1], float('+inf'))
        # NaN != NaN, so equality cannot be used here
        self.assertTrue(math.isnan(floats[2]))
class TestStreamingChunking(unittest.TestCase, _TestData):
    """Verify chunk reassembly of large streamed values.

    Requires the tables created by tests/system/utils/populate_streaming.py.
    """

    @classmethod
    def setUpClass(cls):
        from tests.system.utils.streaming_utils import INSTANCE_NAME
        from tests.system.utils.streaming_utils import DATABASE_NAME
        instance = Config.CLIENT.instance(INSTANCE_NAME)
        if not instance.exists():
            raise unittest.SkipTest(
                "Run 'tests/system/utils/populate_streaming.py' to enable.")
        database = instance.database(DATABASE_NAME)
        # BUG FIX: the second check previously re-tested instance.exists();
        # it must verify that the *database* exists.
        if not database.exists():
            raise unittest.SkipTest(
                "Run 'tests/system/utils/populate_streaming.py' to enable.")
        cls._db = database

    def _verify_one_column(self, table_desc):
        """Assert every row's single chunked column equals the expected value."""
        sql = 'SELECT chunk_me FROM {}'.format(table_desc.table)
        with self._db.snapshot() as snapshot:
            rows = list(snapshot.execute_sql(sql))
        self.assertEqual(len(rows), table_desc.row_count)
        expected = table_desc.value()
        for row in rows:
            self.assertEqual(row[0], expected)

    def _verify_two_columns(self, table_desc):
        """Assert both chunked columns of every row equal the expected value."""
        sql = 'SELECT chunk_me, chunk_me_2 FROM {}'.format(table_desc.table)
        with self._db.snapshot() as snapshot:
            rows = list(snapshot.execute_sql(sql))
        self.assertEqual(len(rows), table_desc.row_count)
        expected = table_desc.value()
        for row in rows:
            self.assertEqual(row[0], expected)
            self.assertEqual(row[1], expected)

    def test_four_kay(self):
        from tests.system.utils.streaming_utils import FOUR_KAY
        self._verify_one_column(FOUR_KAY)

    def test_forty_kay(self):
        from tests.system.utils.streaming_utils import FORTY_KAY
        self._verify_one_column(FORTY_KAY)

    def test_four_hundred_kay(self):
        from tests.system.utils.streaming_utils import FOUR_HUNDRED_KAY
        self._verify_one_column(FOUR_HUNDRED_KAY)

    def test_four_meg(self):
        from tests.system.utils.streaming_utils import FOUR_MEG
        self._verify_two_columns(FOUR_MEG)
class CustomException(Exception):
    """Placeholder for any user-defined exception raised inside a test
    unit-of-work to verify it propagates out of run_in_transaction."""
class _DatabaseDropper(object):
"""Helper for cleaning up databases created on-the-fly."""
def __init__(self, db):
self._db = db
def delete(self):
self._db.drop()
class _ReadAbortTrigger(object):
    """Helper for tests provoking abort-during-read.

    Two transactions run on separate threads, interleaved via Events so that
    the "provoker" updates a row the "handler" has already read, forcing the
    handler's transaction to abort and retry.
    """
    KEY1 = 'key1'
    KEY2 = 'key2'

    def __init__(self):
        # Hand-shake events ordering the two unit-of-work threads.
        self.provoker_started = threading.Event()
        self.provoker_done = threading.Event()
        self.handler_running = threading.Event()
        self.handler_done = threading.Event()

    def _provoke_abort_unit_of_work(self, transaction):
        # Read KEY1, wait until the handler has also read, then update KEY1
        # so the handler's snapshot becomes stale.
        keyset = KeySet(keys=[(self.KEY1,)])
        rows = list(
            transaction.read(COUNTERS_TABLE, COUNTERS_COLUMNS, keyset))
        assert len(rows) == 1
        row = rows[0]
        value = row[1]
        self.provoker_started.set()
        self.handler_running.wait()
        transaction.update(
            COUNTERS_TABLE, COUNTERS_COLUMNS, [[self.KEY1, value + 1]])

    def provoke_abort(self, database):
        """Run the provoker transaction and signal completion."""
        database.run_in_transaction(self._provoke_abort_unit_of_work)
        self.provoker_done.set()

    def _handle_abort_unit_of_work(self, transaction):
        # Read KEY1 first; after the provoker commits its update to KEY1,
        # this transaction is aborted and retried by run_in_transaction.
        keyset_1 = KeySet(keys=[(self.KEY1,)])
        rows_1 = list(
            transaction.read(COUNTERS_TABLE, COUNTERS_COLUMNS, keyset_1))
        assert len(rows_1) == 1
        row_1 = rows_1[0]
        value_1 = row_1[1]
        self.handler_running.set()
        self.provoker_done.wait()
        keyset_2 = KeySet(keys=[(self.KEY2,)])
        rows_2 = list(
            transaction.read(COUNTERS_TABLE, COUNTERS_COLUMNS, keyset_2))
        assert len(rows_2) == 1
        row_2 = rows_2[0]
        value_2 = row_2[1]
        transaction.update(
            COUNTERS_TABLE, COUNTERS_COLUMNS, [[self.KEY2, value_1 + value_2]])

    def handle_abort(self, database):
        """Run the handler transaction (retried on abort) and signal done."""
        database.run_in_transaction(self._handle_abort_unit_of_work)
        self.handler_done.set()
|
impinj_xarray_itemsense.py | from interrogator import *
import requests
import base64
import threading
import json
import sys
from httplib2 import Http
import os
import queue
from time import sleep
import collections
import dateutil.parser
class ImpinjXArray(Interrogator):
    """Interrogator client for an Impinj xArray reader via the ItemSense REST API.

    Authenticates against ItemSense, starts a location job, polls the items
    endpoint on a fixed interval, and dispatches the JSON responses to the
    tag database from a separate handler thread.
    """

    def __init__(self, _ip_address, _db_host, _db_password, _cert_path, _debug, _apiusername, _apipassword, _dispatchsleep=0):
        Interrogator.__init__(self, _db_host, _db_password,
                              _cert_path, _debug, _dispatchsleep)
        self.exiting = False  # set True to stop the polling and handler loops
        self.ip_address = _ip_address
        self.baseurl = "http://%s/itemsense" % self.ip_address
        self.apiusername = _apiusername
        self.apipassword = _apipassword
        # Validate the DB server certificate only when one was provided.
        if self.cert_path != 'NONE':
            self.http_obj = Http(ca_certs=self.cert_path)
        else:
            self.http_obj = Http(disable_ssl_certificate_validation=True)
        self.start_timestamp = -1  # -1 => epoch not yet established
        self.out('Initializing XArray Interrogator client')

    def out(self, x):
        # Debug-gated logging to stdout.
        if self.debug:
            sys.stdout.write(str(x) + '\n')

    def start_server(self):
        """Authenticate, start an ItemSense job, then poll items until exiting."""
        self.out('Starting Impinj XArray Interrogator client')
        self.tag_dicts_queue = queue.Queue()
        # NOTE(review): this assignment shadows the handler_thread *method*
        # with the Thread object.  It works because the bound method is
        # captured as `target` before the assignment, but a distinct
        # attribute name would be safer.
        self.handler_thread = threading.Thread(
            target=self.handler_thread, args=())
        self.handler_thread.start()
        # Create Clients and set them to connect
        authstr = "%s:%s" % (self.apiusername, self.apipassword)
        basicenc = base64.b64encode(authstr.encode())
        self.basicauth = 'Basic ' + basicenc.decode()
        facility = 'MESS'
        recipe = 'IMPINJ_Fast_Location'
        # Get a Token
        url = self.baseurl + '/authentication/v1/token/' + self.apiusername
        Headers = {}
        Headers['Authorization'] = self.basicauth
        response = requests.put(url, headers=Headers)
        self.token = response.json()['token']
        self.tokenauth = 'Token {"token":\"' + self.token + '\"}'
        # Start a Job
        url = self.baseurl + '/control/v1/jobs/start'
        Data = {}
        Data['startDelay'] = 'PT1S'  # 1 second job start delay
        Data['facility'] = facility
        Data['recipeName'] = recipe
        Headers = {}
        Headers['Authorization'] = self.tokenauth
        Headers['Content-Type'] = 'application/json'
        response = requests.post(url, data=json.dumps(Data), headers=Headers)
        jobId = response.json()['id']  # if id is not in response, need to stop existing running jobs
        self.out("Job ID: %s" % jobId)
        self.jobId = jobId
        self.count = 0
        # Poll the items endpoint; each page of results is queued for the
        # handler thread, following nextPageMarker until the page set ends.
        while not self.exiting:
            done = False
            while (not done):
                sleep(5)
                url = self.baseurl + '/data/v1/items/show'
                urlh = self.baseurl + '/data/v1/items/show/history'
                Data = {}
                Data['facility'] = facility
                Data['jobId'] = jobId
                Headers = {}
                Headers['Content-Type'] = 'application/json'
                Headers['Authorization'] = self.tokenauth
                response = requests.get(
                    url, data=json.dumps(Data), headers=Headers)
                responsejson = response.json()
                # (Commented-out debug dump of current/history item
                # responses removed; restore from VCS history if needed.)
                if not "nextPageMarker" in responsejson:
                    done = True
                elif responsejson['nextPageMarker'] is None:
                    done = True
                else:
                    Data['pageMarker'] = responsejson['nextPageMarker']
                self.tag_dicts_queue.put(responsejson)
                self.count = self.count + 1

    def handler_thread(self):
        """Drain queued responses in batches and dispatch them to the DB."""
        while not self.exiting:
            responsearray = []
            responsejson = self.tag_dicts_queue.get(block=True)
            responsearray.append(responsejson)
            # http://stackoverflow.com/questions/156360/get-all-items-from-thread-queue
            # while we're here, try to pick up any more items that were inserted into the queue
            while 1:
                try:
                    responsejson = self.tag_dicts_queue.get_nowait()
                    responsearray.append(responsejson)
                except queue.Empty:
                    break
            self.insert_tag(responsearray)

    def start(self):
        # Blocking entry point: runs the polling loop on the caller's thread.
        self.out('XArray: start')
        self.start_server()

    def close_server(self):
        """Stop the ItemSense job and revoke the auth token.

        NOTE(review): assumes start_server() ran (self.jobId / self.token /
        self.tokenauth exist); __del__ calls this unconditionally.
        """
        self.exiting = True
        # Stop the Job
        url = self.baseurl + '/control/v1/jobs/stop/' + self.jobId
        Headers = {}
        Headers['Content-Type'] = 'application/json'
        Headers['Authorization'] = self.tokenauth
        response = requests.post(url, headers=Headers)
        self.out(response)
        # Revoke the Token
        url = self.baseurl + '/authentication/v1/revokeToken'
        Headers = {}
        Headers['Content-Type'] = 'application/json'
        Headers['Authorization'] = self.basicauth
        Data = {}
        Data['token'] = self.token
        response = requests.put(url, headers=Headers, data=json.dumps(Data))
        print(response)

    def __del__(self):
        self.close_server()

    def insert_tag(self, tagarray):
        """Convert queued item pages to dicts and PUT them to the REST API.

        On the first batch, establishes self.start_timestamp as the minimum
        observed lastModifiedTime so later records get a relative time.
        """
        input_dicts = []
        if self.start_timestamp == -1:
            min_timestamp = -1
            for entry in tagarray:
                items = entry['items']
                for freeform in items:
                    # convert the timestamp from a string to numeric
                    timestamp = freeform['lastModifiedTime']
                    timestampdt = dateutil.parser.parse(timestamp)
                    timestampmicro = timestampdt.timestamp() * 1000
                    if int(timestampmicro) < min_timestamp or min_timestamp == -1:
                        min_timestamp = timestampmicro
            self.start_timestamp = int(min_timestamp)
        for entry in tagarray:
            items = entry['items']
            for freeform in items:
                timestamp = freeform['lastModifiedTime']
                epc = freeform['epc']
                xPos = freeform["xLocation"]
                yPos = freeform["yLocation"]
                zPos = freeform["zLocation"]
                # convert the timestamp from a string to numeric
                timestampdt = dateutil.parser.parse(timestamp)
                timestampmicro = timestampdt.timestamp() * 1000
                self.out("Adding tag / collection %s with timestamp %s and epc %s and xPosition %s and yPosition %s and zPosition %s" % (
                    str(self.count), str(timestampmicro), str(epc), str(xPos), str(yPos), str(zPos)))
                input_dict = dict()
                input_dict['data'] = dict()
                input_dict['data']['db_password'] = self.db_password
                input_dict['data']['freeform'] = freeform
                input_dict['data']['relative_time'] = int(timestampmicro) - self.start_timestamp
                input_dict['data']['interrogator_time'] = timestampmicro
                self.out("Input dict is: %s" % input_dict)
                input_dicts.append(input_dict)
        url = self.db_host + '/api/rssi'
        resp, content = self.http_obj.request(uri=url, method='PUT', headers={
            'Content-Type': 'application/json; charset=UTF-8'}, body=json.dumps(input_dicts))
        if self.dispatchsleep > 0:
            # if desired, sleep the dispatcher for a short time to queue up some inserts and give the producer some CPU time
            sleep(self.dispatchsleep)
# Requires:
# easy_install httplib2 (not pip)
|
ytdl.py |
import asyncio
import datetime
import functools
import logging
import os
import pathlib
import platform
import re
import subprocess
import tempfile
import threading
import traceback
import fakeredis
import filetype
import youtube_dl
from hachoir.metadata import extractMetadata
from hachoir.metadata.audio import FlacMetadata, MpegAudioMetadata
from hachoir.metadata.video import MkvMetadata, MP4Metadata
from hachoir.parser import createParser
from telethon import Button, TelegramClient, events
from telethon.tl.types import (DocumentAttributeAudio,
DocumentAttributeFilename,
DocumentAttributeVideo)
from telethon.utils import get_input_media
from tgbot_ping import get_runtime
from youtube_dl.utils import DownloadError
from FastTelethon import download_file, upload_file
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(filename)s [%(levelname)s]: %(message)s')
logging.getLogger('telethon').setLevel(logging.WARNING)
# Credentials come from the environment; the literal fallbacks look like
# redacted placeholders -- TODO confirm before deploying.
token = os.getenv("TOKEN") or "17Zg"
app_id = int(os.getenv("APP_ID") or "922")
app_hash = os.getenv("APP_HASH") or "490"
bot = TelegramClient('bot', app_id, app_hash,
                     device_model=f"{platform.system()} {platform.node()}-{os.path.basename(__file__)}",
                     system_version=platform.platform()).start(bot_token=token)
# In-memory redis stand-in, used only for edit-throttle keys.
r = fakeredis.FakeStrictRedis()
# Seconds an edit-throttle key stays set (see sync_edit_message).
EXPIRE = 5
def get_metadata(video_path):
    """Extract (attributes, mime_type) for a media file via hachoir.

    Returns a dict usable for Telethon document attributes; falls back to
    zeroed dimensions and a generic mime type when parsing fails.
    """
    try:
        meta = extractMetadata(createParser(video_path))
        mime = meta.get('mime_type')
        duration = meta.get('duration').seconds
        if isinstance(meta, MkvMetadata):
            video_track = meta['video[1]']
            return dict(duration=duration,
                        w=video_track.get('width'),
                        h=video_track.get('height')), mime
        if isinstance(meta, FlacMetadata):
            # audio only: no dimensions to report
            return dict(duration=duration), mime
        return dict(duration=duration,
                    w=meta.get('width', 0),
                    h=meta.get('height', 0)), mime
    except Exception as e:
        logging.error(e)
        return dict(duration=0, w=0, h=0), 'application/octet-stream'
def go(chat_id, message, msg):
    # Thread-side bridge: run the async message edit in a fresh event loop.
    asyncio.run(sync_edit_message(chat_id, message, msg))
def sizeof_fmt(num: int, suffix='B'):
    """Render a byte count using binary (1024-based) unit prefixes."""
    value = float(num)
    for prefix in ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi'):
        if abs(value) < 1024.0:
            return "%3.1f%s%s" % (value, prefix, suffix)
        value /= 1024.0
    # beyond zebibytes: fall through to Yi
    return "%.1f%s%s" % (value, 'Yi', suffix)
def progress_hook(d: dict, chat_id, message):
    """youtube-dl progress callback: mirror download status into Telegram."""
    if d['status'] != 'downloading':
        return
    downloaded = d.get("downloaded_bytes", 0)
    total = d.get("total_bytes") or d.get("total_bytes_estimate", 0)
    filesize = sizeof_fmt(total)
    if total > 2 * 1024 * 1024 * 1024:
        raise Exception("\n\nYour video is too large. %s will exceed Telegram's max limit 2GiB" % filesize)
    percent = d.get("_percent_str", "N/A")
    speed = d.get("_speed_str", "N/A")
    status = f'[{filesize}]: Downloading {percent} - {downloaded}/{total} @ {speed}'
    # edit from a worker thread so this hook never blocks the download
    threading.Thread(target=go, args=(chat_id, message, status)).start()
def run_in_executor(f):
    """Decorator: make blocking *f* awaitable via the default thread pool."""
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        call = lambda: f(*args, **kwargs)
        return asyncio.get_running_loop().run_in_executor(None, call)
    return wrapper
@run_in_executor
def ytdl_download(url, tempdir, chat_id, message) -> dict:
    """Download *url* into *tempdir*, trying progressively laxer formats.

    Returns dict(status=bool, error=str|None, filepath=str|None).
    Runs on the default thread pool (see run_in_executor) so the bot's
    event loop stays responsive during the download.
    """
    response = dict(status=None, error=None, filepath=None)
    logging.info("Downloading for %s", url)
    output = os.path.join(tempdir, '%(title)s.%(ext)s')
    ydl_opts = {
        'progress_hooks': [lambda d: progress_hook(d, chat_id, message)],
        'outtmpl': output,
        'restrictfilenames': True,
        'quiet': True
    }
    formats = [
        "bestvideo[ext=mp4]+bestaudio[ext=m4a]/bestvideo+bestaudio",
        "bestvideo[vcodec^=avc]+bestaudio[acodec^=mp4a]/best[vcodec^=avc]/best",
        ""  # last resort: youtube-dl's default format selection
    ]
    err = None
    for fmt in formats:
        if fmt:
            ydl_opts["format"] = fmt
        try:
            with youtube_dl.YoutubeDL(ydl_opts) as ydl:
                ydl.download([url])
        except DownloadError:
            err = traceback.format_exc()
            logging.error("Download failed for %s ", url)
            continue
        # success: tempdir was empty before this call, so the sole
        # entry is the downloaded file (was a pointless list-copy).
        response["status"] = True
        response["filepath"] = os.path.join(tempdir, os.listdir(tempdir)[0])
        break
    else:
        # every format attempt failed
        response["status"] = False
        response["error"] = err
    # convert format if necessary (mutates response in place)
    convert_to_mp4(response)
    return response
def convert_to_mp4(resp: dict):
    """Transcode the downloaded file to mp4 when its container needs it.

    Mutates *resp* in place (updating 'filepath' on conversion) and also
    returns it. No-op when the download failed or the type is acceptable.
    """
    needs_convert = ["video/x-flv"]
    if resp["status"]:
        mime = filetype.guess(resp["filepath"]).mime
        if mime in needs_convert:
            path = resp["filepath"]
            # BUG FIX: basename.split(".")[0] truncated the name at the
            # FIRST dot; splitext swaps only the real extension.
            new_file_path = os.path.splitext(path)[0] + ".mp4"
            logging.info("Detected %s, converting to mp4...", mime)
            # BUG FIX: pass an argument list instead of cmd.split() so
            # paths containing spaces survive.
            subprocess.check_output(["ffmpeg", "-i", path, new_file_path])
            resp["filepath"] = new_file_path
    return resp
async def upload_callback(current, total, chat_id, message):
    """Upload progress callback, throttled via a short-lived redis key."""
    key = f"{chat_id}-{message.id}"
    if r.exists(key):
        # edited recently; skip to avoid Telegram flood limits
        return
    r.set(key, "ok", ex=EXPIRE)
    filesize = sizeof_fmt(total)
    percent = round(current / total * 100, 2)
    await bot.edit_message(
        chat_id, message,
        f'[{filesize}]: Uploading {percent}% - {current}/{total}')
async def sync_edit_message(chat_id, message, msg):
    """Edit *message* at most once per EXPIRE seconds (flood protection)."""
    key = f"{chat_id}-{message.id}"
    if r.exists(key):
        return
    r.set(key, "ok", ex=EXPIRE)
    await bot.edit_message(chat_id, message, msg)
# bot starts here
@bot.on(events.NewMessage(pattern='/start'))
async def send_start(event):
    """Reply to /start with the welcome message and support buttons."""
    logging.info("Welcome to youtube-dl bot!")
    async with bot.action(event.chat_id, 'typing'):
        # BUG FIX: was bot.send_messsage(...) (typo -> AttributeError) and
        # omitted the required chat-id/entity argument.
        await bot.send_message(
            event.chat_id,
            "Hiโจ Welcome ๐\nI am YouTube Video/Audio Downloader๐ Created by @Psycho_Bots\nSend me YT link to get Your Video๐/audio",
            buttons=[
                [
                    Button.url("Supportโฃ๏ธ", url="https://t.me/PsychoBots_Chat")
                ],
                [
                    Button.url("Updates", url="https://t.me/Psycho_Bots")]
            ]
        )
    raise events.StopPropagation
async def convert_flac(flac_name, tmp):
    """Extract the audio track of *tmp* into a sibling file named *flac_name*.

    ffmpeg copies the audio stream (-acodec copy, no re-encode) and drops
    video (-vn). Returns the path of the new audio file.
    """
    flac_tmp = pathlib.Path(tmp.name).parent.joinpath(flac_name).as_posix()
    logging.info("converting to flac")
    # ffmpeg -i input-video.avi -vn -acodec copy output-audio.m4a
    # Improvements: removed the stray debug print of the command line, and
    # pass an argument list instead of cmd.split() so paths with spaces work.
    subprocess.check_output(
        ["ffmpeg", "-y", "-i", tmp.name, "-vn", "-acodec", "copy", flac_tmp])
    return flac_tmp
@bot.on(events.CallbackQuery)
async def handler(event):
    """'audio' button callback: re-download the sent video, strip the audio
    track with ffmpeg, and send it back as a file."""
    await event.answer('Converting to audio...please wait patiently')
    msg = await event.get_message()
    chat_id = msg.chat_id
    mp4_name = msg.file.name  # e.g. 'youtube-dl_test_video_a.mp4'
    # BUG FIX: str.replace("mp4", "m4a") rewrote EVERY "mp4" substring in
    # the filename (e.g. 'mp4_clip.mp4' -> 'm4a_clip.m4a'); swap only the
    # real extension.
    flac_name = os.path.splitext(mp4_name)[0] + ".m4a"
    with tempfile.NamedTemporaryFile() as tmp:
        with open(tmp.name, "wb") as out:
            logging.info("downloading to %s", tmp.name)
            async with bot.action(chat_id, 'record-round'):
                await download_file(event.client, msg.media.document, out)
        logging.info("downloading complete %s", tmp.name)
        # execute ffmpeg
        async with bot.action(chat_id, 'record-audio'):
            await asyncio.sleep(1)
            flac_tmp = await convert_flac(flac_name, tmp)
        async with bot.action(chat_id, 'document'):
            logging.info("Converting flac complete, sending...")
            # TODO: attach DocumentAttributeAudio metadata before sending
            await bot.send_file(chat_id, flac_tmp)
        os.unlink(flac_tmp)
        # no explicit tmp.close(): the context manager handles cleanup
@bot.on(events.NewMessage(pattern='/help'))
async def send_help(event):
    """Reply to /help with usage instructions."""
    async with bot.action(event.chat_id, 'typing'):
        help_text = "Send me YT links To download videos IF Our Bot Is not working...Report Bugs AT Support"
        await bot.send_message(event.chat_id, help_text)
    raise events.StopPropagation
@bot.on(events.NewMessage(pattern='/ping'))
async def send_ping(event):
    """Reply to /ping with the bot's runtime/uptime information."""
    # BUG FIX: the def line was missing its trailing colon (SyntaxError).
    async with bot.action(event.chat_id, 'typing'):
        bot_info = get_runtime("botsrunner_ytdl_1", "YouTube-dl")
        await bot.send_message(event.chat_id, f"{bot_info}\n", parse_mode='md')
    raise events.StopPropagation
@bot.on(events.NewMessage(pattern='/about'))
async def send_about(event):
    """Reply to /about with attribution information."""
    async with bot.action(event.chat_id, 'typing'):
        about_text = "This is YouTube Downloader Created By @Psycho_Bots"
        await bot.send_message(event.chat_id, about_text)
    raise events.StopPropagation
@bot.on(events.NewMessage(incoming=True))
async def send_video(event):
    """Catch-all handler: treat the message text as a URL, download the
    video with youtube-dl and upload it back with metadata and an 'audio'
    conversion button.

    NOTE(review): several string literals below appear mojibake-garbled in
    the source (emoji encoded badly); preserved byte-for-byte.
    """
    chat_id = event.message.chat_id
    url = re.sub(r'/ytdl\s*', '', event.message.text)
    logging.info("start %s", url)
    # if this is in a group/channel, only react to explicit /ytdl commands
    if not event.message.is_private and not event.message.text.lower().startswith("/ytdl"):
        logging.warning("%s, it's annoying me...๐๏ธ ", event.message.text)
        return
    if not re.findall(r"^https?://", url.lower()):
        await event.reply("I think you should send me a link. Don't you agree with me?")
        return
    message = await event.reply("Processing...")
    temp_dir = tempfile.TemporaryDirectory()
    async with bot.action(chat_id, 'video'):
        result = await ytdl_download(url, temp_dir.name, chat_id, message)
        # markup: inline 'audio' button handled by the CallbackQuery handler
        markup = bot.build_reply_markup(Button.inline('audio'))
    if result["status"]:
        async with bot.action(chat_id, 'document'):
            video_path = result["filepath"]
            await bot.edit_message(chat_id, message, 'Download complete. Sending now...')
            metadata, mime_type = get_metadata(video_path)
            with open(video_path, 'rb') as f:
                input_file = await upload_file(
                    bot, f,
                    progress_callback=lambda x, y: upload_callback(x, y, chat_id, message))
            input_media = get_input_media(input_file)
            file_name = os.path.basename(video_path)
            input_media.attributes = [
                DocumentAttributeVideo(round_message=False, supports_streaming=True, **metadata),
                DocumentAttributeFilename(file_name),
            ]
            input_media.mime_type = mime_type
            # duration here is int - convert to timedelta for the caption
            metadata["duration_str"] = datetime.timedelta(seconds=metadata["duration"])
            metadata["size"] = sizeof_fmt(os.stat(video_path).st_size)
            caption = "{name}\n{duration_str} {size} {w}*{h}".format(name=file_name, **metadata)
            await bot.send_file(chat_id, input_media, caption=caption, buttons=markup)
            await bot.edit_message(chat_id, message, 'Download success!โ')
    else:
        async with bot.action(chat_id, 'typing'):
            # cap the traceback so the edit stays inside Telegram's limit
            tb = result["error"][0:4000]
            await bot.edit_message(chat_id, message, f"{url} download failedโ๏ผ\n```{tb}```",
                                   parse_mode='markdown')
    temp_dir.cleanup()
if __name__ == '__main__':
    # Block the main thread until Telegram disconnects the bot.
    bot.run_until_disconnected()
|
# Launcher for the SR Competition stack: grafts the comp API, scorer,
# static screens, event stream, and optional livestream overlay onto a
# single cherrypy server.
try:
    import sr.comp
except ImportError:
    print("No srcomp detected. Did you activate the virtualenv?")
    print("$ source venv/bin/activate")
    exit(1)
import cherrypy
import argparse
import os.path
import threading
import subprocess
import time
from sr.comp.http import app

parser = argparse.ArgumentParser()
parser.add_argument('compstate', help='path to compstate repo')
parser.add_argument('--no-scorer', action='store_false',
                    dest='scorer', help='disable the scorer system')
parser.add_argument('--no-stream', action='store_false',
                    dest='stream', help='disable the event stream')
parser.add_argument('--overlay', action='store_true',
                    help='enable using the livestream overlay')
args = parser.parse_args()

app.config['COMPSTATE'] = args.compstate
mydir = os.path.dirname(os.path.realpath(__file__))

if args.stream:
    def start_stream_thread():
        # Event used to tell the node subprocess to shut down.
        murder_stream = threading.Event()

        # Run streams thread
        def run_streams():
            # Hack, pending a better solution to determining whether
            # cherrypy has actually started yet.
            stream_dir = os.path.join(mydir, 'srcomp-stream')
            stream_process = subprocess.Popen(('node', 'main.js'),
                                              cwd=stream_dir)
            # block until cherrypy stops, then tear the stream down
            murder_stream.wait()
            stream_process.terminate()
            stream_process.wait()

        cherrypy.engine.subscribe('stop', murder_stream.set)
        thr = threading.Thread(name='streams', target=run_streams)
        thr.start()

    # only start the stream once cherrypy itself has started
    cherrypy.engine.subscribe('start', start_stream_thread)

cherrypy.tree.graft(app, '/comp-api')

if args.scorer:
    from sr.comp.scorer import app as scorer_app
    scorer_app.config['COMPSTATE'] = args.compstate
    scorer_app.config['COMPSTATE_LOCAL'] = True
    cherrypy.tree.graft(scorer_app, '/scorer')

screens_dir = os.path.realpath(os.path.join(mydir, 'srcomp-screens'))
config = {
    '/': {
        'tools.staticdir.on': True,
        'tools.staticdir.dir': screens_dir,
        'tools.caching.on': False,
    },
    'global': {
        'server.socket_host': '::',  # listen on IPv6 (and mapped IPv4)
        'server.socket_port': 5112,
        'server.thread_pool': 8
    }
}

if args.overlay:
    overlay_dir = os.path.realpath(os.path.join(mydir, 'livestream-overlay'))
    config['/livestream-overlay'] = {
        'tools.staticdir.on': True,
        'tools.staticdir.dir': overlay_dir,
        'tools.caching.on': False,
        'tools.staticdir.index': 'stream.html',
    }

cherrypy.quickstart(config=config)
|
e2e_throughput.py | #!/usr/bin/python3
from zoo.serving.client import InputQueue, OutputQueue
from zoo.common.encryption_utils import encrypt_with_AES_GCM
import os
import cv2
import json
import time
from optparse import OptionParser
import base64
from multiprocessing import Process
import redis
import yaml
import argparse
from numpy import *
# Key prefix under which Cluster Serving writes its results into redis;
# result keys look like RESULT_PREFIX + name + ':<record-id>'.
RESULT_PREFIX = "cluster-serving_"
name = "serving_stream"
def main(args):
    """End-to-end throughput benchmark for Cluster Serving.

    Enqueues args.image_num copies of one image with args.proc_num worker
    processes, polls redis for the results, and reports e2e images/sec.
    """
    # each process must push an exact integer share of the images
    if args.image_num % args.proc_num != 0:
        raise EOFError("Please make sure that image push number can be divided by multi-process number")
    redis_args = {}
    with open(args.config_path) as file:
        config = yaml.full_load(file)
    redis_url = config.get('redisUrl')
    if redis_url:
        host = redis_url.split(':')[0]
        port = redis_url.split(':')[1]
        redis_args = {'host': host, 'port': port}
    if config.get('redisSecureEnabled'):
        if not os.path.isdir(args.keydir):
            raise EOFError("Please set secure key path")
        redis_args['ssl'] = 'True'
        redis_args['ssl_cert_reqs'] = 'none'
        redis_args['ssl_certfile'] = redis_args['ssl_ca_certs'] = os.path.join(args.keydir, "server.crt")
        redis_args['ssl_keyfile'] = os.path.join(args.keydir, "server.key")
    encrypt = config.get('recordEncrypted')
    DB = redis.StrictRedis(**redis_args)
    # ssl_cert_reqs is consumed by StrictRedis only -- presumably the
    # queue APIs reject it; TODO confirm.
    redis_args.pop('ssl_cert_reqs', None)
    try:
        # drain any stale results left over from a previous run
        print("Entering initial dequeue")
        output_api = OutputQueue(**redis_args)
        start = time.time()
        res = output_api.dequeue()
        end = time.time()
        print("Dequeued", len(res), "records in", end - start, "sec, dequeue fps:", len(res) / (end - start))
        print("Initial dequeue completed")
    except Exception:
        print("Dequeue error encountered")
    e2e_start = image_enqueue(redis_args, args.image_num, args.proc_num, args.image_path, encrypt)
    e2e_end, dequeue_num, num_invalid = image_dequeue(DB, args.image_num)
    # clamp at zero in case invalid results outnumber dequeued ones
    num_valid = maximum(dequeue_num - num_invalid, 0)
    duration = e2e_end - e2e_start
    print("Served", num_valid, "images in", duration, "sec, e2e throughput is", num_valid / duration,
          "images/sec, excluded", num_invalid, "invalid results")
def image_enqueue(redis_args, img_num, proc_num, path, encrypt):
    """Push *img_num* copies of the image at *path* with *proc_num* processes.

    The image is read, resized to 224x224, JPEG+base64 encoded (and
    optionally AES-GCM encrypted) once; every record reuses the payload.
    Returns the enqueue start timestamp, used as the e2e start time.
    """
    print("Entering enqueue")
    input_api = InputQueue(**redis_args)
    img = cv2.imread(path)
    img = cv2.resize(img, (224, 224))
    data = cv2.imencode(".jpg", img)[1]
    img_encoded = base64.b64encode(data).decode("utf-8")
    if encrypt:
        img_encoded = encrypt_with_AES_GCM(img_encoded, "secret", "salt")
    print("Record encoded")
    img_per_proc = int(img_num / proc_num)
    procs = []
    def push_image(image_num, index, proc_id):
        # Worker body: enqueue a contiguous range of record ids
        # starting at `index`.
        print("Entering enqueue", proc_id)
        for i in range(image_num):
            input_api.enqueue("my-img-" + str(i + index), t={"b64": img_encoded})
    start = time.time()
    for i in range(proc_num):
        proc = Process(target=push_image, args=(img_per_proc, i * img_per_proc, i,))
        procs.append(proc)
        proc.start()
    for p in procs:
        p.join()
    end = time.time()
    print(img_num, "images enqueued")
    print("total enqueue time:", end - start)
    fps = img_num / (end - start)
    print("enqueue fps:", fps)
    return start
def image_dequeue(DB, img_num):
    """Poll redis until *img_num* result records have been observed.

    Returns ``(finish_timestamp, number_dequeued, number_invalid)``.
    """
    print("Entering dequeue")
    dequeue_num = 0
    num_invalid = 0
    start = time.time()
    while dequeue_num < img_num:
        # Fetch every currently available result hash in one pipeline trip
        pipe = DB.pipeline()
        res_list = DB.keys(RESULT_PREFIX + name + ':*')
        for res in res_list:
            pipe.hgetall(res.decode('utf-8'))
        res_dict_list = pipe.execute()
        for res_dict in res_dict_list:
            try:
                res_val = res_dict[b'value'].decode('utf-8')
            except Exception:
                # Malformed record: count as invalid and keep going
                print("Irregular result dict:", res_dict)
                num_invalid += 1
                continue
            if res_val == 'NaN':
                # 'NaN' marks a record the serving side failed to score
                num_invalid += 1
        num_res = len(res_list)
        if num_res > 0:
            dequeue_num += num_res
            print("Received", dequeue_num, "results, including", num_invalid, "invalid results")
            # Delete processed keys so the next poll only sees new results
            DB.delete(*res_list)
    print("Total dequeue time:", time.time() - start)
    return time.time(), dequeue_num, num_invalid
if __name__ == "__main__":
    # Command-line entry point: parse benchmark options and run the test.
    parser = argparse.ArgumentParser()
    parser.add_argument('--config_path', '-c', help='path of cluster serving config.yaml', default='config.yaml')
    parser.add_argument('--image_path', '-i', help='path of test image', default='ILSVRC2012_val_00000001.JPEG')
    parser.add_argument('--image_num', '-n', type=int, help='number of iterations to push image', default=1000)
    parser.add_argument('--proc_num', '-p', type=int, help='number of procs', default=10)
    parser.add_argument('--keydir', '-k', help='key files directory path', default='keys')
    args = parser.parse_args()
    main(args)
|
rconrestart.py | import lib.rconprotocol
import sched
import logging
import time
import threading
"""
RconRestart class module.
Used to setup a shutdown timer with restart messages.
Please note that this module will only exit the server.
You may need an additional script (or watchdog) to get the server back online.
"""
class RconRestart(object):
    """Schedules periodic server restarts over an Rcon connection.

    Warns players with configurable countdown messages, then locks the
    server, kicks everyone and sends the '#shutdown' command.  Note that
    this class only shuts the server down; an external watchdog is
    required to bring it back online.

    Fix: the explanatory triple-quoted strings previously sat *before*
    each method as bare expression statements, so they were not attached
    as docstrings (invisible to help() and IDEs).  They are now proper
    docstrings; runtime behavior is unchanged.
    """

    def __init__(self, rcon, config):
        """Create the scheduler from an Rcon handle and a config dict.

        :param rcon: connected Rcon protocol object (sendCommand, kickAll,
            lockServer, Abort)
        :param dict config: expects 'messages', 'interval',
            'exitonrestart' and optionally 'delay'
        """
        # shutdown and shutdown message scheduler
        self.sched = sched.scheduler(time.time, time.sleep)
        self.shutdownTimer = 0
        self.restartMessages = None
        self.exitOnRestart = False
        self.inProgress = False
        self.canceled = False
        self.rcon = rcon
        # seconds between kicking players and the actual shutdown;
        # values below 5 fall back to the 15 second default
        self.shutdownDelay = config['delay'] if 'delay' in config and config['delay'] >= 5 else 15
        self.setMessages(config['messages'])
        self.setInterval(config['interval'])
        self.setExitOnRestart(config['exitonrestart'])
        logging.debug('%s() initialized' % type(self).__name__)

    def setInterval(self, min):
        """Set an interval when the server should receive the shutdown command.

        :param int min: number of minutes until server shutdown
        """
        self.shutdownTimer = min * 60

    def setMessages(self, messageList):
        """Set a list of messages (multidimensional array) to inform players.

        Each entry has the format ``[<minBeforeRestart|integer>, "<Message|string>"]``,
        for example::

            [
                [5, "Restart in 5 minutes"],
                [10, "Restart in 10 minutes"]
            ]
        """
        self.restartMessages = []
        for m in messageList:
            self.restartMessages.append( RestartMessage(m[0],m[1]) )

    def setExitOnRestart(self, yesNo):
        """Exit this application (by using Rcon.Abort()) when the restart occurred.

        :param bool yesNo: True = exit the program when the shutdown command
            has been sent, otherwise keep running
        """
        self.exitOnRestart = yesNo

    def OnConnected(self):
        """Event: called from Rcon.OnConnected().

        When the connection is established, start the "restart" schedule
        in a separate thread.  The thread is a daemon so it can be stopped
        via SystemExit or Ctrl+C.
        """
        if self.shutdownTimer > 0 and self.inProgress == False:
            t = threading.Thread(target=self._initRestartScheduler)
            t.daemon = True
            t.start()
            logging.info('OnConnect(): %s ready to restart server every %d seconds' % (type(self).__name__, self.shutdownTimer))
        else:
            logging.info("OnConnect(): %s disabled" % type(self).__name__)

    def OnReconnected(self):
        """Event: called from Rcon.OnReconnected(); re-arms the schedule."""
        if self.shutdownTimer > 0 and self.inProgress == False:
            # restart the module
            t = threading.Thread(target=self._initRestartScheduler)
            t.daemon = True
            t.start()
            logging.info('OnReconnect(): %s ready to restart server every %d seconds' % (type(self).__name__, self.shutdownTimer))

    def _restartMessageTask(self, msg):
        """Send one restart warning message to all players."""
        logging.info('Sending restart message: {}'.format(msg))
        self.rcon.sendCommand("say -1 \"%s\"" % msg)

    def _shutdownTask(self):
        """The actual shutdown: lock, kick all, wait, send '#shutdown'."""
        self.inProgress = True
        self.rcon.lockServer()
        self.rcon.kickAll()
        # wait some seconds before restarting so players are disconnected
        logging.info('Delay the shutdown process')
        time.sleep(self.shutdownDelay)
        logging.info('Sending shutdown command')
        self.rcon.sendCommand('#shutdown')
        self.inProgress = False

    def _emptyScheduler(self):
        """Clear all pending scheduler events."""
        if not self.sched.empty():
            for q in self.sched.queue:
                self.sched.cancel(q)

    def cancelRestart(self):
        """Cancel the running shutdown process (public API)."""
        self.canceled = True
        self._emptyScheduler()
        self.rcon.sendCommand("say -1 \"RESTART CANCELED\"")
        # Cancel the current shutdown timer, BUT continue with regular restarts
        self.OnConnected()

    def _initRestartScheduler(self):
        """Queue the warning messages plus the shutdown task, then run them."""
        # make sure all previous scheds are being removed
        self._emptyScheduler()
        self.sched.enter(self.shutdownTimer, 1, self._shutdownTask, ())
        if type(self.restartMessages) is list:
            for msg in self.restartMessages:
                # only schedule warnings that fit inside the interval
                if int(self.shutdownTimer - msg.toSecond()) > 0:
                    self.sched.enter( self.shutdownTimer - msg.toSecond(), 1, self._restartMessageTask, (msg.message,) )
        self.sched.run()
        logging.debug('All shutdown tasks executed')
        if self.exitOnRestart and not self.canceled:
            self.rcon.Abort()
        self.canceled = False
"""
RestartMessage Class used to inform the players before shutdown take action
"""
class RestartMessage():
    """One pre-shutdown warning: the text and how long before the restart
    it should be shown."""

    def __init__(self, min, message):
        # minutes before the restart at which this message is broadcast
        self.min = min
        # the text sent to the players
        self.message = message

    def toSecond(self):
        """Return the warning offset converted from minutes to seconds."""
        return 60 * self.min
|
manager.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import errno
import traceback
import socket
import logging
import json
import collections
from shadowsocks import common, eventloop, tcprelay, udprelay, asyncdns, shell
BUF_SIZE = 1506  # max bytes read per control datagram
STAT_SEND_LIMIT = 50  # max number of ports packed into one statistics datagram
class Manager(object):
    """Multi-port shadowsocks controller.

    Listens on the configured manager address (UDP "host:port", or a UNIX
    datagram socket path) for control commands — add / remove / ping /
    stat / transfer — and keeps one (TCPRelay, UDPRelay) pair per server
    port.

    Fixes applied:
      * ``port_password.items()`` yields 2-tuples, so the previous
        3-name unpacking ``for port, password, method in ...`` raised
        ValueError for any non-empty mapping (``method`` was unused in
        the loop body anyway).
      * ``_send_control_data`` referenced ``sys.platform`` although this
        module never imports ``sys`` (NameError in the error path); a
        method-local import restores the intended Windows handling.
    """

    def __init__(self, config):
        """Bind the control socket and spawn relays for every configured port."""
        self._config = config
        self._relays = {}  # server_port -> (tcprelay, udprelay)
        self._loop = eventloop.EventLoop()
        self._dns_resolver = asyncdns.DNSResolver()
        self._dns_resolver.add_to_loop(self._loop)
        # per-port transferred-byte counters since the last report
        self._statistics = collections.defaultdict(int)
        self._control_client_addr = None
        try:
            manager_address = config['manager_address']
            if ':' in manager_address:
                # "host:port" -> UDP control socket
                addr = manager_address.rsplit(':', 1)
                addr = addr[0], int(addr[1])
                addrs = socket.getaddrinfo(addr[0], addr[1])
                if addrs:
                    family = addrs[0][0]
                else:
                    logging.error('invalid address: %s', manager_address)
                    exit(1)
            else:
                # plain path -> UNIX domain datagram socket
                addr = manager_address
                family = socket.AF_UNIX
            self._control_socket = socket.socket(family,
                                                 socket.SOCK_DGRAM)
            self._control_socket.bind(addr)
            self._control_socket.setblocking(False)
        except (OSError, IOError) as e:
            logging.error(e)
            logging.error('can not bind to manager address')
            exit(1)
        self._loop.add(self._control_socket,
                       eventloop.POLL_IN, self)
        # self._loop.add_periodic(self.handle_periodic)
        port_password = config['port_password']
        del config['port_password']
        # dict.items() yields (port, password) pairs; see class docstring.
        for port, password in port_password.items():
            a_config = config.copy()
            a_config['server_port'] = int(port)
            a_config['password'] = password
            self.add_port(a_config)

    def add_port(self, config):
        """Start a TCP+UDP relay pair on config['server_port'] (no-op if it exists)."""
        port = int(config['server_port'])
        servers = self._relays.get(port, None)
        if servers:
            logging.error("server already exists at %s:%d" % (config['server'],
                                                              port))
            return
        logging.info("adding server at %s:%d" % (config['server'], port))
        t = tcprelay.TCPRelay(config, self._dns_resolver, False,
                              self.stat_callback)
        u = udprelay.UDPRelay(config, self._dns_resolver, False,
                              self.stat_callback)
        t.add_to_loop(self._loop)
        u.add_to_loop(self._loop)
        self._relays[port] = (t, u)

    def remove_port(self, config):
        """Stop and drop the relay pair on config['server_port'], if present."""
        port = int(config['server_port'])
        servers = self._relays.get(port, None)
        if servers:
            logging.info("removing server at %s:%d" % (config['server'], port))
            t, u = servers
            t.close(next_tick=False)
            u.close(next_tick=False)
            del self._relays[port]
        else:
            logging.error("server not exist at %s:%d" % (config['server'],
                                                         port))

    def stat_port(self, config):
        """Reply with the password/method of the relay on config['server_port']."""
        port = int(config['server_port'])
        servers = self._relays.get(port, None)
        if servers:
            # NOTE(review): bytes %-formatting with str operands only works
            # on Python 2 — confirm the targeted runtime.
            self._send_control_data(b'{"stat":"ok", "password":"%s", "method":"%s"}' % (servers[0]._config['password'], servers[0]._config['method']))
        else:
            self._send_control_data(b'{"stat":"ko"}')

    def handle_event(self, sock, fd, event):
        """Event-loop callback: parse and dispatch one control datagram."""
        if sock == self._control_socket and event == eventloop.POLL_IN:
            data, self._control_client_addr = sock.recvfrom(BUF_SIZE)
            parsed = self._parse_command(data)
            if parsed:
                command, config = parsed
                a_config = self._config.copy()
                if command == 'transfer':
                    self.handle_periodic()
                else:
                    if config:
                        # let the command override the configuration file
                        a_config.update(config)
                    if 'server_port' not in a_config:
                        logging.error('can not find server_port in config')
                    else:
                        if command == 'add':
                            # literal string 'None' means "use the default method"
                            if a_config['method'] == 'None':
                                a_config['method'] = self._config['method']
                            self.add_port(a_config)
                            self._send_control_data(b'ok')
                        elif command == 'remove':
                            self.remove_port(a_config)
                            self._send_control_data(b'ok')
                        elif command == 'stat':
                            self.stat_port(a_config)
                        elif command == 'ping':
                            self._send_control_data(b'pong')
                        else:
                            logging.error('unknown command %s', command)

    def _parse_command(self, data):
        """Split 'command: {json}' into (command, config_dict).

        Examples:
          add: {"server_port": 8000, "password": "foobar"}
          remove: {"server_port": 8000}
        A bare command with no colon is returned as (data, None); a
        malformed JSON payload yields None.
        """
        data = common.to_str(data)
        parts = data.split(':', 1)
        if len(parts) < 2:
            return data, None
        command, config_json = parts
        try:
            config = shell.parse_json_in_str(config_json)
            return command, config
        except Exception as e:
            logging.error(e)
            return None

    def stat_callback(self, port, data_len):
        """Relay traffic hook: accumulate transferred bytes per port."""
        self._statistics[port] += data_len

    def handle_periodic(self):
        """Report accumulated statistics in UDP-sized chunks, then reset."""
        r = {}
        i = 0

        def send_data(data_dict):
            if data_dict:
                # use compact JSON format (without space)
                data = common.to_bytes(json.dumps(data_dict,
                                                  separators=(',', ':')))
                self._send_control_data(data)

        for k, v in self._statistics.items():
            r[k] = v
            i += 1
            # split the data into segments that fit in UDP packets
            if i >= STAT_SEND_LIMIT:
                send_data(r)
                r.clear()
                i = 0
        if len(r) > 0:
            send_data(r)
        # NOTE(review): 'e' terminator is a str — sendto() on Python 3
        # requires bytes; confirm target runtime before changing.
        self._send_control_data('e')
        self._statistics.clear()

    def _send_control_data(self, data):
        """Send *data* to the last control client, tolerating would-block errors."""
        import sys  # fix: this module never imports sys at top level
        if self._control_client_addr:
            try:
                self._control_socket.sendto(data, self._control_client_addr)
            except (socket.error, OSError, IOError) as e:
                error_no = eventloop.errno_from_exception(e)
                if sys.platform == "win32":
                    if error_no in (errno.EAGAIN, errno.EINPROGRESS,
                                    errno.EWOULDBLOCK, errno.WSAEWOULDBLOCK):
                        return
                elif error_no in (errno.EAGAIN, errno.EINPROGRESS,
                                  errno.EWOULDBLOCK):
                    return
                else:
                    shell.print_exception(e)
                    if self._config['verbose']:
                        traceback.print_exc()

    def run(self):
        """Block forever serving relays and control commands."""
        self._loop.run()
def run(config):
    """Module entry point: build a Manager from *config* and serve forever."""
    manager = Manager(config)
    manager.run()
def test():
    """Integration self-test: boot a Manager and exercise the control protocol."""
    import time
    import threading
    import struct
    from shadowsocks import encrypt
    logging.basicConfig(level=5,
                        format='%(asctime)s %(levelname)-8s %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')
    # `enc` smuggles the Manager instance out of the server thread
    enc = []
    eventloop.TIMEOUT_PRECISION = 1
    def run_server():
        config = {
            'server': '127.0.0.1',
            'local_port': 1081,
            'port_password': {
                '8381': 'foobar1',
                '8382': 'foobar2'
            },
            'method': 'aes-256-cfb',
            'manager_address': '127.0.0.1:6001',
            'timeout': 60,
            'fast_open': False,
            'verbose': 2
        }
        manager = Manager(config)
        enc.append(manager)
        manager.run()
    t = threading.Thread(target=run_server)
    t.start()
    time.sleep(1)
    manager = enc[0]
    # UDP control client talking to the manager address
    cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    cli.connect(('127.0.0.1', 6001))
    # test add and remove
    time.sleep(1)
    cli.send(b'add: {"server_port":7001, "password":"asdfadsfasdf"}')
    time.sleep(1)
    assert 7001 in manager._relays
    data, addr = cli.recvfrom(1506)
    assert b'ok' in data
    cli.send(b'remove: {"server_port":8381}')
    time.sleep(1)
    assert 8381 not in manager._relays
    data, addr = cli.recvfrom(1506)
    assert b'ok' in data
    logging.info('add and remove test passed')
    # test statistics for TCP
    header = common.pack_addr(b'google.com') + struct.pack('>H', 80)
    data = encrypt.encrypt_all(b'asdfadsfasdf', 'aes-256-cfb', 1,
                               header + b'GET /\r\n\r\n')
    tcp_cli = socket.socket()
    tcp_cli.connect(('127.0.0.1', 7001))
    tcp_cli.send(data)
    tcp_cli.recv(4096)
    tcp_cli.close()
    data, addr = cli.recvfrom(1506)
    data = common.to_str(data)
    assert data.startswith('stat: ')
    data = data.split('stat:')[1]
    stats = shell.parse_json_in_str(data)
    assert '7001' in stats
    logging.info('TCP statistics test passed')
    # test statistics for UDP
    header = common.pack_addr(b'127.0.0.1') + struct.pack('>H', 80)
    data = encrypt.encrypt_all(b'foobar2', 'aes-256-cfb', 1,
                               header + b'test')
    udp_cli = socket.socket(type=socket.SOCK_DGRAM)
    udp_cli.sendto(data, ('127.0.0.1', 8382))
    # NOTE(review): tcp_cli was already closed above — this second close
    # was probably meant to be udp_cli.close(); confirm intent.
    tcp_cli.close()
    data, addr = cli.recvfrom(1506)
    data = common.to_str(data)
    assert data.startswith('stat: ')
    data = data.split('stat:')[1]
    stats = json.loads(data)
    assert '8382' in stats
    logging.info('UDP statistics test passed')
    manager._loop.stop()
    t.join()
if __name__ == '__main__':
    # Run the integration self-test when executed directly.
    test()
|
test_crt_basic_vm_with_max_threads.py | '''
New Perf Test for creating KVM VM with basic L3 network.
The created number will depends on the environment variable: ZSTACK_TEST_NUM
The difference with test_basic_l3_vm_with_given_num.py is this case's max thread is 1000
@author: Youyk
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.config_operations as con_ops
import zstackwoodpecker.operations.account_operations as acc_ops
import zstackwoodpecker.zstack_test.zstack_test_vm as test_vm_header
import time
import os
import sys
import threading
import random
# Admin session uuid plus the original global-config values that test()
# overrides (restored on cleanup).
session_uuid = None
session_to = None
session_mc = None
# Max concurrent VM-creation threads; ZSTACK_THREAD_THRESHOLD overrides
# the default of 1000.
thread_threshold = os.environ.get('ZSTACK_THREAD_THRESHOLD')
if not thread_threshold:
    thread_threshold = 1000
else:
    thread_threshold = int(thread_threshold)
# sys.exc_info() tuples captured by worker threads for the main thread.
exc_info = []
def check_thread_exception():
    # Re-raise the first exception captured by a worker thread (if any),
    # preserving its original traceback.
    # NOTE: `raise value, None, traceback` is Python 2-only syntax.
    if exc_info:
        info1 = exc_info[0][1]
        info2 = exc_info[0][2]
        raise info1, None, info2
def create_vm(vm):
    # Thread worker: create one VM, funnelling any exception into the
    # module-level exc_info list checked by the main thread.
    try:
        vm.create()
    except:
        exc_info.append(sys.exc_info())
def test():
    """Stress test: create ZSTACK_TEST_NUM VMs with up to thread_threshold
    concurrent creation threads, then verify the created count."""
    global session_uuid
    global session_to
    global session_mc
    vm_num = os.environ.get('ZSTACK_TEST_NUM')
    if not vm_num:
        vm_num = 0
    else:
        vm_num = int(vm_num)
    test_util.test_logger('ZSTACK_THREAD_THRESHOLD is %d' % thread_threshold)
    test_util.test_logger('ZSTACK_TEST_NUM is %d' % vm_num)
    org_num = vm_num
    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_s')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    # NOTE(review): l3_name is never used below — VMs pick a random L3
    # from l3s instead; confirm this is intended.
    l3_name = os.environ.get('l3PublicNetworkName')
    l3s = test_lib.lib_get_l3s()
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    #change account session timeout.
    # NOTE(review): session_uuid is still None for these two calls —
    # login happens only afterwards; confirm the ordering is intended.
    session_to = con_ops.change_global_config('identity', 'session.timeout', '720000', session_uuid)
    session_mc = con_ops.change_global_config('identity', 'session.maxConcurrent', '10000', session_uuid)
    session_uuid = acc_ops.login_as_admin()
    vm_creation_option.set_session_uuid(session_uuid)
    vm = test_vm_header.ZstackTestVm()
    random_name = random.random()
    vm_name = 'multihost_basic_vm_%s' % str(random_name)
    vm_creation_option.set_name(vm_name)
    while vm_num > 0:
        # Abort early if any worker thread already failed
        check_thread_exception()
        vm_creation_option.set_l3_uuids([random.choice(l3s).uuid])
        vm.set_creation_option(vm_creation_option)
        vm_num -= 1
        thread = threading.Thread(target=create_vm, args=(vm,))
        # Throttle: wait until the live thread count drops below the cap
        while threading.active_count() > thread_threshold:
            time.sleep(1)
        thread.start()
    # Wait for all creation threads to finish
    while threading.active_count() > 1:
        time.sleep(0.01)
    cond = res_ops.gen_query_conditions('name', '=', vm_name)
    vms = res_ops.query_resource_count(res_ops.VM_INSTANCE, cond, session_uuid)
    # Restore the overridden global configs before reporting the verdict
    con_ops.change_global_config('identity', 'session.timeout', session_to, session_uuid)
    con_ops.change_global_config('identity', 'session.maxConcurrent', session_mc, session_uuid)
    acc_ops.logout(session_uuid)
    if vms == org_num:
        test_util.test_pass('Create %d VMs Test Success' % org_num)
    else:
        test_util.test_fail('Create %d VMs Test Failed. Only find %d VMs.' % (org_num, vms))
#Will be called only if exception happens in test().
def error_cleanup():
    """Best-effort rollback, invoked only if test() raised.

    Restores the two global-config values that test() overrode and logs
    the admin session out, skipping anything that was never set.
    """
    restores = (('session.timeout', session_to),
                ('session.maxConcurrent', session_mc))
    for key, value in restores:
        if value:
            con_ops.change_global_config('identity', key, value, session_uuid)
    if session_uuid:
        acc_ops.logout(session_uuid)
|
robot_driver.py | import rospy
import time
import actionlib
import threading
import argparse
from control_msgs.msg import FollowJointTrajectoryAction
from control_msgs.msg import FollowJointTrajectoryFeedback
from sensor_msgs.msg import JointState
from std_msgs.msg import String
from hiwin_robot_interface import HiwinRobotInterface
# Joint names published on /joint_states (suffixed onto the robot name).
JOINTS_NAMES = ['joint_1', 'joint_2', 'joint_3',
                'joint_4', 'joint_5', 'joint_6']
# NOTE(review): this lock is only ever acquired around `pass` in this
# file — it appears vestigial; confirm before removing.
last_joint_states_lock = threading.Lock()
DEBUG = True # Set True to show debug log, False to hide it.
class HiwinRobotStatesPublisher(object):
    """Reads robot joints' states and publish them on the joint_state topic."""
    def __init__(self, robot_interface):
        # type: (HiwinRobotInterface) -> None
        """Initialize Robot's state publisher.
        :param
            robot_interface: HiwinRobotInterface used to connect to the robot
        """
        self.robot_interface = robot_interface
        self.robot_name = self.robot_interface.name
        self.pub_joint_states = rospy.Publisher('/joint_states',
                                                JointState,
                                                queue_size=100)
        # ip / robot_handler are never used in this class
        self.ip = None
        self.robot_handler = None
        self.__keep_running = False
        self.__thread = None
    def start(self):
        """Begin the thread that runs the self.__run() function."""
        self.__keep_running = True
        # daemon thread: dies with the main process
        self.__thread = threading.Thread(name="HiwinRobotJointStatePublisher",
                                         target=self.__run)
        self.__thread.daemon = True
        self.__thread.start()
    def __run(self):
        """Read the robot's axis, publish on the joint_states topic."""
        global last_joint_states_lock
        while self.__keep_running:
            # NOTE(review): `result` (presumably a success flag) is ignored —
            # joints are published even on failure; confirm intended.
            result, joints = self.robot_interface.get_current_joints()
            msg = JointState()
            msg.header.stamp = rospy.get_rostime()
            # joint names are published as "<robot_name>/<joint_name>"
            msg.name = [self.robot_name+"/".join(["", joint_name]) for joint_name in JOINTS_NAMES]
            msg.position = joints
            self.pub_joint_states.publish(msg)
            rospy.sleep(0.01)
            # NOTE(review): acquiring the lock around `pass` is a no-op
            with last_joint_states_lock:
                pass
class HiwinDIOPublisher(object):
    """Publishes the robot's digital input/output states on two topics,
    each polled by its own daemon thread."""
    def __init__(self, robot_interface):
        # type: (HiwinRobotInterface) -> None
        """Initialize Robot's state publisher.
        :param
            robot_interface: HiwinRobotInterface used to connect to the robot
        """
        self.robot_interface = robot_interface
        self.robot_name = self.robot_interface.name
        self.pub_dig_in = rospy.Publisher(self.robot_name + '/digital_input',
                                          String, queue_size=10)
        self.pub_dig_out = rospy.Publisher(self.robot_name + '/digital_output',
                                           String, queue_size=10)
        # ip / robot_handler are never used in this class
        self.ip = None
        self.robot_handler = None
        self.__keep_running = False
        self.__thread_i = None
        self.__thread_o = None
    def start(self):
        """Begin the thread that runs the self.__run() function."""
        self.__keep_running = True
        self.__thread_i = threading.Thread(name="HiwinDIPublisher",
                                           target=self.__run_i)
        self.__thread_i.daemon = True
        self.__thread_i.start()
        self.__thread_o = threading.Thread(name="HiwinDOPublisher",
                                           target=self.__run_o)
        self.__thread_o.daemon = True
        self.__thread_o.start()
    def __run_i(self):
        """Poll digital inputs at ~10 Hz and publish them space-separated."""
        global last_joint_states_lock
        while self.__keep_running:
            inputs = self.robot_interface.get_current_digital_inputs()
            msg = String()
            msg.data = "".join([str(i)+" " for i in inputs])
            self.pub_dig_in.publish(msg)
            rospy.sleep(0.1)
            # NOTE(review): acquiring the lock around `pass` is a no-op
            with last_joint_states_lock:
                pass
    def __run_o(self):
        """Poll digital outputs at ~10 Hz and publish them space-separated."""
        global last_joint_states_lock
        while self.__keep_running:
            outputs = self.robot_interface.get_current_digital_outputs()
            msg = String()
            msg.data = "".join([str(o)+" " for o in outputs])
            self.pub_dig_out.publish(msg)
            rospy.sleep(0.1)
            # NOTE(review): acquiring the lock around `pass` is a no-op
            with last_joint_states_lock:
                pass
class HiwinDOSetter(object):
    """Subscribes to <robot_name>/set_digital_output and forwards each
    request to the robot.

    Expected message format: "DO_INDEX DO_VALUE", e.g. "12 1".
    """
    def __init__(self, robot_interface):
        # type: (HiwinRobotInterface) -> None
        """Initialize the digital-output setter.
        :param
            robot_interface: HiwinRobotInterface used to connect to the robot
        """
        self.robot_interface = robot_interface
        self.robot_name = self.robot_interface.name
        self.set_dig_out = rospy.Subscriber(self.robot_name +
                                            '/set_digital_output',
                                            String, callback=self.__set_do,
                                            queue_size=10)
        # The attributes below are unused in this class; kept only for
        # parity with the sibling publisher classes in this module.
        self.ip = None
        self.robot_handler = None
        self.__keep_running = False
        self.__thread_i = None
        self.__thread_o = None
    def __set_do(self, msg): # type: (String) -> None
        """Topic callback: parse "INDEX VALUE" and set the digital output."""
        if not self.robot_interface.is_connected():
            self.robot_interface.reconnect()
        try:
            do_index, do_value = msg.data.split(" ")
            self.robot_interface.set_io_value(int(do_index), bool(int(do_value)==1))
        except Exception:
            # fix: was a bare `except:`, which would also swallow
            # SystemExit/KeyboardInterrupt; Exception keeps the intended
            # tolerance of malformed messages.
            rospy.logwarn_once("Set DO Message format is wrong, expected 'DO_INDEX DO_VALUE' (e.g. '12 1') but got {}".format(msg.data))
class HiwinRobotTrajectoryFollower(object):
    """Class used to make the robot follow the trajectory given by MoveIt!"""
    # Period (seconds) of the _update timer
    RATE = 0.02
    def __init__(self, robot_interface):
        # type: (HiwinRobotInterface) -> None
        """Set up the FollowJointTrajectory action server, the feedback
        publisher and the periodic update timer."""
        self.robot_interface = robot_interface
        self.robot_name = self.robot_interface.name
        self.server = actionlib.ActionServer(self.robot_name +
                                             "/follow_joint_trajectory",
                                             FollowJointTrajectoryAction,
                                             self.on_goal, self.on_cancel,
                                             auto_start=False)
        rospy.Subscriber("manipulator/taskstart", String,
                         callback=self.taskstart_topic_callback, queue_size=10)
        self.pub_feedback_states = rospy.Publisher(
            '/feedback_states',
            FollowJointTrajectoryFeedback,
            queue_size=10 )
        self.goal_handle = None
        self.start_time = 0
        self.received_trajectory = None # Store last received trajectory
        self.trajectory_to_execute = None # Once executed, it will be deleted
        self.target = None
        # Fires _update every RATE seconds for feedback + execution
        self.update_timer = rospy.Timer(rospy.Duration(self.RATE),
                                        self._update)
        self.__keep_running = False
        self.__thread = None
    def start(self):
        """Begin the thread that runs the self.server.start function."""
        self.__keep_running = True
        self.__thread = threading.Thread(name="HiwinRobotControlNode",
                                         target=self.server.start)
        self.__thread.daemon = True
        self.__thread.start()
        # NOTE(review): resetting __keep_running immediately after starting
        # the thread looks unintended (the flag is otherwise unused here) —
        # confirm.
        self.__keep_running = False
    def on_goal(self, goal_handle):
        """When a trajectory has been received from move_group, execute it.
        Main idea: the trajectory is composed by many points. We move point to
        point (PTP) from each point to the next one. Note: as today, we
        do not take care of the time execution of the trajectory. For
        example, we do not care about the velocity of the trajectory that
        has been received. In future it is needed to add this possibility.
        """
        # Check if there is another goal on the go
        if self.goal_handle:
            self.on_cancel(None)
        # Communicate that the goal has been accepted
        self.goal_handle = goal_handle
        self.goal_handle.set_accepted()
        rospy.loginfo("Trajectory received and accepted")
        # Read the trajectory
        self.start_time = rospy.Time.now()
        self.received_trajectory = goal_handle.get_goal().trajectory
        self.trajectory_to_execute = goal_handle.get_goal().trajectory
        # Print out the target final point
        self.target = self.received_trajectory.points[-1].positions
        rospy.loginfo("The trajectory has a total of {} points."
                      .format(len(self.received_trajectory.points)))
    def on_cancel(self, _):
        """When a trajectory is canceled, stop the robot."""
        rospy.logwarn("Trajectory was canceled before reaching goal.")
        # Stop the Robot in the current position
        self.robot_interface.stop_motion()
        self.trajectory_to_execute = None
        self.target = None
        self.goal_handle.set_canceled()
        self.goal_handle = None
    def _update(self, event):
        """Manage trajectory goal_handle and publish the feedback"""
        # Publish the current feedback states of the robot
        self.publish_feedback()
        # If there is no goal pending, do nothing.
        if not self.goal_handle:
            return
        # If the goal has been reached, set goal successfully reached.
        if self.target is not None:
            if self.goal_reached(self.target):
                rospy.loginfo("Trajectory completed. Goal reached!")
                self.goal_handle.set_succeeded()
                self.goal_handle = None
                self.trajectory_to_execute = None
                self.target = None
                return
        # If there's no position trajectory to be executed, do nothing.
        if self.trajectory_to_execute is None:
            return
        # If a position trajectory need to be executed, execute it:
        # for each point in the trajectory execute Point-To-Point movement
        # (note that the first point is always the starting point)
        for point in self.trajectory_to_execute.points[1:]:
            # Read the target values of the joints
            target_joints = [joint for joint in point.positions]
            # Make sure the goal has not been canceled meanwhile
            if not self.goal_handle:
                break
            # Move to the target joints
            self.robot_interface.move_ptp(target_axis_values=target_joints)
        # The trajectory has been executed, forget it.
        self.trajectory_to_execute = None
    def goal_reached(self, target_joint_states):
        # type: (list[float]) -> bool
        """Returns True if the robot has achieved the goal, False otherwise.
        To reach the goal means:
        1. Be near the goal
        2. Not moving (being in IDLE state)
        :param
            target_joint_states: The goal states (in radians)
        """
        state_reached = self.robot_interface.is_in_state(target_joint_states)
        not_moving = self.robot_interface.is_in_idle()
        return state_reached and not_moving
    def publish_feedback(self):
        """Publishes the position feedback of the robot.
        The feedback is the difference between the desired states and the
        current ones.
        """
        # If there is no trajectory, there is nothing to do
        if self.received_trajectory is None:
            return
        # Get the current states of the joints
        success, current_joints_states = \
            self.robot_interface.get_current_joints()
        if not success: # Couldn't get the current joints' state
            rospy.logwarn("Could not publish on feedback_states:"
                          "current states are unknown. Assuming all to 0.")
            current_joints_states = [0 for _ in range(6)]
        # Get the desired position. What should the robot joints be right now?
        time_from_start = rospy.Time.now() - self.start_time
        # Find which point represents the current desired position
        for point in self.received_trajectory.points:
            if time_from_start > point.time_from_start:
                continue
            break
        desired_point = point
        # Make sure the length of the current states and the target states
        # is exactly the length of the joints
        assert len(JOINTS_NAMES) == len(current_joints_states) and \
            len(JOINTS_NAMES) == len(desired_point.positions), \
            "Target and current states have different length. " \
            "Expected {} joints, got {} (target) and {} (current)".format(
                len(JOINTS_NAMES), len(desired_point.positions),
                len(current_joints_states)
            )
        # Create the message to be published
        msg = FollowJointTrajectoryFeedback()
        msg.header.frame_id = ""
        msg.header.stamp = rospy.get_rostime()
        msg.joint_names = JOINTS_NAMES
        # Set the goal states4
        msg.desired.positions = desired_point.positions
        msg.desired.velocities = []
        msg.desired.accelerations = []
        msg.desired.effort = []
        msg.desired.time_from_start = desired_point.time_from_start
        # Set the actual states
        msg.actual.positions = current_joints_states
        msg.actual.velocities = []
        msg.actual.accelerations = []
        msg.actual.effort = []
        msg.actual.time_from_start = desired_point.time_from_start
        # Calculate the error
        position_error = [goal - current for goal, current in zip(
            msg.desired.positions, msg.actual.positions
        )]
        velocity_error = [goal - current for goal, current in zip(
            msg.desired.velocities, msg.actual.velocities
        )]
        acceleration_error = [goal - current for goal, current in zip(
            msg.desired.accelerations,
            msg.actual.accelerations
        )]
        effort_error = [goal - current for goal, current in zip(
            msg.desired.effort, msg.actual.effort
        )]
        # Set the errors
        msg.error.positions = position_error
        msg.error.velocities = velocity_error
        msg.error.accelerations = acceleration_error
        msg.error.effort = effort_error
        msg.error.time_from_start = desired_point.time_from_start
        # Publish the message on /feedback_states topic
        self.pub_feedback_states.publish(msg)
    def taskstart_topic_callback(self, msg): # type: (String) -> None
        """Topic callback: stop the running task, then start task msg.data."""
        if not self.robot_interface.is_connected():
            self.robot_interface.reconnect(trials=1, sec_between_trials=1)
        self.robot_interface.stop_task()
        if not self.robot_interface.is_connected():
            self.robot_interface.reconnect(trials=1, sec_between_trials=1)
        # NOTE(review): the `== 0` comparison result is discarded — this is
        # a no-op check; the return value is never acted upon. Confirm intent.
        self.robot_interface.start_task(msg.data) == 0
        return
if __name__ == '__main__':
    # Get the arguments
    arg_parser = argparse.ArgumentParser("Driver Node")
    arg_parser.add_argument("--robot_ip", help="IP addr of the robot",
                            type=str)
    arg_parser.add_argument("--robot_name", help="Name of the robot", type=str)
    arg_parser.add_argument("--control_mode", help="Default is 1, set it to 0 if you do not want to control the robot, but only to monitor its state.",
                            type=bool, default=1, required=False)
    arg_parser.add_argument("--log_level", help="Logging level: INFO, DEBUG",
                            type=str, default="INFO", required=False)
    # Positional catch-alls for the __name/__log remapping args that
    # roslaunch appends to the command line.
    arg_parser.add_argument("__name")
    arg_parser.add_argument("__log")
    args = arg_parser.parse_args()
    # Extract the necessary arguments
    robot_ip = args.robot_ip
    robot_name = args.robot_name
    control_mode = int(args.control_mode)
    if args.log_level == "DEBUG":
        log_level = rospy.DEBUG
    elif args.log_level == "ERROR":
        log_level = rospy.ERROR
    else:
        log_level = rospy.INFO
    # Start the ROS node
    rospy.init_node('hiwin_robot_sdk_'+robot_name,
                    log_level=log_level,
                    disable_signals=True)
    if rospy.get_param("use_sim_time", False):
        rospy.logwarn("use_sim_time is set!!!")
    # Controlling connection (level from --control_mode)
    robot_ctr = HiwinRobotInterface(robot_ip=robot_ip, connection_level=control_mode,
                                    name=robot_name)
    robot_ctr.connect()
    # Highest priority: start the controllers for the robot.
    # (If the controllers don't start in time, MoveIt! won't work.)
    # Start arm controller
    arm_action_server = HiwinRobotTrajectoryFollower(robot_ctr)
    arm_action_server.start()
    set_do_thread = HiwinDOSetter(robot_ctr)
    # Separate monitor-only connection (level 0) for the state publishers
    robot_mtr = HiwinRobotInterface(robot_ip=robot_ip, connection_level=0,
                                    name=robot_name)
    # Start joint states publisher for the robot
    hiwin_states_publisher = HiwinRobotStatesPublisher(robot_mtr)
    hiwin_states_publisher.start()
    # Start Digital I/O publisher for the robot
    hiwin_dio_publisher = HiwinDIOPublisher(robot_mtr)
    hiwin_dio_publisher.start()
    try:
        rospy.spin()
    except KeyboardInterrupt:
        pass
worker.py | import os
import sys
import json
import platform
import socket
import signal
import logging
import logging.config
import threading
import traceback
import argparse
import multiprocessing
from time import monotonic
from typing import Dict, Any, List, Tuple # noqa
import amqp
from kuyruk import importer, signals
from kuyruk.kuyruk import Kuyruk
from kuyruk.task import Task
from kuyruk.heartbeat import Heartbeat
from kuyruk.exceptions import Reject, Discard, HeartbeatError, ExcInfoType
logger = logging.getLogger(__name__)
class Worker:
    """Consumes tasks from queues and runs them.

    :param app: An instance of :class:`~kuyruk.Kuyruk`
    :param args: Command line arguments
    """

    def __init__(self, app: Kuyruk, args: argparse.Namespace) -> None:
        self.kuyruk = app
        if not args.queues:
            args.queues = ['kuyruk']

        def add_host(queue: str) -> str:
            # "<queue>.localhost" is shorthand for a host-specific queue on
            # this machine.
            if queue.endswith('.localhost'):
                queue = queue.rsplit('.localhost')[0]
                return "%s.%s" % (queue, self._hostname)
            else:
                return queue

        self._hostname = socket.gethostname()
        self.queues = [add_host(q) for q in args.queues]
        self._tasks = {}  # type: Dict[Tuple[str, str], Task]
        self.shutdown_pending = threading.Event()
        self.consuming = False
        self.current_task = None  # type: Task
        self.current_args = None  # type: Tuple
        self.current_kwargs = None  # type: Dict[str, Any]
        self._started_at = None  # type: float
        self._pid = os.getpid()
        # Written by the heartbeat error callback right before it sends SIGHUP
        # and consumed in _handle_sighup.  Initialized here so the signal
        # handler can never hit an AttributeError.
        self._heartbeat_exc_info = None  # type: ExcInfoType
        self._logging_level = app.config.WORKER_LOGGING_LEVEL
        if args.logging_level is not None:
            self._logging_level = args.logging_level
        self._max_run_time = app.config.WORKER_MAX_RUN_TIME
        if args.max_run_time is not None:
            self._max_run_time = args.max_run_time
        self._max_load = app.config.WORKER_MAX_LOAD
        if args.max_load is not None:
            self._max_load = args.max_load
        if self._max_load == -1:
            # Bug fix: this used to be a no-op comparison (`==` instead of
            # `=`).  -1 means "one load unit per CPU core".
            self._max_load = multiprocessing.cpu_count()
        self._threads = []  # type: List[threading.Thread]
        if self._max_load:
            self._threads.append(threading.Thread(target=self._watch_load))
        if self._max_run_time:
            self._threads.append(threading.Thread(target=self._shutdown_timer))
        signals.worker_init.send(self.kuyruk, worker=self)

    def run(self) -> None:
        """Runs the worker and consumes messages from RabbitMQ.

        Returns only after `shutdown()` is called.
        """
        if self._logging_level:
            logging.basicConfig(
                level=getattr(logging, self._logging_level.upper()),
                format="%(levelname).1s %(name)s.%(funcName)s:%(lineno)d - %(message)s")
        signal.signal(signal.SIGINT, self._handle_sigint)
        signal.signal(signal.SIGTERM, self._handle_sigterm)
        if platform.system() != 'Windows':
            # These features will not be available on Windows, but that is OK.
            # Read this issue for more details:
            # https://github.com/cenkalti/kuyruk/issues/54
            signal.signal(signal.SIGHUP, self._handle_sighup)
            signal.signal(signal.SIGUSR1, self._handle_sigusr1)
            signal.signal(signal.SIGUSR2, self._handle_sigusr2)
        self._started_at = os.times().elapsed
        for t in self._threads:
            t.start()
        try:
            signals.worker_start.send(self.kuyruk, worker=self)
            self._consume_messages()
            signals.worker_shutdown.send(self.kuyruk, worker=self)
        finally:
            self.shutdown_pending.set()
            for t in self._threads:
                t.join()
        logger.debug("End run worker")

    def _consume_messages(self) -> None:
        """Open a channel, start consuming and block in the main loop."""
        with self.kuyruk.channel() as ch:
            # Set prefetch count to 1. If we don't set this, RabbitMQ keeps
            # sending messages while we are already working on a message.
            ch.basic_qos(0, 1, True)
            self._declare_queues(ch)
            self._consume_queues(ch)
            logger.info('Consumer started')
            self._main_loop(ch)

    def _main_loop(self, ch: amqp.Channel) -> None:
        """Drive the AMQP connection until shutdown is requested."""
        while not self.shutdown_pending.is_set():
            self._pause_or_resume(ch)
            try:
                ch.connection.heartbeat_tick()
                # 1 second timeout lets us re-check shutdown_pending regularly.
                ch.connection.drain_events(timeout=1)
            except socket.timeout:
                pass

    def _consumer_tag(self, queue: str) -> str:
        """Unique consumer tag for this worker process on *queue*."""
        return "%s:%s@%s" % (queue, self._pid, self._hostname)

    def _declare_queues(self, ch: amqp.Channel) -> None:
        """Declare all configured queues as durable."""
        for queue in self.queues:
            logger.debug("queue_declare: %s", queue)
            ch.queue_declare(queue=queue, durable=True, auto_delete=False)

    def _pause_or_resume(self, channel: amqp.Channel) -> None:
        """Pause or resume consuming depending on the measured system load."""
        if not self._max_load:
            return
        try:
            load = self._current_load
        except AttributeError:
            # Bug fix: the load watcher thread has not sampled the load yet.
            # Previously this path could fall through to the logging calls
            # below with `load` unbound; just wait for the next tick instead.
            return
        should_pause = load > self._max_load
        if should_pause and self.consuming:
            logger.warning('Load is above the treshold (%.2f/%s), ' 'pausing consumer', load, self._max_load)
            self._cancel_queues(channel)
        elif not should_pause and not self.consuming:
            logger.warning('Load is below the treshold (%.2f/%s), ' 'resuming consumer', load, self._max_load)
            self._consume_queues(channel)

    def _consume_queues(self, ch: amqp.Channel) -> None:
        """Start consuming from every configured queue."""
        self.consuming = True
        for queue in self.queues:
            logger.debug("basic_consume: %s", queue)
            ch.basic_consume(queue=queue, consumer_tag=self._consumer_tag(queue), callback=self._process_message)

    def _cancel_queues(self, ch: amqp.Channel) -> None:
        """Stop consuming from every configured queue."""
        self.consuming = False
        for queue in self.queues:
            logger.debug("basic_cancel: %s", queue)
            ch.basic_cancel(self._consumer_tag(queue))

    def _process_message(self, message: amqp.Message) -> None:
        """Processes the message received from the queue."""
        if self.shutdown_pending.is_set():
            return
        try:
            if isinstance(message.body, bytes):
                message.body = message.body.decode()
            description = json.loads(message.body)
        except Exception:
            # Malformed payloads are dropped, never requeued.
            logger.error("Cannot decode message. Dropping. Message: %r", message.body)
            traceback.print_exc()
            message.channel.basic_reject(message.delivery_tag, requeue=False)
        else:
            logger.info("Processing task: %r", description)
            self._process_description(message, description)

    def _process_description(self, message: amqp.Message, description: Dict[str, Any]) -> None:
        """Resolve the task callable from *description* and run it."""
        try:
            task = self._import_task(description['module'], description['function'])
            args, kwargs = description['args'], description['kwargs']
        except Exception:
            logger.error('Cannot import task')
            exc_info = sys.exc_info()
            signals.worker_failure.send(self.kuyruk, description=description, exc_info=exc_info, worker=self)
            message.channel.basic_reject(message.delivery_tag, requeue=False)
        else:
            self._process_task(message, description, task, args, kwargs)

    def _import_task(self, module: str, function: str) -> Task:
        """Import (and memoize) the Task object for module.function."""
        if (module, function) in self._tasks:
            return self._tasks[(module, function)]
        task = importer.import_object(module, function)
        self._tasks[(module, function)] = task
        return task

    def _process_task(
            self,
            message: amqp.Message,
            description: Dict[str, Any],
            task: Task,
            args: Tuple,
            kwargs: Dict[str, Any],
    ) -> None:
        """Run *task*, then ack/reject the message and send a reply if asked."""
        queue = message.delivery_info['routing_key']
        reply_to = message.properties.get('reply_to')
        try:
            result = self._run_task(message.channel.connection, task, args, kwargs)
        except Reject:
            # Task asked to be retried later.
            logger.warning('Task is rejected')
            message.channel.basic_reject(message.delivery_tag, requeue=True)
        except Discard:
            # Task asked to be dropped permanently.
            logger.warning('Task is discarded')
            message.channel.basic_reject(message.delivery_tag, requeue=False)
            if reply_to:
                exc_info = sys.exc_info()
                self._send_reply(reply_to, message.channel, None, exc_info)
        except HeartbeatError as e:
            # Connection-level failure; re-raise to tear down the consumer.
            logger.error('Error while sending heartbeat')
            exc_info = e.exc_info
            logger.error(''.join(traceback.format_exception(*exc_info)))
            signals.worker_failure.send(
                self.kuyruk,
                description=description,
                task=task,
                args=args,
                kwargs=kwargs,
                exc_info=exc_info,
                worker=self,
                queue=queue)
            raise
        except Exception:
            logger.error('Task raised an exception')
            exc_info = sys.exc_info()
            logger.error(''.join(traceback.format_exception(*exc_info)))
            signals.worker_failure.send(
                self.kuyruk,
                description=description,
                task=task,
                args=args,
                kwargs=kwargs,
                exc_info=exc_info,
                worker=self,
                queue=queue)
            message.channel.basic_reject(message.delivery_tag, requeue=False)
            if reply_to:
                self._send_reply(reply_to, message.channel, None, exc_info)
        else:
            logger.info('Task is successful')
            message.channel.basic_ack(message.delivery_tag)
            if reply_to:
                self._send_reply(reply_to, message.channel, result, None)
        finally:
            logger.debug("Task is processed")

    def _run_task(self, connection: amqp.Connection, task: Task, args: Tuple, kwargs: Dict[str, Any]) -> Any:
        """Apply *task* while a heartbeat thread keeps the connection alive."""
        hb = Heartbeat(connection, self._on_heartbeat_error)
        hb.start()
        self.current_task = task
        self.current_args = args
        self.current_kwargs = kwargs
        try:
            return self._apply_task(task, args, kwargs)
        finally:
            self.current_task = None
            self.current_args = None
            self.current_kwargs = None
            hb.stop()

    def _on_heartbeat_error(self, exc_info: ExcInfoType) -> None:
        # Called from the heartbeat thread: stash the error and interrupt the
        # main thread via SIGHUP (handled in _handle_sighup).
        self._heartbeat_exc_info = exc_info
        os.kill(os.getpid(), signal.SIGHUP)

    @staticmethod
    def _apply_task(task: Task, args: Tuple, kwargs: Dict[str, Any]) -> Any:
        """Logs the time spent while running the task."""
        if args is None:
            args = ()
        if kwargs is None:
            kwargs = {}
        start = monotonic()
        try:
            return task.apply(*args, **kwargs)
        finally:
            delta = monotonic() - start
            logger.info("%s finished in %i seconds." % (task.name, delta))

    def _send_reply(
            self,
            reply_to: str,
            channel: amqp.Channel,
            result: Any,
            exc_info: ExcInfoType,
    ) -> None:
        """Publish the task result (or its exception) to the *reply_to* queue."""
        logger.debug("Sending reply result=%r", result)
        reply = {'result': result}
        if exc_info:
            reply['exception'] = self._exc_info_dict(exc_info)
        try:
            body = json.dumps(reply)
        except Exception as e:
            # Fall back to replying with the serialization error itself.
            logger.error('Cannot serialize result as JSON: %s', e)
            exc_info = sys.exc_info()
            reply = {'result': None, 'exception': self._exc_info_dict(exc_info)}
            body = json.dumps(reply)
        msg = amqp.Message(body=body)
        channel.basic_publish(msg, exchange="", routing_key=reply_to)

    @staticmethod
    def _exc_info_dict(exc_info: ExcInfoType) -> Dict[str, str]:
        """Convert an exc_info triple into a JSON-serializable dict."""
        type_, val, tb = exc_info
        return {
            'type': '%s.%s' % (type_.__module__, type_.__name__),
            'value': str(val),
            'traceback': ''.join(traceback.format_tb(tb)),
        }

    def _watch_load(self) -> None:
        """Pause consuming messages if load goes above the allowed limit."""
        while not self.shutdown_pending.wait(1):
            # 1-minute load average; read by _pause_or_resume.
            self._current_load = os.getloadavg()[0]

    @property
    def uptime(self) -> float:
        """Seconds elapsed since run() started (0 before that)."""
        if not self._started_at:
            return 0
        return os.times().elapsed - self._started_at

    def _shutdown_timer(self) -> None:
        """Counts down from MAX_WORKER_RUN_TIME. When it reaches zero shutdown
        gracefully.
        """
        remaining = self._max_run_time - self.uptime
        if not self.shutdown_pending.wait(remaining):
            logger.warning('Run time reached zero')
            self.shutdown()

    def shutdown(self) -> None:
        """Exits after the current task is finished."""
        logger.warning("Shutdown requested")
        self.shutdown_pending.set()

    def _handle_sigint(self, signum: int, frame: Any) -> None:
        """Shutdown after processing current task."""
        logger.warning("Catched SIGINT")
        self.shutdown()

    def _handle_sigterm(self, signum: int, frame: Any) -> None:
        """Shutdown after processing current task."""
        logger.warning("Catched SIGTERM")
        self.shutdown()

    def _handle_sighup(self, signum: int, frame: Any) -> None:
        """Used internally to fail the task when connection to RabbitMQ is
        lost during the execution of the task.
        """
        logger.warning("Catched SIGHUP")
        exc_info = self._heartbeat_exc_info
        self._heartbeat_exc_info = None
        # Format exception info to see in tools like Sentry.
        formatted_exception = ''.join(traceback.format_exception(*exc_info))  # noqa
        raise HeartbeatError(exc_info)

    @staticmethod
    def _handle_sigusr1(signum: int, frame: Any) -> None:
        """Print stacktrace."""
        print('=' * 70)
        print(''.join(traceback.format_stack()))
        print('-' * 70)

    def _handle_sigusr2(self, signum: int, frame: Any) -> None:
        """Drop current task."""
        logger.warning("Catched SIGUSR2")
        if self.current_task:
            logger.warning("Dropping current task...")
            raise Discard

    def drop_task(self) -> None:
        """Interrupt and discard the currently running task (via SIGUSR2)."""
        os.kill(os.getpid(), signal.SIGUSR2)
|
03_bosshp_detect.py |
import time
import os
from pynput.keyboard import Key, Listener, KeyCode
from pynput import mouse, keyboard
from windowcapture import WindowCapture
import cv2
from threading import Thread
# Run from this script's directory so relative paths (e.g. gamename.txt) resolve.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
class ScreenshotGrabber5Sec():
    """Hotkey-driven boss-HP-bar watcher: F10 starts polling, F11 exits."""

    def __init__(self) -> None:
        # Lazily-created pynput keyboard Listener (see start_keypress_listener).
        self.listener = None

    def start(self):
        """Install the key listener, then idle forever in the main thread."""
        self.start_keypress_listener()
        while True:
            time.sleep(0.5)

    def start_keypress_listener(self):
        # Idempotent: at most one listener is ever created.
        # (Bug fix: `is None` instead of `== None` for the singleton check.)
        if self.listener is None:
            self.listener = Listener(on_press=self.on_press,
                                     on_release=self.on_release)
            self.listener.start()

    def on_press(self, key):
        # F11: hard-exit the whole process.  os._exit skips cleanup on
        # purpose, as the listener thread would otherwise keep it alive.
        if key == keyboard.Key.f11:
            os._exit(1)

    def on_release(self, key):
        # F10: start polling for the boss HP bar in a background thread.
        if key == keyboard.Key.f10:
            print("Starting bosshp check")
            t = Thread(target=self.bosshp_checker)
            t.start()

    def detect_boss_bar(self, gamename):
        """Sample two pixels where the boss HP bar appears; True if present."""
        wincap = WindowCapture(gamename, custom_rect=[
            415+97, 105+533, 415+98, 105+534])
        image = wincap.get_screenshot()
        # Pixels are BGR: (a,b,c) = first pixel, (d,e,f) = last pixel of row 0.
        a, b, c = [int(i) for i in image[0][0]]
        d, e, f = [int(i) for i in image[0][-1]]
        # Both pixels must have a high red channel (c, f) and low blue+green
        # (a, b, d, e) -- i.e. the solid red of the HP bar.
        if c+f > 440:
            if a+b+d+e < 80:
                return True
        return False

    def bosshp_checker(self):
        """Poll the game window until the boss HP bar shows up."""
        with open("gamename.txt") as f:
            gamename = f.readline()
        while not self.detect_boss_bar(gamename):
            time.sleep(0.15)
        print("Finally found boss hp bar!!!")
if __name__ == "__main__":
    # Entry point: install hotkeys and block forever (F11 exits the process).
    ssg = ScreenshotGrabber5Sec()
    ssg.start()
|
run_manager.py | # -*- coding: utf-8 -*-
# (c) 2020-2021 Martin Wendt and contributors; see https://github.com/mar10/stressor
# Licensed under the MIT license: https://www.opensource.org/licenses/mit-license.php
"""
"""
import itertools
import sys
import threading
import time
from collections import defaultdict
from datetime import datetime
from snazzy import emoji, green, red, yellow
from stressor.config_manager import ConfigManager
from stressor.monitor.server import MonitorServer
from stressor.session_manager import SessionManager, User
from stressor.statistic_manager import StatisticManager
from stressor.util import (
check_arg,
format_elap,
format_num,
get_random_number,
logger,
set_console_ctrl_handler,
)
class RunManager:
    """
    Executes a run-configuration in parallel sessions.
    """

    CTRL_HANDLER_SET = None
    CURRENT_RUN_MANAGER = None
    DEFAULT_OPTS = {
        "monitor": False,
        "log_summary": True,
        # "dry_run": False,
    }
    STAGES = (
        # "new",
        "ready",
        "running",
        "done",
        "waiting",
        "stopping",
        "stopped",
    )
    CHANNELS = (
        "log",
        "start_run",
        "start_session",
        "start_sequence",
        "start_activity",
        "end_activity",
        "end_sequence",
        "end_session",
        "end_run",
    )
    activity_map = {}

    def __init__(self):
        self.lock = threading.RLock()
        self.host_id = "h1"
        self.process_id = "p1"
        #: :class:`ConfigManager` used to load the configuration YAML
        self.config_manager = None
        self.has_hooks = False
        self.has_catch_all_hooks = False
        self._hooks = defaultdict(list)
        #: Set this event to shut down the app
        self.stop_request = threading.Event()
        #: (bool): TODO: determines if a stop request is graceful or not
        #: True: Finalize the current sequence, then do 'end' sequence before stopping
        self.stop_request_graceful = None
        #: (bool): TODO: determines if a stop request will keep the monitor running
        #: True: Finalize the current sequence, then do 'end' sequence before stopping?
        self.stop_request_monitor = None
        self.session_list = []
        #: :class:`~stressor.statistic_manager.StatisticManager` object that contains current execution path
        self.stats = StatisticManager()
        self.options = self.DEFAULT_OPTS.copy()
        self.stage = "ready"
        self.start_dt = None
        self.start_stamp = None
        self.end_dt = None
        self.end_stamp = None
        # register_plugins()
        # Bug fix: assign on the *class*, because the static console-ctrl
        # handler reads `RunManager.CURRENT_RUN_MANAGER`.  The previous
        # instance assignment left the class attribute at None.
        RunManager.CURRENT_RUN_MANAGER = self
        self.set_console_ctrl_handler()

    def __str__(self):
        # name = self.config_manager.path if self.config_manager else "?"
        # name = self.run_config.get("name") if self.run_config else "?"
        return "RunManager<{}>".format(self.stage.upper())

    @staticmethod
    def set_console_ctrl_handler():
        """Register the Ctrl-C/Break handler exactly once per process."""
        if RunManager.CTRL_HANDLER_SET is None:
            RunManager.CTRL_HANDLER_SET = set_console_ctrl_handler(
                RunManager._console_ctrl_handler
            )
            logger.info("set_console_ctrl_handler()")

    @staticmethod
    def _console_ctrl_handler(ctrl):
        # NOTE: seems that print/logger do not work here?
        # print("_console_ctrl_handler()")
        return RunManager.CURRENT_RUN_MANAGER.console_ctrl_handler(ctrl)

    def console_ctrl_handler(self, ctrl):
        """
        Args:
            ctrl (int): 0: CTRL_C_EVENT, 1: CTRL_BREAK_EVENT, 2: CTRL_CLOSE_EVENT
        Returns:
            True if handled
            False if not handled, i.e. next registered handler will be called
        """
        # if self.stop_request.is_set():
        #     print("Got Ctrl-C 2nd time: terminating...")
        #     logger.warning("Got Ctrl-C a 2nd time: terminating...")
        #     time.sleep(0.1)
        #     # sys.exit(2)
        print("Got Ctrl-C (windows handler), terminating...", file=sys.stderr)
        logger.warning("Got Ctrl-C (windows handler), terminating...")
        # self.stop_request.set()
        self.stop()
        # return False
        return True

    def set_stage(self, stage):
        """Transition to *stage* (must be one of STAGES)."""
        check_arg(stage, str, stage in self.STAGES)
        logger.info("Enter stage '{}'".format(stage.upper()))
        self.stage = stage

    def publish(self, channel, allow_cancel=False, *args, **kwargs):
        """Notify all subscribed handlers."""
        assert channel in self.CHANNELS
        result_list = []
        if not self.has_hooks:
            return result_list
        channel_hooks = self._hooks.get(channel)
        generic_hooks = self._hooks.get("*")
        if channel_hooks:
            if generic_hooks:
                hooks = itertools.chain(channel_hooks, generic_hooks)
            else:
                hooks = channel_hooks
        elif generic_hooks:
            hooks = generic_hooks
        else:
            # Bug fix: previously `hooks` was left unbound when there were
            # subscribers on other channels but none for this one, raising
            # NameError in the loop below.
            return result_list
        for handler in hooks:
            res = handler(channel, *args, **kwargs)
            if allow_cancel and res is False:
                return False
            result_list.append(res)
        return result_list

    def subscribe(self, channel, handler):
        """Register *handler* for *channel* ("*" subscribes to all channels)."""
        self.has_hooks = True
        if channel == "*":
            self.has_catch_all_hooks = True
        else:
            assert channel in (self.CHANNELS)
        assert callable(handler)
        self._hooks[channel].append(handler)

    def has_errors(self, or_warnings=False):
        # TODO(review): `or_warnings` is accepted but not forwarded to the
        # statistics manager -- confirm against the StatisticManager API.
        return self.stats.has_errors()

    def get_cli_summary(self):
        """Return the multi-line, colored result summary printed on the CLI."""
        cm = self.config_manager
        lines = []
        run_time = self.end_stamp - self.start_stamp
        user_count = len(self.session_list)
        has_errors = self.has_errors()
        ap = lines.append
        col = red if has_errors else green
        horz_line = col("=-" * 38 + "=")
        ap("Result Summary:")
        ap(horz_line)
        ap("Stressor scenario '{}' finished.".format(cm.name))
        ap("  Tag:      '{}'".format(cm.config.get("tag", "n.a.")))
        ap("  Base URL: {}".format(cm.config.get("base_url", "")))
        ap("  Start:    {}".format(self.start_dt.strftime("%Y-%m-%d %H:%M:%S")))
        ap("  End:      {}".format(self.end_dt.strftime("%Y-%m-%d %H:%M:%S")))
        ap(
            "Run time {}, net: {}.".format(
                format_elap(run_time, high_prec=True),
                format_elap(self.stats["net_act_time"], high_prec=True),
            )
        )
        ap(
            "Executed {:,} activities in {:,} sequences, using {:,} parallel sessions.".format(
                self.stats["act_count"],
                self.stats["seq_count"],
                user_count,
            )
        )
        if run_time and self.stats["seq_count"]:
            ap(
                "Sequence duration: {} average.".format(
                    format_elap(run_time / self.stats["seq_count"], high_prec=True)
                )
            )
        if run_time:
            ap(
                "    rate: {} sequences per minute (per user: {}).".format(
                    format_num(60.0 * self.stats["seq_count"] / run_time),
                    format_num(
                        60.0 * self.stats["seq_count"] / (run_time * user_count)
                    ),
                )
            )
            ap(
                "Activity rate: {} activities per second (per user: {}).".format(
                    format_num(self.stats["act_count"] / run_time),
                    format_num(self.stats["act_count"] / (run_time * user_count)),
                )
            )
        # --- List of all activities that are marked `monitor: true`
        if self.stats["monitored"]:
            print(self.stats["monitored"])
            ap("{} monitored activities:".format(len(self.stats["monitored"])))
            for path, info in self.stats["monitored"].items():
                errors = info.get("errors")
                ap("  - {}".format(path))
                if not info:
                    ap("    n: 0, min: n.a., avg: n.a., max: n.a.")
                    continue
                ap(
                    "    n: {:,}, min: {}, avg: {}, max: {}{}".format(
                        info["act_count"],
                        format_elap(info["act_time_min"], high_prec=True),
                        format_elap(info["act_time_avg"], high_prec=True),
                        format_elap(info["act_time_max"], high_prec=True),
                        red(", {} errors".format(errors)) if errors else "",
                    )
                )
        if has_errors:
            pics = emoji(" ๐ฅ ๐ ๐ฅ", "")
            ap(
                red(
                    "Result: ERROR, found {:,} errors and {:,} warnings.".format(
                        self.stats["errors"],
                        self.stats["warnings"],
                    )
                    + pics
                )
            )
            if self.stats.stats["run_limit_reached"]:
                ap(
                    yellow(
                        "Some activities where skipped due to the `max-errors` "
                        " or `max-time` limit."
                    )
                )
        else:
            pics = emoji(" โจ ๐ฐ โจ", "")
            ap(green("Result: Ok." + pics))
        ap(horz_line)
        return "\n".join(lines)

    def get_status_info(self):
        """Return a dict describing the current run for the monitor UI."""
        cm = self.config_manager
        stats_info = self.stats.get_monitor_info(cm.config_all)
        res = {
            "name": cm.name,
            "scenarioDetails": cm.get("config.details", "n.a."),
            "tag": cm.get("config.tag", "n.a."),
            "stage": self.stage,
            "stageDisplay": "done" if self.stage == "waiting" else self.stage,
            "hasErrors": self.has_errors(),
            "startTimeStr": "{}".format(self.start_dt.strftime("%Y-%m-%d %H:%M:%S")),
            "baseUrl": cm.get("config.base_url"),
            "sessionCount": self.stats["sess_count"],
            "sessionsRunning": self.stats["sess_running"],
            "stats": stats_info,
        }
        if self.end_dt:
            elap = self.end_dt - self.start_dt
            res["endTimeStr"] = "{} ({})".format(
                self.end_dt.strftime("%Y-%m-%d %H:%M:%S"),
                format_elap(elap.total_seconds()),
            )
        else:
            elap = datetime.now() - self.start_dt
            res["endTimeStr"] = "(running for {}...)".format(
                format_elap(elap.total_seconds())
            )
        return res

    def log_info(self, *args, **kwargs):
        # NOTE(review): the positional *args are forwarded after the `level`
        # keyword; publish() binds its `allow_cancel` parameter from the first
        # extra positional arg -- confirm callers never pass positionals here.
        self.publish("log", level="info", *args, **kwargs)

    def load_config(self, run_config_file):
        """Load configuration file and set shortcuts."""
        cr = ConfigManager(self.stats)
        cr.read(run_config_file, load_files=True)
        self.config_manager = cr
        # self.run_config = cr.run_config
        logger.info("Successfully compiled configuration {}.".format(cr.path))

    def _run_one(self, session_manager):
        """Run inside a separate thread."""
        try:
            session_manager.run()
            # We don't need to print results if only one session was run, since
            # it is also part of the global stats:
            # rc = self.run_config
            # if not rc.get("force_single") and rc["sessions"]["count"] > 1:
            #     logger.info(
            #         "Results for {}:\n{}".format(
            #             session_manager, session_manager.stats.format_result()
            #         )
            #     )
        except KeyboardInterrupt:
            logger.exception("Session thread received Ctrl-C")
            self.stats.report_error(None, None, None, "KeyboardInterrupt")
            self.stop_request.set()
            # self.stats.inc("errors")
        except Exception as e:
            logger.exception("Session thread raised exception")
            self.stats.report_error(None, None, None, e)
            # self.stats.inc("errors")
            # raise
        return

    def run_in_threads(self, user_list, context):
        """Start one worker thread per user and wait for all of them."""
        self.publish("start_run", run_manager=self)
        self.stop_request.clear()
        thread_list = []
        self.session_list = []
        for i, user in enumerate(user_list, 1):
            name = "t{:02}".format(i)
            sess = SessionManager(self, context, name, user)
            self.session_list.append(sess)
            t = threading.Thread(name=name, target=self._run_one, args=[sess])
            # Required to make Ctrl-C work (setDaemon() is deprecated).
            t.daemon = True
            thread_list.append(t)
        logger.info("Starting {} session workers...".format(len(thread_list)))
        self.set_stage("running")
        self.stats.report_start(None, None, None)
        ramp_up_delay = self.config_manager.sessions.get("ramp_up_delay")
        start_run = time.monotonic()
        for i, t in enumerate(thread_list):
            if ramp_up_delay and i > 1:
                delay = get_random_number(ramp_up_delay)
                logger.info(
                    "Ramp-up delay for t{:02}: {:.2f} seconds...".format(i, delay)
                )
                time.sleep(delay)
            t.start()
        logger.important(
            "All {} sessions running, waiting for them to terminate...".format(
                len(thread_list)
            )
        )
        for t in thread_list:
            t.join()
        self.set_stage("done")
        elap = time.monotonic() - start_run
        # self.stats.add_timing("run", elap)
        self.stats.report_end(None, None, None)
        self.publish("end_run", run_manager=self, elap=elap)
        logger.debug("Results for {}:\n{}".format(self, self.stats.format_result()))
        return not self.has_errors()

    def run(self, options, extra_context=None):
        """Run the current

        Args:
            options (dict): see RunManager.DEFAULT_OPTS
            extra_context (dict, optional):
        Returns:
            (int) Exit code 0 if no errors occurred
        """
        check_arg(options, dict)
        check_arg(extra_context, dict, or_none=True)
        self.options.update(options)
        if extra_context:
            self.config_manager.update_config(extra_context)
        context = self.config_manager.context
        sessions = self.config_manager.sessions
        count = int(sessions.get("count", 1))
        if count > 1 and self.config_manager.config.get("force_single"):
            logger.info("force_single: restricting sessions count to one.")
            count = 1
        # Construct a `User` with at least 'name', 'password', and optional
        # custom attributes
        user_list = []
        for user_dict in sessions["users"]:
            user = User(**user_dict)
            user_list.append(user)
        # We have N users and want `count` sessions: re-use round-robin
        user_list = itertools.islice(itertools.cycle(user_list), 0, count)
        user_list = list(user_list)
        monitor = None
        if self.options.get("monitor"):
            monitor = MonitorServer(self)
            monitor.start()
            time.sleep(0.5)
            monitor.open_browser()
        self.start_stamp = time.monotonic()
        self.start_dt = datetime.now()
        self.end_dt = None
        self.end_stamp = None
        try:
            try:
                res = False
                res = self.run_in_threads(user_list, context)
            except KeyboardInterrupt:
                # if not self.stop_request.is_set():
                logger.warning("Caught Ctrl-C: terminating...")
                self.stop()
            finally:
                self.end_dt = datetime.now()
                self.end_stamp = time.monotonic()
            if self.options.get("log_summary", True):
                logger.important(self.get_cli_summary())
            if monitor:
                self.set_stage("waiting")
                logger.important("Waiting for monitor... Press Ctrl+C to quit.")
                self.stop_request.wait()
        finally:
            if monitor:
                monitor.shutdown()
            # print("RES", res, self.has_errors(), self.stats.format_result())
            self.set_stage("stopped")
        return res

    def stop(self, graceful=2):
        """Request the current run to stop by setting `stop_request`."""
        # logger.info("Stop request received")
        # TODO: set errors += 1 if we interrupt a running stage
        self.set_stage("stopping")
        self.stop_request.set()
        return True

    def get_run_time(self):
        """Seconds elapsed since the run started."""
        return time.monotonic() - self.start_stamp
|
auto_rotate.py | #!/usr/bin/env python3
import sys, os, sh, threading, time
from tablet_mode_detection import pipe_switch_output, listen_for_switch_state
def find_monitor_name():
    """Return the name of monitor 0 as reported by `xrandr --listmonitors`.

    The monitor name is the last whitespace-separated token of the line
    starting with "0:" (e.g. "LVDS-1").  Returns "" if no such line exists.
    """
    # Capture the pipeline output directly instead of round-tripping through
    # a fixed, world-writable /tmp file; the old backward-seek parser also
    # crashed when grep produced no output.
    import subprocess
    out = subprocess.run(
        "xrandr --listmonitors | grep '0:'",
        shell=True, capture_output=True, text=True,
    ).stdout.strip()
    return out.rsplit(" ", 1)[-1] if out else ""
MONITOR_NAME = find_monitor_name()
STYLUS_NAME = "Wacom Serial Penabled Pen stylus" #This is your stylus device name. Use 'xsetwacom list devices'
#in the command line to display your touch devices. Change this
#variable as needed.
## rotate():
# Waits until tablet_mode (maintained by the switch-state listener thread) becomes
# true. While in tablet mode it repeatedly opens and reads the file that exposes
# the HDAPS accelerometer coordinates, and uses simple thresholds on those
# coordinates to decide which set of commands to invoke via sh. Those commands
# rotate the X11 display, along with the stylus input matrix, to match the
# physical orientation of your laptop/tablet. As soon as tablet_mode becomes
# false again, the display is restored to the normal orientation and the
# function returns to the main loop, waiting for tablet_mode to become true again.
def rotate():
    """Rotate the X11 display and stylus matrix to follow device orientation.

    Waits for tablet mode, then polls the HDAPS accelerometer and applies the
    matching xrandr/xsetwacom rotation until tablet mode ends.
    """
    # Import the *module* once and read the attribute each iteration: repeated
    # `from tablet_mode_detection import tablet_mode` only re-fetched the
    # attribute from the cached module anyway, so this is equivalent while
    # avoiding the import machinery inside a tight loop.
    import tablet_mode_detection as tmd
    while True:
        if not tmd.tablet_mode:
            # Avoid a hot busy-wait while in laptop mode.
            time.sleep(0.25)
            continue
        print("[Tablet Mode] On")
        while True:
            # Position file looks like "(x,y)"; take absolute values.
            with open('/sys/devices/platform/hdaps/position', 'r') as f:
                coordinates = f.read()
            x1 = abs(int(((coordinates.split("(")[1]).split(")")[0]).split(",")[0]))
            y1 = abs(int(((coordinates.split("(")[1]).split(")")[0]).split(",")[1]))
            if not tmd.tablet_mode:
                # Leaving tablet mode: restore the normal orientation.
                print("[Tablet Mode] Off")
                sh.xrandr("--output", MONITOR_NAME, "--rotate", "normal")
                sh.xsetwacom("set", STYLUS_NAME, "Rotate", "none")
                break
            elif x1 < 350:
                sh.xrandr("--output", MONITOR_NAME, "--rotate", "right")
                sh.xsetwacom("set", STYLUS_NAME, "Rotate", "cw")
            elif x1 > 620:
                sh.xrandr("--output", MONITOR_NAME, "--rotate", "left")
                sh.xsetwacom("set", STYLUS_NAME, "Rotate", "ccw")
            elif y1 < 350:
                sh.xrandr("--output", MONITOR_NAME, "--rotate", "inverted")
                sh.xsetwacom("set", STYLUS_NAME, "Rotate", "half")
            elif y1 > 620:
                sh.xrandr("--output", MONITOR_NAME, "--rotate", "normal")
                sh.xsetwacom("set", STYLUS_NAME, "Rotate", "none")
# Background thread that pipes the hardware tablet-switch events.
switch_output_thread = threading.Thread(target=pipe_switch_output)
switch_output_thread.start()
# Give the pipe a moment to come up before listening on it.
time.sleep(.5)
# Background thread that updates tablet_mode from the switch state.
listen_switch_thread = threading.Thread(target=listen_for_switch_state)
listen_switch_thread.start()
print(f"Starting autorotation...\n==>Please confirm that the following monitor name\nmatches with the monitor name found in the command output that proceeds it\n(should be something like 'LVDS-1' or a similarly lengthed string in capital letters)\n<==\nMonitor Name: {MONITOR_NAME}\n[xrandr --listmonitors | grep '0:'] Output:")
os.system("xrandr --listmonitors | grep '0:'")
print(f"\n==>Please confirm that the following wacom device matches with the one that you wish to use from the output that proceeds it.\n<==\nWacom Device: {STYLUS_NAME}\n[xsetwacom list devices] Output:")
os.system("xsetwacom list devices")
print("\nNote: When you physically convert your laptop into tablet mode,\n'[Tablet Mode] On' should be echoed in the terminal/console.\nConversely, '[Tablet Mode] Off' should be echoed.")
# Main loop: blocks forever, rotating the display as the device is flipped.
rotate()
|
tasks.py | import os
import sys
import fcntl
import datetime
import json
import re
import time
import zipfile
import threading
import hashlib
import shutil
import subprocess
import pprint
import random
from invoke import task
import boto3
import botocore.exceptions
import multiprocessing
import io
import ai2thor.build
import logging
# Configure the root logger to mirror INFO+ messages to stdout so the
# invoke-task output is visible in terminals and CI logs.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
formatter = logging.Formatter(
    "%(asctime)s [%(process)d] %(funcName)s - %(levelname)s - %(message)s"
)
handler.setFormatter(formatter)
logger.addHandler(handler)
def add_files(zipf, start_dir, exclude_ext=()):
    """Recursively add the files under *start_dir* to the open ZipFile *zipf*.

    Archive paths are stored relative to *start_dir*.  Files ending with any
    extension in *exclude_ext* are skipped, as is Unity's top-level
    ``*_BackUpThisFolder_ButDontShipItWithYourGame`` directory.
    """
    exclude_ext = tuple(exclude_ext)  # str.endswith requires a tuple
    for root, _dirs, files in os.walk(start_dir):
        for f in files:
            fn = os.path.join(root, f)
            # endswith() accepts a tuple, replacing the old any(map(lambda...))
            if exclude_ext and fn.endswith(exclude_ext):
                # print("skipping file %s" % fn)
                continue
            arcname = os.path.relpath(fn, start_dir)
            if arcname.split("/")[0].endswith("_BackUpThisFolder_ButDontShipItWithYourGame"):
                # print("skipping %s" % arcname)
                continue
            # print("adding %s" % arcname)
            zipf.write(fn, arcname)
def push_build(build_archive_name, zip_data, include_private_scenes):
    """Upload a build archive and its sha256 checksum to S3.

    :param build_archive_name: local path of the zip; its basename becomes the
        S3 key under ``builds/``
    :param zip_data: archive contents as bytes
    :param include_private_scenes: if True, use the private bucket and ACL
    """
    logger.info("start of push_build")
    from base64 import b64encode
    # subprocess.run("ls %s" % build_archive_name, shell=True)
    # subprocess.run("gsha256sum %s" % build_archive_name)
    logger.info("boto3 resource")
    s3 = boto3.resource("s3", region_name="us-west-2")
    acl = "public-read"
    bucket = ai2thor.build.PUBLIC_S3_BUCKET
    if include_private_scenes:
        bucket = ai2thor.build.PRIVATE_S3_BUCKET
        acl = "private"
    logger.info("archive base")
    archive_base = os.path.basename(build_archive_name)
    key = "builds/%s" % (archive_base,)
    sha256_key = "builds/%s.sha256" % (os.path.splitext(archive_base)[0],)
    logger.info("hashlib sha256")
    sha = hashlib.sha256(zip_data)
    try:
        logger.info("pushing build %s" % (key,))
        s3.Object(bucket, key).put(Body=zip_data, ACL=acl, ChecksumSHA256=b64encode(sha.digest()).decode('ascii'))
        logger.info("pushing sha256 %s" % (sha256_key,))
        s3.Object(bucket, sha256_key).put(
            Body=sha.hexdigest(), ACL=acl, ContentType="text/plain"
        )
    except botocore.exceptions.ClientError as e:
        logger.error("caught error uploading archive %s: %s" % (build_archive_name, e))
    else:
        # Bug fix: the success message used to log unconditionally (even after
        # a caught upload error) and had its format args swapped
        # (bucket/archive reversed).
        logger.info("pushed build %s to %s" % (build_archive_name, bucket))
def _webgl_local_build_path(prefix, source_dir="builds"):
return os.path.join(
os.getcwd(), "unity/{}/thor-{}-WebGL/".format(source_dir, prefix)
)
def _unity_version():
    """Return the Unity editor version recorded in ProjectSettings."""
    import yaml
    version_file = "unity/ProjectSettings/ProjectVersion.txt"
    with open(version_file) as pf:
        contents = pf.read()
    settings = yaml.load(contents, Loader=yaml.FullLoader)
    return settings["m_EditorVersion"]
def _unity_path():
    """Return the path of the Unity editor binary for the project's version.

    Prefers a standalone installation over a Unity Hub one when it exists.

    :raises RuntimeError: on an unsupported platform.
    """
    unity_version = _unity_version()
    standalone_path = None
    unity_hub_path = None
    if sys.platform.startswith("darwin"):
        unity_hub_path = (
            "/Applications/Unity/Hub/Editor/{}/Unity.app/Contents/MacOS/Unity".format(
                unity_version
            )
        )
        standalone_path = (
            "/Applications/Unity-{}/Unity.app/Contents/MacOS/Unity".format(
                unity_version
            )
        )
    elif sys.platform.startswith("win"):
        # Bug fix: the old test `"win" in sys.platform` also matches "darwin"
        # and only worked because the darwin branch was checked first.
        unity_hub_path = "C:/PROGRA~1/Unity/Hub/Editor/{}/Editor/Unity.exe".format(
            unity_version
        )
        # TODO: Verify windows unity standalone path
        standalone_path = "C:/PROGRA~1/{}/Editor/Unity.exe".format(unity_version)
    elif sys.platform.startswith("linux"):
        unity_hub_path = "{}/Unity/Hub/Editor/{}/Editor/Unity".format(
            os.environ["HOME"], unity_version
        )
    if standalone_path and os.path.exists(standalone_path):
        unity_path = standalone_path
    elif unity_hub_path is None:
        # Bug fix: an unsupported platform used to crash here with an opaque
        # UnboundLocalError.
        raise RuntimeError("Unsupported platform for Unity builds: %s" % sys.platform)
    else:
        unity_path = unity_hub_path
    return unity_path
def _build(unity_path, arch, build_dir, build_name, env=None):
    """Invoke the Unity editor in batch mode to produce a build.

    :param unity_path: project directory relative to the CWD (e.g. "unity")
    :param arch: build target; also names the Build.<arch> method to execute
    :param build_dir: output directory (relative to the project)
    :param build_name: name of the produced build (also names the log file)
    :param env: extra environment variables for the Unity process
    :returns: True if the build succeeded
    """
    project_path = os.path.join(os.getcwd(), unity_path)
    # osxintel64 is not a BuildTarget
    build_target_map = dict(OSXIntel64="OSXUniversal")
    # -buildTarget must be passed as an option for the CloudRendering target otherwise a clang error
    # will get thrown complaining about missing features.h
    command = (
        "%s -quit -batchmode -logFile %s/%s.log -projectpath %s -buildTarget %s -executeMethod Build.%s"
        % (_unity_path(), os.getcwd(), build_name, project_path, build_target_map.get(arch, arch), arch)
    )
    target_path = os.path.join(build_dir, build_name)
    full_env = os.environ.copy()
    if env:
        full_env.update(env)
    full_env["UNITY_BUILD_NAME"] = target_path
    # Bug fix: subprocess.check_call() raises on a non-zero exit and otherwise
    # always returns 0, so the old `success = result_code == 0` check was dead
    # code and the function could never report failure.  Use call() so the
    # exit status actually drives the returned success flag.
    result_code = subprocess.call(command, shell=True, env=full_env)
    print("Exited with code {}".format(result_code))
    success = result_code == 0
    if success:
        generate_build_metadata(os.path.join(project_path, build_dir, "metadata.json"))
    return success
def generate_build_metadata(metadata_path):
    # this server_types metadata is maintained
    # to allow future versions of the Python API
    # to launch older versions of the Unity build
    # and know whether the Fifo server is available
    server_types = ["WSGI"]
    try:
        import ai2thor.fifo_server

        server_types.append("FIFO")
    except Exception:
        # best effort: FIFO support simply is not advertised when the
        # module cannot be imported
        pass
    with open(metadata_path, "w") as metadata_file:
        metadata_file.write(json.dumps(dict(server_types=server_types)))
def class_dataset_images_for_scene(scene_name):
    """Capture cropped per-object class images for one AI2-THOR scene.

    Two passes over the scene:
      1. Low-resolution pass: teleport through every reachable position,
         rotation and camera horizon and record the poses at which
         pickupable objects are clearly visible (segmentation-mask
         bounding box larger than ``min_size`` and not touching the
         frame border).
      2. High-resolution pass: revisit only the recorded poses and write
         a ``target_size`` x ``target_size`` crop of each visible object
         under ``images/<scene_name>/<object_type>/<md5>.png``.

    Returns the scene name so pool-based callers can report completion.
    """
    import ai2thor.controller
    from itertools import product
    from collections import defaultdict
    import numpy as np
    import cv2
    env = ai2thor.controller.Controller(quality="Low")
    player_size = 300
    zoom_size = 1000
    target_size = 256
    rotations = [0, 90, 180, 270]
    horizons = [330, 0, 30]
    buffer = 15
    # object must be at least 40% in view
    min_size = ((target_size * 0.4) / zoom_size) * player_size
    env.start(width=player_size, height=player_size)
    env.reset(scene_name)
    event = env.step(
        dict(
            action="Initialize",
            gridSize=0.25,
            renderInstanceSegmentation=True,
            renderSemanticSegmentation=False,
            renderImage=False,
        )
    )
    # Open every openable receptacle that contains objects so the contents
    # become visible for the visibility sweep below.
    for o in event.metadata["objects"]:
        if o["receptacle"] and o["receptacleObjectIds"] and o["openable"]:
            print("opening %s" % o["objectId"])
            env.step(
                dict(action="OpenObject", objectId=o["objectId"], forceAction=True)
            )
    event = env.step(dict(action="GetReachablePositions", gridSize=0.25))
    visible_object_locations = []
    for point in event.metadata["actionReturn"]:
        for rot, hor in product(rotations, horizons):
            # NOTE(review): exclude_colors collects segmentation colors on the
            # frame border but is never used below — presumably dead code left
            # from an earlier filtering approach; confirm before removing.
            exclude_colors = set(
                map(tuple, np.unique(event.instance_segmentation_frame[0], axis=0))
            )
            exclude_colors.update(
                set(
                    map(
                        tuple,
                        np.unique(event.instance_segmentation_frame[:, -1, :], axis=0),
                    )
                )
            )
            exclude_colors.update(
                set(
                    map(tuple, np.unique(event.instance_segmentation_frame[-1], axis=0))
                )
            )
            exclude_colors.update(
                set(
                    map(
                        tuple,
                        np.unique(event.instance_segmentation_frame[:, 0, :], axis=0),
                    )
                )
            )
            event = env.step(
                dict(
                    action="TeleportFull",
                    x=point["x"],
                    y=point["y"],
                    z=point["z"],
                    rotation=rot,
                    horizon=hor,
                    forceAction=True,
                ),
                raise_for_failure=True,
            )
            visible_objects = []
            for o in event.metadata["objects"]:
                if o["visible"] and o["objectId"] and o["pickupable"]:
                    # Bounding box of the object's instance-segmentation mask.
                    color = event.object_id_to_color[o["objectId"]]
                    mask = (
                        (event.instance_segmentation_frame[:, :, 0] == color[0])
                        & (event.instance_segmentation_frame[:, :, 1] == color[1])
                        & (event.instance_segmentation_frame[:, :, 2] == color[2])
                    )
                    points = np.argwhere(mask)
                    if len(points) > 0:
                        min_y = int(np.min(points[:, 0]))
                        max_y = int(np.max(points[:, 0]))
                        min_x = int(np.min(points[:, 1]))
                        max_x = int(np.max(points[:, 1]))
                        max_dim = max((max_y - min_y), (max_x - min_x))
                        # Keep only objects that are big enough and fully
                        # inside the frame (with a `buffer` pixel margin).
                        if (
                            max_dim > min_size
                            and min_y > buffer
                            and min_x > buffer
                            and max_x < (player_size - buffer)
                            and max_y < (player_size - buffer)
                        ):
                            visible_objects.append(
                                dict(
                                    objectId=o["objectId"],
                                    min_x=min_x,
                                    min_y=min_y,
                                    max_x=max_x,
                                    max_y=max_y,
                                )
                            )
                            print(
                                "[%s] including object id %s %s"
                                % (scene_name, o["objectId"], max_dim)
                            )
            if visible_objects:
                visible_object_locations.append(
                    dict(point=point, rot=rot, hor=hor, visible_objects=visible_objects)
                )
    env.stop()
    # Second pass: re-render the recorded poses at high resolution and crop.
    env = ai2thor.controller.Controller()
    env.start(width=zoom_size, height=zoom_size)
    env.reset(scene_name)
    event = env.step(dict(action="Initialize", gridSize=0.25))
    for o in event.metadata["objects"]:
        if o["receptacle"] and o["receptacleObjectIds"] and o["openable"]:
            print("opening %s" % o["objectId"])
            env.step(
                dict(action="OpenObject", objectId=o["objectId"], forceAction=True)
            )
    for vol in visible_object_locations:
        point = vol["point"]
        event = env.step(
            dict(
                action="TeleportFull",
                x=point["x"],
                y=point["y"],
                z=point["z"],
                rotation=vol["rot"],
                horizon=vol["hor"],
                forceAction=True,
            ),
            raise_for_failure=True,
        )
        for v in vol["visible_objects"]:
            object_id = v["objectId"]
            # Scale the low-res bounding box up to the high-res frame.
            min_y = int(round(v["min_y"] * (zoom_size / player_size)))
            max_y = int(round(v["max_y"] * (zoom_size / player_size)))
            max_x = int(round(v["max_x"] * (zoom_size / player_size)))
            min_x = int(round(v["min_x"] * (zoom_size / player_size)))
            delta_y = max_y - min_y
            delta_x = max_x - min_x
            scaled_target_size = max(delta_x, delta_y, target_size) + buffer * 2
            # Grow the crop window toward the side with more room so the
            # object stays inside the frame.
            if min_x > (zoom_size - max_x):
                start_x = min_x - (scaled_target_size - delta_x)
                end_x = max_x + buffer
            else:
                end_x = max_x + (scaled_target_size - delta_x)
                start_x = min_x - buffer
            if min_y > (zoom_size - max_y):
                start_y = min_y - (scaled_target_size - delta_y)
                end_y = max_y + buffer
            else:
                end_y = max_y + (scaled_target_size - delta_y)
                start_y = min_y - buffer
            # print("max x %s max y %s min x %s min y %s" % (max_x, max_y, min_x, min_y))
            # print("start x %s start_y %s end_x %s end y %s" % (start_x, start_y, end_x, end_y))
            print("storing %s " % object_id)
            img = event.cv2img[start_y:end_y, start_x:end_x, :]
            dst = cv2.resize(
                img, (target_size, target_size), interpolation=cv2.INTER_LANCZOS4
            )
            object_type = object_id.split("|")[0].lower()
            target_dir = os.path.join("images", scene_name, object_type)
            # File name is the md5 of the pose + bounding box, so re-runs are
            # deterministic and don't duplicate images.
            h = hashlib.md5()
            h.update(json.dumps(point, sort_keys=True).encode("utf8"))
            h.update(json.dumps(v, sort_keys=True).encode("utf8"))
            os.makedirs(target_dir, exist_ok=True)
            cv2.imwrite(os.path.join(target_dir, h.hexdigest() + ".png"), dst)
    env.stop()
    return scene_name
@task
def build_class_dataset(context):
    """Generate the class image dataset for every scene, four scenes at a time."""
    import concurrent.futures
    import ai2thor.controller

    multiprocessing.set_start_method("spawn")
    controller = ai2thor.controller.Controller()
    pool = concurrent.futures.ProcessPoolExecutor(max_workers=4)
    pending = []
    for scene in controller.scene_names():
        print("processing scene %s" % scene)
        pending.append(pool.submit(class_dataset_images_for_scene, scene))
    for finished in concurrent.futures.as_completed(pending):
        print("scene name complete: %s" % finished.result())
def local_build_name(prefix, arch):
    """Name used for local (non-CI) builds, e.g. ``thor-local-OSXIntel64``."""
    return "thor-%s-%s" % (prefix, arch)
@task
def local_build_test(context, prefix="local", arch="OSXIntel64"):
    """Run a local build restricted to the single test scene."""
    from ai2thor.tests.constants import TEST_SCENE

    local_build(context, prefix, arch, [TEST_SCENE])
@task(iterable=["scenes"])
def local_build(
    context, prefix="local", arch="OSXIntel64", scenes=None, scripts_only=False
):
    """Build the Unity player locally, optionally limited to specific scenes."""
    import ai2thor.controller

    build = ai2thor.build.Build(arch, prefix, False)
    env = dict()
    # Private scenes are bundled only when their directory is checked out.
    if os.path.isdir("unity/Assets/Private/Scenes"):
        env["INCLUDE_PRIVATE_SCENES"] = "true"
    build_dir = os.path.join("builds", build.name)
    if scripts_only:
        env["BUILD_SCRIPTS_ONLY"] = "true"
    if scenes:
        normalized = [
            ai2thor.controller.Controller.normalize_scene(scene) for scene in scenes
        ]
        env["BUILD_SCENES"] = ",".join(normalized)
    if _build("unity", arch, build_dir, build.name, env=env):
        print("Build Successful")
    else:
        print("Build Failure")
    generate_quality_settings(context)
@task
def webgl_build(
    context,
    scenes="",
    room_ranges=None,
    directory="builds",
    prefix="local",
    verbose=False,
    content_addressable=False,
    crowdsource_build=False,
):
    """
    Creates a WebGL build
    :param context:
    :param scenes: String of scenes to include in the build as a comma separated list
    :param room_ranges: Comma separated list of "start-end" floor-plan number
        ranges (e.g. "1-3,201-210"); overrides ``scenes`` when given
    :param directory: Output directory for the build (default "builds")
    :param prefix: Prefix name for the build
    :param verbose: Print the scene list and scene metadata while building
    :param content_addressable: Whether to change the unityweb build files to be content-addressable
        have their content hashes as part of their names.
    :param crowdsource_build: Define CROWDSOURCE_TASK for the Unity build
    :return:
    """
    from functools import reduce

    def file_to_content_addressable(file_path):
        # Rename the file to "<md5-of-contents>_<original-name>".
        # name_split = os.path.splitext(file_path)
        path_split = os.path.split(file_path)
        directory = path_split[0]
        file_name = path_split[1]
        print("File name {} ".format(file_name))
        with open(file_path, "rb") as f:
            h = hashlib.md5()
            h.update(f.read())
            md5_id = h.hexdigest()
        new_file_name = "{}_{}".format(md5_id, file_name)
        os.rename(file_path, os.path.join(directory, new_file_name))

    arch = "WebGL"
    build_name = local_build_name(prefix, arch)
    if room_ranges is not None:
        # Expand each "start-end" range into FloorPlan names; the inner lambda
        # appends end+1 because range() excludes its upper bound.
        floor_plans = [
            "FloorPlan{}_physics".format(i)
            for i in reduce(
                lambda x, y: x + y,
                map(
                    lambda x: x + [x[-1] + 1],
                    [
                        list(range(*tuple(int(y) for y in x.split("-"))))
                        for x in room_ranges.split(",")
                    ],
                ),
            )
        ]
        scenes = ",".join(floor_plans)
    if verbose:
        print(scenes)
    env = dict(BUILD_SCENES=scenes)
    # https://forum.unity.com/threads/cannot-build-for-webgl-in-unity-system-dllnotfoundexception.1254429/
    # without setting this environment variable the error mentioned in the thread will get thrown
    os.environ["EMSDK_PYTHON"] = "/usr/bin/python3"
    if crowdsource_build:
        env["DEFINES"] = "CROWDSOURCE_TASK"
    if _build("unity", arch, directory, build_name, env=env):
        print("Build Successful")
    else:
        print("Build Failure")
    build_path = _webgl_local_build_path(prefix, directory)
    generate_quality_settings(context)
    # the remainder of this is only used to generate scene metadata, but it
    # is not part of building webgl player
    rooms = {
        "kitchens": {"name": "Kitchens", "roomRanges": range(1, 31)},
        "livingRooms": {"name": "Living Rooms", "roomRanges": range(201, 231)},
        "bedrooms": {"name": "Bedrooms", "roomRanges": range(301, 331)},
        "bathrooms": {"name": "Bathrooms", "roomRanges": range(401, 431)},
        "foyers": {"name": "Foyers", "roomRanges": range(501, 531)},
    }
    room_type_by_id = {}
    for room_type, room_data in rooms.items():
        for room_num in room_data["roomRanges"]:
            room_id = "FloorPlan{}_physics".format(room_num)
            room_type_by_id[room_id] = {"type": room_type, "name": room_data["name"]}
    # Group the built scenes by room type for the demo's scene picker.
    scene_metadata = {}
    for scene_name in scenes.split(","):
        if scene_name not in room_type_by_id:
            # allows for arbitrary scenes to be included dynamically
            room_type = {"type": "Other", "name": None}
        else:
            room_type = room_type_by_id[scene_name]
        if room_type["type"] not in scene_metadata:
            scene_metadata[room_type["type"]] = {
                "scenes": [],
                "name": room_type["name"],
            }
        scene_metadata[room_type["type"]]["scenes"].append(scene_name)
    if verbose:
        print(scene_metadata)
    to_content_addressable = [
        ("{}.data".format(build_name), "dataUrl"),
        ("{}.loader.js".format(build_name), "loaderUrl"),
        ("{}.wasm".format(build_name), "wasmCodeUrl"),
        ("{}.framework.js".format(build_name), "wasmFrameworkUrl"),
    ]
    if content_addressable:
        for file_name, key in to_content_addressable:
            file_to_content_addressable(
                os.path.join(build_path, "Build/{}".format(file_name)),
            )
    with open(os.path.join(build_path, "scenes.json"), "w") as f:
        f.write(json.dumps(scene_metadata, sort_keys=False, indent=4))
@task
def generate_quality_settings(ctx):
    """Regenerate ai2thor/_quality_settings.py from Unity's QualitySettings.asset."""
    import yaml

    class YamlUnity3dTag(yaml.SafeLoader):
        # Constructor that loads a Unity-tagged YAML node as a plain mapping.
        def let_through(self, node):
            return self.construct_mapping(node)

    YamlUnity3dTag.add_constructor(
        "tag:unity3d.com,2011:47", YamlUnity3dTag.let_through
    )
    qs = yaml.load(
        open("unity/ProjectSettings/QualitySettings.asset").read(),
        Loader=YamlUnity3dTag,
    )
    # Map quality level name -> index, preserving the order in the asset file.
    quality_settings = {}
    default = "Ultra"
    for i, q in enumerate(qs["QualitySettings"]["m_QualitySettings"]):
        quality_settings[q["name"]] = i
    assert default in quality_settings
    with open("ai2thor/_quality_settings.py", "w") as f:
        f.write("# GENERATED FILE - DO NOT EDIT\n")
        f.write("DEFAULT_QUALITY = '%s'\n" % default)
        f.write("QUALITY_SETTINGS = " + pprint.pformat(quality_settings))
def git_commit_comment():
    """Return the commit message body of the most recent git commit."""
    raw = subprocess.check_output("git log -n 1 --format=%B", shell=True)
    return raw.decode("utf8").strip()
def git_commit_id():
    """Return the full SHA of the most recent git commit."""
    raw = subprocess.check_output("git log -n 1 --format=%H", shell=True)
    return raw.decode("ascii").strip()
@task
def deploy_pip(context):
    """Upload the dist/ artifacts to PyPI using a twine API token."""
    if "TWINE_PASSWORD" not in os.environ:
        raise Exception("Twine token not specified in environment")
    subprocess.check_call("twine upload -u __token__ dist/*", shell=True)
@task
def push_pip_commit(context):
    """Upload the commit-versioned pip artifacts in dist/ to S3."""
    import glob

    commit_id = git_commit_id()
    s3 = boto3.resource("s3")
    # the "public-read" ACL is invariant across artifacts
    acl = "public-read"
    for artifact in glob.glob("dist/ai2thor-0+%s*" % commit_id):
        pip_name = os.path.basename(artifact)
        logger.info("pushing pip file %s" % artifact)
        with open(artifact, "rb") as f:
            s3.Object(
                ai2thor.build.PYPI_S3_BUCKET, os.path.join("ai2thor", pip_name)
            ).put(Body=f, ACL=acl)
@task
def build_pip_commit(context):
    """Build sdist/wheel artifacts versioned by the current commit id."""
    commit_id = git_commit_id()
    if os.path.isdir("dist"):
        shutil.rmtree("dist")
    generate_quality_settings(context)
    # must use this form to create valid PEP440 version specifier
    version = "0+" + commit_id
    with open("ai2thor/_builds.py", "w") as f:
        f.write("# GENERATED FILE - DO NOT EDIT\n")
        f.write("COMMIT_ID = '%s'\n" % commit_id)
    with open("ai2thor/_version.py", "w") as f:
        f.write("# Copyright Allen Institute for Artificial Intelligence 2021\n")
        f.write("# GENERATED FILE - DO NOT EDIT\n")
        f.write("__version__ = '%s'\n" % (version))
    for cmd in (
        "python setup.py clean --all",
        "python setup.py sdist bdist_wheel --universal",
    ):
        subprocess.check_call(cmd, shell=True)
@task
def build_pip(context, version):
    """Build a release sdist/wheel after validating *version* against PyPI,
    git tags, the main branch, and the existence of all platform builds.

    Raises an Exception when any release precondition fails.
    """
    import xml.etree.ElementTree as ET
    import requests

    # Latest released version is the first item of the PyPI releases RSS feed.
    res = requests.get("https://pypi.org/rss/project/ai2thor/releases.xml")
    res.raise_for_status()
    root = ET.fromstring(res.content)
    latest_version = None
    for title in root.findall("./channel/item/title"):
        latest_version = title.text
        break
    # make sure that the tag is on this commit
    commit_tags = (
        subprocess.check_output("git tag --points-at", shell=True)
        .decode("ascii")
        .strip()
        .split("\n")
    )
    if version not in commit_tags:
        raise Exception("tag %s is not on current commit" % version)
    commit_id = git_commit_id()
    # The tagged commit must be reachable from main (checked against the
    # most recent page of commits returned by the GitHub API).
    res = requests.get("https://api.github.com/repos/allenai/ai2thor/commits?sha=main")
    res.raise_for_status()
    if commit_id not in map(lambda c: c["sha"], res.json()):
        raise Exception("tag %s is not off the main branch" % version)
    # NOTE(review): the `\.+` in this pattern permits multiple consecutive
    # dots (e.g. "1..2.3") — likely intended to be a single `\.`; confirm.
    if not re.match(r"^[0-9]{1,3}\.+[0-9]{1,3}\.[0-9]{1,3}$", version):
        raise Exception("invalid version: %s" % version)
    for plat in ai2thor.build.AUTO_BUILD_PLATFORMS:
        commit_build = ai2thor.build.Build(plat, commit_id, False)
        if not commit_build.exists():
            raise Exception("Build does not exist for %s/%s" % (commit_id, plat.name()))
    current_maj, current_min, current_sub = list(map(int, latest_version.split(".")))
    next_maj, next_min, next_sub = list(map(int, version.split(".")))
    # NOTE(review): `next_sub >= current_sub + 1` allows the patch number to
    # jump by more than 1, although the error message says "by only 1".
    if (
        (next_maj == current_maj + 1)
        or (next_maj == current_maj and next_min == current_min + 1)
        or (
            next_maj == current_maj
            and next_min == current_min
            and next_sub >= current_sub + 1
        )
    ):
        if os.path.isdir("dist"):
            shutil.rmtree("dist")
        generate_quality_settings(context)
        with open("ai2thor/_builds.py", "w") as fi:
            fi.write("# GENERATED FILE - DO NOT EDIT\n")
            fi.write("COMMIT_ID = '%s'\n" % commit_id)
        with open("ai2thor/_version.py", "w") as fi:
            fi.write("# Copyright Allen Institute for Artificial Intelligence 2021\n")
            fi.write("# GENERATED FILE - DO NOT EDIT\n")
            fi.write("__version__ = '%s'\n" % (version))
        subprocess.check_call("python setup.py clean --all", shell=True)
        subprocess.check_call(
            "python setup.py sdist bdist_wheel --universal", shell=True
        )
    else:
        raise Exception(
            "Invalid version increment: new version=%s,current version=%s; must increment the major, minor or patch by only 1"
            % (version, latest_version)
        )
@task
def fetch_source_textures(context):
    """Download the source-textures archive and unpack it into the CWD."""
    import ai2thor.downloader

    zip_data = ai2thor.downloader.download(
        "http://s3-us-west-2.amazonaws.com/ai2-thor/assets/source-textures.zip",
        "source-textures",
        "75476d60a05747873f1173ba2e1dbe3686500f63bcde3fc3b010eea45fa58de7",
    )
    zipfile.ZipFile(io.BytesIO(zip_data)).extractall(os.getcwd())
def build_log_push(build_info, include_private_scenes):
    """Upload a build log (plus any recorded build exception) to S3."""
    with open(build_info["log"]) as f:
        build_log = f.read() + "\n" + build_info.get("build_exception", "")
    build_log_key = "builds/" + build_info["log"]
    # Private builds go to the private bucket with a private ACL.
    if include_private_scenes:
        bucket = ai2thor.build.PRIVATE_S3_BUCKET
        acl = "private"
    else:
        bucket = ai2thor.build.PUBLIC_S3_BUCKET
        acl = "public-read"
    boto3.resource("s3").Object(bucket, build_log_key).put(
        Body=build_log, ACL=acl, ContentType="text/plain"
    )
def archive_push(unity_path, build_path, build_dir, build_info, include_private_scenes):
    """Zip a finished Unity build and push the archive and its log to S3.

    Sets ``success`` on the current thread object so callers that join the
    thread can inspect the outcome.
    """
    threading.current_thread().success = False
    archive_name = os.path.join(unity_path, build_path)
    zip_buf = io.BytesIO()
    # Unity build is done with CompressWithLz4. Zip with compresslevel=1
    # results in smaller builds than Uncompressed Unity + zip comprseslevel=6 (default)
    logger.info("building zip archive %s %s" % (archive_name, os.path.join(unity_path, build_dir)))
    with zipfile.ZipFile(zip_buf, "w", zipfile.ZIP_DEFLATED, compresslevel=1) as zipf:
        add_files(zipf, os.path.join(unity_path, build_dir), exclude_ext=('.debug',))
    zip_buf.seek(0)
    zip_data = zip_buf.read()
    logger.info("generated zip archive %s %s" % (archive_name, len(zip_data)))
    push_build(archive_name, zip_data, include_private_scenes)
    build_log_push(build_info, include_private_scenes)
    print("Build successful")
    threading.current_thread().success = True
@task
def pre_test(context):
    """Move a freshly built .app into its per-build directory before tests."""
    import ai2thor.controller

    controller = ai2thor.controller.Controller()
    os.makedirs("unity/builds/%s" % controller.build_name())
    shutil.move(
        os.path.join("unity", "builds", controller.build_name() + ".app"),
        "unity/builds/%s" % controller.build_name(),
    )
def clean():
    """Hard-reset the working tree and refresh the private-scenes checkout."""
    import scripts.update_private

    # a deploy key is used on the build server and an .ssh/config entry has been added
    # to point to the deploy key called ai2thor-private-github
    scripts.update_private.private_repo_url = (
        "git@ai2thor-private-github:allenai/ai2thor-private.git"
    )
    for git_cmd in ("git reset --hard", "git clean -f -d -x"):
        subprocess.check_call(git_cmd, shell=True)
    shutil.rmtree("unity/builds", ignore_errors=True)
    shutil.rmtree(scripts.update_private.private_dir, ignore_errors=True)
    scripts.update_private.checkout_branch()
def ci_prune_cache(cache_dir):
    """Delete the oldest cache directories, keeping the 60 most recent.

    The "main" cache entry is never pruned.
    """
    # keeping the most recent 60 entries (this keeps the cache around 300GB-500GB)
    cache_paths = [e.path for e in os.scandir(cache_dir) if os.path.isdir(e.path)]
    cache_paths.sort(key=lambda p: os.stat(p).st_mtime)
    for path in cache_paths[:-60]:
        if os.path.basename(path) != "main":
            logger.info("pruning cache directory: %s" % path)
            shutil.rmtree(path)
def link_build_cache(root_dir, arch, branch):
    """Point ``<root_dir>/unity/Library`` at a per-branch, per-arch cache dir.

    Bootstraps a new branch cache by cloning the "main" cache when one
    exists, and prunes old cache entries as a side effect.
    """
    library_path = os.path.join(root_dir, "unity", "Library")
    logger.info("linking build cache for %s" % branch)
    # Remove any stale symlink (or dangling link) before re-linking.
    if os.path.lexists(library_path):
        os.unlink(library_path)
    # this takes takes care of branches with '/' in it
    # to avoid implicitly creating directories under the cache dir
    encoded_branch = re.sub(r"[^a-zA-Z0-9_\-.]", "_", re.sub("_", "__", branch))
    cache_base_dir = os.path.join(os.environ["HOME"], "cache")
    os.makedirs(cache_base_dir, exist_ok=True)
    ci_prune_cache(cache_base_dir)
    main_cache_dir = os.path.join(cache_base_dir, "main", arch)
    branch_cache_dir = os.path.join(cache_base_dir, encoded_branch, arch)
    # use the main cache as a starting point to avoid
    # having to re-import all assets, which can take up to 1 hour
    if not os.path.exists(branch_cache_dir) and os.path.exists(main_cache_dir):
        logger.info("copying main cache for %s" % encoded_branch)
        os.makedirs(os.path.dirname(branch_cache_dir), exist_ok=True)
        # -c uses MacOS clonefile
        subprocess.check_call(
            "cp -a -c %s %s" % (main_cache_dir, branch_cache_dir), shell=True
        )
        logger.info("copying main cache complete for %s" % encoded_branch)
    branch_library_cache_dir = os.path.join(branch_cache_dir, "Library")
    os.makedirs(branch_library_cache_dir, exist_ok=True)
    os.symlink(branch_library_cache_dir, library_path)
    # update atime/mtime to simplify cache pruning
    os.utime(os.path.join(cache_base_dir, encoded_branch))
def travis_build(build_id):
    """Fetch a single Travis CI build record via the v3 API."""
    import requests

    headers = {
        "Accept": "application/json",
        "Content-Type": "application/json",
        "Travis-API-Version": "3",
    }
    res = requests.get(
        "https://api.travis-ci.com/build/%s" % build_id, headers=headers
    )
    res.raise_for_status()
    return res.json()
def pending_travis_build():
    """Return branch/commit/tag/id of the most recently started Travis build.

    Implicitly returns None when no build is in the "started" state.
    """
    import requests

    headers = {
        "Accept": "application/json",
        "Content-Type": "application/json",
        "Travis-API-Version": "3",
    }
    res = requests.get(
        "https://api.travis-ci.com/repo/3459357/builds?include=build.id%2Cbuild.commit%2Cbuild.branch%2Cbuild.request%2Cbuild.created_by%2Cbuild.repository&build.state=started&sort_by=started_at:desc",
        headers=headers,
        timeout=10,
    )
    for b in res.json()["builds"]:
        return {
            "branch": b["branch"]["name"],
            "commit_id": b["commit"]["sha"],
            "tag": b["tag"]["name"] if b["tag"] else None,
            "id": b["id"],
        }
def pytest_s3_object(commit_id):
    """S3 object that stores the merged pytest results for *commit_id*."""
    pytest_key = "builds/pytest-%s.json" % commit_id
    return boto3.resource("s3").Object(ai2thor.build.PUBLIC_S3_BUCKET, pytest_key)
@task
def ci_merge_push_pytest_results(context, commit_id):
    """Merge the pytest and UTF test result files and publish them to S3."""
    s3_obj = pytest_s3_object(commit_id)
    s3_pytest_url = "http://s3-us-west-2.amazonaws.com/%s/%s" % (
        s3_obj.bucket_name,
        s3_obj.key,
    )
    logger.info("pytest url %s" % s3_pytest_url)
    merged_result = dict(success=True, stdout="", stderr="")
    # overall success requires every individual result file to report success
    for result_file in ("tmp/pytest_results.json", "tmp/test_utf_results.json"):
        with open(result_file) as f:
            result = json.loads(f.read())
        merged_result["success"] &= result["success"]
        merged_result["stdout"] += result["stdout"] + "\n"
        merged_result["stderr"] += result["stderr"] + "\n"
    s3_obj.put(
        Body=json.dumps(merged_result), ACL="public-read", ContentType="application/json"
    )
def ci_pytest(branch, commit_id):
    """Run pytest and record success/stdout/stderr to tmp/pytest_results.json."""
    import requests  # NOTE(review): appears unused here; kept for parity

    logger.info("running pytest for %s %s" % (branch, commit_id))
    completed = subprocess.run(
        "pytest", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    summary = dict(
        success=completed.returncode == 0,
        stdout=completed.stdout.decode("ascii"),
        stderr=completed.stderr.decode("ascii"),
    )
    with open("tmp/pytest_results.json", "w") as f:
        f.write(json.dumps(summary))
    logger.info("finished pytest for %s %s" % (branch, commit_id))
@task
def ci_build(context):
    """Main CI entry point: build all architectures for the pending Travis
    build, run the test suites, and publish artifacts.

    Guarded by an exclusive flock on ~/.ci-build.lock so only one CI build
    runs per machine; a second invocation exits silently via BlockingIOError.
    """
    lock_f = open(os.path.join(os.environ["HOME"], ".ci-build.lock"), "w")
    arch_temp_dirs = dict()
    try:
        fcntl.flock(lock_f, fcntl.LOCK_EX | fcntl.LOCK_NB)
        build = pending_travis_build()
        skip_branches = ["vids", "video", "erick/cloudrendering"]
        if build and build["branch"] not in skip_branches:
            # disabling delete temporarily since it interferes with pip releases
            # pytest_s3_object(build["commit_id"]).delete()
            logger.info(
                "pending build for %s %s" % (build["branch"], build["commit_id"])
            )
            # Reset the working tree and check out the exact commit to build.
            clean()
            subprocess.check_call("git fetch", shell=True)
            subprocess.check_call("git checkout %s --" % build["branch"], shell=True)
            subprocess.check_call("git checkout -qf %s" % build["commit_id"], shell=True)
            private_scene_options = [False]
            procs = []
            build_archs = ["OSXIntel64", "Linux64"]
            # CloudRendering only supported with 2020.3.25
            # should change this in the future to automatically install
            # cloudrendering engine if available
            if _unity_version() == "2020.3.25f1":
                build_archs.append("CloudRendering")
            for include_private_scenes in private_scene_options:
                for arch in build_archs:
                    logger.info(
                        "starting build for %s %s %s"
                        % (arch, build["branch"], build["commit_id"])
                    )
                    # Each arch builds from its own clone of the unity project
                    # in a uniquely named temp dir.
                    temp_dir = arch_temp_dirs[arch] = os.path.join(os.environ["HOME"], "tmp/unity-%s-%s-%s-%s" % (arch, build["commit_id"], os.getpid(), random.randint(0, 2**32 - 1)))
                    os.makedirs(temp_dir)
                    logger.info( "copying unity data to %s" % (temp_dir,))
                    # -c uses MacOS clonefile
                    subprocess.check_call("cp -a -c unity %s" % temp_dir, shell=True)
                    logger.info( "completed unity data copy to %s" % (temp_dir,))
                    rdir = os.path.join(temp_dir, "unity/builds")
                    commit_build = ai2thor.build.Build(
                        arch,
                        build["commit_id"],
                        include_private_scenes=include_private_scenes,
                        releases_dir=rdir,
                    )
                    if commit_build.exists():
                        logger.info(
                            "found build for commit %s %s" % (build["commit_id"], arch)
                        )
                        # download the build so that we can run the tests
                        if arch == "OSXIntel64":
                            commit_build.download()
                    else:
                        # this is done here so that when a tag build request arrives and the commit_id has already
                        # been built, we avoid bootstrapping the cache since we short circuited on the line above
                        link_build_cache(temp_dir, arch, build["branch"])
                        # ci_build_arch(temp_dir, arch, build["commit_id"], include_private_scenes)
                        p = multiprocessing.Process(target=ci_build_arch, args=(temp_dir, arch, build["commit_id"], include_private_scenes,))
                        active_procs = lambda x: sum([p.is_alive() for p in x])
                        started = False
                        # Serialize builds: wait (up to 200 * 15s) until no
                        # other build process is alive before starting.
                        for _ in range(200):
                            if active_procs(procs) > 0:
                                logger.info("too many active procs - waiting before start %s " % arch)
                                time.sleep(15)
                                continue
                            else:
                                logger.info("starting build process for %s " % arch)
                                started = True
                                p.start()
                                # wait for Unity to start so that it can pick up the GICache config
                                # changes
                                time.sleep(30)
                                procs.append(p)
                                break
                        if not started:
                            logger.error("could not start build for %s" % arch)
            # the UnityLockfile is used as a trigger to indicate that Unity has closed
            # the project and we can run the unit tests
            # waiting for all builds to complete before starting tests
            for arch in build_archs:
                lock_file_path = os.path.join(arch_temp_dirs[arch], "unity/Temp/UnityLockfile")
                if os.path.isfile(lock_file_path):
                    logger.info("attempting to lock %s" % lock_file_path)
                    lock_file = os.open(lock_file_path, os.O_RDWR)
                    fcntl.lockf(lock_file, fcntl.LOCK_EX)
                    fcntl.lockf(lock_file, fcntl.LOCK_UN)
                    os.close(lock_file)
                    logger.info("obtained lock on %s" % lock_file_path)
            # don't run tests for a tag since results should exist
            # for the branch commit
            if build["tag"] is None:
                # its possible that the cache doesn't get linked if the builds
                # succeeded during an earlier run
                link_build_cache(arch_temp_dirs["OSXIntel64"], "OSXIntel64", build["branch"])
                # link builds directory so pytest can run
                logger.info("current directory pre-symlink %s" % os.getcwd())
                os.symlink(os.path.join(arch_temp_dirs["OSXIntel64"], "unity/builds"), "unity/builds")
                os.makedirs('tmp', exist_ok=True)
                # using threading here instead of multiprocessing since we must use the start_method of spawn, which
                # causes the tasks.py to get reloaded, which may be different on a branch from main
                utf_proc = threading.Thread(target=ci_test_utf, args=(build["branch"], build["commit_id"], arch_temp_dirs["OSXIntel64"]))
                utf_proc.start()
                procs.append(utf_proc)
                pytest_proc = threading.Thread(target=ci_pytest, args=(build["branch"], build["commit_id"]))
                pytest_proc.start()
                procs.append(pytest_proc)
            ## allow webgl to be force deployed with #webgl-deploy in the commit comment
            if (
                build["branch"] in ["main", "demo-updates"]
                and "#webgl-deploy" in git_commit_comment()
            ):
                ci_build_webgl(context, build["commit_id"])
            # Join both build processes and test threads before publishing.
            for p in procs:
                if p:
                    logger.info(
                        "joining proc %s for %s %s"
                        % (p, build["branch"], build["commit_id"])
                    )
                    p.join()
            if build["tag"] is None:
                ci_merge_push_pytest_results(context, build["commit_id"])
            # must have this after all the procs are joined
            # to avoid generating a _builds.py file that would affect pytest execution
            build_pip_commit(context)
            push_pip_commit(context)
            generate_pypi_index(context)
            # give the travis poller time to see the result
            for i in range(12):
                b = travis_build(build["id"])
                logger.info("build state for %s: %s" % (build["id"], b["state"]))
                if b["state"] != "started":
                    break
                time.sleep(10)
            logger.info("build complete %s %s" % (build["branch"], build["commit_id"]))
        fcntl.flock(lock_f, fcntl.LOCK_UN)
    except io.BlockingIOError as e:
        # another ci_build already holds the lock on this machine
        pass
    finally:
        for arch, temp_dir in arch_temp_dirs.items():
            logger.info("deleting temp dir %s" % temp_dir)
            shutil.rmtree(temp_dir)
        lock_f.close()
@task
def install_cloudrendering_engine(context, force=False):
    """Install the CloudRendering playback engine into the Unity editor.

    Mac only; with ``force=True`` an existing install is replaced.
    """
    if not sys.platform.startswith("darwin"):
        raise Exception("CloudRendering Engine can only be installed on Mac")
    target_base_dir = "/Applications/Unity/Hub/Editor/{}/PlaybackEngines".format(_unity_version())
    full_dir = os.path.join(target_base_dir, "CloudRendering")
    if os.path.isdir(full_dir):
        if not force:
            logger.info("skipping installation - CloudRendering engine already installed")
            return
        shutil.rmtree(full_dir)
        logger.info("CloudRendering engine already installed - removing due to force")
    print("packages/CloudRendering-%s.zip" % _unity_version())
    s3 = boto3.resource("s3")
    res = s3.Object(ai2thor.build.PRIVATE_S3_BUCKET, "packages/CloudRendering-%s.zip" % _unity_version()).get()
    data = res["Body"].read()
    zipfile.ZipFile(io.BytesIO(data)).extractall(target_base_dir)
@task
def ci_build_webgl(context, commit_id):
    """Build and deploy the WebGL demo for a main-branch commit."""
    branch = "main"
    logger.info("starting auto-build webgl build deploy %s %s" % (branch, commit_id))
    # linking here in the event we didn't link above since the builds had
    # already completed. Omitting this will cause the webgl build
    # to import all assets from scratch into a new unity/Library
    arch = "WebGL"
    set_gi_cache_folder(arch)
    link_build_cache(os.getcwd(), arch, branch)
    webgl_build_deploy_demo(context, verbose=True, content_addressable=False, force=True)
    logger.info("finished webgl build deploy %s %s" % (branch, commit_id))
    update_webgl_autodeploy_commit_id(commit_id)
def set_gi_cache_folder(arch):
    """Point Unity's GI cache at a per-architecture folder via plist edits."""
    gi_cache_folder = os.path.join(os.environ["HOME"], "GICache/%s" % arch)
    plist_path = os.path.join(os.environ["HOME"], "Library/Preferences/com.unity3d.UnityEditor5.x.plist")
    # done to avoid race conditions when modifying GICache from more than one build
    for plutil_cmd in (
        "plutil -replace GICacheEnableCustomPath -bool TRUE %s" % plist_path,
        "plutil -replace GICacheFolder -string '%s' %s" % (gi_cache_folder, plist_path),
        "plutil -replace GICacheMaximumSizeGB -integer 100 %s" % (plist_path,),
    ):
        subprocess.check_call(plutil_cmd, shell=True)
def ci_build_arch(root_dir, arch, commit_id, include_private_scenes=False):
    """Build one architecture inside *root_dir* and push its archive.

    On failure the exception text is recorded in the build log and the log
    is still pushed, so the failure is visible in CI output.
    """
    os.chdir(root_dir)
    unity_path = "unity"
    build_name = ai2thor.build.build_name(arch, commit_id, include_private_scenes)
    build_dir = os.path.join("builds", build_name)
    build_path = build_dir + ".zip"
    build_info = {"log": "%s.log" % (build_name,)}
    try:
        env = {}
        if include_private_scenes:
            env["INCLUDE_PRIVATE_SCENES"] = "true"
        set_gi_cache_folder(arch)
        _build(unity_path, arch, build_dir, build_name, env)
        logger.info("finished build for %s %s" % (arch, commit_id))
        archive_push(unity_path, build_path, build_dir, build_info, include_private_scenes)
    except Exception as e:
        print("Caught exception %s" % e)
        build_info["build_exception"] = "Exception building: %s" % e
        build_log_push(build_info, include_private_scenes)
@task
def poll_ci_build(context):
    """Poll S3 until all platform builds and the pytest results for the
    current commit are available; raise on build or test failure.
    """
    import requests.exceptions
    import requests
    import datetime

    commit_id = git_commit_id()
    start_datetime = datetime.datetime.utcnow()
    last_emit_time = 0
    # Phase 1: wait (up to 360 * 10s) for every platform's build log.
    for i in range(360):
        log_exist_count = 0
        # must emit something at least once every 10 minutes
        # otherwise travis will time out the build
        if (time.time() - last_emit_time) > 120:
            print(".", end="")
            last_emit_time = time.time()
        check_platforms = ai2thor.build.AUTO_BUILD_PLATFORMS
        for plat in check_platforms:
            commit_build = ai2thor.build.Build(plat, commit_id, False)
            try:
                res = requests.head(commit_build.log_url)
                if res.status_code == 200:
                    last_modified = datetime.datetime.strptime(res.headers['Last-Modified'], '%a, %d %b %Y %H:%M:%S GMT')
                    # if a build is restarted, a log from a previous build will exist
                    # but its last-modified date will precede the start datetime
                    if last_modified > start_datetime or commit_build.exists():
                        log_exist_count += 1
            # we observe errors when polling AWS periodically - we don't want these to stop
            # the build
            except requests.exceptions.ConnectionError as e:
                print("Caught exception %s" % e)
        if log_exist_count == len(check_platforms):
            break
        sys.stdout.flush()
        time.sleep(10)
    # A log can exist even when the build failed; verify the artifacts.
    for plat in ai2thor.build.AUTO_BUILD_PLATFORMS:
        commit_build = ai2thor.build.Build(plat, commit_id, False)
        if not commit_build.exists():
            print("Build log url: %s" % commit_build.log_url)
            raise Exception("Failed to build %s for commit: %s " % (plat.name(), commit_id))
    # Phase 2: wait (up to 30 * 10s) for the merged pytest results.
    pytest_missing = True
    for i in range(30):
        if (time.time() - last_emit_time) > 120:
            print(".", end="")
            last_emit_time = time.time()
        s3_obj = pytest_s3_object(commit_id)
        s3_pytest_url = "http://s3-us-west-2.amazonaws.com/%s/%s" % (
            s3_obj.bucket_name,
            s3_obj.key,
        )
        res = requests.get(s3_pytest_url)
        if res.status_code == 200:
            print("pytest url %s" % s3_pytest_url)
            pytest_missing = False
            pytest_result = res.json()
            print(pytest_result["stdout"])  # print so that it appears in travis log
            print(pytest_result["stderr"])
            if not pytest_result["success"]:
                raise Exception("pytest failure")
            break
        time.sleep(10)
    if pytest_missing:
        raise Exception("Missing pytest output")
@task
def build(context, local=False):
    """Build Unity players for every auto-build platform and push the archives.

    Each platform is built twice (with and without private scenes); the
    archive/upload step runs on a background thread per build.

    NOTE(review): ``local`` is accepted but never read in this body, and
    ``t.success`` is not a standard ``threading.Thread`` attribute —
    presumably ``archive_push`` sets it on the current thread; confirm
    before relying on the failure check at the end.
    """
    version = datetime.datetime.now().strftime("%Y%m%d%H%M")
    builds = {"Docker": {"tag": version}}
    threads = []
    for include_private_scenes in (True, False):
        for plat in ai2thor.build.AUTO_BUILD_PLATFORMS:
            env = {}
            if include_private_scenes:
                env["INCLUDE_PRIVATE_SCENES"] = "true"
            unity_path = "unity"
            build_name = ai2thor.build.build_name(plat.name(), version, include_private_scenes)
            build_dir = os.path.join("builds", build_name)
            build_path = build_dir + ".zip"
            build_info = builds[plat.name()] = {}
            build_info["log"] = "%s.log" % (build_name,)
            # Build synchronously, then archive/upload in the background.
            _build(unity_path, plat.name(), build_dir, build_name, env=env)
            t = threading.Thread(
                target=archive_push,
                args=(
                    unity_path,
                    build_path,
                    build_dir,
                    build_info,
                    include_private_scenes,
                ),
            )
            t.start()
            threads.append(t)
    # dp.join()
    # if dp.exitcode != 0:
    #     raise Exception("Exception with docker build")
    for t in threads:
        t.join()
        if not t.success:
            raise Exception("Error with thread")
    generate_quality_settings(context)
@task
def interact(
    ctx,
    scene,
    editor_mode=False,
    local_build=False,
    image=False,
    depth_image=False,
    class_image=False,
    object_image=False,
    metadata=False,
    robot=False,
    port=8200,
    host="127.0.0.1",
    image_directory=".",
    width=300,
    height=300,
    include_private_scenes=False,
    noise=False,
):
    """Run an interactive controller session against *scene*.

    Uses the simulator controller, or the robot controller when ``robot`` is
    true.  The ``image``/``depth_image``/``class_image``/``object_image``/
    ``metadata`` flags select which frames are rendered and written to
    ``image_directory`` (wiped and recreated unless it is ".").
    """
    import ai2thor.controller
    import ai2thor.robot_controller
    # A non-default image directory is recreated from scratch each run.
    if image_directory != ".":
        if os.path.exists(image_directory):
            shutil.rmtree(image_directory)
        os.makedirs(image_directory)
    if not robot:
        env = ai2thor.controller.Controller(
            host=host,
            port=port,
            width=width,
            height=height,
            local_build=local_build,
            image_dir=image_directory,
            # In editor mode we attach to an already-running Unity editor.
            start_unity=False if editor_mode else True,
            save_image_per_frame=True,
            include_private_scenes=include_private_scenes,
            add_depth_noise=noise,
            scene=scene,
        )
    else:
        env = ai2thor.robot_controller.Controller(
            host=host,
            port=port,
            width=width,
            height=height,
            image_dir=image_directory,
            save_image_per_frame=True,
        )
    env.reset(scene)
    initialize_event = env.step(
        dict(
            action="Initialize",
            gridSize=0.25,
            renderInstanceSegmentation=object_image,
            renderSemanticSegmentation=class_image,
            renderDepthImage=depth_image,
        )
    )
    from ai2thor.interact import InteractiveControllerPrompt
    # Persist the frames produced by initialization before handing over control.
    InteractiveControllerPrompt.write_image(
        initialize_event,
        image_directory,
        "_init",
        image_per_frame=True,
        semantic_segmentation_frame=class_image,
        instance_segmentation_frame=object_image,
        color_frame=image,
        depth_frame=depth_image,
        metadata=metadata,
    )
    env.interact(
        semantic_segmentation_frame=class_image,
        instance_segmentation_frame=object_image,
        depth_frame=depth_image,
        color_frame=image,
        metadata=metadata,
    )
    env.stop()
@task
def get_depth(
    ctx,
    scene=None,
    image=False,
    depth_image=False,
    class_image=False,
    object_image=False,
    metadata=False,
    port=8200,
    host="127.0.0.1",
    image_directory=".",
    number=1,
    local_build=False,
    teleport=None,
    rotation=0,
):
    """Capture depth (and optionally other) frames from a scene or real robot.

    When ``scene`` is None a robot controller is used; otherwise the simulator
    is started, the scene is loaded, and the agent is optionally teleported.
    ``teleport`` is a comma-separated string of coordinates consumed in
    x, z, y order (see the index assignments below).  ``number`` zero-motion
    MoveAhead steps are captured afterwards.  Frames are written to
    ``image_directory`` (wiped and recreated unless it is ".").
    """
    import ai2thor.controller
    import ai2thor.robot_controller
    if image_directory != ".":
        if os.path.exists(image_directory):
            shutil.rmtree(image_directory)
        os.makedirs(image_directory)
    if scene is None:
        env = ai2thor.robot_controller.Controller(
            host=host,
            port=port,
            width=600,
            height=600,
            image_dir=image_directory,
            save_image_per_frame=True,
        )
    else:
        env = ai2thor.controller.Controller(
            width=600, height=600, local_build=local_build
        )
    if scene is not None:
        env.reset(scene)
    initialize_event = env.step(
        dict(
            action="Initialize",
            gridSize=0.25,
            renderInstanceSegmentation=object_image,
            renderSemanticSegmentation=class_image,
            renderDepthImage=depth_image,
            agentMode="locobot",
            fieldOfView=59,
            continuous=True,
            snapToGrid=False,
        )
    )
    from ai2thor.interact import InteractiveControllerPrompt
    if scene is not None:
        teleport_arg = dict(
            action="TeleportFull", y=0.9010001, rotation=dict(x=0, y=rotation, z=0)
        )
        if teleport is not None:
            teleport = [float(pos) for pos in teleport.split(",")]
            t_size = len(teleport)
            # Components are taken as x, z, y — note the z-before-y order.
            if 1 <= t_size:
                teleport_arg["x"] = teleport[0]
            if 2 <= t_size:
                teleport_arg["z"] = teleport[1]
            if 3 <= t_size:
                teleport_arg["y"] = teleport[2]
        evt = env.step(teleport_arg)
        InteractiveControllerPrompt.write_image(
            evt,
            image_directory,
            "_{}".format("teleport"),
            image_per_frame=True,
            semantic_segmentation_frame=class_image,
            instance_segmentation_frame=object_image,
            color_frame=image,
            depth_frame=depth_image,
            metadata=metadata,
        )
    InteractiveControllerPrompt.write_image(
        initialize_event,
        image_directory,
        "_init",
        image_per_frame=True,
        semantic_segmentation_frame=class_image,
        instance_segmentation_frame=object_image,
        color_frame=image,
        depth_frame=depth_image,
        metadata=metadata,
    )
    # moveMagnitude=0.0: capture frames without actually moving the agent.
    for i in range(number):
        event = env.step(action="MoveAhead", moveMagnitude=0.0)
        InteractiveControllerPrompt.write_image(
            event,
            image_directory,
            "_{}".format(i),
            image_per_frame=True,
            semantic_segmentation_frame=class_image,
            instance_segmentation_frame=object_image,
            color_frame=image,
            depth_frame=depth_image,
            metadata=metadata,
        )
    env.stop()
@task
def inspect_depth(
    ctx, directory, all=False, indices=None, jet=False, under_score=False
):
    """Interactively inspect saved depth frames in *directory*.

    Loads each selected ``depth_raw*.npy`` file, renders it as grayscale (or
    a jet colormap when ``jet`` is set), and opens an OpenCV window where a
    left click prints the raw depth value under the cursor.  ``indices`` is a
    comma-separated list of frame indices; ``all`` (shadows the builtin, but
    the name is part of the CLI interface) selects every frame.
    """
    import numpy as np
    import cv2
    import glob
    under_prefix = "_" if under_score else ""
    # Raw string: "\." is an invalid escape sequence in a plain string and
    # raises a SyntaxWarning (eventually an error) on modern Python.
    regex_str = r"depth{}(.*)\.png".format(under_prefix)
    def sort_key_function(name):
        # Order frames numerically by the index embedded in the file name;
        # non-numeric tags (e.g. "teleport") sort first via -1.
        split_name = name.split("/")
        x = re.search(regex_str, split_name[len(split_name) - 1]).group(1)
        try:
            val = int(x)
            return val
        except ValueError:
            return -1
    if indices is None or all:
        images = sorted(
            glob.glob("{}/depth{}*.png".format(directory, under_prefix)),
            key=sort_key_function,
        )
        print(images)
    else:
        images = ["depth{}{}.png".format(under_prefix, i) for i in indices.split(",")]
    for depth_filename in images:
        # depth_filename = os.path.join(directory, "depth_{}.png".format(index))
        split_fn = depth_filename.split("/")
        index = re.search(regex_str, split_fn[len(split_fn) - 1]).group(1)
        print("index {}".format(index))
        print("Inspecting: '{}'".format(depth_filename))
        depth_raw_filename = os.path.join(
            directory, "depth_raw{}{}.npy".format("_" if under_score else "", index)
        )
        raw_depth = np.load(depth_raw_filename)
        if jet:
            # Normalize to 0..255 before applying the jet colormap.
            mn = np.min(raw_depth)
            mx = np.max(raw_depth)
            print("min depth value: {}, max depth: {}".format(mn, mx))
            norm = (((raw_depth - mn).astype(np.float32) / (mx - mn)) * 255.0).astype(
                np.uint8
            )
            img = cv2.applyColorMap(norm, cv2.COLORMAP_JET)
        else:
            grayscale = (
                255.0 / raw_depth.max() * (raw_depth - raw_depth.min())
            ).astype(np.uint8)
            print("max {} min {}".format(raw_depth.max(), raw_depth.min()))
            img = grayscale
        print(raw_depth.shape)
        def inspect_pixel(event, x, y, flags, param):
            # Mouse callback: report the raw depth at the clicked pixel.
            if event == cv2.EVENT_LBUTTONDOWN:
                print("Pixel at x: {}, y: {} ".format(y, x))
                print(raw_depth[y][x])
        cv2.namedWindow("image")
        cv2.setMouseCallback("image", inspect_pixel)
        cv2.imshow("image", img)
        cv2.waitKey(0)
@task
def real_2_sim(
    ctx, source_dir, index, scene, output_dir, rotation=0, local_build=False, jet=False
):
    """Replay a recorded real-robot observation inside the simulator.

    Reads ``metadata_{index}.json`` from *source_dir*, converts the recorded
    agent position to simulator coordinates, teleports the simulated agent
    there via ``get_depth``, and shows the real and simulated color/depth
    frames side by side for comparison.
    """
    import cv2
    from ai2thor.util.transforms import transform_real_2_sim
    depth_metadata_fn = os.path.join(source_dir, "metadata_{}.json".format(index))
    color_real_fn = os.path.join(source_dir, "color_{}.png".format(index))
    # Fix: the literal has no placeholder, so the previous
    # "color_teleport.png".format(index) call was a no-op; drop it.
    color_sim_fn = os.path.join(output_dir, "color_teleport.png")
    with open(depth_metadata_fn, "r") as f:
        metadata = json.load(f)
    pos = metadata["agent"]["position"]
    sim_pos = transform_real_2_sim(pos)
    # get_depth consumes teleport components in x, z, y order.
    teleport_arg = "{},{},{}".format(sim_pos["x"], sim_pos["z"], sim_pos["y"])
    print(sim_pos)
    print(teleport_arg)
    inspect_depth(ctx, source_dir, indices=index, under_score=True, jet=jet)
    get_depth(
        ctx,
        scene=scene,
        image=True,
        depth_image=True,
        class_image=False,
        object_image=False,
        metadata=True,
        image_directory=output_dir,
        number=1,
        local_build=local_build,
        teleport=teleport_arg,
        rotation=rotation,
    )
    im = cv2.imread(color_real_fn)
    cv2.imshow("color_real.png", im)
    im2 = cv2.imread(color_sim_fn)
    cv2.imshow("color_sim.png", im2)
    inspect_depth(ctx, output_dir, indices="teleport", under_score=True, jet=jet)
@task
def noise_depth(ctx, directory, show=False):
    """Estimate a corner-noise profile from saved depth images.

    Thresholds a depth_*.png from *directory*, finds the connected components
    touching the four image corners, and saves the per-row pixel counts of
    those components to ``depth_noise.npy``.

    NOTE(review): the loop body ends with an unconditional ``break``, so only
    the first image found is analyzed — confirm whether all images were
    intended to contribute.
    """
    import glob
    import cv2
    import numpy as np
    def imshow_components(labels):
        # Map component labels to hue val
        label_hue = np.uint8(179 * labels / np.max(labels))
        blank_ch = 255 * np.ones_like(label_hue)
        labeled_img = cv2.merge([label_hue, blank_ch, blank_ch])
        # cvt to BGR for display
        labeled_img = cv2.cvtColor(labeled_img, cv2.COLOR_HSV2BGR)
        # set bg label to black
        labeled_img[label_hue == 0] = 0
        if show:
            cv2.imshow("labeled.png", labeled_img)
            cv2.waitKey()
    images = glob.glob("{}/depth_*.png".format(directory))
    indices = []
    for image_file in images:
        print(image_file)
        grayscale_img = cv2.imread(image_file, 0)
        img = grayscale_img
        img_size = img.shape
        # Binarize: pixels darker than 30 become foreground (inverted threshold).
        img = cv2.threshold(img, 30, 255, cv2.THRESH_BINARY_INV)[1]
        ret, labels = cv2.connectedComponents(img)
        print("Components: {}".format(ret))
        imshow_components(labels)
        print(img_size[0])
        # Pixel indices of the component each of the four corners belongs to.
        indices_top_left = np.where(labels == labels[0][0])
        indices_top_right = np.where(labels == labels[0][img_size[1] - 1])
        indices_bottom_left = np.where(labels == labels[img_size[0] - 1][0])
        indices_bottom_right = np.where(
            labels == labels[img_size[0] - 1][img_size[1] - 1]
        )
        indices = [
            indices_top_left,
            indices_top_right,
            indices_bottom_left,
            indices_bottom_right,
        ]
        # Visualization: paint the corner components black on a white canvas.
        blank_image = np.zeros((300, 300, 1), np.uint8)
        blank_image.fill(255)
        blank_image[indices_top_left] = 0
        blank_image[indices_top_right] = 0
        blank_image[indices_bottom_left] = 0
        blank_image[indices_bottom_right] = 0
        if show:
            cv2.imshow("labeled.png", blank_image)
            cv2.waitKey()
        break
    compressed = []
    for indices_arr in indices:
        # Count pixels per row for each corner component.
        unique_e, counts = np.unique(indices_arr[0], return_counts=True)
        compressed.append(counts)
    np.save("depth_noise", compressed)
@task
def release(ctx):
    """Tag the current commit with the package version, push it, and upload
    the built distributions to PyPI via twine.

    Aborts if `git status` reports any tracked local modifications
    (untracked "??" entries are tolerated).
    """
    status = subprocess.check_output("git status --porcelain", shell=True).decode("ASCII")
    for entry in status.split("\n"):
        entry = entry.strip()
        # Untracked files and blank lines are fine; anything else blocks release.
        if entry and not entry.startswith("??"):
            raise Exception(
                "Found locally modified changes from 'git status' - please commit and push or revert"
            )
    import ai2thor._version
    version = ai2thor._version.__version__
    tag = "v" + version
    subprocess.check_call('git tag -a %s -m "release %s"' % (tag, tag), shell=True)
    subprocess.check_call("git push origin main --tags", shell=True)
    subprocess.check_call(
        "twine upload -u ai2thor dist/ai2thor-{ver}-* dist/ai2thor-{ver}.*".format(
            ver=version
        ),
        shell=True,
    )
@task
def check_visible_objects_closed_receptacles(ctx, start_scene, end_scene):
    """Scan FloorPlan scenes for openable receptacles whose contents remain
    visible when the receptacle is closed.

    For each scene in [start_scene, end_scene): picks up every pickupable
    object, chooses a small "probe" object (Mug/CellPhone/SoapBar), then from
    every reachable grid point and every rotation/horizon combination places
    the probe into each visible openable receptacle and checks whether it can
    still be seen.  Offending receptacle ids are printed.
    """
    from itertools import product
    import ai2thor.controller
    controller = ai2thor.controller.BFSController()
    controller.start()
    for i in range(int(start_scene), int(end_scene)):
        print("working on floorplan %s" % i)
        controller.search_all_closed("FloorPlan%s" % i)
        visibility_object_id = None
        visibility_object_types = ["Mug", "CellPhone", "SoapBar"]
        for obj in controller.last_event.metadata["objects"]:
            if obj["pickupable"]:
                controller.step(
                    action=dict(
                        action="PickupObject",
                        objectId=obj["objectId"],
                        forceVisible=True,
                    )
                )
            # Remember the first small probe object encountered.
            if (
                visibility_object_id is None
                and obj["objectType"] in visibility_object_types
            ):
                visibility_object_id = obj["objectId"]
        if visibility_object_id is None:
            raise Exception("Couldn't get a visibility_object")
        bad_receptacles = set()
        for point in controller.grid_points:
            controller.step(
                dict(action="Teleport", x=point["x"], y=point["y"], z=point["z"]),
                raise_for_failure=True,
            )
            for rot, hor in product(controller.rotations, controller.horizons):
                event = controller.step(
                    dict(action="RotateLook", rotation=rot, horizon=hor),
                    raise_for_failure=True,
                )
                for j in event.metadata["objects"]:
                    if j["receptacle"] and j["visible"] and j["openable"]:
                        # Place the probe in the (closed) receptacle ...
                        controller.step(
                            action=dict(
                                action="Replace",
                                forceVisible=True,
                                pivot=0,
                                receptacleObjectId=j["objectId"],
                                objectId=visibility_object_id,
                            )
                        )
                        replace_success = controller.last_event.metadata[
                            "lastActionSuccess"
                        ]
                        if replace_success:
                            # ... and if it can still be seen, the receptacle leaks.
                            if (
                                controller.is_object_visible(visibility_object_id)
                                and j["objectId"] not in bad_receptacles
                            ):
                                bad_receptacles.add(j["objectId"])
                                print("Got bad receptacle: %s" % j["objectId"])
                                # import cv2
                                # cv2.imshow('aoeu', controller.last_event.cv2image())
                                # cv2.waitKey(0)
                            # Take the probe back for the next receptacle.
                            controller.step(
                                action=dict(
                                    action="PickupObject",
                                    objectId=visibility_object_id,
                                    forceVisible=True,
                                )
                            )
@task
def benchmark(
    ctx,
    screen_width=600,
    screen_height=600,
    editor_mode=False,
    out="benchmark.json",
    verbose=False,
    local_build=False,
    commit_id=ai2thor.build.COMMIT_ID,
):
    """Benchmark action throughput across the iTHOR rooms.

    For every scene, runs 100 random actions from each action group (move,
    rotate, look, all) and records the resulting framerate (actions/second)
    per group and averaged per scene; the result is written as JSON to *out*.
    """
    import ai2thor.controller
    import random
    move_actions = ["MoveAhead", "MoveBack", "MoveLeft", "MoveRight"]
    rotate_actions = ["RotateRight", "RotateLeft"]
    look_actions = ["LookUp", "LookDown"]
    all_actions = move_actions + rotate_actions + look_actions
    def test_routine(env, test_actions, n=100):
        # Average wall-clock seconds per step over n random actions.
        average_frame_time = 0
        for i in range(n):
            action = random.choice(test_actions)
            start = time.time()
            env.step(dict(action=action))
            end = time.time()
            frame_time = end - start
            average_frame_time += frame_time
        average_frame_time = average_frame_time / float(n)
        return average_frame_time
    def benchmark_actions(env, action_name, actions, n=100):
        if verbose:
            print("--- Actions {}".format(actions))
        # Bug fix: n was previously dropped here, so test_routine always ran
        # its default of 100 samples regardless of the caller's n.
        frame_time = test_routine(env, actions, n)
        if verbose:
            print("{} average: {}".format(action_name, 1 / frame_time))
        # Returns framerate (actions per second), not frame time.
        return 1 / frame_time
    args = {}
    if editor_mode:
        args["port"] = 8200
        args["start_unity"] = False
    elif local_build:
        args["local_build"] = local_build
    else:
        args["commit_id"] = commit_id
    env = ai2thor.controller.Controller(
        width=screen_width, height=screen_height, **args
    )
    # Kitchens: FloorPlan1 - FloorPlan30
    # Living rooms: FloorPlan201 - FloorPlan230
    # Bedrooms: FloorPlan301 - FloorPlan330
    # Bathrooms: FloorPLan401 - FloorPlan430
    room_ranges = [(1, 30), (201, 230), (301, 330), (401, 430)]
    benchmark_map = {"scenes": {}}
    total_average_ft = 0
    scene_count = 0
    print("Start loop")
    for room_range in room_ranges:
        # Bug fix: the ranges above are inclusive per the comments, but
        # range() excludes its upper bound, silently skipping the last room
        # of every group (e.g. FloorPlan30); hence the +1.
        for i in range(room_range[0], room_range[1] + 1):
            scene = "FloorPlan{}_physics".format(i)
            scene_benchmark = {}
            if verbose:
                print("Loading scene {}".format(scene))
            # env.reset(scene)
            # NOTE(review): reset is commented out, so each iteration
            # re-initializes whatever scene is currently loaded rather than
            # the named one — confirm whether this is intended.
            env.step(dict(action="Initialize", gridSize=0.25))
            if verbose:
                print("------ {}".format(scene))
            sample_number = 100
            action_tuples = [
                ("move", move_actions, sample_number),
                ("rotate", rotate_actions, sample_number),
                ("look", look_actions, sample_number),
                ("all", all_actions, sample_number),
            ]
            scene_average_fr = 0
            for action_name, actions, n in action_tuples:
                ft = benchmark_actions(env, action_name, actions, n)
                scene_benchmark[action_name] = ft
                scene_average_fr += ft
            scene_average_fr = scene_average_fr / float(len(action_tuples))
            total_average_ft += scene_average_fr
            if verbose:
                print("Total average frametime: {}".format(scene_average_fr))
            benchmark_map["scenes"][scene] = scene_benchmark
            scene_count += 1
    benchmark_map["average_framerate_seconds"] = total_average_ft / scene_count
    with open(out, "w") as f:
        f.write(json.dumps(benchmark_map, indent=4, sort_keys=True))
    env.stop()
def list_objects_with_metadata(bucket):
    """Return a mapping of S3 key -> object metadata for every object in
    *bucket*, following list_objects_v2 pagination to the end."""
    client = boto3.client("s3")
    collected = {}
    token = None
    while True:
        kwargs = {"Bucket": bucket}
        if token:
            kwargs["ContinuationToken"] = token
        page = client.list_objects_v2(**kwargs)
        for entry in page.get("Contents", []):
            collected[entry["Key"]] = entry
        token = page.get("NextContinuationToken")
        if token is None:
            break
    return collected
def s3_etag_data(data):
    """Return the S3-style ETag for *data*: its MD5 hex digest wrapped in
    double quotes (matching the ETag format of non-multipart uploads)."""
    return '"' + hashlib.md5(data).hexdigest() + '"'
cache_seconds = 31536000
@task
def webgl_deploy(
    ctx,
    bucket=ai2thor.build.PUBLIC_WEBGL_S3_BUCKET,
    prefix="local",
    source_dir="builds",
    target_dir="",
    verbose=False,
    force=False,
    extensions_no_cache="",
):
    """Upload a WebGL build tree to S3 with per-extension content types and
    cache headers.

    Files whose MD5 ETag already matches the object in the bucket are skipped
    unless ``force`` is set.  ``extensions_no_cache`` is a comma-separated
    list of additional extensions to serve with no-cache headers.  When
    ``prefix`` is None, ``source_dir`` is uploaded directly; otherwise the
    local WebGL build path for ``prefix`` is resolved first.
    """
    from pathlib import Path
    from os.path import isfile, join, isdir
    content_types = {
        ".js": "application/javascript; charset=utf-8",
        ".html": "text/html; charset=utf-8",
        ".ico": "image/x-icon",
        ".svg": "image/svg+xml; charset=utf-8",
        ".css": "text/css; charset=utf-8",
        ".png": "image/png",
        ".txt": "text/plain",
        ".jpg": "image/jpeg",
        ".wasm": "application/wasm",
        ".data": "application/octet-stream",
        ".unityweb": "application/octet-stream",
        ".json": "application/json",
    }
    content_encoding = {".unityweb": "gzip"}
    bucket_name = bucket
    s3 = boto3.resource("s3")
    current_objects = list_objects_with_metadata(bucket_name)
    no_cache_extensions = {".txt", ".html", ".json", ".js"}
    # Bug fix: set.union returns a new set; the previous code discarded the
    # result, so extensions_no_cache never had any effect.
    no_cache_extensions = no_cache_extensions.union(set(extensions_no_cache.split(",")))
    def walk_recursive(path, func, parent_dir=""):
        # Apply func(local_path, s3_key) to every file beneath path.
        for file_name in os.listdir(path):
            f_path = join(path, file_name)
            relative_path = join(parent_dir, file_name)
            if isfile(f_path):
                key = Path(join(target_dir, relative_path))
                func(f_path, key.as_posix())
            elif isdir(f_path):
                walk_recursive(f_path, func, relative_path)
    def upload_file(f_path, key):
        _, ext = os.path.splitext(f_path)
        if verbose:
            print("'{}'".format(key))
        with open(f_path, "rb") as f:
            file_data = f.read()
            etag = s3_etag_data(file_data)
            kwargs = {}
            if ext in content_encoding:
                kwargs["ContentEncoding"] = content_encoding[ext]
            # Skip unchanged files unless a forced upload was requested.
            if (
                not force
                and key in current_objects
                and etag == current_objects[key]["ETag"]
            ):
                if verbose:
                    print("ETag match - skipping %s" % key)
                return
            if ext in content_types:
                cache = (
                    "no-cache, no-store, must-revalidate"
                    if ext in no_cache_extensions
                    else "public, max-age={}".format(cache_seconds)
                )
                now = datetime.datetime.utcnow()
                expires = (
                    now
                    if ext == ".html" or ext == ".txt"
                    else now + datetime.timedelta(seconds=cache_seconds)
                )
                s3.Object(bucket_name, key).put(
                    Body=file_data,
                    ACL="public-read",
                    ContentType=content_types[ext],
                    CacheControl=cache,
                    Expires=expires,
                    **kwargs,
                )
            else:
                if verbose:
                    print(
                        "Warning: Content type for extension '{}' not defined,"
                        " uploading with no content type".format(ext)
                    )
                # Bug fix: f.read() here returned b"" because the file had
                # already been fully consumed above — upload the bytes read.
                s3.Object(bucket_name, key).put(Body=file_data, ACL="public-read")
    if prefix is not None:
        build_path = _webgl_local_build_path(prefix, source_dir)
    else:
        build_path = source_dir
    if verbose:
        print("Build path: '{}'".format(build_path))
        print("Uploading...")
    walk_recursive(build_path, upload_file)
@task
def webgl_build_deploy_demo(ctx, verbose=False, force=False, content_addressable=False):
    """Build and deploy the WebGL demo sites.

    First builds a hand-picked selection of scenes and deploys them to the
    bucket's "demo" directory, then builds every iTHOR and RoboTHOR scene
    and deploys the full set under "full".
    """
    # Main demo
    demo_selected_scene_indices = [
        1,
        3,
        7,
        29,
        30,
        204,
        209,
        221,
        224,
        227,
        301,
        302,
        308,
        326,
        330,
        401,
        403,
        411,
        422,
        430,
    ]
    scenes = ["FloorPlan{}_physics".format(x) for x in demo_selected_scene_indices]
    webgl_build(
        ctx,
        scenes=",".join(scenes),
        directory="builds/demo",
        content_addressable=content_addressable,
    )
    webgl_deploy(
        ctx, source_dir="builds/demo", target_dir="demo", verbose=verbose, force=force
    )
    if verbose:
        print("Deployed selected scenes to bucket's 'demo' directory")
    # Full framework demo
    kitchens = [f"FloorPlan{i}_physics" for i in range(1, 31)]
    living_rooms = [f"FloorPlan{200 + i}_physics" for i in range(1, 31)]
    bedrooms = [f"FloorPlan{300 + i}_physics" for i in range(1, 31)]
    bathrooms = [f"FloorPlan{400 + i}_physics" for i in range(1, 31)]
    robothor_train = [
        f"FloorPlan_Train{i}_{j}" for i in range(1, 13) for j in range(1, 6)
    ]
    robothor_val = [f"FloorPlan_Val{i}_{j}" for i in range(1, 4) for j in range(1, 6)]
    scenes = (
        kitchens + living_rooms + bedrooms + bathrooms + robothor_train + robothor_val
    )
    webgl_build(
        ctx,
        scenes=",".join(scenes),
        content_addressable=content_addressable,
    )
    webgl_deploy(ctx, verbose=verbose, force=force, target_dir="full")
    if verbose:
        print("Deployed all scenes to bucket's root.")
def current_webgl_autodeploy_commit_id():
    """Return the commit id recorded in autodeploy.json in the public WebGL
    bucket, or None when the object does not exist."""
    s3 = boto3.resource("s3")
    try:
        obj = s3.Object(ai2thor.build.PUBLIC_WEBGL_S3_BUCKET, "autodeploy.json").get()
        return json.loads(obj["Body"].read())["commit_id"]
    except botocore.exceptions.ClientError as e:
        # A missing key simply means no autodeploy has happened yet.
        if e.response["Error"]["Code"] == "NoSuchKey":
            return None
        raise e
def update_webgl_autodeploy_commit_id(commit_id):
    """Record *commit_id* (with the current timestamp) in autodeploy.json in
    the public WebGL bucket."""
    payload = json.dumps(dict(timestamp=time.time(), commit_id=commit_id))
    bucket_object = boto3.resource("s3").Object(
        ai2thor.build.PUBLIC_WEBGL_S3_BUCKET, "autodeploy.json"
    )
    bucket_object.put(
        Body=payload,
        ContentType="application/json",
    )
@task
def webgl_deploy_all(ctx, verbose=False, individual_rooms=False):
    """Build and deploy WebGL builds for every room category.

    With ``individual_rooms`` each room gets its own build and S3 directory;
    otherwise one build per category is produced from its room range.

    NOTE(review): ``range(room_range[0], room_range[1])`` excludes the upper
    bound, so the last room of each category (e.g. FloorPlan30) is skipped in
    the individual_rooms path — confirm whether this is intended.
    """
    rooms = {
        "kitchens": (1, 30),
        "livingRooms": (201, 230),
        "bedrooms": (301, 330),
        "bathrooms": (401, 430),
        "foyers": (501, 530),
    }
    for key, room_range in rooms.items():
        range_str = "{}-{}".format(room_range[0], room_range[1])
        if verbose:
            print("Building for rooms: {}".format(range_str))
        build_dir = "builds/{}".format(key)
        if individual_rooms:
            for i in range(room_range[0], room_range[1]):
                floorPlanName = "FloorPlan{}_physics".format(i)
                target_s3_dir = "{}/{}".format(key, floorPlanName)
                build_dir = "builds/{}".format(target_s3_dir)
                webgl_build(ctx, scenes=floorPlanName, directory=build_dir)
                webgl_deploy(
                    ctx, source_dir=build_dir, target_dir=target_s3_dir, verbose=verbose
                )
        else:
            webgl_build(ctx, room_ranges=range_str, directory=build_dir)
            webgl_deploy(ctx, source_dir=build_dir, target_dir=key, verbose=verbose)
@task
def webgl_s3_deploy(
    ctx, bucket, target_dir, scenes="", verbose=False, all=False, deploy_skip=False
):
    """
    Builds and deploys a WebGL unity site
    :param ctx: invoke context
    :param bucket: Target s3 bucket
    :param target_dir: Target directory in bucket
    :param scenes: String of scene numbers to include in the build as a comma separated list e.g. "4,6,230"
    :param verbose: verbose build
    :param all: overrides 'scenes' parameter and builds and deploys all separate rooms
    :param deploy_skip: Whether to skip deployment and do build only.
    :return:
    """
    rooms = {
        "kitchens": (1, 30),
        "livingRooms": (201, 230),
        "bedrooms": (301, 330),
        "bathrooms": (401, 430),
    }
    if all:
        # NOTE(review): range() excludes room_range[1], so the last room of
        # each category is omitted — confirm whether this is intended.
        flatten = lambda l: [item for sublist in l for item in sublist]
        room_numbers = flatten(
            [
                [i for i in range(room_range[0], room_range[1])]
                for key, room_range in rooms.items()
            ]
        )
    else:
        room_numbers = [s.strip() for s in scenes.split(",")]
    if verbose:
        print("Rooms in build: '{}'".format(room_numbers))
    for i in room_numbers:
        floor_plan_name = "FloorPlan{}_physics".format(i)
        if verbose:
            print("Building room '{}'...".format(floor_plan_name))
        target_s3_dir = "{}/{}".format(target_dir, floor_plan_name)
        build_dir = "builds/{}".format(target_s3_dir)
        webgl_build(
            ctx, scenes=floor_plan_name, directory=build_dir, crowdsource_build=True
        )
        if verbose:
            print("Deploying room '{}'...".format(floor_plan_name))
        if not deploy_skip:
            webgl_deploy(
                ctx,
                bucket=bucket,
                source_dir=build_dir,
                target_dir=target_s3_dir,
                verbose=verbose,
                extensions_no_cache=".css",
            )
@task
def webgl_site_deploy(
    context,
    template_name,
    output_dir,
    bucket,
    unity_build_dir="",
    s3_target_dir="",
    force=False,
    verbose=False,
):
    """Copy a WebGL site (an existing Unity build, or the named template) into
    *output_dir* — skipping Unity .meta files — and deploy it to S3."""
    from pathlib import Path
    from os.path import isfile, join
    template_dir = Path("unity/Assets/WebGLTemplates/{}".format(template_name))
    # Start from a clean output directory; copytree creates it.
    if os.path.exists(output_dir):
        shutil.rmtree(output_dir)
    # os.mkdir(output_dir)
    def skip_meta_files(d, files):
        # copytree ignore callback: drop Unity .meta sidecar files.
        return [f for f in files if isfile(join(d, f)) and f.endswith(".meta")]
    source_tree = unity_build_dir if unity_build_dir != "" else template_dir
    shutil.copytree(source_tree, output_dir, ignore=skip_meta_files)
    # shutil.copytree(os.path.join(unity_build_dir, "Build"), os.path.join(output_dir, "Build"), ignore=ignore_func)
    webgl_deploy(
        context,
        bucket=bucket,
        prefix=None,
        source_dir=output_dir,
        target_dir=s3_target_dir,
        verbose=verbose,
        force=force,
        extensions_no_cache=".css",
    )
@task
def mock_client_request(context):
    """Send a single MoveAhead step to a server on 127.0.0.1:9200 and display
    the frame it returns (msgpack payload, raw RGB bytes)."""
    import msgpack
    import numpy as np
    import requests
    import cv2
    r = requests.post(
        "http://127.0.0.1:9200/step", json=dict(action="MoveAhead", sequenceId=1)
    )
    payload = msgpack.unpackb(r.content, raw=False)
    metadata = payload["metadata"]["agents"][0]
    # The frame arrives as raw bytes; reshape to (H, W, 3) from the reported screen size.
    image = np.frombuffer(payload["frames"][0], dtype=np.uint8).reshape(
        metadata["screenHeight"], metadata["screenWidth"], 3
    )
    pprint.pprint(metadata)
    cv2.imshow("aoeu", image)
    cv2.waitKey(1000)
@task
def start_mock_real_server(context):
    """Create a 300x300 mock real-robot server, print its address, and start it."""
    import ai2thor.mock_real_server
    m = ai2thor.mock_real_server.MockServer(height=300, width=300)
    print("Started mock server on port: http://" + m.host + ":" + str(m.port))
    m.start()
@task
def create_robothor_dataset(
    context,
    local_build=False,
    editor_mode=False,
    width=300,
    height=300,
    output="robothor-dataset.json",
    intermediate_directory=".",
    visibility_distance=1.0,
    objects_filter=None,
    scene_filter=None,
    filter_file=None,
):
    """
    Creates a dataset for the robothor challenge in `intermediate_directory`
    named `robothor-dataset.json`

    For every (scene, target object type) pair it samples reachable starting
    points, keeps those with a valid shortest path and an allowed starting
    rotation, labels each point easy/medium/hard by path length, and writes
    per-scene JSON files plus the flat dataset and a `failed.json` of
    points whose path computation failed.
    """
    import ai2thor.controller
    import ai2thor.util.metrics as metrics
    scene = "FloorPlan_Train1_1"
    angle = 45
    gridSize = 0.25
    # Restrict points visibility_multiplier_filter * visibility_distance away from the target object
    visibility_multiplier_filter = 2
    scene_object_filter = {}
    if filter_file is not None:
        with open(filter_file, "r") as f:
            scene_object_filter = json.load(f)
        print("Filter:")
        pprint.pprint(scene_object_filter)
    print("Visibility distance: {}".format(visibility_distance))
    controller = ai2thor.controller.Controller(
        width=width,
        height=height,
        local_build=local_build,
        start_unity=False if editor_mode else True,
        scene=scene,
        port=8200,
        host="127.0.0.1",
        # Unity params
        gridSize=gridSize,
        fieldOfView=60,
        rotateStepDegrees=angle,
        agentMode="bot",
        visibilityDistance=visibility_distance,
    )
    # Canonical RoboTHOR target object types.
    targets = [
        "Apple",
        "Baseball Bat",
        "BasketBall",
        "Bowl",
        "Garbage Can",
        "House Plant",
        "Laptop",
        "Mug",
        "Remote",
        "Spray Bottle",
        "Vase",
        "Alarm Clock",
        "Television",
        "Pillow",
    ]
    failed_points = []
    if objects_filter is not None:
        obj_filter = set([o for o in objects_filter.split(",")])
        targets = [o for o in targets if o.replace(" ", "") in obj_filter]
    desired_points = 30
    event = controller.step(
        dict(
            action="GetScenesInBuild",
        )
    )
    scenes_in_build = event.metadata["actionReturn"]
    objects_types_in_scene = set()
    def sqr_dist(a, b):
        # Squared horizontal (x/z-plane) distance between two (x, y, z) tuples.
        x = a[0] - b[0]
        z = a[2] - b[2]
        return x * x + z * z
    def sqr_dist_dict(a, b):
        # Same as sqr_dist, for {"x": ..., "y": ..., "z": ...} dicts.
        x = a["x"] - b["x"]
        z = a["z"] - b["z"]
        return x * x + z * z
    def get_points(contoller, object_type, scene):
        # Sample dataset points for one (scene, object type) pair, or None if
        # the scene doesn't contain exactly one object of that type.
        # NOTE(review): the "contoller" parameter (typo) is never used — the
        # enclosing "controller" is referenced instead.
        print("Getting points in scene: '{}'...: ".format(scene))
        controller.reset(scene)
        event = controller.step(
            dict(
                action="ObjectTypeToObjectIds", objectType=object_type.replace(" ", "")
            )
        )
        object_ids = event.metadata["actionReturn"]
        if object_ids is None or len(object_ids) > 1 or len(object_ids) == 0:
            print("Object type '{}' not available in scene.".format(object_type))
            return None
        objects_types_in_scene.add(object_type)
        object_id = object_ids[0]
        event_reachable = controller.step(
            dict(action="GetReachablePositions", gridSize=0.25)
        )
        target_position = controller.step(
            action="GetObjectPosition", objectId=object_id
        ).metadata["actionReturn"]
        reachable_positions = event_reachable.metadata["actionReturn"]
        reachable_pos_set = set(
            [
                (pos["x"], pos["y"], pos["z"])
                for pos in reachable_positions
                # if sqr_dist_dict(pos, target_position) >= visibility_distance * visibility_multiplier_filter
            ]
        )
        def filter_points(selected_points, point_set, minimum_distance):
            # Greedily keep selected points that are at least minimum_distance
            # apart (each kept point removes its neighborhood from point_set).
            result = set()
            for selected in selected_points:
                if selected in point_set:
                    result.add(selected)
                    remove_set = set(
                        [
                            p
                            for p in point_set
                            if sqr_dist(p, selected)
                            <= minimum_distance * minimum_distance
                        ]
                    )
                    point_set = point_set.difference(remove_set)
            return result
        import random
        # Oversample, then thin out to enforce spacing between start points.
        points = random.sample(reachable_pos_set, desired_points * 4)
        final_point_set = filter_points(points, reachable_pos_set, gridSize * 2)
        print("Total number of points: {}".format(len(final_point_set)))
        print("Id {}".format(event.metadata["actionReturn"]))
        point_objects = []
        eps = 0.0001
        counter = 0
        for (x, y, z) in final_point_set:
            possible_orientations = [0, 90, 180, 270]
            pos_unity = dict(x=x, y=y, z=z)
            try:
                path = metrics.get_shortest_path_to_object(
                    controller, object_id, pos_unity, {"x": 0, "y": 0, "z": 0}
                )
                minimum_path_length = metrics.path_distance(path)
                # Try the four cardinal orientations until a teleport succeeds.
                rotation_allowed = False
                while not rotation_allowed:
                    if len(possible_orientations) == 0:
                        break
                    roatation_y = random.choice(possible_orientations)
                    possible_orientations.remove(roatation_y)
                    evt = controller.step(
                        action="TeleportFull",
                        x=pos_unity["x"],
                        y=pos_unity["y"],
                        z=pos_unity["z"],
                        rotation=dict(x=0, y=roatation_y, z=0),
                    )
                    rotation_allowed = evt.metadata["lastActionSuccess"]
                    if not evt.metadata["lastActionSuccess"]:
                        print(evt.metadata["errorMessage"])
                        print(
                            "--------- Rotation not allowed! for pos {} rot {} ".format(
                                pos_unity, roatation_y
                            )
                        )
                if minimum_path_length > eps and rotation_allowed:
                    m = re.search("FloorPlan_([a-zA-Z\-]*)([0-9]+)_([0-9]+)", scene)
                    point_id = "{}_{}_{}_{}_{}".format(
                        m.group(1), m.group(2), m.group(3), object_type, counter
                    )
                    point_objects.append(
                        {
                            "id": point_id,
                            "scene": scene,
                            "object_type": object_type,
                            "object_id": object_id,
                            "target_position": target_position,
                            "initial_position": pos_unity,
                            "initial_orientation": roatation_y,
                            "shortest_path": path,
                            "shortest_path_length": minimum_path_length,
                        }
                    )
                    counter += 1
            except ValueError:
                print("-----Invalid path discarding point...")
                failed_points.append(
                    {
                        "scene": scene,
                        "object_type": object_type,
                        "object_id": object_id,
                        "target_position": target_position,
                        "initial_position": pos_unity,
                    }
                )
        # Label the easiest/middle/hardest thirds by shortest-path length.
        sorted_objs = sorted(point_objects, key=lambda m: m["shortest_path_length"])
        third = int(len(sorted_objs) / 3.0)
        for i, obj in enumerate(sorted_objs):
            if i < third:
                level = "easy"
            elif i < 2 * third:
                level = "medium"
            else:
                level = "hard"
            sorted_objs[i]["difficulty"] = level
        return sorted_objs
    dataset = {}
    dataset_flat = []
    if intermediate_directory is not None:
        if intermediate_directory != ".":
            if os.path.exists(intermediate_directory):
                shutil.rmtree(intermediate_directory)
            os.makedirs(intermediate_directory)
    def key_sort_func(scene_name):
        # Sort scenes by (split name, scene number, variant number).
        m = re.search("FloorPlan_([a-zA-Z\-]*)([0-9]+)_([0-9]+)", scene_name)
        return m.group(1), int(m.group(2)), int(m.group(3))
    scenes = sorted(
        [scene for scene in scenes_in_build if "physics" not in scene],
        key=key_sort_func,
    )
    if scene_filter is not None:
        scene_filter_set = set(scene_filter.split(","))
        scenes = [s for s in scenes if s in scene_filter_set]
    print("Sorted scenes: {}".format(scenes))
    for scene in scenes:
        dataset[scene] = {}
        dataset["object_types"] = targets
        objects = []
        for objectType in targets:
            if filter_file is None or (
                objectType in scene_object_filter
                and scene in scene_object_filter[objectType]
            ):
                dataset[scene][objectType] = []
                obj = get_points(controller, objectType, scene)
                if obj is not None:
                    objects = objects + obj
        dataset_flat = dataset_flat + objects
        if intermediate_directory != ".":
            with open(
                os.path.join(intermediate_directory, "{}.json".format(scene)), "w"
            ) as f:
                json.dump(objects, f, indent=4)
    with open(os.path.join(intermediate_directory, output), "w") as f:
        json.dump(dataset_flat, f, indent=4)
    print("Object types in scene union: {}".format(objects_types_in_scene))
    print("Total unique objects: {}".format(len(objects_types_in_scene)))
    print("Total scenes: {}".format(len(scenes)))
    print("Total datapoints: {}".format(len(dataset_flat)))
    print(failed_points)
    with open(os.path.join(intermediate_directory, "failed.json"), "w") as f:
        json.dump(failed_points, f, indent=4)
@task
def shortest_path_to_object(
    context,
    scene,
    object,
    x,
    z,
    y=0.9103442,
    rotation=0,
    editor_mode=False,
    local_build=False,
    visibility_distance=1.0,
    grid_size=0.25,
):
    """Print the shortest path, and its length, from position (x, y, z) to an
    object of type *object* in *scene*.

    NOTE: ``object`` shadows the builtin, but the parameter name is part of
    the CLI interface so it is kept.
    """
    p = dict(x=x, y=y, z=z)
    import ai2thor.controller
    import ai2thor.util.metrics as metrics
    angle = 45
    gridSize = grid_size
    controller = ai2thor.controller.Controller(
        width=300,
        height=300,
        local_build=local_build,
        start_unity=False if editor_mode else True,
        scene=scene,
        port=8200,
        host="127.0.0.1",
        # Unity params
        gridSize=gridSize,
        fieldOfView=60,
        rotateStepDegrees=angle,
        agentMode="bot",
        visibilityDistance=visibility_distance,
    )
    path = metrics.get_shortest_path_to_object_type(
        controller, object, p, {"x": 0, "y": 0, "z": 0}
    )
    minimum_path_length = metrics.path_distance(path)
    print("Path: {}".format(path))
    # Typo fix in user-facing output: was "Path lenght".
    print("Path length: {}".format(minimum_path_length))
@task
def filter_dataset(ctx, filename, output_filename, ids=False):
    """
    Filters objects in dataset that are not reachable in at least one of the scenes (have
    zero occurrences in the dataset)

    Also rewrites each surviving datapoint's ``id`` with a fresh per
    (scene, object_type) sequence number, dumps the zero-occurrence map to
    ``with_zero.json``, and writes the filtered dataset to *output_filename*.
    NOTE(review): ``ids`` is accepted but never read in this body.
    """
    with open(filename, "r") as f:
        obj = json.load(f)
    # Target object types to count per scene.
    # NOTE(review): unlike create_robothor_dataset's list, "Remote" is absent
    # here — confirm whether that is intentional.
    targets = [
        "Apple",
        "Baseball Bat",
        "BasketBall",
        "Bowl",
        "Garbage Can",
        "House Plant",
        "Laptop",
        "Mug",
        "Spray Bottle",
        "Vase",
        "Alarm Clock",
        "Television",
        "Pillow",
    ]
    # counter: scene -> {object_type -> datapoint count}
    counter = {}
    for f in obj:
        obj_type = f["object_type"]
        if f["scene"] not in counter:
            counter[f["scene"]] = {target: 0 for target in targets}
        scene_counter = counter[f["scene"]]
        if obj_type not in scene_counter:
            scene_counter[obj_type] = 1
        else:
            scene_counter[obj_type] += 1
    objects_with_zero = set()
    objects_with_zero_by_obj = {}
    for k, item in counter.items():
        # print("Key {} ".format(k))
        for obj_type, count in item.items():
            # print("obj {} count {}".format(obj_type, count))
            if count == 0:
                if obj_type not in objects_with_zero_by_obj:
                    objects_with_zero_by_obj[obj_type] = set()
                # print("With zero for obj: {} in scene {}".format(obj_type, k))
                objects_with_zero_by_obj[obj_type].add(k)
                objects_with_zero.add(obj_type)
    print("Objects with zero: {}".format(objects_with_zero))
    with open("with_zero.json", "w") as fw:
        dict_list = {k: list(v) for k, v in objects_with_zero_by_obj.items()}
        json.dump(dict_list, fw, sort_keys=True, indent=4)
    pprint.pprint(objects_with_zero_by_obj)
    filtered = [o for o in obj if o["object_type"] not in objects_with_zero]
    # counter is reused here as an integer: the per-(scene, object_type)
    # sequence number used to rebuild each datapoint id.
    counter = 0
    current_scene = ""
    current_object_type = ""
    for i, o in enumerate(filtered):
        if current_scene != o["scene"] or current_object_type != o["object_type"]:
            counter = 0
            current_scene = o["scene"]
            current_object_type = o["object_type"]
        m = re.search("FloorPlan_([a-zA-Z\-]*)([0-9]+)_([0-9]+)", o["scene"])
        point_id = "{}_{}_{}_{}_{}".format(
            m.group(1), m.group(2), m.group(3), o["object_type"], counter
        )
        counter += 1
        o["id"] = point_id
    with open(output_filename, "w") as f:
        json.dump(filtered, f, indent=4)
@task
def fix_dataset_object_types(
    ctx, input_file, output_file, editor_mode=False, local_build=False
):
    """Rewrite a dataset's object_id/object_type fields using the canonical
    values reported by the simulator for each scene.

    Args:
        ctx: invoke task context (unused).
        input_file: dataset JSON to fix.
        output_file: destination for the corrected JSON.
        editor_mode: connect to a running Unity editor instead of a build.
        local_build: use a locally built player.
    """
    import ai2thor.controller

    with open(input_file, "r") as f:
        obj = json.load(f)

    scene = "FloorPlan_Train1_1"
    angle = 45
    gridSize = 0.25
    controller = ai2thor.controller.Controller(
        width=300,
        height=300,
        local_build=local_build,
        start_unity=False if editor_mode else True,
        scene=scene,
        port=8200,
        host="127.0.0.1",
        # Unity params
        gridSize=gridSize,
        fieldOfView=60,
        rotateStepDegrees=angle,
        agentMode="bot",
        visibilityDistance=1,
    )
    current_scene = None
    object_map = {}
    for i, point in enumerate(obj):
        if current_scene != point["scene"]:
            # New scene: reset the simulator and rebuild the lowercase
            # object-type -> {id, canonical type} lookup from its metadata.
            print("Fixing for scene '{}'...".format(point["scene"]))
            controller.reset(point["scene"])
            current_scene = point["scene"]
            object_map = {
                o["objectType"].lower(): {
                    "id": o["objectId"],
                    "type": o["objectType"],
                }
                for o in controller.last_event.metadata["objects"]
            }
        # Normalize e.g. "Baseball Bat" -> "baseballbat" before lookup.
        key = point["object_type"].replace(" ", "").lower()
        point["object_id"] = object_map[key]["id"]
        point["object_type"] = object_map[key]["type"]

    with open(output_file, "w") as fw:
        # NOTE(review): json.dump treats indent=True as indent=1 --
        # presumably indent=4 was intended; left unchanged.
        json.dump(obj, fw, indent=True)
@task
def test_dataset(
    ctx, filename, scenes=None, objects=None, editor_mode=False, local_build=False
):
    """Sanity-check a dataset: for every (optionally filtered) datapoint,
    verify that a shortest path to the target object type can be computed.

    Args:
        ctx: invoke task context (unused).
        filename: path to the dataset JSON file.
        scenes: optional comma-separated scene filter.
        objects: optional comma-separated object-type filter.
        editor_mode: connect to a running Unity editor instead of a build.
        local_build: use a locally built player.
    """
    import ai2thor.controller
    import ai2thor.util.metrics as metrics

    # Start in the first requested scene (or a default) so the controller is
    # ready before iterating datapoints.
    scene = "FloorPlan_Train1_1" if scenes is None else scenes.split(",")[0]
    controller = ai2thor.controller.Controller(
        width=300,
        height=300,
        local_build=local_build,
        start_unity=False if editor_mode else True,
        scene=scene,
        port=8200,
        host="127.0.0.1",
        # Unity params
        gridSize=0.25,
        fieldOfView=60,
        rotateStepDegrees=45,
        agentMode="bot",
        visibilityDistance=1,
    )
    with open(filename, "r") as f:
        dataset = json.load(f)

    filtered_dataset = dataset
    if scenes is not None:
        scene_set = set(scenes.split(","))
        print("Filtering {}".format(scene_set))
        filtered_dataset = [d for d in dataset if d["scene"] in scene_set]

    if objects is not None:
        object_set = set(objects.split(","))
        print("Filtering {}".format(object_set))
        filtered_dataset = [
            d for d in filtered_dataset if d["object_type"] in object_set
        ]

    current_scene = None
    current_object = None
    point_counter = 0
    print(len(filtered_dataset))
    for point in filtered_dataset:
        # Log scene/object transitions as we walk the (grouped) dataset.
        if current_scene != point["scene"]:
            current_scene = point["scene"]
            print("Testing for scene '{}'...".format(current_scene))
        if current_object != point["object_type"]:
            current_object = point["object_type"]
            point_counter = 0
            print(" Object '{}'...".format(current_object))
        try:
            path = metrics.get_shortest_path_to_object_type(
                controller,
                point["object_type"],
                point["initial_position"],
                {"x": 0, "y": point["initial_orientation"], "z": 0},
            )
            path_dist = metrics.path_distance(path)
            point_counter += 1
            print(" Total points: {}".format(point_counter))
            print(path_dist)
        except ValueError:
            # No path exists from this start position; report and continue.
            print("Cannot find path from point")
@task
def visualize_shortest_paths(
    ctx,
    dataset_path,
    width=600,
    height=300,
    editor_mode=False,
    local_build=False,
    scenes=None,
    gridSize=0.25,
    output_dir=".",
    object_types=None,
):
    """Render top-down images of shortest paths for each (scene, object_type)
    group in a dataset, saving one JPEG per group to output_dir, and
    pretty-print the start positions for which no path could be produced.

    Args:
        ctx: invoke task context (unused).
        dataset_path: dataset JSON with initial positions per datapoint.
        width, height: render resolution.
        editor_mode: connect to a running Unity editor instead of a build.
        local_build: use a locally built player.
        scenes: optional comma-separated scene filter.
        gridSize: navigation grid resolution in meters.
        output_dir: output directory (recreated from scratch unless ".").
        object_types: optional comma-separated object-type filter.
    """
    angle = 45
    import ai2thor.controller
    from PIL import Image

    controller = ai2thor.controller.Controller(
        width=width,
        height=height,
        local_build=local_build,
        start_unity=False if editor_mode else True,
        port=8200,
        host="127.0.0.1",
        # Unity params
        gridSize=gridSize,
        fieldOfView=60,
        rotateStepDegrees=angle,
        agentMode="bot",
        visibilityDistance=1,
    )
    # Recreate the output directory from scratch (unless writing to CWD).
    if output_dir != "." and os.path.exists(output_dir):
        shutil.rmtree(output_dir)
    if output_dir != ".":
        os.mkdir(output_dir)

    # Overhead orthographic camera used for the top-down screenshots.
    evt = controller.step(
        action="AddThirdPartyCamera",
        rotation=dict(x=90, y=0, z=0),
        position=dict(x=5.40, y=3.25, z=-3.0),
        fieldOfView=2.25,
        orthographic=True,
    )

    evt = controller.step(action="SetTopLevelView", topView=True)
    evt = controller.step(action="ToggleMapView")

    # im = Image.fromarray(evt.third_party_camera_frames[0])
    # im.save(os.path.join(output_dir, "top_view.jpg"))

    with open(dataset_path, "r") as f:
        dataset = json.load(f)

    dataset_filtered = dataset
    if scenes is not None:
        scene_f_set = set(scenes.split(","))
        dataset_filtered = [d for d in dataset if d["scene"] in scene_f_set]
    if object_types is not None:
        object_f_set = set(object_types.split(","))
        dataset_filtered = [
            d for d in dataset_filtered if d["object_type"] in object_f_set
        ]
    print("Running for {} points...".format(len(dataset_filtered)))

    index = 0
    print(index)
    print(len(dataset_filtered))
    datapoint = dataset_filtered[index]
    current_scene = datapoint["scene"]
    current_object = datapoint["object_type"]
    failed = {}
    # Walk the dataset group by group: the inner loop advances `index` until
    # the (scene, object_type) pair changes, so
    # dataset_filtered[previous_index:index] is one contiguous group.
    while index < len(dataset_filtered):
        previous_index = index
        controller.reset(current_scene)
        while (
            current_scene == datapoint["scene"]
            and current_object == datapoint["object_type"]
        ):
            index += 1
            if index > len(dataset_filtered) - 1:
                break
            datapoint = dataset_filtered[index]
            current_scene = datapoint["scene"]
            current_object = datapoint["object_type"]

        # NOTE(review): current_scene/current_object have already advanced to
        # the NEXT group here, so `key` may label failures with the wrong
        # group -- confirm against the saved filenames, which use
        # previous_index below instead.
        key = "{}_{}".format(current_scene, current_object)
        failed[key] = []

        print(
            "Points for '{}' in scene '{}'...".format(current_object, current_scene)
        )
        # The scene reset above drops the camera; add it back.
        evt = controller.step(
            action="AddThirdPartyCamera",
            rotation=dict(x=90, y=0, z=0),
            position=dict(x=5.40, y=3.25, z=-3.0),
            fieldOfView=2.25,
            orthographic=True,
        )

        sc = dataset_filtered[previous_index]["scene"]
        obj_type = dataset_filtered[previous_index]["object_type"]
        positions = [
            d["initial_position"] for d in dataset_filtered[previous_index:index]
        ]
        # print("{} : {} : {}".format(sc, obj_type, positions))
        evt = controller.step(
            action="VisualizeShortestPaths",
            objectType=obj_type,
            positions=positions,
            grid=True,
        )
        im = Image.fromarray(evt.third_party_camera_frames[0])
        im.save(os.path.join(output_dir, "{}-{}.jpg".format(sc, obj_type)))

        # print("Retur {}, {} ".format(evt.metadata['actionReturn'], evt.metadata['lastActionSuccess']))
        # print(evt.metadata['errorMessage'])

        # actionReturn carries one success flag per queried position.
        failed[key] = [
            positions[i]
            for i, success in enumerate(evt.metadata["actionReturn"])
            if not success
        ]

    pprint.pprint(failed)
@task
def fill_in_dataset(
    ctx,
    dataset_dir,
    dataset_filename,
    filter_filename,
    intermediate_dir,
    output_filename="filled.json",
    local_build=False,
    editor_mode=False,
    visibility_distance=1.0,
):
    """Fill in missing datapoints of a partially generated dataset.

    NOTE(review): this task appears unfinished/broken -- see the inline
    notes below before relying on it.

    Args:
        ctx: invoke task context (forwarded to create_dataset).
        dataset_dir: directory containing the dataset and partial results.
        dataset_filename: dataset JSON file name inside dataset_dir.
        filter_filename: unused -- TODO confirm intended use.
        intermediate_dir: scratch directory forwarded to create_dataset.
        output_filename: output name forwarded to create_dataset.
        local_build, editor_mode, visibility_distance: forwarded to
            create_dataset.
    """
    import glob
    import ai2thor.controller

    dataset_path = os.path.join(dataset_dir, dataset_filename)

    def key_sort_func(scene_name):
        # Sort scenes as (scene type, scene number, variant number).
        m = re.search("FloorPlan_([a-zA-Z\-]*)([0-9]+)_([0-9]+)", scene_name)
        return m.group(1), int(m.group(2)), int(m.group(3))

    targets = [
        "Apple",
        "Baseball Bat",
        "Basketball",
        "Bowl",
        "Garbage Can",
        "House Plant",
        "Laptop",
        "Mug",
        "Remote",
        "Spray Bottle",
        "Vase",
        "Alarm Clock",
        "Television",
        "Pillow",
    ]

    controller = ai2thor.controller.Controller(
        width=300,
        height=300,
        local_build=local_build,
        start_unity=False if editor_mode else True,
        port=8200,
        host="127.0.0.1",
        # Unity params
        gridSize=0.25,
        fieldOfView=60,
        rotateStepDegrees=45,
        agentMode="bot",
        visibilityDistance=1,
    )

    scenes = sorted(
        [scene for scene in controller._scenes_in_build if "physics" not in scene],
        key=key_sort_func,
    )

    missing_datapoints_by_scene = {}
    partial_dataset_by_scene = {}
    for scene in scenes:
        missing_datapoints_by_scene[scene] = []
        partial_dataset_by_scene[scene] = []

    with open(dataset_path, "r") as f:
        create_dataset(
            ctx,
            local_build=local_build,
            editor_mode=editor_mode,
            output=output_filename,
            intermediate_directory=intermediate_dir,
            visibility_distance=visibility_distance,
        )
        # NOTE(review): `filter_dataset` is the task *function* defined
        # above, not a list -- iterating it raises TypeError. Presumably the
        # intent was to iterate data loaded from `f`; confirm.
        for datapoint in filter_dataset:
            missing_datapoints_by_scene[datapoint["scene"]].append(datapoint)

    # NOTE(review): globs "*.png" but the files are then parsed as JSON --
    # the extension or the parsing is wrong; confirm.
    partial_dataset_filenames = sorted(
        glob.glob("{}/FloorPlan_*.png".format(dataset_dir))
    )
    print("Datas")
    difficulty_order_map = {"easy": 0, "medium": 1, "hard": 2}
    for d_filename in partial_dataset_filenames:
        with open(d_filename, "r") as fp:
            partial_dataset = json.load(fp)
            # NOTE(review): assigns the whole list into the first element's
            # "scene" field -- looks like a bug; confirm intent.
            partial_dataset[0]["scene"] = partial_dataset

    final_dataset = []

    # Merge partial and missing datapoints per (scene, target), ordered by
    # object type then difficulty.
    for scene in scenes:
        for object_type in targets:
            arr = [
                p for p in partial_dataset[scene] if p["object_type"] == object_type
            ] + [
                p
                for p in missing_datapoints_by_scene[scene]
                if p["object_type"] == object_type
            ]
            final_dataset = final_dataset + sorted(
                arr,
                key=lambda p: (
                    p["object_type"],
                    difficulty_order_map[p["difficulty"]],
                ),
            )
    # NOTE(review): final_dataset is built but never written or returned.
@task
def test_teleport(ctx, editor_mode=False, local_build=False):
    """Smoke-test TeleportFull: teleport the locobot agent to a fixed pose
    and print the resulting agent position.

    Args:
        ctx: invoke task context (unused).
        editor_mode: connect to a running Unity editor instead of a build.
        local_build: use a locally built player.
    """
    import ai2thor.controller
    import time

    controller = ai2thor.controller.Controller(
        rotateStepDegrees=30,
        visibilityDistance=1.0,
        gridSize=0.25,
        port=8200,
        host="127.0.0.1",
        local_build=local_build,
        start_unity=False if editor_mode else True,
        agentType="stochastic",
        continuousMode=True,
        continuous=False,
        snapToGrid=False,
        agentMode="locobot",
        scene="FloorPlan_Train1_2",
        width=640,
        height=480,
        # NOTE(review): "continus" looks like a typo of "continuous" --
        # confirm whether Unity consumes it before removing.
        continus=True,
    )
    controller.step(action="GetReachablePositions", gridSize=0.25)
    params = {
        "x": 8.0,
        "y": 0.924999952,
        "z": -1.75,
        "rotation": {"x": 0.0, "y": 240.0, "z": 0.0},
        "horizon": 330.0,
    }
    evt = controller.step(action="TeleportFull", **params)
    print("New pos: {}".format(evt.metadata["agent"]["position"]))
@task
def resort_dataset(ctx, dataset_path, output_path, editor_mode=False, local_build=True):
    """Re-sort a dataset and assign difficulty labels and ids.

    Within each contiguous (scene, object_type) group, datapoints are
    ordered by shortest-path length and split into thirds labelled
    easy/medium/hard; each point gets an id derived from its scene, object
    type and rank.

    Args:
        ctx: invoke task context (unused).
        dataset_path: input dataset JSON.
        output_path: destination for the re-sorted dataset JSON.
        editor_mode, local_build: unused here -- kept for CLI consistency.
    """
    with open(dataset_path, "r") as f:
        dataset = json.load(f)

    index = 0
    previous_index = 0
    datapoint = dataset[index]
    current_scene = datapoint["scene"]
    current_object = datapoint["object_type"]
    # controller.reset(current_scene)
    sum_t = 0
    new_dataset = []

    # Walk the dataset group by group: the inner loop advances `index` until
    # the (scene, object_type) pair changes, so dataset[previous_index:index]
    # is one contiguous group.
    while index < len(dataset):
        previous_index = index
        while (
            current_scene == datapoint["scene"]
            and current_object == datapoint["object_type"]
        ):
            index += 1
            if index > len(dataset) - 1:
                break
            datapoint = dataset[index]
            current_scene = datapoint["scene"]
            current_object = datapoint["object_type"]
        print("Scene '{}'...".format(current_scene))
        sorted_datapoints = sorted(
            dataset[previous_index:index], key=lambda dp: dp["shortest_path_length"]
        )
        third = int(len(sorted_datapoints) / 3.0)
        for i, obj in enumerate(sorted_datapoints):
            # First third easy, second third medium, remainder hard.
            if i < third:
                level = "easy"
            elif i < 2 * third:
                level = "medium"
            else:
                level = "hard"
            sorted_datapoints[i]["difficulty"] = level
            m = re.search("FloorPlan_([a-zA-Z\-]*)([0-9]+)_([0-9]+)", obj["scene"])
            point_id = "{}_{}_{}_{}_{}".format(
                m.group(1), m.group(2), m.group(3), obj["object_type"], i
            )
            sorted_datapoints[i]["id"] = point_id
            # NOTE(review): duplicate of the assignment above; harmless.
            sorted_datapoints[i]["difficulty"] = level
        new_dataset = new_dataset + sorted_datapoints
        sum_t += len(sorted_datapoints)

    print("original len: {}, new len: {}".format(len(dataset), sum_t))
    with open(output_path, "w") as fw:
        json.dump(new_dataset, fw, indent=4)
@task
def remove_dataset_spaces(ctx, dataset_dir):
    """Strip spaces from every datapoint id in the train and val splits.

    Reads train.json and val.json from dataset_dir, removes spaces from each
    "id" (printing the resulting sorted id set per split), then writes the
    cleaned splits to train.json / val.json in the current working directory.

    Improvement: the identical per-split logic was deduplicated into
    _strip_split_ids; behavior is unchanged.

    Args:
        ctx: invoke task context (unused).
        dataset_dir: directory containing train.json and val.json.
    """
    train = os.path.join(dataset_dir, "train.json")
    test = os.path.join(dataset_dir, "val.json")

    with open(train, "r") as f:
        train_data = json.load(f)

    with open(test, "r") as f:
        test_data = json.load(f)

    _strip_split_ids(train_data)
    _strip_split_ids(test_data)

    with open("train.json", "w") as fw:
        json.dump(train_data, fw, indent=4, sort_keys=True)

    with open("val.json", "w") as fw:
        json.dump(test_data, fw, indent=4, sort_keys=True)


def _strip_split_ids(split_data):
    """Remove spaces from each datapoint's "id" in place and print the
    resulting sorted id set (preserves the original task's diagnostics)."""
    id_set = set()
    for o in split_data:
        o["id"] = o["id"].replace(" ", "")
        id_set.add(o["id"])
    print(sorted(id_set))
@task
def shortest_path_to_point(
    ctx, scene, x0, y0, z0, x1, y1, z1, editor_mode=False, local_build=False
):
    """Compute the shortest path between two points in a scene and report
    whether the underlying action succeeded.

    Bug fix: `local_build` was referenced but never defined (NameError at
    runtime); it is now a keyword parameter defaulting to False, matching the
    sibling tasks. Existing callers are unaffected.

    Args:
        ctx: invoke task context (unused).
        scene: scene name to load.
        x0, y0, z0: start position.
        x1, y1, z1: end position.
        editor_mode: connect to a running Unity editor instead of a build.
        local_build: use a locally built player.
    """
    import ai2thor.util.metrics as metrics
    import ai2thor.controller

    controller = ai2thor.controller.Controller(
        rotateStepDegrees=30,
        visibilityDistance=1.0,
        gridSize=0.25,
        port=8200,
        host="127.0.0.1",
        local_build=local_build,
        start_unity=False if editor_mode else True,
        agentType="stochastic",
        continuousMode=True,
        continuous=False,
        snapToGrid=False,
        agentMode="locobot",
        scene=scene,
        width=300,
        height=300,
        continus=True,
    )
    evt = metrics.get_shortest_path_to_point(
        controller, dict(x=x0, y=y0, z=z0), dict(x=x1, y=y1, z=z1)
    )

    print(evt.metadata["lastActionSuccess"])
    print(evt.metadata["errorMessage"])
@task
def reachable_pos(ctx, scene, editor_mode=False, local_build=False):
    """Debug task: print the agent position before/after GetReachablePositions
    and after a TeleportFull, along with the reachable-position list.

    Bug fix: the first diagnostic said "constoller.last_action" although it
    reports controller.last_event's agent position; the message is corrected.

    Args:
        ctx: invoke task context (unused).
        scene: scene name to load.
        editor_mode: connect to a running Unity editor instead of a build.
        local_build: use a locally built player.
    """
    import ai2thor.util.metrics as metrics
    import ai2thor.controller

    gridSize = 0.25
    controller = ai2thor.controller.Controller(
        rotateStepDegrees=30,
        visibilityDistance=1.0,
        gridSize=gridSize,
        port=8200,
        host="127.0.0.1",
        local_build=local_build,
        start_unity=False if editor_mode else True,
        agentType="stochastic",
        continuousMode=True,
        continuous=False,
        snapToGrid=False,
        agentMode="locobot",
        scene=scene,
        width=300,
        height=300,
        continus=True,
    )

    print(
        "controller.last_event Agent Pos: {}".format(
            controller.last_event.metadata["agent"]["position"]
        )
    )

    evt = controller.step(action="GetReachablePositions", gridSize=gridSize)

    print("After GetReachable AgentPos: {}".format(evt.metadata["agent"]["position"]))

    print(evt.metadata["lastActionSuccess"])
    print(evt.metadata["errorMessage"])

    reachable_pos = evt.metadata["actionReturn"]
    print(evt.metadata["actionReturn"])

    # Teleport to a fixed x/z at the height of the first reachable position.
    evt = controller.step(
        dict(
            action="TeleportFull",
            x=3.0,
            y=reachable_pos[0]["y"],
            z=-1.5,
            rotation=dict(x=0, y=45.0, z=0),
            horizon=0.0,
        )
    )

    print("After teleport: {}".format(evt.metadata["agent"]["position"]))
@task
def get_physics_determinism(
    ctx, scene="FloorPlan1_physics", agent_mode="arm", n=100, samples=100
):
    """Measure physics determinism: run random action sequences for several
    trials and print the average variance of object positions per action
    category (move / rotate / look / all).

    Args:
        ctx: invoke task context (unused).
        scene: scene to test in.
        agent_mode: agent body to use (e.g. "arm").
        n: number of trials per action category.
        samples: number of random actions sampled per trial.
    """
    import ai2thor.controller
    import random

    num_trials = n
    width = 300
    height = 300
    fov = 100

    def act(controller, actions, n):
        # Apply n uniformly random actions drawn from `actions`.
        for i in range(n):
            action = random.choice(actions)
            controller.step(dict(action=action))

    controller = ai2thor.controller.Controller(
        local_executable_path=None,
        scene=scene,
        gridSize=0.25,
        width=width,
        height=height,
        agentMode=agent_mode,
        fieldOfView=fov,
        agentControllerType="mid-level",
        # NOTE(review): relies on ai2thor.fifo_server being importable as an
        # attribute after `import ai2thor.controller` -- confirm, otherwise
        # this line raises AttributeError.
        server_class=ai2thor.fifo_server.FifoServer,
        visibilityScheme="Distance",
    )

    from ai2thor.util.trials import trial_runner, ObjectPositionVarianceAverage

    move_actions = ["MoveAhead", "MoveBack", "MoveLeft", "MoveRight"]
    rotate_actions = ["RotateRight", "RotateLeft"]
    look_actions = ["LookUp", "LookDown"]
    all_actions = move_actions + rotate_actions + look_actions

    sample_number = samples
    action_tuples = [
        ("move", move_actions, sample_number),
        ("rotate", rotate_actions, sample_number),
        ("look", look_actions, sample_number),
        ("all", all_actions, sample_number),
    ]
    # NOTE: the loop variable `n` below shadows (and overwrites) the task
    # parameter `n`; num_trials captured the original value above.
    for action_name, actions, n in action_tuples:
        for controller, metric in trial_runner(
            controller, num_trials, ObjectPositionVarianceAverage()
        ):
            act(controller, actions, n)
            print(
                " actions: '{}', object_position_variance_average: {} ".format(
                    action_name, metric
                )
            )
@task
def generate_pypi_index(context):
    """Regenerate the static PyPI-style index pages on the pip S3 bucket:
    a root index.html linking to /ai2thor/, and an ai2thor/index.html listing
    every uploaded artifact in the bucket.

    Args:
        context: invoke task context (unused).
    """
    s3 = boto3.resource("s3")
    root_index = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN">
<HTML>
<BODY>
<a href="/ai2thor/index.html">/ai2thor/</a><br>
</BODY>
</HTML>
"""
    s3.Object(ai2thor.build.PYPI_S3_BUCKET, "index.html").put(
        Body=root_index, ACL="public-read", ContentType="text/html"
    )
    objects = list_objects_with_metadata(ai2thor.build.PYPI_S3_BUCKET)
    links = []
    for k, v in objects.items():
        # Link every key except the index page itself.
        if k.split("/")[-1] != "index.html":
            links.append('<a href="/%s">/%s</a><br>' % (k, k))
    ai2thor_index = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN">
<HTML>
<BODY>
%s
</BODY>
</HTML>
""" % "\n".join(
        links
    )
    s3.Object(ai2thor.build.PYPI_S3_BUCKET, "ai2thor/index.html").put(
        Body=ai2thor_index, ACL="public-read", ContentType="text/html"
    )
def ci_test_utf(branch, commit_id, base_dir):
    """Run the Unity Test Framework suite for a CI build and record results.

    Generates a pytest module from the UTF XML results, runs pytest on it,
    and writes a JSON summary (success flag plus captured stdout/stderr) to
    tmp/test_utf_results.json.

    Args:
        branch: git branch under test (logging only).
        commit_id: git commit under test (logging only).
        base_dir: repository root containing the unity project.
    """
    logger.info(
        "running Unity Test framework testRunner for %s %s %s"
        % (branch, commit_id, base_dir)
    )
    results_path, results_logfile = test_utf(base_dir)

    class_data = generate_pytest_utf(results_path)

    test_path = "tmp/test_utf.py"
    with open(test_path, "w") as f:
        f.write("\n".join(class_data))

    proc = subprocess.run(
        "pytest %s" % test_path, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )

    result = dict(
        success=proc.returncode == 0,
        stdout=proc.stdout.decode("ascii"),
        stderr=proc.stderr.decode("ascii"),
    )

    with open("tmp/test_utf_results.json", "w") as f:
        f.write(json.dumps(result))

    logger.info(
        "finished Unity Test framework runner for %s %s"
        % (branch, commit_id)
    )
@task
def format(context):
    """Format all sources: Python via black, then C# via dotnet-format."""
    format_py(context)
    format_cs(context)
@task
def format_cs(context):
    """Format the Unity C# project with dotnet-format (installing the tool
    first if necessary)."""
    install_dotnet_format(context)

    # the following message will get emitted, this can safely be ignored
    # "Warnings were encountered while loading the workspace. Set the verbosity option to the 'diagnostic' level to log warnings"
    subprocess.check_call(
        ".dotnet/dotnet tool run dotnet-format unity/AI2-THOR-Base.csproj -w -s",
        shell=True,
    )
@task
def install_dotnet_format(context, force=False):
    """Install the dotnet-format tool into the local tool manifest.

    No-op when dotnet-format is already in the manifest, unless force=True.

    Args:
        context: invoke task context (forwarded to install_dotnet).
        force: reinstall even if the tool is already present.
    """
    install_dotnet(context)

    base_dir = os.path.normpath(os.path.dirname(os.path.realpath(__file__)))
    # NOTE(review): the manifest is probed relative to the current working
    # directory while the dotnet binary is resolved relative to this file --
    # presumably tasks are always run from the repo root; confirm.
    if not os.path.isfile(".config/dotnet-tools.json"):
        command = os.path.join(base_dir, ".dotnet/dotnet") + " new tool-manifest"
        subprocess.check_call(command, shell=True)

    with open(".config/dotnet-tools.json") as f:
        tools = json.loads(f.read())

    # we may want to specify a version here in the future
    if not force and "dotnet-format" in tools.get("tools", {}):
        # dotnet-format already installed
        return

    command = os.path.join(base_dir, ".dotnet/dotnet") + " tool install dotnet-format"
    subprocess.check_call(command, shell=True)
@task
def install_dotnet(context, force=False):
    """Install a local .NET SDK into .dotnet/ next to this file using the
    official dotnet-install.sh script.

    No-op when the dotnet binary already exists, unless force=True.

    Args:
        context: invoke task context (unused).
        force: reinstall even if .dotnet/dotnet already exists.
    """
    import requests
    import stat

    base_dir = os.path.normpath(os.path.dirname(os.path.realpath(__file__)))
    if not force and os.path.isfile(os.path.join(base_dir, ".dotnet/dotnet")):
        # dotnet already installed
        return

    # https://docs.microsoft.com/en-us/dotnet/core/tools/dotnet-install-script
    res = requests.get("https://dot.net/v1/dotnet-install.sh")
    res.raise_for_status()
    target = os.path.join(base_dir, "dotnet-install.sh")
    with open(target, "wb") as f:
        f.write(res.content)

    # Make the installer script executable before running it.
    os.chmod(target, stat.S_IREAD | stat.S_IEXEC | stat.S_IWRITE)
    env = os.environ.copy()
    env["DOTNET_INSTALL_DIR"] = os.path.join(base_dir, ".dotnet")
    subprocess.check_call(target, shell=True, env=env)
    os.unlink(target)
@task
def format_py(context):
    """Format the repository's Python sources with black (excluding the
    Unity project and the git metadata directory)."""
    # Presence check only: fail fast with a helpful message if black is
    # missing from the environment.
    try:
        import black  # noqa: F401
    except ImportError:
        raise Exception("black not installed - run pip install black")

    command = "black -v -t py38 --exclude unity/ --exclude .git/ ."
    subprocess.check_call(command, shell=True)
@task
def install_unity_hub(context, target_dir=os.path.join(os.path.expanduser("~"), "local/bin")):
    """Download the UnityHub AppImage into target_dir (Linux only).

    The download goes to a pid-suffixed temp file which then replaces any
    existing install, so a concurrent run cannot leave a half-written binary.

    Args:
        context: invoke task context (unused).
        target_dir: destination directory for UnityHub.AppImage.
    """
    import stat
    import requests

    if not sys.platform.startswith("linux"):
        raise Exception("Installation only support for Linux")

    res = requests.get("https://public-cdn.cloud.unity3d.com/hub/prod/UnityHub.AppImage")
    res.raise_for_status()
    os.makedirs(target_dir, exist_ok=True)
    target_path = os.path.join(target_dir, "UnityHub.AppImage")

    tmp_path = target_path + ".tmp-" + str(os.getpid())
    with open(tmp_path, "wb") as f:
        f.write(res.content)

    if os.path.isfile(target_path):
        os.unlink(target_path)

    os.rename(tmp_path, target_path)
    # rwx for owner, rx for group/other -- the AppImage must be executable.
    os.chmod(target_path, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
    print("Installed UnityHub at %s" % target_path)
@task
def install_unity_editor(context, version=None, changeset=None):
    """Install the Unity editor (plus platform build modules) via UnityHub.

    When version/changeset are not supplied, they are parsed from the Unity
    project's ProjectVersion.txt.

    Bug fix: the changeset character class was "[a-zAZ0-9]" which matches
    only a-z, 'A', 'Z' and digits (omitting B-Y); corrected to
    "[a-zA-Z0-9]". The macOS hub path is now a raw string so the embedded
    backslash escapes are not treated as (invalid) escape sequences.

    Args:
        context: invoke task context (unused).
        version: Unity editor version, e.g. "2020.3.25f1".
        changeset: Unity changeset hash matching the version.
    """
    import yaml
    import re

    unity_hub_path = None
    if sys.platform.startswith("linux"):
        unity_hub_path = os.path.join(os.path.expanduser("~"), "local/bin/UnityHub.AppImage")
    elif sys.platform.startswith("darwin"):
        # Shell-escaped path; invoked through shell=True below.
        unity_hub_path = r"/Applications/Unity\ Hub.app/Contents/MacOS/Unity\ Hub --"
    else:
        raise Exception("UnityHub CLI not supported")

    if version is None:
        with open("unity/ProjectSettings/ProjectVersion.txt") as pf:
            project_version = yaml.load(pf.read(), Loader=yaml.FullLoader)

        # m_EditorVersionWithRevision looks like "2020.3.25f1 (9b9180224418)".
        m = re.match(r'^([^\s]+)\s+\(([a-zA-Z0-9]+)\)', project_version["m_EditorVersionWithRevision"])
        assert m, "Could not extract version/changeset from %s" % project_version["m_EditorVersionWithRevision"]
        version = m.group(1)
        changeset = m.group(2)

    command = "%s --headless install --version %s" % (unity_hub_path, version)
    if changeset:
        command += " --changeset %s" % changeset

    platform_modules = dict(
        linux=["mac-mono", "linux-il2cpp", "webgl"],
        darwin=["mac-il2cpp", "linux-il2cpp", "linux-mono", "webgl"],
    )
    for m in platform_modules[sys.platform]:
        command += " -m %s" % m

    subprocess.check_call(command, shell=True)
@task
def generate_unity_alf(context):
    """Generate a Unity license activation file (.alf) for the installed
    editor version.

    generates Unity License Acitivation file for use
    with manual activation https://docs.unity3d.com/Manual/ManualActivationGuide.html
    """
    alf_path = "Unity_v%s.alf" % _unity_version()
    subprocess.run("%s -batchmode -createManualActivationFile" % _unity_path(), shell=True)
    # Unity writes the ALF to the working directory; fail loudly if missing.
    assert os.path.isfile(alf_path), "ALF not found at %s" % alf_path

    print("ALF created at %s. Activate license at: https://license.unity3d.com/manual" % alf_path)
@task
def activate_unity_license(context, ulf_path):
    """Activate a Unity license from a manually issued .ulf file."""
    assert os.path.isfile(ulf_path), "License file '%s' not found" % ulf_path
    command = '%s -batchmode -manualLicenseFile "%s"' % (_unity_path(), ulf_path)
    subprocess.run(command, shell=True)
def test_utf(base_dir=None):
    """Run the Unity Test Framework (PlayMode) suite via the Unity CLI.

    Args:
        base_dir: repository root; defaults to the current working directory.

    Returns:
        Tuple of (results XML path, Unity log file path); both file names
        embed the current git commit id.
    """
    if base_dir is None:
        base_dir = os.getcwd()

    project_path = os.path.join(base_dir, "unity")
    commit_id = git_commit_id()
    test_results_path = os.path.join(project_path, "utf_testResults-%s.xml" % commit_id)
    logfile_path = os.path.join(base_dir, "thor-testResults-%s.log" % commit_id)

    command = (
        "%s -runTests -testResults %s -logFile %s -testPlatform PlayMode -projectpath %s "
        % (_unity_path(), test_results_path, logfile_path, project_path)
    )

    subprocess.call(command, shell=True, cwd=base_dir)

    return test_results_path, logfile_path
def generate_pytest_utf(test_results_path):
import xml.etree.ElementTree as ET
with open(test_results_path) as f:
root = ET.fromstring(f.read())
from collections import defaultdict
class_tests = defaultdict(list)
for test_case in root.findall(".//test-case"):
# print(test_case.attrib['methodname'])
class_tests[test_case.attrib["classname"]].append(test_case)
class_data = []
class_data.append(
f"""
# GENERATED BY tasks.generate_pytest_utf - DO NOT EDIT/COMMIT
import pytest
import json
import os
def test_testresults_exist():
test_results_path = "{test_results_path}"
assert os.path.isfile("{test_results_path}"), "TestResults at: {test_results_path} do not exist"
"""
)
for class_name, test_cases in class_tests.items():
test_records = []
for test_case in test_cases:
methodname = test_case.attrib["methodname"]
if test_case.attrib["result"] == "Failed":
fail_message = test_case.find("failure/message")
stack_trace = test_case.find("failure/stack-trace")
message = json.dumps(fail_message.text + " " + stack_trace.text)
test_data = f"""
def test_{methodname}(self):
pytest.fail(json.loads(r\"\"\"
{message}
\"\"\"
))
"""
else:
test_data = f"""
def test_{methodname}(self):
pass
"""
test_records.append(test_data)
test_record_data = " pass"
if test_records:
test_record_data = "\n".join(test_records)
encoded_class_name = re.sub(
r"[^a-zA-Z0-9_]", "_", re.sub("_", "__", class_name)
)
class_data.append(
f"""
class {encoded_class_name}:
{test_record_data}
"""
)
return class_data
|
Chatcast.py | import socket, threading
def oku(address, port):
    """Receive loop: bind a reusable UDP socket to (address, port) and print
    every incoming chat message.

    Fixes: the original offensive parameter names were replaced (the only
    call site passes arguments positionally, so callers are unaffected), and
    the bare ``except:`` clauses were narrowed so KeyboardInterrupt and
    SystemExit are no longer swallowed.

    Args:
        address: local interface/IP to bind.
        port: UDP port shared by all chat participants.
    """
    soket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    soket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # Retry until the bind succeeds (e.g. the port is momentarily busy).
    while True:
        try:
            soket.bind((address, port))
        except OSError:
            continue
        break
    print("\nSuccessfully connected the room!\n")
    while True:
        try:
            mesaj, kimlik = soket.recvfrom(1024)
            print(mesaj.decode())
        except (OSError, UnicodeDecodeError):
            # Best-effort receive loop: skip malformed datagrams and
            # transient socket errors instead of killing the thread.
            continue
# --- Broadcast chat client (module-level script) ---
soket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
soket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
soket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)

print("Chatcast v1.0.0")
print("2021 (c) Ruzgar Sepci. All rights reserved.")

adres = input("\nYour IP address in the network you want to connect:\n>>>")
port = int(input("\nThe port you want to connect:\n>>>"))
rumuz = input("\nYour nickname:\n>>>")
hosgeldin = "[SYSTEM] " + rumuz + " joined the room!"

# Start the receive loop in the background.
threading.Thread(target=oku, args=(adres, port)).start()

# Bind the send socket to an OS-assigned port, retrying on failure.
while True:
    try:
        soket.bind((adres, 0))
    except OSError:
        continue
    break

# Announce ourselves. Bug fix: sendto() requires bytes, but the original
# passed the welcome string unencoded, so every attempt raised TypeError
# which the bare except turned into an infinite retry loop.
while True:
    try:
        soket.sendto(hosgeldin.encode(), ("<broadcast>", port))
    except OSError:
        continue
    break

# Main send loop: read a line, prefix it with the nickname, broadcast it.
while True:
    mesaj = input("")
    mesaj = "<" + rumuz + ">" + mesaj
    mesaj = mesaj.encode()
    while True:
        try:
            soket.sendto(mesaj, ("<broadcast>", port))
        except OSError:
            continue
        break
|
test_imperative_thread_local_has_grad.py | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import paddle
import time
import paddle.nn as nn
import numpy as np
import threading
class SimpleNet(nn.Layer):
    """A single fully-connected layer mapping in_dim features to out_dim."""

    def __init__(self, in_dim, out_dim):
        super().__init__()
        self.fc = nn.Linear(in_dim, out_dim)

    def forward(self, x):
        # Delegate directly to the linear layer.
        return self.fc(x)
class TestCases(unittest.TestCase):
    """Checks that paddle.no_grad() applied in one thread does not leak into
    forward passes running concurrently in other threads."""

    @paddle.no_grad()
    def thread_1_main(self):
        # Keep the no_grad guard active in this thread long enough to
        # overlap with the worker threads below.
        time.sleep(8)

    def thread_2_main(self):
        feature_dim = 10
        label_dim = 3
        net = SimpleNet(feature_dim, label_dim)
        for _ in range(1000):
            batch = paddle.to_tensor(np.random.rand(32, feature_dim).astype('float32'))
            # Raw input tensors never require grad...
            self.assertTrue(batch.stop_gradient)
            batch = net(batch)
            # ...but network outputs must, even while another thread is
            # inside no_grad().
            self.assertFalse(batch.stop_gradient)

    def test_main(self):
        workers = []
        for _ in range(10):
            workers.append(threading.Thread(target=self.thread_1_main))
            workers.append(threading.Thread(target=self.thread_2_main))
        for worker in workers:
            worker.start()
        for worker in workers:
            worker.join()
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
|
visualizer.py | import math
import numpy as np
import threading
import open3d as o3d
from open3d.visualization import gui
from open3d.visualization import rendering
from collections import deque
from .boundingbox import *
from .colormap import *
from .labellut import *
import time
class Model:
    """Attributes, data, and methods of visualization models."""

    bounding_box_prefix = "Bounding Boxes/"

    class BoundingBoxData:
        """A named collection of bounding boxes belonging to one dataset item."""

        def __init__(self, name, boxes):
            self.name = name
            self.boxes = boxes

    def __init__(self):
        # Note: the tpointcloud cannot store the actual data arrays, because
        # the tpointcloud requires specific names for some arrays (e.g. "points",
        # "colors"). So the tpointcloud exists for rendering and initially only
        # contains the "points" array.
        self.tclouds = {}  # name -> tpointcloud
        self.data_names = []  # the order data will be displayed / animated
        self.bounding_box_data = []  # [BoundingBoxData]
        self._data = {}  # name -> {attr_name -> numpyarray}
        self._known_attrs = {}  # name -> set(attrs)
        self._attr2minmax = {}  # only access in _get_attr_minmax()

        # Canonical renames applied to incoming attribute names.
        self._attr_rename = {"label": "labels", "feat": "feature"}

    def _init_data(self, name):
        """Register an empty entry (and rendering cloud) for `name`."""
        tcloud = o3d.t.geometry.PointCloud(o3d.core.Dtype.Float32,
                                           o3d.core.Device("CPU:0"))
        self.tclouds[name] = tcloud
        self._data[name] = {}
        self.data_names.append(name)

    def is_loaded(self, name):
        """Return True if data for `name` has been loaded (attrs present)."""
        if name in self._data:
            return len(self._data[name]) > 0
        else:
            # if the name isn't in the data, presumably it is loaded
            # (for instance, if this is a bounding box).
            return True

    def load(self, name, fail_if_no_space=False):
        """Load data for `name` into memory. Subclasses must override."""
        assert (False)  # pure virtual

    def unload(self, name):
        """Release data held for `name`. Subclasses must override."""
        assert (False)  # pure virtual

    def create_point_cloud(self, data):
        """Build the rendering tpointcloud and attribute arrays for one item.

        `data` is a dict that must contain "name" and "points"; any other
        1D/2D array-like entries are kept as scalar/vector attributes.
        """
        assert ("name" in data)  # name is a required field
        assert ("points" in data)  # 'points' is a required field

        name = data["name"]
        pts = self._convert_to_numpy(data["points"])
        tcloud = o3d.t.geometry.PointCloud(o3d.core.Dtype.Float32,
                                           o3d.core.Device("CPU:0"))
        known_attrs = set()
        if pts.shape[1] >= 4:
            # We can't use inplace Tensor creation (e.g. from_numpy())
            # because the resulting arrays won't be contiguous. However,
            # TensorList can be inplace.
            xyz = pts[:, [0, 1, 2]]
            tcloud.point["points"] = Visualizer._make_tcloud_array(xyz,
                                                                   copy=True)
        else:
            tcloud.point["points"] = Visualizer._make_tcloud_array(pts)
        self.tclouds[name] = tcloud

        # Add scalar attributes and vector3 attributes
        attrs = {}
        for k, v in data.items():
            attr = self._convert_to_numpy(v)
            if attr is None:
                continue
            attr_name = k
            if attr_name == "point":
                continue

            # Apply the canonical attribute renames (e.g. "label" -> "labels").
            new_name = self._attr_rename.get(attr_name)
            if new_name is not None:
                attr_name = new_name

            if len(attr.shape) == 1 or len(attr.shape) == 2:
                attrs[attr_name] = attr
                known_attrs.add(attr_name)

        self._data[name] = attrs
        self._known_attrs[name] = known_attrs

    def _convert_to_numpy(self, ary):
        """Best-effort conversion of list / numpy / tf / torch data to a
        float-compatible numpy array; returns None if not convertible."""
        if isinstance(ary, list):
            try:
                return np.array(ary, dtype='float32')
            except TypeError:
                return None
        elif isinstance(ary, np.ndarray):
            if len(ary.shape) == 2 and ary.shape[0] == 1:
                ary = ary[0]  # "1D" array as 2D: [[1, 2, 3,...]]
            if ary.dtype.name.startswith('int'):
                return np.array(ary, dtype='float32')
            else:
                return ary

        # Optional framework tensors: both imports are best-effort so the
        # visualizer works without tensorflow/torch installed.
        try:
            import tensorflow as tf
            if isinstance(ary, tf.Tensor):
                return self._convert_to_numpy(ary.numpy())
        except:
            pass

        try:
            import torch
            if isinstance(ary, torch.Tensor):
                return self._convert_to_numpy(ary.detach().cpu().numpy())
        except:
            pass

        return None

    def get_attr(self, name, attr_name):
        """Return the attribute array for (name, attr_name), or None."""
        if name in self._data:
            attrs = self._data[name]
            if attr_name in attrs:
                return attrs[attr_name]
        return None

    def get_attr_shape(self, name, attr_name):
        """Return the attribute's shape, or [] if it does not exist."""
        attr = self.get_attr(name, attr_name)
        if attr is not None:
            return attr.shape
        return []

    def get_attr_minmax(self, attr_name, channel):
        """Return (min, max) of a channel of `attr_name` across all items.

        Results are memoized per (name, attr, channel) in _attr2minmax;
        returns (0.0, 0.0) when no item has the attribute.
        """
        attr_key_base = attr_name + ":" + str(channel)

        attr_min = 1e30
        attr_max = -1e30
        for name in self._data.keys():
            key = name + ":" + attr_key_base
            if key not in self._attr2minmax:
                attr = self.get_attr(name, attr_name)
                if attr is None:  # clouds may not have all the same attributes
                    continue
                if len(attr.shape) > 1:
                    attr = attr[:, channel]
                self._attr2minmax[key] = (attr.min(), attr.max())
            amin, amax = self._attr2minmax[key]
            attr_min = min(attr_min, amin)
            attr_max = max(attr_max, amax)

        if attr_min > attr_max:
            return (0.0, 0.0)
        return (attr_min, attr_max)

    def get_available_attrs(self, names):
        """Return the sorted attribute names common to every item in `names`."""
        attr_names = None
        for n in names:
            known = self._known_attrs.get(n)
            if known is not None:
                if attr_names is None:
                    attr_names = known
                else:
                    attr_names = attr_names.intersection(known)
        if attr_names is None:
            return []
        return sorted(attr_names)

    def calc_bounds_for(self, name):
        """Return [[min_xyz], [max_xyz]] for one item's points (zeros if empty)."""
        if name in self.tclouds and not self.tclouds[name].is_empty():
            tcloud = self.tclouds[name]
            # Ideally would simply return tcloud.compute_aabb() here, but it can
            # be very slow on macOS with clang 11.0
            pts = tcloud.point["points"].as_tensor().numpy()
            min_val = (pts[:, 0].min(), pts[:, 1].min(), pts[:, 2].min())
            max_val = (pts[:, 0].max(), pts[:, 1].max(), pts[:, 2].max())
            return [min_val, max_val]
        else:
            return [(0.0, 0.0, 0.0), (0.0, 0.0, 0.0)]
class DataModel(Model):
    """Model for user-specified, in-memory point cloud data."""

    def __init__(self, userdata):
        super().__init__()
        # We could just create the TPointCloud here, but that would cause the UI
        # to block. If we do it on load then the loading dialog will display.
        self._name2srcdata = {}
        for d in userdata:
            name = d["name"]
            while name in self._data:  # ensure each name is unique
                name = name + "_"
            self._init_data(name)
            self._name2srcdata[name] = d

    def load(self, name, fail_if_no_space=False):
        """Create the point cloud for *name* on first use.

        Returns True on success, matching DatasetModel.load(); callers such
        as _load_geometries() check the result and would otherwise treat the
        previous implicit None as a failure and stop loading.
        """
        if self.is_loaded(name):
            return True
        self.create_point_cloud(self._name2srcdata[name])
        return True

    def unload(self, name):
        """In-memory data is never evicted; nothing to do."""
        pass
class DatasetModel(Model):
    """Model that lazily loads clouds from an ml3d dataset split, keeping an
    LRU cache bounded by a fixed memory budget."""

    def __init__(self, dataset, split, indices):
        super().__init__()
        self._dataset = None
        self._name2datasetidx = {}
        self._memory_limit = 8192 * 1024 * 1024  # memory limit in bytes
        self._current_memory_usage = 0
        self._cached_data = deque()  # LRU order: oldest entry on the left
        self._dataset = dataset.get_split(split)
        if len(self._dataset) > 0:
            if indices is None:
                indices = range(0, len(self._dataset))
            # Some results from get_split() (like "training") are randomized.
            # Sort, so that the same index always returns the same piece of data.
            path2idx = {}
            for i in range(0, len(self._dataset.path_list)):
                path2idx[self._dataset.path_list[i]] = i
            real_indices = [path2idx[p] for p in sorted(path2idx.keys())]
            indices = [real_indices[idx] for idx in indices]
            # SemanticKITTI names its items <sequence#>_<timeslice#>,
            # "mm_nnnnnn". We'd like to use the hierarchical feature of the tree
            # to separate the sequences. We cannot change the name in the dataset
            # because this format is used to report algorithm results, so do it
            # here.
            underscore_to_slash = False
            if dataset.__class__.__name__ == "SemanticKITTI":
                underscore_to_slash = True
            for i in indices:
                info = self._dataset.get_attr(i)
                name = info["name"]
                if underscore_to_slash:
                    name = name.replace("_", "/")
                while name in self._data:  # ensure each name is unique
                    name = name + "_"
                self._init_data(name)
                self._name2datasetidx[name] = i
            # These datasets expose RGB data under "feat"/"feature";
            # surface it as "colors" in the UI.
            if dataset.__class__.__name__ in [
                    "Toronto3D", "Semantic3D", "S3DIS"
            ]:
                self._attr_rename["feat"] = "colors"
                self._attr_rename["feature"] = "colors"
        else:
            print("[ERROR] Dataset split has no data")

    def is_loaded(self, name):
        """Return True if *name* is resident; refreshes its LRU position."""
        loaded = super().is_loaded(name)
        if loaded and name in self._cached_data:
            # make this point cloud the most recently used
            self._cached_data.remove(name)
            self._cached_data.append(name)
        return loaded

    def load(self, name, fail_if_no_space=False):
        """Load *name* from the dataset, evicting the LRU entry if needed.

        Returns True on success; False only when *fail_if_no_space* is True
        and the new cloud would exceed the memory budget.
        """
        assert (name in self._name2datasetidx)
        if self.is_loaded(name):
            return True
        idx = self._name2datasetidx[name]
        data = self._dataset.get_data(idx)
        data["name"] = name
        data["points"] = data["point"]
        if 'bounding_boxes' in data:
            self.bounding_box_data.append(
                Model.BoundingBoxData(name, data['bounding_boxes']))
        self.create_point_cloud(data)
        size = self._calc_pointcloud_size(self._data[name], self.tclouds[name])
        if size + self._current_memory_usage > self._memory_limit:
            if fail_if_no_space:
                self.unload(name)
                return False
            else:
                # NOTE(review): only the single oldest cloud is evicted here,
                # so usage can still exceed the limit afterwards, and
                # popleft() would raise on an empty cache — confirm whether
                # an eviction loop was intended.
                # Remove oldest from cache
                remove_name = self._cached_data.popleft()
                remove_size = self._calc_pointcloud_size(
                    self._data[remove_name], self.tclouds[remove_name])
                self._current_memory_usage -= remove_size
                self.unload(remove_name)
                # Add new point cloud to cache
                self._cached_data.append(name)
                self._current_memory_usage += size
                return True
        else:
            self._current_memory_usage += size
            self._cached_data.append(name)
            return True

    def _calc_pointcloud_size(self, raw_data, pcloud):
        """Estimate memory use (bytes) of raw attribute arrays + GPU cloud."""
        pcloud_size = 0
        for (attr, arr) in raw_data.items():
            pcloud_size += arr.size * 4
        # Point cloud consumes 64 bytes of per point of GPU memory
        pcloud_size += pcloud.point["points"].size * 64
        return pcloud_size

    def unload(self, name):
        """Release geometry, attributes, and bounding boxes for *name*."""
        # Only unload if this was loadable; we might have an in-memory,
        # user-specified data created directly through create_point_cloud().
        if name in self._name2datasetidx:
            tcloud = o3d.t.geometry.PointCloud(o3d.core.Dtype.Float32,
                                               o3d.core.Device("CPU:0"))
            self.tclouds[name] = tcloud
            self._data[name] = {}
            # Drop the matching bounding-box entry, if any.
            bbox_name = Model.bounding_box_prefix + name
            for i in range(0, len(self.bounding_box_data)):
                if self.bounding_box_data[i].name == bbox_name:
                    self.bounding_box_data.pop(i)
                    break
class Visualizer:
"""Visualizer for Dataset objects and custom point clouds"""
class LabelLUTEdit:
def __init__(self):
self.widget = gui.TreeView()
self._on_changed = None # takes no args, returns no value
self.clear()
def clear(self):
self.widget.clear()
self._label2color = {}
def is_empty(self):
return len(self._label2color) == 0
def get_colors(self):
return [
self._label2color[label]
for label in sorted(self._label2color.keys())
]
def set_on_changed(self, callback): # takes no args, no return value
self._on_changed = callback
def set_labels(self, labellut):
self.widget.clear()
root = self.widget.get_root_item()
for key in sorted(labellut.labels.keys()):
lbl = labellut.labels[key]
color = lbl.color
if len(color) == 3:
color += [1.0]
self._label2color[key] = color
color = gui.Color(lbl.color[0], lbl.color[1], lbl.color[2])
cell = gui.LUTTreeCell(
str(key) + ": " + lbl.name, True, color, None, None)
cell.checkbox.set_on_checked(
self._make_on_checked(key, self._on_label_checked))
cell.color_edit.set_on_value_changed(
self._make_on_color_changed(key,
self._on_label_color_changed))
self.widget.add_item(root, cell)
def _make_on_color_changed(self, label, member_func):
def on_changed(color):
member_func(label, color)
return on_changed
def _on_label_color_changed(self, label, gui_color):
self._label2color[label] = [
gui_color.red, gui_color.green, gui_color.blue,
self._label2color[label][3]
]
if self._on_changed is not None:
self._on_changed()
def _make_on_checked(self, label, member_func):
def on_checked(checked):
member_func(label, checked)
return on_checked
def _on_label_checked(self, label, checked):
if checked:
alpha = 1.0
else:
alpha = 0.0
color = self._label2color[label]
self._label2color[label] = [color[0], color[1], color[2], alpha]
if self._on_changed is not None:
self._on_changed()
class ColormapEdit:
def __init__(self, window, em):
self.colormap = None
self.widget = gui.Vert()
self._window = window
self._min_value = 0.0
self._max_value = 1.0
self._on_changed = None # takes no args, no return value
self._itemid2idx = {}
self._min_label = gui.Label("")
self._max_label = gui.Label("")
grid = gui.VGrid(2)
grid.add_child(gui.Label("Range (min):"))
grid.add_child(self._min_label)
grid.add_child(gui.Label("Range (max):"))
grid.add_child(self._max_label)
self.widget.add_child(grid)
self.widget.add_fixed(0.5 * em)
self.widget.add_child(gui.Label("Colormap"))
self._edit = gui.TreeView()
self._edit.set_on_selection_changed(self._on_selection_changed)
self.widget.add_child(self._edit)
self._delete = gui.Button("Delete")
self._delete.horizontal_padding_em = 0.5
self._delete.vertical_padding_em = 0
self._delete.set_on_clicked(self._on_delete)
self._add = gui.Button("Add")
self._add.horizontal_padding_em = 0.5
self._add.vertical_padding_em = 0
self._add.set_on_clicked(self._on_add)
h = gui.Horiz()
h.add_stretch()
h.add_child(self._delete)
h.add_fixed(0.25 * em)
h.add_child(self._add)
h.add_stretch()
self.widget.add_fixed(0.5 * em)
self.widget.add_child(h)
self.widget.add_fixed(0.5 * em)
def set_on_changed(self, callback): # takes no args, no return value
self._on_changed = callback
def update(self, colormap, min_val, max_val):
self.colormap = colormap
self._min_value = min_val
self._max_value = max_val
self._min_label.text = str(min_val)
self._max_label.text = str(max_val)
if self._min_value >= self._max_value:
self._max_value = self._min_value + 1.0
self._edit.clear()
self._itemid2idx = {}
root_id = self._edit.get_root_item()
for i in range(0, len(self.colormap.points)):
p = self.colormap.points[i]
color = gui.Color(p.color[0], p.color[1], p.color[2])
val = min_val + p.value * (max_val - min_val)
cell = gui.ColormapTreeCell(val, color, None, None)
cell.color_edit.set_on_value_changed(
self._make_on_color_changed(i, self._on_color_changed))
cell.number_edit.set_on_value_changed(
self._make_on_value_changed(i, self._on_value_changed))
item_id = self._edit.add_item(root_id, cell)
self._itemid2idx[item_id] = i
self._update_buttons_enabled()
def _make_on_color_changed(self, idx, member_func):
def on_changed(color):
member_func(idx, color)
return on_changed
def _on_color_changed(self, idx, gui_color):
self.colormap.points[idx].color = [
gui_color.red, gui_color.green, gui_color.blue
]
if self._on_changed is not None:
self._on_changed()
def _make_on_value_changed(self, idx, member_func):
def on_changed(value):
member_func(idx, value)
return on_changed
def _on_value_changed(self, idx, value):
value = (value - self._min_value) / (self._max_value -
self._min_value)
needs_update = False
value = min(1.0, max(0.0, value))
if ((idx > 0 and value < self.colormap.points[idx - 1].value) or
(idx < len(self.colormap.points) - 1 and
value > self.colormap.points[idx + 1].value)):
self.colormap.points[idx].value = value
o = self.colormap.points[idx]
self.colormap.points.sort(key=lambda cmap_pt: cmap_pt.value)
for i in range(0, len(self.colormap.points)):
if self.colormap.points[i] is o:
idx = i
break
needs_update = True
if idx > 0 and value == self.colormap.points[idx - 1].value:
if idx < len(self.colormap.points):
upper = self.colormap.points[idx + 1].value
else:
upper = 1.0
value = value + 0.5 * (upper - value)
needs_update = True
if idx < len(self.colormap.points
) - 1 and value == self.colormap.points[idx + 1].value:
if idx > 0:
lower = self.colormap.points[idx - 1].value
else:
lower = 0.0
value = lower + 0.5 * (value - lower)
needs_update = True
self.colormap.points[idx].value = value
if needs_update:
self._update_later()
if self._on_changed is not None:
self._on_changed()
def _on_selection_changed(self, item_id):
self._update_buttons_enabled()
def _on_delete(self):
if len(self.colormap.points) > 2:
idx = self._itemid2idx[self._edit.selected_item]
self.colormap.points = self.colormap.points[:
idx] + self.colormap.points[
idx + 1:]
del self._itemid2idx[self._edit.selected_item]
self._update_later()
if self._on_changed is not None:
self._on_changed()
def _on_add(self):
if self._edit.selected_item in self._itemid2idx: # maybe no selection
idx = self._itemid2idx[self._edit.selected_item]
if idx < len(self.colormap.points) - 1:
lower = self.colormap.points[idx]
upper = self.colormap.points[idx + 1]
else:
lower = self.colormap.points[len(self.colormap.points) - 2]
upper = self.colormap.points[len(self.colormap.points) - 1]
add_idx = min(idx + 1, len(self.colormap.points) - 1)
new_value = lower.value + 0.5 * (upper.value - lower.value)
new_color = [
0.5 * lower.color[0] + 0.5 * upper.color[0],
0.5 * lower.color[1] + 0.5 * upper.color[1],
0.5 * lower.color[2] + 0.5 * upper.color[2]
]
new_point = Colormap.Point(new_value, new_color)
self.colormap.points = self.colormap.points[:add_idx] + [
new_point
] + self.colormap.points[add_idx:]
self._update_later()
if self._on_changed is not None:
self._on_changed()
def _update_buttons_enabled(self):
if self._edit.selected_item in self._itemid2idx:
self._delete.enabled = len(self.colormap.points) > 2
self._add.enabled = True
else:
self._delete.enabled = False
self._add.enabled = False
def _update_later(self):
def update():
self.update(self.colormap, self._min_value, self._max_value)
self._window.post_redraw() # need to manually request redraw
gui.Application.instance.post_to_main_thread(self._window, update)
class ProgressDialog:
def __init__(self, title, window, n_items):
self._window = window
self._n_items = n_items
em = window.theme.font_size
self.dialog = gui.Dialog(title)
self._label = gui.Label(title + " ")
self._layout = gui.Vert(0, gui.Margins(em, em, em, em))
self.dialog.add_child(self._layout)
self._layout.add_child(self._label)
self._layout.add_fixed(0.5 * em)
self._progress = gui.ProgressBar()
self._progress.value = 0.0
self._layout.add_child(self._progress)
def set_text(self, text):
self._label.text = text + " "
def post_update(self, text=None):
if text is None:
gui.Application.instance.post_to_main_thread(
self._window, self.update)
else:
def update_with_text():
self.update()
self._label.text = text
gui.Application.instance.post_to_main_thread(
self._window, update_with_text)
def update(self):
value = min(1.0, self._progress.value + 1.0 / self._n_items)
self._progress.value = value
SOLID_NAME = "Solid Color"
LABELS_NAME = "Label Colormap"
RAINBOW_NAME = "Colormap (Rainbow)"
GREYSCALE_NAME = "Colormap (Greyscale)"
COLOR_NAME = "RGB"
X_ATTR_NAME = "x position"
Y_ATTR_NAME = "y position"
Z_ATTR_NAME = "z position"
    def __init__(self):
        """Initialize state; the UI itself is built in _init_user_interface()."""
        self._objects = None  # the data Model (set by _init_data/_init_dataset)
        self._name2treenode = {}  # geometry name -> tree cell widget
        self._name2treeid = {}  # hierarchical parent name -> tree item id
        self._treeid2name = {}  # tree item id -> geometry/parent name
        self._attrname2lut = {}  # attribute name -> LabelLUT (see set_lut)
        self._colormaps = {}  # shader name -> Colormap
        self._shadername2panelidx = {}  # shader name -> settings panel index
        self._gradient = rendering.Gradient()
        self._scalar_min = 0.0
        self._scalar_max = 1.0
        self._animation_frames = []  # geometry names driven by the slider
        self._last_animation_time = time.time()
        self._animation_delay_secs = 0.100
        self._consolidate_bounding_boxes = False
        self._dont_update_geometry = False  # suppresses _update_point_cloud()
    def _init_dataset(self, dataset, split, indices):
        """Use an ml3d dataset split (lazily loaded) as the data source."""
        self._objects = DatasetModel(dataset, split, indices)
    def _init_data(self, data):
        """Use user-supplied in-memory point clouds as the data source."""
        self._objects = DataModel(data)
    def _init_user_interface(self, title, width, height):
        """Build the window: a 3D scene widget plus the settings side panel
        (mouse controls, dataset tree/animation tabs, coloring properties)."""
        self.window = gui.Application.instance.create_window(
            title, width, height)
        self.window.set_on_layout(self._on_layout)
        em = self.window.theme.font_size

        self._3d = gui.SceneWidget()
        self._3d.enable_scene_caching(True)  # makes UI _much_ more responsive
        self._3d.scene = rendering.Open3DScene(self.window.renderer)
        self.window.add_child(self._3d)

        self._panel = gui.Vert()
        self.window.add_child(self._panel)

        indented_margins = gui.Margins(em, 0, em, 0)

        # View controls
        ctrl = gui.CollapsableVert("Mouse Controls", 0, indented_margins)

        arcball = gui.Button("Arcball")
        arcball.set_on_clicked(self._on_arcball_mode)
        arcball.horizontal_padding_em = 0.5
        arcball.vertical_padding_em = 0
        fly = gui.Button("Fly")
        fly.set_on_clicked(self._on_fly_mode)
        fly.horizontal_padding_em = 0.5
        fly.vertical_padding_em = 0
        reset = gui.Button("Re-center")
        reset.set_on_clicked(self._on_reset_camera)
        reset.horizontal_padding_em = 0.5
        reset.vertical_padding_em = 0
        h = gui.Horiz(0.25 * em)
        h.add_stretch()
        h.add_child(arcball)
        h.add_child(fly)
        h.add_fixed(em)
        h.add_child(reset)
        h.add_stretch()
        ctrl.add_child(h)
        ctrl.add_fixed(em)
        self._panel.add_child(ctrl)

        # Dataset
        model = gui.CollapsableVert("Dataset", 0, indented_margins)

        vgrid = gui.VGrid(2, 0.25 * em)
        model.add_child(vgrid)
        model.add_fixed(0.5 * em)

        bgcolor = gui.ColorEdit()
        bgcolor.color_value = gui.Color(1, 1, 1)
        # Apply the default background color immediately.
        self._on_bgcolor_changed(bgcolor.color_value)
        bgcolor.set_on_value_changed(self._on_bgcolor_changed)
        vgrid.add_child(gui.Label("BG Color"))
        vgrid.add_child(bgcolor)

        view_tab = gui.TabControl()
        view_tab.set_on_selected_tab_changed(self._on_display_tab_changed)
        model.add_child(view_tab)

        # ... model list
        self._dataset = gui.TreeView()
        self._dataset.set_on_selection_changed(
            self._on_dataset_selection_changed)
        view_tab.add_tab("List", self._dataset)

        # ... animation slider
        v = gui.Vert()
        view_tab.add_tab("Animation", v)
        v.add_fixed(0.25 * em)
        grid = gui.VGrid(2)
        v.add_child(grid)

        self._slider = gui.Slider(gui.Slider.INT)
        self._slider.set_limits(0, len(self._objects.data_names))
        self._slider.set_on_value_changed(self._on_animation_slider_changed)
        grid.add_child(gui.Label("Index"))
        grid.add_child(self._slider)

        self._slider_current = gui.Label("")
        grid.add_child(gui.Label("Showing"))
        grid.add_child(self._slider_current)

        v.add_fixed(em)

        self._play = gui.Button("Play")
        self._play.horizontal_padding_em = 0.5
        self._play.vertical_padding_em = 0
        self._play.set_on_clicked(self._on_start_animation)
        h = gui.Horiz()
        h.add_stretch()
        h.add_child(self._play)
        h.add_stretch()
        v.add_child(h)

        self._panel.add_child(model)

        # Coloring
        properties = gui.CollapsableVert("Properties", 0, indented_margins)

        grid = gui.VGrid(2, 0.25 * em)

        # ... data source
        self._datasource_combobox = gui.Combobox()
        self._datasource_combobox.set_on_selection_changed(
            self._on_datasource_changed)
        self._colormap_channel = gui.Combobox()
        self._colormap_channel.add_item("0")
        self._colormap_channel.set_on_selection_changed(
            self._on_channel_changed)
        h = gui.Horiz()
        h.add_child(self._datasource_combobox)
        h.add_fixed(em)
        h.add_child(gui.Label("Index"))
        h.add_child(self._colormap_channel)

        grid.add_child(gui.Label("Data"))
        grid.add_child(h)

        # ... shader
        self._shader = gui.Combobox()
        self._shader.add_item(self.SOLID_NAME)
        self._shader.add_item(self.LABELS_NAME)
        self._shader.add_item(self.RAINBOW_NAME)
        self._shader.add_item(self.GREYSCALE_NAME)
        self._shader.add_item(self.COLOR_NAME)
        self._colormaps[self.RAINBOW_NAME] = Colormap.make_rainbow()
        self._colormaps[self.GREYSCALE_NAME] = Colormap.make_greyscale()
        self._shader.selected_index = 0
        self._shader.set_on_selection_changed(self._on_shader_changed)
        grid.add_child(gui.Label("Shader"))
        grid.add_child(self._shader)

        properties.add_child(grid)

        # ... shader panels
        self._shader_panels = gui.StackedWidget()
        panel_idx = 0

        # ... sub-panel: single color
        self._color_panel = gui.Vert()
        self._shader_panels.add_child(self._color_panel)
        self._shadername2panelidx[self.SOLID_NAME] = panel_idx
        panel_idx += 1
        self._color = gui.ColorEdit()
        self._color.color_value = gui.Color(0.5, 0.5, 0.5)
        self._color.set_on_value_changed(self._on_shader_color_changed)
        h = gui.Horiz()
        h.add_child(gui.Label("Color"))
        h.add_child(self._color)
        self._color_panel.add_child(h)

        # ... sub-panel: labels
        self._labels_panel = gui.Vert()
        self._shader_panels.add_child(self._labels_panel)
        self._shadername2panelidx[self.LABELS_NAME] = panel_idx
        panel_idx += 1
        self._label_edit = self.LabelLUTEdit()
        self._label_edit.set_on_changed(self._on_labels_changed)
        self._labels_panel.add_child(gui.Label("Labels"))
        self._labels_panel.add_child(self._label_edit.widget)

        # ... sub-panel: colormap
        self._colormap_panel = gui.Vert()
        self._shader_panels.add_child(self._colormap_panel)
        # Rainbow and greyscale share the single colormap editor panel.
        self._shadername2panelidx[self.RAINBOW_NAME] = panel_idx
        self._shadername2panelidx[self.GREYSCALE_NAME] = panel_idx
        panel_idx += 1
        self._colormap_edit = self.ColormapEdit(self.window, em)
        self._colormap_edit.set_on_changed(self._on_colormap_changed)
        self._colormap_panel.add_child(self._colormap_edit.widget)

        # ... sub-panel: RGB
        self._rgb_panel = gui.Vert()
        self._shader_panels.add_child(self._rgb_panel)
        self._shadername2panelidx[self.COLOR_NAME] = panel_idx
        panel_idx += 1
        self._rgb_combo = gui.Combobox()
        self._rgb_combo.add_item("255")
        self._rgb_combo.add_item("1.0")
        self._rgb_combo.set_on_selection_changed(self._on_rgb_multiplier)
        h = gui.Horiz(0.5 * em)
        h.add_child(gui.Label("Max value"))
        h.add_child(self._rgb_combo)
        self._rgb_panel.add_child(h)

        properties.add_fixed(em)
        properties.add_child(self._shader_panels)
        self._panel.add_child(properties)

        # Populate tree, etc.
        for name in self._objects.data_names:
            self._add_tree_name(name)

        self._update_datasource_combobox()
    def set_lut(self, attr_name, lut):
        """Sets the LUT for a specific attribute

        Args:
            attr_name: Attribute name as string.
            lut: A LabelLUT object.
        """
        self._attrname2lut[attr_name] = lut
def setup_camera(self):
selected_names = self._get_selected_names()
selected_bounds = [
self._objects.calc_bounds_for(n) for n in selected_names
]
min_val = [1e30, 1e30, 1e30]
max_val = [-1e30, -1e30, -1e30]
for b in selected_bounds:
for i in range(0, 3):
min_val[i] = min(min_val[i], b[0][i])
max_val[i] = max(max_val[i], b[1][i])
bounds = o3d.geometry.AxisAlignedBoundingBox(min_val, max_val)
self._3d.setup_camera(60, bounds, bounds.get_center())
def show_geometries_under(self, name, show):
prefix = name
for (n, node) in self._name2treenode.items():
if n.startswith(prefix):
self._3d.scene.show_geometry(n, show)
node.checkbox.checked = show
self._3d.force_redraw()
def _add_tree_name(self, name, is_geometry=True):
names = name.split("/")
parent = self._dataset.get_root_item()
for i in range(0, len(names) - 1):
n = "/".join(names[:i + 1]) + "/"
if n in self._name2treeid:
parent = self._name2treeid[n]
else:
def on_parent_checked(checked):
self.show_geometries_under(n, checked)
cell = gui.CheckableTextTreeCell(n, True, on_parent_checked)
parent = self._dataset.add_item(parent, cell)
self._name2treenode[n] = cell
self._name2treeid[n] = parent
self._treeid2name[parent] = n
def on_checked(checked):
self._3d.scene.show_geometry(name, checked)
if self._is_tree_name_geometry(name):
# available attrs could change
self._update_datasource_combobox()
self._3d.force_redraw()
cell = gui.CheckableTextTreeCell(names[-1], True, on_checked)
if is_geometry:
cell.label.text_color = gui.Color(1.0, 0.0, 0.0, 1.0)
node = self._dataset.add_item(parent, cell)
self._name2treenode[name] = cell
self._treeid2name[node] = name
self._slider.set_limits(0, len(self._objects.data_names) - 1)
if len(self._objects.data_names) == 1:
self._slider_current.text = name
    def _load_geometry(self, name, ui_done_callback):
        """Load one geometry on a worker thread behind a progress dialog.

        *ui_done_callback* is posted to the UI thread after loading finishes.
        """
        progress_dlg = Visualizer.ProgressDialog("Loading...", self.window, 2)
        progress_dlg.set_text("Loading " + name + "...")
        def load_thread():
            result = self._objects.load(name)
            progress_dlg.post_update("Loading " + name + "...")
            # UI work (callback, closing the dialog) must run on the UI thread.
            gui.Application.instance.post_to_main_thread(
                self.window, ui_done_callback)
            gui.Application.instance.post_to_main_thread(
                self.window, self.window.close_dialog)
        self.window.show_dialog(progress_dlg.dialog)
        threading.Thread(target=load_thread).start()
    def _load_geometries(self, names, ui_done_callback):
        """Load several geometries on a worker thread with progress UI.

        Loading stops at the first cloud whose load() reports failure
        (e.g. it does not fit in DatasetModel's memory budget).
        """
        # Progress has: len(names) items + ui_done_callback
        progress_dlg = Visualizer.ProgressDialog("Loading...", self.window,
                                                 len(names) + 1)
        progress_dlg.set_text("Loading " + names[0] + "...")
        def load_thread():
            for i in range(0, len(names)):
                result = self._objects.load(names[i], True)
                if i + 1 < len(names):
                    text = "Loading " + names[i + 1] + "..."
                else:
                    text = "Creating GPU objects..."
                progress_dlg.post_update(text)
                if result:
                    # Loaded: mark the tree entry green.
                    self._name2treenode[names[i]].label.text_color = gui.Color(
                        0.0, 1.0, 0.0, 1.0)
                else:
                    break
            gui.Application.instance.post_to_main_thread(
                self.window, ui_done_callback)
            gui.Application.instance.post_to_main_thread(
                self.window, self.window.close_dialog)
        self.window.show_dialog(progress_dlg.dialog)
        threading.Thread(target=load_thread).start()
    def _update_geometry(self, check_unloaded=False):
        """Push every point cloud to the scene with the current material.

        Tree labels are recolored: green = resident/non-empty, red = empty.
        """
        if check_unloaded:
            for name in self._objects.data_names:
                if not self._objects.is_loaded(name):
                    self._3d.scene.remove_geometry(name)
        material = self._get_material()
        for n, tcloud in self._objects.tclouds.items():
            self._update_point_cloud(n, tcloud, material)
            if not tcloud.is_empty():
                self._name2treenode[n].label.text_color = gui.Color(
                    0.0, 1.0, 0.0, 1.0)
                if self._3d.scene.has_geometry(n):
                    self._3d.scene.modify_geometry_material(n, material)
            else:
                self._name2treenode[n].label.text_color = gui.Color(
                    1.0, 0.0, 0.0, 1.0)
                self._name2treenode[n].checkbox.checked = False
        self._3d.force_redraw()
def _update_point_cloud(self, name, tcloud, material):
if self._dont_update_geometry:
return
if tcloud.is_empty():
return
attr_name = self._datasource_combobox.selected_text
attr = None
flag = 0
attr = self._objects.get_attr(name, attr_name)
# Update scalar values
if attr is not None:
if len(attr.shape) == 1:
scalar = attr
else:
channel = max(0, self._colormap_channel.selected_index)
scalar = attr[:, channel]
else:
shape = [len(tcloud.point["points"].as_tensor().numpy())]
scalar = np.zeros(shape, dtype='float32')
tcloud.point["__visualization_scalar"] = Visualizer._make_tcloud_array(
scalar)
flag |= rendering.Scene.UPDATE_UV0_FLAG
# Update RGB values
if attr is not None and (len(attr.shape) == 2 and attr.shape[1] >= 3):
max_val = float(self._rgb_combo.selected_text)
if max_val <= 0:
max_val = 255.0
colors = attr[:, [0, 1, 2]] * (1.0 / max_val)
tcloud.point["colors"] = Visualizer._make_tcloud_array(colors)
flag |= rendering.Scene.UPDATE_COLORS_FLAG
# Update geometry
if self._3d.scene.scene.has_geometry(name):
self._3d.scene.scene.update_geometry(name, tcloud, flag)
else:
self._3d.scene.add_geometry(name, tcloud, material)
node = self._name2treenode[name]
if node is not None:
self._3d.scene.show_geometry(name, node.checkbox.checked)
def _get_material(self):
self._update_gradient()
material = rendering.Material()
if self._shader.selected_text == self.SOLID_NAME:
material.shader = "unlitSolidColor"
c = self._color.color_value
material.base_color = [c.red, c.green, c.blue, 1.0]
elif self._shader.selected_text == self.COLOR_NAME:
material.shader = "defaultUnlit"
material.base_color = [1.0, 1.0, 1.0, 1.0]
else:
material.shader = "unlitGradient"
material.gradient = self._gradient
material.scalar_min = self._scalar_min
material.scalar_max = self._scalar_max
return material
    def _update_bounding_boxes(self, animation_frame=None):
        """(Re)create bounding-box line geometry, colored by a label LUT."""
        # Choose the LUT: the sole registered LUT if there is exactly one,
        # else one keyed "labels"/"label", else None (uncolored).
        if len(self._attrname2lut) == 1:
            # Can't do dict.values()[0], so have to iterate over the 1 element
            for v in self._attrname2lut.values():
                lut = v
        elif "labels" in self._attrname2lut:
            lut = self._attrname2lut["labels"]
        elif "label" in self._attrname2lut:
            lut = self._attrname2lut["label"]
        else:
            lut = None
        mat = rendering.Material()
        mat.shader = "defaultUnlit"
        if self._consolidate_bounding_boxes:
            # All boxes are merged into one geometry under the bbox prefix.
            name = Model.bounding_box_prefix.split("/")[0]
            boxes = []
            # When consolidated we assume bbox_data.name is the geometry name.
            if animation_frame is None:
                for bbox_data in self._objects.bounding_box_data:
                    boxes += bbox_data.boxes
            else:
                # Animation mode: only the current frame's boxes.
                geom_name = self._animation_frames[animation_frame]
                for bbox_data in self._objects.bounding_box_data:
                    if bbox_data.name == geom_name:
                        boxes = bbox_data.boxes
                        break
            self._3d.scene.remove_geometry(name)
            if len(boxes) > 0:
                lines = BoundingBox3D.create_lines(boxes, lut)
                self._3d.scene.add_geometry(name, lines, mat)
            if name not in self._name2treenode:
                self._add_tree_name(name, is_geometry=False)
            self._3d.force_redraw()
        else:
            # Don't run this more than once if we aren't consolidating,
            # because nothing will change.
            if len(self._objects.bounding_box_data) > 0:
                if self._objects.bounding_box_data[
                        0].name in self._name2treenode:
                    return
            for bbox_data in self._objects.bounding_box_data:
                lines = BoundingBox3D.create_lines(bbox_data.boxes, lut)
                self._3d.scene.add_geometry(bbox_data.name, lines, mat)
            for bbox_data in self._objects.bounding_box_data:
                self._add_tree_name(bbox_data.name, is_geometry=False)
            self._3d.force_redraw()
def _update_gradient(self):
if self._shader.selected_text == self.LABELS_NAME:
colors = self._label_edit.get_colors()
n = float(len(colors) - 1)
if n >= 1:
self._gradient.points = [
rendering.Gradient.Point(
float(i) / n, [
colors[i][0], colors[i][1], colors[i][2],
colors[i][3]
]) for i in range(0, len(colors))
]
else:
self._gradient.points = [
rendering.Gradient.Point(0.0, [1.0, 0.0, 1.0, 1.0])
]
self._gradient.mode = rendering.Gradient.LUT
else:
cmap = self._colormaps.get(self._shader.selected_text)
if cmap is not None:
self._gradient.points = [
rendering.Gradient.Point(
p.value, [p.color[0], p.color[1], p.color[2], 1.0])
for p in cmap.points
]
self._gradient.mode = rendering.Gradient.GRADIENT
def _update_geometry_colors(self):
material = self._get_material()
for name, tcloud in self._objects.tclouds.items():
if not tcloud.is_empty() and self._3d.scene.has_geometry(name):
self._3d.scene.modify_geometry_material(name, material)
self._3d.force_redraw()
def _update_datasource_combobox(self):
current = self._datasource_combobox.selected_text
self._datasource_combobox.clear_items()
available_attrs = self._get_available_attrs()
for attr_name in available_attrs:
self._datasource_combobox.add_item(attr_name)
if current in available_attrs:
self._datasource_combobox.selected_text = current
elif len(available_attrs) > 0:
self._datasource_combobox.selected_text = available_attrs[0]
else:
# If no attributes, two possibilities:
# 1) no geometries are selected: don't change anything
# 2) geometries are selected: color solid
has_checked = False
for n, node in self._name2treenode.items():
if node.checkbox.checked and self._is_tree_name_geometry(n):
has_checked = True
break
if has_checked:
self._set_shader(self.SOLID_NAME)
    def _update_shaders_combobox(self):
        """Rebuild the shader combobox with choices valid for the selected
        attribute (RGB only for vector data, Labels only with a LUT)."""
        current_attr = self._datasource_combobox.selected_text
        current_shader = self._shader.selected_text
        has_lut = (current_attr in self._attrname2lut)
        is_scalar = True
        selected_names = self._get_selected_names()
        # Probe the first selected cloud: rank > 1 means vector data.
        if len(selected_names) > 0 and len(
                self._objects.get_attr_shape(selected_names[0],
                                             current_attr)) > 1:
            is_scalar = False
        self._shader.clear_items()
        if not is_scalar:
            self._shader.add_item(self.COLOR_NAME)
        if has_lut:
            self._shader.add_item(self.LABELS_NAME)
            self._label_edit.set_labels(self._attrname2lut[current_attr])
        self._shader.add_item(self.RAINBOW_NAME)
        self._shader.add_item(self.GREYSCALE_NAME)
        self._shader.add_item(self.SOLID_NAME)
        # Keep Labels if still valid; otherwise default scalar data to Rainbow.
        if current_shader == self.LABELS_NAME and has_lut:
            self._set_shader(self.LABELS_NAME)
        elif is_scalar:
            self._set_shader(self.RAINBOW_NAME)
def _update_attr_range(self):
attr_name = self._datasource_combobox.selected_text
current_channel = self._colormap_channel.selected_index
self._scalar_min, self._scalar_max = self._objects.get_attr_minmax(
attr_name, current_channel)
if self._shader.selected_text in self._colormaps:
cmap = self._colormaps[self._shader.selected_text]
self._colormap_edit.update(cmap, self._scalar_min, self._scalar_max)
def _set_shader(self, shader_name, force_update=False):
# Disable channel if we are using a vector shader. Always do this to
# ensure that the UI is consistent.
if shader_name == Visualizer.COLOR_NAME:
self._colormap_channel.enabled = False
else:
self._colormap_channel.enabled = True
if shader_name == self._shader.selected_text and not force_update:
return
self._shader.selected_text = shader_name
idx = self._shadername2panelidx[self._shader.selected_text]
self._shader_panels.selected_index = idx
if shader_name in self._colormaps:
cmap = self._colormaps[shader_name]
self._colormap_edit.update(cmap, self._scalar_min, self._scalar_max)
self._update_geometry_colors()
def _on_layout(self, theme):
frame = self.window.content_rect
em = theme.font_size
panel_width = 20 * em
panel_rect = gui.Rect(frame.get_right() - panel_width, frame.y,
panel_width, frame.height - frame.y)
self._panel.frame = panel_rect
self._3d.frame = gui.Rect(frame.x, frame.y, panel_rect.x - frame.x,
frame.height - frame.y)
    def _on_arcball_mode(self):
        """Mouse mode: rotate the camera around the scene (arcball)."""
        self._3d.set_view_controls(gui.SceneWidget.ROTATE_CAMERA)
    def _on_fly_mode(self):
        """Mouse mode: first-person fly-through controls."""
        self._3d.set_view_controls(gui.SceneWidget.FLY)
    def _on_reset_camera(self):
        """Re-center the camera on the selected geometries."""
        self.setup_camera()
    def _on_dataset_selection_changed(self, item):
        """Tree selection: load the geometry if needed, then refresh the view."""
        name = self._treeid2name[item]
        if not self._is_tree_name_geometry(name):
            return
        def ui_callback():
            # Runs on the UI thread once loading has finished.
            self._update_attr_range()
            self._update_geometry(check_unloaded=True)
            self._update_bounding_boxes()
        if not self._objects.is_loaded(name):
            self._load_geometry(name, ui_callback)
    def _on_display_tab_changed(self, index):
        """Switch between List mode (index 0) and Animation mode (index 1)."""
        if index == 1:
            # Animation: show one selected frame at a time via the slider.
            self._animation_frames = self._get_selected_names()
            self._slider.set_limits(0, len(self._animation_frames) - 1)
            self._on_animation_slider_changed(self._slider.int_value)
            # _on_animation_slider_changed() calls _update_bounding_boxes()
        else:
            # List: restore each geometry's checkbox-driven visibility.
            for name, node in self._name2treenode.items():
                self._3d.scene.show_geometry(name, node.checkbox.checked)
            self._update_bounding_boxes()
    def _on_animation_slider_changed(self, new_value):
        """Show only the animation frame at *new_value*; update label/boxes."""
        idx = int(new_value)
        for i in range(0, len(self._animation_frames)):
            self._3d.scene.show_geometry(self._animation_frames[i], (i == idx))
        self._update_bounding_boxes(animation_frame=idx)
        self._3d.force_redraw()
        self._slider_current.text = self._animation_frames[idx]
        # Widen the label frame to the slider's right edge so the text fits.
        r = self._slider_current.frame
        self._slider_current.frame = gui.Rect(r.x, r.y,
                                              self._slider.frame.get_right(),
                                              r.height)
    def _on_start_animation(self):
        """Start playback: advance frames from the window's tick event."""

        def on_tick():
            return self._on_animate()

        # The Play button becomes a Stop button while animating.
        self._play.text = "Stop"
        self._play.set_on_clicked(self._on_stop_animation)
        self._last_animation_time = 0.0  # forces an immediate first frame
        self.window.set_on_tick_event(on_tick)
    def _on_animate(self):
        """Tick handler: advance one frame when the frame delay has elapsed.

        Returns:
            True if the frame changed (redraw needed), False otherwise.
        """
        now = time.time()
        if now >= self._last_animation_time + self._animation_delay_secs:
            # Modulo wrap makes playback loop forever.
            idx = (self._slider.int_value + 1) % len(self._animation_frames)
            self._slider.int_value = idx
            self._on_animation_slider_changed(idx)
            self._last_animation_time = now
            return True
        return False
    def _on_stop_animation(self):
        # Stop playback and restore the Play button.
        self.window.set_on_tick_event(None)
        self._play.text = "Play"
        self._play.set_on_clicked(self._on_start_animation)
    def _on_bgcolor_changed(self, new_color):
        """Color-picker callback: apply the new RGBA background and redraw."""
        bg_color = [
            new_color.red, new_color.green, new_color.blue, new_color.alpha
        ]
        self._3d.scene.set_background_color(bg_color)
        self._3d.force_redraw()
    def _on_datasource_changed(self, attr_name, idx):
        """Attribute-combobox callback: rebuild channels, ranges and shader.

        Args:
            attr_name: name of the newly selected attribute.
            idx: combobox index (unused).
        """
        selected_names = self._get_selected_names()
        # Channel count comes from the attribute shape of the first selected
        # object; rank <= 1 means a scalar attribute (one channel).
        n_channels = 1
        if len(selected_names) > 0:
            shape = self._objects.get_attr_shape(selected_names[0], attr_name)
            if len(shape) <= 1:
                n_channels = 1
            else:
                n_channels = max(1, shape[1])
        # Keep the previous channel selection when still in range.
        current_channel = max(0, self._colormap_channel.selected_index)
        current_channel = min(n_channels - 1, current_channel)
        self._colormap_channel.clear_items()
        for i in range(0, n_channels):
            self._colormap_channel.add_item(str(i))
        self._colormap_channel.selected_index = current_channel

        self._update_attr_range()
        self._update_shaders_combobox()

        # Try to intelligently pick a shader.
        current_shader = self._shader.selected_text
        if current_shader == Visualizer.SOLID_NAME:
            pass  # user explicitly chose solid color; leave it alone
        elif attr_name in self._attrname2lut:
            self._set_shader(Visualizer.LABELS_NAME)
        elif attr_name == "colors":
            self._set_shader(Visualizer.COLOR_NAME)
        elif n_channels >= 3:
            self._set_shader(Visualizer.RAINBOW_NAME)
        elif current_shader == Visualizer.COLOR_NAME:  # vector -> scalar
            self._set_shader(Visualizer.RAINBOW_NAME)
        else:  # changing from one scalar to another, don't change
            pass

        self._update_geometry()
    def _on_channel_changed(self, name, idx):
        # Channel combobox callback: scalars are sliced from the new channel.
        self._update_attr_range()
        self._update_geometry()  # need to recompute scalars array
    def _on_shader_changed(self, name, idx):
        # _shader.current_text is already name, so we need to force an update
        self._set_shader(name, force_update=True)
    def _on_shader_color_changed(self, color):
        # Solid-color picker callback: re-tint the displayed geometry.
        self._update_geometry_colors()
    def _on_labels_changed(self):
        # Label LUT edited: recompute per-point colors.
        self._update_geometry_colors()
    def _on_colormap_changed(self):
        # Remember the edited colormap for the current shader, then recolor.
        self._colormaps[
            self._shader.selected_text] = self._colormap_edit.colormap
        self._update_geometry_colors()
    def _on_rgb_multiplier(self, text, idx):
        # RGB scale factor changed: rebuild the geometry colors.
        self._update_geometry()
def _get_selected_names(self):
# Note that things like bounding boxes could be in the tree, and we
# do not want to include them in the list of things selected, even if
# they are checked.
selected_names = []
for n in self._objects.data_names:
if self._name2treenode[n].checkbox.checked:
selected_names.append(n)
return selected_names
    def _get_available_attrs(self):
        # Attribute names available for the currently checked objects
        # (delegated to the data model).
        selected_names = self._get_selected_names()
        return self._objects.get_available_attrs(selected_names)
def _is_tree_name_geometry(self, name):
return (name in self._objects.data_names)
    @staticmethod
    def _make_tcloud_array(np_array, copy=False):
        """Wrap a numpy array as an Open3D TensorList for tensor point clouds.

        A copy is made when requested or when the array is not C-contiguous;
        otherwise from_numpy shares the array's memory.
        """
        if copy or not np_array.data.c_contiguous:
            t = o3d.core.Tensor(np_array)
        else:
            t = o3d.core.Tensor.from_numpy(np_array)
        return o3d.core.TensorList.from_tensor(t, inplace=True)
    def visualize_dataset(self,
                          dataset,
                          split,
                          indices=None,
                          width=1024,
                          height=768):
        """Visualizes a dataset.

        Example:
            Minimal example for visualizing a dataset::

                import open3d.ml.torch as ml3d  # or open3d.ml.tf as ml3d

                dataset = ml3d.datasets.SemanticKITTI(dataset_path='/path/to/SemanticKITTI/')
                vis = ml3d.vis.Visualizer()
                vis.visualize_dataset(dataset, 'all', indices=range(100))

        Args:
            dataset: A dataset object.
            split: A string that identifies the split, e.g., 'test'.
            indices: An iterable with a subset of the data points to visualize.
                E.g., [0,2,3,4].
            width: window width.
            height: window height.
        """
        # Setup the labels
        lut = LabelLUT()
        for val in sorted(dataset.label_to_names.keys()):
            lut.add_label(dataset.label_to_names[val], val)
        self.set_lut("labels", lut)

        # Datasets can carry many boxes; merge them into grouped tree entries.
        self._consolidate_bounding_boxes = True
        self._init_dataset(dataset, split, indices)
        self._visualize("Open3D - " + dataset.name, width, height)
    def visualize(self,
                  data,
                  lut=None,
                  bounding_boxes=None,
                  width=1024,
                  height=768):
        """Visualizes custom point cloud data.

        Example:
            Minimal example for visualizing a single point cloud with an
            attribute::

                import numpy as np
                import open3d.ml.torch as ml3d
                # or import open3d.ml.tf as ml3d

                data = [ {
                    'name': 'my_point_cloud',
                    'points': np.random.rand(100,3).astype(np.float32),
                    'point_attr1': np.random.rand(100).astype(np.float32),
                } ]

                vis = ml3d.vis.Visualizer()
                vis.visualize(data)

        Args:
            data: A list of dictionaries. Each dictionary is a point cloud with
                attributes. Each dictionary must have the entries 'name' and
                'points'. Points and point attributes can be passed as numpy
                arrays, PyTorch tensors or TensorFlow tensors.
            lut: Optional label lookup table applied as the "labels" LUT.
            bounding_boxes: Optional list of bounding boxes to display.
            width: window width.
            height: window height.
        """
        self._init_data(data)

        if lut is not None:
            self.set_lut("labels", lut)

        if bounding_boxes is not None:
            prefix = Model.bounding_box_prefix
            # Filament crashes if you have too many items, and anyway, hundreds
            # of items is unwieldy in a list. So combine items if we have too
            # many.
            group_size = int(math.floor(float(len(bounding_boxes)) / 100.0))
            if group_size < 2:
                # Few boxes: one tree entry per box.
                box_data = [
                    Model.BoundingBoxData(prefix + str(bbox), [bbox])
                    for bbox in bounding_boxes
                ]
            else:
                # Many boxes: batch them into roughly 100 grouped entries.
                box_data = []
                current_group = []
                n = len(bounding_boxes)
                for i in range(0, n):
                    current_group.append(bounding_boxes[i])
                    if len(current_group) >= group_size or i == n - 1:
                        if i < n - 1:
                            name = prefix + "Boxes " + str(
                                i + 1 - group_size) + " - " + str(i)
                        else:
                            # The final group may be smaller than group_size.
                            if len(current_group) > 1:
                                name = prefix + "Boxes " + str(
                                    i + 1 - len(current_group)) + " - " + str(i)
                            else:
                                name = prefix + "Box " + str(i)
                        data = Model.BoundingBoxData(name, current_group)
                        box_data.append(data)
                        current_group = []
            self._objects.bounding_box_data = box_data

        self._visualize("Open3D", width, height)
    def _visualize(self, title, width, height):
        """Create the window, show the first object, and run the event loop."""
        gui.Application.instance.initialize()
        self._init_user_interface(title, width, height)

        # Point-count threshold before the scene starts downsampling.
        self._3d.scene.downsample_threshold = 400000

        # Turn all the objects off except the first one
        for name, node in self._name2treenode.items():
            node.checkbox.checked = False
            self._3d.scene.show_geometry(name, False)
        for name in [self._objects.data_names[0]]:
            self._name2treenode[name].checkbox.checked = True
            self._3d.scene.show_geometry(name, True)

        def on_done_ui():
            # Add bounding boxes here: bounding boxes belonging to the dataset
            # will not be loaded until now.
            self._update_bounding_boxes()

            self._update_datasource_combobox()
            self._update_shaders_combobox()

            # Display "colors" by default if available, "points" if not
            available_attrs = self._get_available_attrs()
            self._set_shader(self.SOLID_NAME, force_update=True)
            if "colors" in available_attrs:
                self._datasource_combobox.selected_text = "colors"
            elif "points" in available_attrs:
                self._datasource_combobox.selected_text = "points"

            # Suppress redundant geometry rebuilds while wiring up the combos.
            self._dont_update_geometry = True
            self._on_datasource_changed(
                self._datasource_combobox.selected_text,
                self._datasource_combobox.selected_index)
            self._update_geometry_colors()
            self._dont_update_geometry = False

            # _datasource_combobox was empty, now isn't, re-layout.
            self.window.set_needs_layout()

            self._update_geometry()
            self.setup_camera()

        self._load_geometries(self._objects.data_names, on_done_ui)
        gui.Application.instance.run()
|
keep_alive.py | #this code is so uptimerobot or an external server can refresh this page and keep the bot online
#will no longer be required as repl will be boosted and kept online 24/7
#important imports for system
import flask
import threading
from flask import Flask
from threading import Thread
app = Flask("")
@app.route("/")
def home():
    # Health-check endpoint polled by the external pinger; any 200 response
    # counts as "alive".
    return "Bezyl Beta is online and active"
def run():
    # Bind to all interfaces so the external monitor can reach the server.
    app.run(host = "0.0.0.0", port = 8080)
def keep_alive():
    """Launch the web server on a background thread and return immediately."""
    server_thread = Thread(target=run)
    server_thread.start()
|
multilock1.py | import threading
import time
num = 100
def fun_sub():
global num
lock.acquire()
print('----ๅ ้----')
print('็ฐๅจๆไฝๅ
ฑไบซ่ตๆบ็็บฟ็จๅๅญๆฏ:', t.name)
num2 = num
time.sleep(0.001)
num = num2-1
lock.release()
print('----้ๆพ้----')
if __name__ == '__main__':
    # Spawn 100 workers that each decrement ``num`` once; with the lock held
    # the final value is deterministic (100 - 100 = 0).
    print('ๅผๅงๆต่ฏๅๆญฅ้ at %s' % time.ctime())
    lock = threading.Lock()  # the lock shared by all workers
    thread_list = []
    for thread in range(100):
        t = threading.Thread(target=fun_sub)
        t.start()
        thread_list.append(t)
    for t in thread_list:
        t.join()  # wait for every worker before reading the result
    print('num is %d' % num)
    print('็ปๆๆต่ฏๅๆญฅ้ at %s' % time.ctime())
|
ray.py | #! /usr/bin/env python
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import contextlib
import math
import queue
import threading
from distutils.version import LooseVersion
from functools import lru_cache
from typing import Any, Dict, Iterator, Union
import numpy as np
import pandas as pd
import ray
from ray.data import from_dask, read_parquet
from ray.data.dataset_pipeline import DatasetPipeline
from ray.data.extensions import TensorDtype
from ludwig.constants import BINARY, CATEGORY, NAME, NUMERICAL, TYPE
from ludwig.data.batcher.base import Batcher
from ludwig.data.dataset.base import Dataset, DatasetManager
from ludwig.utils.data_utils import DATA_TRAIN_HDF5_FP
from ludwig.utils.misc_utils import get_proc_features
from ludwig.utils.types import DataFrame
# True when the installed Ray is 1.8+ (the shuffle API was renamed in 1.8;
# see RayDataset.pipeline).
_ray18 = LooseVersion(ray.__version__) >= LooseVersion("1.8")

# Feature types whose columns are left as plain scalars instead of being
# cast to TensorDtype (see RayDatasetBatcher._to_tensors_fn).
_SCALAR_TYPES = {BINARY, CATEGORY, NUMERICAL}
class RayDataset(Dataset):
    """Wrapper around ray.data.Dataset."""

    def __init__(self, df: Union[str, DataFrame], features: Dict[str, Dict], training_set_metadata: Dict[str, Any]):
        # A string is treated as a parquet path; anything else as a Dask frame.
        self.ds = from_dask(df) if not isinstance(df, str) else read_parquet(df)
        self.features = features
        self.training_set_metadata = training_set_metadata
        self.data_hdf5_fp = training_set_metadata.get(DATA_TRAIN_HDF5_FP)

        # TODO ray 1.8: convert to Tensors before shuffle
        # def to_tensors(df: pd.DataFrame) -> pd.DataFrame:
        #     for c in features.keys():
        #         df[c] = df[c].astype(TensorDtype())
        #     return df
        # self.ds = self.ds.map_batches(to_tensors, batch_format="pandas")

    def pipeline(self, shuffle=True) -> DatasetPipeline:
        """Return an endlessly repeating pipeline, optionally shuffled per window."""
        pipe = self.ds.repeat()
        if shuffle:
            # The shuffle API was renamed in Ray 1.8.
            if _ray18:
                pipe = pipe.random_shuffle_each_window()
            else:
                pipe = pipe.random_shuffle()
        return pipe

    @contextlib.contextmanager
    def initialize_batcher(self, batch_size=128, should_shuffle=True, seed=0, ignore_last=False, horovod=None):
        # NOTE(review): should_shuffle/seed/ignore_last/horovod are accepted
        # for interface compatibility but are not used here -- confirm intended.
        yield RayDatasetBatcher(
            self.ds.repeat().iter_datasets(),
            self.features,
            self.training_set_metadata,
            batch_size,
            self.size,
        )

    def __len__(self):
        # Triggers a full count on the Ray dataset (can be expensive).
        return self.ds.count()

    @property
    def size(self):
        # Number of rows in the dataset.
        return len(self)
class RayDatasetManager(DatasetManager):
    """Creates and caches RayDataset instances for a Ray backend."""

    def __init__(self, backend):
        self.backend = backend

    def create(self, dataset: Union[str, DataFrame], config: Dict[str, Any], training_set_metadata: Dict[str, Any]):
        """Wrap `dataset` in a RayDataset using the features from `config`."""
        return RayDataset(dataset, get_proc_features(config), training_set_metadata)

    def save(
        self,
        cache_path: str,
        dataset: DataFrame,
        config: Dict[str, Any],
        training_set_metadata: Dict[str, Any],
        tag: str,
    ):
        """Persist `dataset` as parquet at `cache_path` and return that path."""
        self.backend.df_engine.to_parquet(dataset, cache_path)
        return cache_path

    def can_cache(self, skip_save_processed_input):
        # Caching is possible unless saving processed input was disabled.
        return not skip_save_processed_input

    @property
    def data_format(self):
        # On-disk cache format used by save().
        return "parquet"
class RayDatasetShard(Dataset):
    """A worker-local shard of a Ray dataset pipeline."""

    def __init__(
        self,
        dataset_shard: DatasetPipeline,
        features: Dict[str, Dict],
        training_set_metadata: Dict[str, Any],
    ):
        self.dataset_shard = dataset_shard
        self.features = features
        self.training_set_metadata = training_set_metadata
        # Yields one ray.data.Dataset per epoch.
        self.dataset_iter = dataset_shard.iter_datasets()

    @contextlib.contextmanager
    def initialize_batcher(self, batch_size=128, should_shuffle=True, seed=0, ignore_last=False, horovod=None):
        # NOTE(review): shuffle/seed/ignore_last/horovod are accepted for
        # interface compatibility but not used here -- confirm intended.
        yield RayDatasetBatcher(
            self.dataset_iter,
            self.features,
            self.training_set_metadata,
            batch_size,
            self.size,
        )

    @lru_cache(1)
    def __len__(self):
        # TODO(travis): find way to avoid calling this, as it's expensive
        # NOTE: lru_cache on a method keys on ``self`` and keeps the instance
        # alive for the cache's lifetime; it also consumes one element of
        # dataset_iter (an epoch) to do the count.
        return next(self.dataset_iter).count()

    @property
    def size(self):
        return len(self)
class RayDatasetBatcher(Batcher):
    """Streams training batches out of a Ray dataset pipeline.

    Each element of ``dataset_epoch_iterator`` is one epoch's
    ``ray.data.Dataset``. Batches are prefetched by a background reader
    thread into a bounded queue so ``next_batch()`` rarely blocks on Ray I/O.
    """

    def __init__(
        self,
        dataset_epoch_iterator: Iterator[ray.data.Dataset],
        features: Dict[str, Dict],
        training_set_metadata: Dict[str, Any],
        batch_size: int,
        samples_per_epoch: int,
    ):
        self.dataset_epoch_iterator = dataset_epoch_iterator
        self.batch_size = batch_size
        self.samples_per_epoch = samples_per_epoch
        self.training_set_metadata = training_set_metadata
        self.features = features
        self.columns = list(features.keys())

        # Optional per-column target shape (without the batch dimension) used
        # to restore flattened tensor columns in _prepare_batch.
        self.reshape_map = {
            proc_column: training_set_metadata[feature[NAME]].get("reshape")
            for proc_column, feature in features.items()
        }

        self.dataset_batch_iter = None
        self._epoch = 0
        self._next_batch = None  # one-batch lookahead buffer
        self._last_batch = False
        self._step = 0
        self._fetch_next_epoch()

    def next_batch(self):
        """Return the buffered batch and prefetch the following one."""
        if self.last_batch():
            raise StopIteration()

        batch = self._next_batch
        self._fetch_next_batch()
        self._step += 1
        return batch

    def last_batch(self):
        # True once the lookahead found the epoch exhausted.
        return self._last_batch

    def set_epoch(self, epoch, batch_size):
        # Called by the trainer at epoch boundaries; re-arms the reader.
        self.batch_size = batch_size
        if epoch != self._epoch:
            self._fetch_next_epoch()
            self._epoch = epoch

    @property
    def step(self):
        return self._step

    @property
    def steps_per_epoch(self):
        return math.ceil(self.samples_per_epoch / self.batch_size)

    def _fetch_next_epoch(self):
        dataset = next(self.dataset_epoch_iterator)

        # Hard-coded to the single-threaded async reader; the other branches
        # are kept for experimentation.
        read_parallelism = 1
        if read_parallelism == 1:
            self.dataset_batch_iter = self._create_async_reader(dataset)
        elif read_parallelism > 1:
            self.dataset_batch_iter = self._create_async_parallel_reader(dataset, read_parallelism)
        else:
            # TODO: consider removing this. doesn't work currently and read performance seems generally
            # very good with 1 parallelism
            self.dataset_batch_iter = self._create_sync_reader(dataset)

        self._step = 0
        self._fetch_next_batch()

    def _fetch_next_batch(self):
        # Pull one batch ahead so last_batch() is known before next_batch().
        if self.dataset_batch_iter is None:
            self._last_batch = True
            return

        self._last_batch = False
        try:
            self._next_batch = next(self.dataset_batch_iter)
        except StopIteration:
            self._last_batch = True

    def _to_tensors_fn(self):
        # Bind to locals so the returned closure does not capture self
        # (it is shipped to Ray workers via map_batches).
        columns = self.columns
        features = self.features

        def to_tensors(df: pd.DataFrame) -> pd.DataFrame:
            for c in columns:
                # do not convert scalar columns: https://github.com/ray-project/ray/issues/20825
                if features[c][TYPE] not in _SCALAR_TYPES:
                    df[c] = df[c].astype(TensorDtype())
            return df

        return to_tensors

    def _prepare_batch(self, batch: pd.DataFrame) -> Dict[str, np.ndarray]:
        """Convert a pandas batch to numpy and restore flattened tensor shapes."""
        res = {c: batch[c].to_numpy() for c in self.columns}
        for c in self.columns:
            reshape = self.reshape_map.get(c)
            if reshape is not None:
                res[c] = res[c].reshape((-1, *reshape))
        return res

    def _create_sync_reader(self, dataset: ray.data.Dataset):
        # Reads batches on the caller's thread; no prefetching.
        to_tensors = self._to_tensors_fn()

        def sync_read():
            for batch in dataset.map_batches(to_tensors, batch_format="pandas").iter_batches(
                prefetch_blocks=0, batch_size=self.batch_size, batch_format="pandas"
            ):
                yield self._prepare_batch(batch)

        return sync_read()

    def _create_async_reader(self, dataset: ray.data.Dataset):
        # Bounded queue applies backpressure to the producer thread.
        q = queue.Queue(maxsize=100)
        batch_size = self.batch_size
        to_tensors = self._to_tensors_fn()

        def producer():
            for batch in dataset.map_batches(to_tensors, batch_format="pandas").iter_batches(
                prefetch_blocks=0, batch_size=batch_size, batch_format="pandas"
            ):
                res = self._prepare_batch(batch)
                q.put(res)
            q.put(None)  # sentinel: epoch exhausted

        def async_read():
            t = threading.Thread(target=producer)
            t.start()

            while True:
                batch = q.get(block=True)
                if batch is None:
                    break
                yield batch

            t.join()

        return async_read()

    def _create_async_parallel_reader(self, dataset: ray.data.Dataset, num_threads: int):
        q = queue.Queue(maxsize=100)
        batch_size = self.batch_size
        to_tensors = self._to_tensors_fn()

        # Each producer thread reads its own split of the dataset.
        splits = dataset.split(n=num_threads)

        def producer(i):
            for batch in (
                splits[i]
                .map_batches(to_tensors, batch_format="pandas")
                .iter_batches(prefetch_blocks=0, batch_size=batch_size, batch_format="pandas")
            ):
                res = self._prepare_batch(batch)
                q.put(res)
            q.put(None)  # one sentinel per producer

        def async_parallel_read():
            threads = [threading.Thread(target=producer, args=(i,)) for i in range(num_threads)]
            for t in threads:
                t.start()

            active_threads = num_threads
            while True:
                batch = q.get(block=True)
                if batch is None:
                    active_threads -= 1
                    if active_threads == 0:
                        break
                # NOTE(review): a None sentinel from any producer other than
                # the last one falls through to this yield (compare
                # _create_async_reader, which skips it) -- confirm; this may
                # be why the method is unused ("doesn't work currently").
                yield batch

            for t in threads:
                t.join()

        return async_parallel_read()
|
file_utils_unittest.py | #!/usr/bin/env python3
# Copyright 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittest for file_utils.py."""
import base64
import binascii
import hashlib
import logging
import multiprocessing
import os
import re
import shutil
import tempfile
import threading
import time
import unittest
from unittest import mock
from cros.factory.device import device_utils
from cros.factory.utils import file_utils
from cros.factory.utils import process_utils
class MakeDirsUidGidTest(unittest.TestCase):
  """Tests for file_utils.MakeDirsUidGid (recursive mkdir with mode)."""
  # Mask isolating the permission bits from st_mode (drops file-type bits).
  FILE_PERMISSION_MASK = 0o777

  def setUp(self):
    self.temp_dir = tempfile.mkdtemp()

  def tearDown(self):
    shutil.rmtree(self.temp_dir)

  def GetPermissionBits(self, path):
    return os.stat(path).st_mode & self.FILE_PERMISSION_MASK

  def testDefault(self):
    # Every created intermediate directory should get mode 0777 by default.
    target_path = os.path.join(self.temp_dir, 'foo', 'bar', 'baz')
    file_utils.MakeDirsUidGid(target_path)
    path_to_check = self.temp_dir
    for tail in ['foo', 'bar', 'baz']:
      path_to_check = os.path.join(path_to_check, tail)
      self.assertEqual(0o777, self.GetPermissionBits(path_to_check))

  def testMode(self):
    # An explicit mode applies to each created level.
    target_path = os.path.join(self.temp_dir, 'foo', 'bar', 'baz')
    mode = 0o770
    file_utils.MakeDirsUidGid(target_path, mode=mode)
    path_to_check = self.temp_dir
    for tail in ['foo', 'bar', 'baz']:
      path_to_check = os.path.join(path_to_check, tail)
      self.assertEqual(mode, self.GetPermissionBits(path_to_check))

  def testEmpty(self):
    # An empty path is a no-op, not an error.
    file_utils.MakeDirsUidGid('')

  def testNoSlash(self):
    # A bare name is created relative to the current working directory.
    cwd = os.getcwd()
    os.chdir(self.temp_dir)
    file_utils.MakeDirsUidGid('foo')
    self.assertTrue(os.path.isdir(os.path.join(self.temp_dir, 'foo')))
    os.chdir(cwd)

  def testRelative(self):
    # Relative multi-level paths work the same way.
    cwd = os.getcwd()
    os.chdir(self.temp_dir)
    file_utils.MakeDirsUidGid(os.path.join('foo', 'bar'))
    self.assertTrue(os.path.isdir(os.path.join(self.temp_dir, 'foo', 'bar')))
    os.chdir(cwd)
class UnopenedTemporaryFileTest(unittest.TestCase):
  """Unittest for UnopenedTemporaryFile."""

  def testUnopenedTemporaryFile(self):
    with file_utils.UnopenedTemporaryFile(
        prefix='prefix', suffix='suffix') as x:
      # Inside the context: an existing, empty file in the system temp dir
      # whose basename carries the requested prefix and suffix.
      self.assertTrue(os.path.exists(x))
      self.assertEqual(0, os.path.getsize(x))
      assert re.match(r'prefix.+suffix', os.path.basename(x))
      self.assertEqual(tempfile.gettempdir(), os.path.dirname(x))
    # The file is removed on context exit.
    self.assertFalse(os.path.exists(x))
class ReadLinesTest(unittest.TestCase):
  """Unittest for ReadLines."""

  def testNormalFile(self):
    # Lines are returned with their trailing newlines preserved.
    tmp = tempfile.NamedTemporaryFile('w', delete=False)
    tmp.write('line 1\nline 2\n')
    tmp.close()
    try:
      lines = file_utils.ReadLines(tmp.name)
      self.assertEqual(len(lines), 2)
      self.assertEqual(lines[0], 'line 1\n')
      self.assertEqual(lines[1], 'line 2\n')
    finally:
      os.unlink(tmp.name)

  def testEmptyFile(self):
    # An empty file yields an empty list, not None.
    tmp = tempfile.NamedTemporaryFile(delete=False)
    tmp.close()
    try:
      lines = file_utils.ReadLines(tmp.name)
      self.assertTrue(isinstance(lines, list))
      self.assertEqual(len(lines), 0)
    finally:
      os.unlink(tmp.name)

  def testNonExistFile(self):
    # A missing file yields None instead of raising.
    tmp = tempfile.NamedTemporaryFile(delete=False)
    tmp.close()
    os.unlink(tmp.name)
    lines = file_utils.ReadLines(tmp.name)
    self.assertTrue(lines is None)

  def testNormalFileWithDUT(self):
    # Same behavior when reading through a DUT interface.
    tmp = tempfile.NamedTemporaryFile('w', delete=False)
    tmp.write('line 1\nline 2\n')
    tmp.close()
    try:
      lines = file_utils.ReadLines(tmp.name, device_utils.CreateDUTInterface())
      self.assertEqual(len(lines), 2)
      self.assertEqual(lines[0], 'line 1\n')
      self.assertEqual(lines[1], 'line 2\n')
    finally:
      os.unlink(tmp.name)

  def testEmptyFileWithDUT(self):
    tmp = tempfile.NamedTemporaryFile(delete=False)
    tmp.close()
    try:
      lines = file_utils.ReadLines(tmp.name, device_utils.CreateDUTInterface())
      self.assertTrue(isinstance(lines, list))
      self.assertEqual(len(lines), 0)
    finally:
      os.unlink(tmp.name)

  def testNonExistFileWithDUT(self):
    tmp = tempfile.NamedTemporaryFile(delete=False)
    tmp.close()
    os.unlink(tmp.name)
    lines = file_utils.ReadLines(tmp.name, device_utils.CreateDUTInterface())
    self.assertTrue(lines is None)
class TempDirectoryTest(unittest.TestCase):
  """Unittest for TempDirectory."""

  def testNormal(self):
    with file_utils.TempDirectory(prefix='abc') as d:
      self.assertTrue(os.path.basename(d).startswith('abc'))
      self.assertTrue(os.path.isdir(d))
    # Cleaned up on context exit.
    self.assertFalse(os.path.exists(d))

  def testRemoveBeforeExit(self):
    # Deleting the directory inside the context must not break cleanup.
    with file_utils.TempDirectory() as d:
      self.assertTrue(os.path.isdir(d))
      shutil.rmtree(d)
      self.assertFalse(os.path.exists(d))
    self.assertFalse(os.path.exists(d))

  def testRenameBeforeExit(self):
    # Renaming inside the context: cleanup only targets the original path,
    # so the renamed directory survives.
    with file_utils.TempDirectory() as d:
      self.assertTrue(os.path.isdir(d))
      new_name = d + '.another'
      os.rename(d, new_name)
    self.assertFalse(os.path.exists(d))
    self.assertTrue(os.path.exists(new_name))
    shutil.rmtree(new_name)
class CopyFileSkipBytesTest(unittest.TestCase):
  """Unittest for CopyFileSkipBytes."""

  def setUp(self):
    self.in_file = None
    self.out_file = None

  def tearDown(self):
    if self.in_file:
      os.unlink(self.in_file.name)
    if self.out_file:
      os.unlink(self.out_file.name)

  def PrepareFile(self, in_file_content, out_file_content):
    # Create input/output files with the given initial contents.
    self.in_file = tempfile.NamedTemporaryFile('w', delete=False)
    if in_file_content:
      self.in_file.write(in_file_content)
    self.in_file.close()
    self.out_file = tempfile.NamedTemporaryFile('w', delete=False)
    if out_file_content:
      self.out_file.write(out_file_content)
    self.out_file.close()

  def testNormal(self):
    # Skipping 3 bytes drops the leading '123'.
    self.PrepareFile('1234567890', '')
    file_utils.CopyFileSkipBytes(self.in_file.name, self.out_file.name, 3)
    with open(self.out_file.name, 'r') as o:
      result = o.read()
    self.assertEqual(result, '4567890')

  def testSkipTooMany(self):
    self.PrepareFile('1234567890', '')
    # Skip too many bytes.
    self.assertRaises(ValueError, file_utils.CopyFileSkipBytes,
                      self.in_file.name, self.out_file.name, 100)
    # Output is untouched on failure.
    with open(self.out_file.name, 'r') as o:
      self.assertEqual(len(o.read()), 0)

  def testNoInput(self):
    # A missing input path raises OSError.
    self.PrepareFile('abc', '')
    self.assertRaises(OSError, file_utils.CopyFileSkipBytes,
                      'no_input', self.out_file.name, 1)

  def testOverrideOutput(self):
    # Pre-existing output content is replaced, not appended to.
    self.PrepareFile('1234567890', 'abcde')
    file_utils.CopyFileSkipBytes(self.in_file.name, self.out_file.name, 3)
    with open(self.out_file.name, 'r') as o:
      result = o.read()
    self.assertEqual(result, '4567890')

  def testSkipLargeFile(self):
    # 10000 bytes input.
    self.PrepareFile('1234567890' * 1000, '')
    file_utils.CopyFileSkipBytes(self.in_file.name, self.out_file.name, 5)
    with open(self.out_file.name, 'r') as o:
      result = o.read()
    self.assertEqual(len(result), 10000 - 5)
    self.assertTrue(result.startswith('67890'))
class ExtractFileTest(unittest.TestCase):
  """Unit tests for ExtractFile."""

  @mock.patch.object(process_utils, 'Spawn', return_value=True)
  def testExtractZip(self, mock_spawn):
    # Verifies the exact unzip command lines for the various option combos;
    # Spawn is mocked so nothing is actually executed.
    with file_utils.TempDirectory() as temp_dir:
      zipfile = os.path.join(temp_dir, 'foo.zip')
      file_utils.TouchFile(zipfile)
      output_dir = os.path.join(temp_dir, 'extracted')
      file_utils.ExtractFile(zipfile, output_dir)
      mock_spawn.assert_called_with(['unzip', '-o', zipfile, '-d', output_dir],
                                    log=True, check_call=True)

      # quiet=True adds -qq.
      file_utils.ExtractFile(zipfile, output_dir, quiet=True)
      mock_spawn.assert_called_with(
          ['unzip', '-o', '-qq', zipfile, '-d', output_dir],
          log=True, check_call=True)

      # only_extracts appends the member names.
      file_utils.ExtractFile(zipfile, output_dir, only_extracts=['bar', 'buz'])
      mock_spawn.assert_called_with(['unzip', '-o', zipfile, '-d', output_dir,
                                     'bar', 'buz'], log=True, check_call=True)

      # overwrite=False drops -o.
      file_utils.ExtractFile(zipfile, output_dir, only_extracts=['bar', 'buz'],
                             overwrite=False)
      mock_spawn.assert_called_with(
          ['unzip', zipfile, '-d', output_dir, 'bar', 'buz'],
          log=True, check_call=True)

  @mock.patch.object(os, 'system', return_value=0)
  @mock.patch.object(process_utils, 'Spawn', return_value=True)
  def testExtractTar(self, mock_spawn, mock_system):
    # Same command-line verification for the tar family (.tar.gz/.tbz2/.tar.xz).
    with file_utils.TempDirectory() as temp_dir:
      output_dir = os.path.join(temp_dir, 'extracted')
      targz = os.path.join(temp_dir, 'foo.tar.gz')
      file_utils.TouchFile(targz)
      file_utils.ExtractFile(targz, output_dir)
      mock_spawn.assert_called_with(
          ['tar', '-xf', targz, '-C', output_dir, '-vv'],
          log=True, check_call=True)

      # quiet=True drops the verbose flag.
      file_utils.ExtractFile(targz, output_dir, quiet=True)
      mock_spawn.assert_called_with(['tar', '-xf', targz, '-C', output_dir],
                                    log=True, check_call=True)

      tbz2 = os.path.join(temp_dir, 'foo.tbz2')
      file_utils.TouchFile(tbz2)
      file_utils.ExtractFile(tbz2, output_dir, only_extracts=['bar', 'buz'])
      mock_spawn.assert_called_with(
          ['tar', '-xf', tbz2, '-C', output_dir, '-vv', 'bar', 'buz'],
          log=True, check_call=True)

      xz = os.path.join(temp_dir, 'foo.tar.xz')
      file_utils.TouchFile(xz)
      file_utils.ExtractFile(xz, output_dir, only_extracts='bar',
                             overwrite=False)
      mock_spawn.assert_called_with(
          ['tar', '-xf', xz, '-C', output_dir, '--keep-old-files', '-vv',
           'bar'],
          log=True, check_call=True)

      # use_parallel probes for lbzip2 and passes it via -I.
      file_utils.ExtractFile(tbz2, output_dir, use_parallel=True)
      mock_system.assert_called_with('type lbzip2 >/dev/null 2>&1')
      mock_spawn.assert_has_calls([
          mock.call(
              ['tar', '-xf', tbz2, '-C', output_dir, '-vv', '-I', 'lbzip2'],
              log=True, check_call=True)])

  def testMissingCompressFile(self):
    self.assertRaisesRegex(
        file_utils.ExtractFileError, 'Missing compressed file',
        file_utils.ExtractFile, 'itdoesnotexist', 'foo_dir')

  def testPermissionDenied(self):
    # An unreadable archive surfaces as ExtractFileError, not a raw OSError.
    with file_utils.TempDirectory() as temp_dir:
      targz = os.path.join(temp_dir, 'foo.tar.gz')
      file_utils.TouchFile(targz)
      output_dir = os.path.join(temp_dir, 'extracted')
      try:
        os.chmod(targz, 0)
        self.assertRaisesRegex(
            file_utils.ExtractFileError, 'Permission denied',
            file_utils.ExtractFile, targz, output_dir)
      finally:
        os.chmod(targz, 0o600)
class ForceSymlinkTest(unittest.TestCase):
  """Tests for file_utils.ForceSymlink (symlink that replaces existing files)."""

  def setUp(self):
    self.temp_dir = tempfile.mkdtemp()

  def tearDown(self):
    shutil.rmtree(self.temp_dir)

  def testNoTarget(self):
    # Linking to a nonexistent target is an error.
    target_path = os.path.join(self.temp_dir, 'non_exist_target')
    link_path = os.path.join(self.temp_dir, 'link_to_target')
    self.assertRaisesRegex(
        Exception, 'Missing symlink target', file_utils.ForceSymlink,
        target_path, link_path)

  def testNormal(self):
    target_path = os.path.join(self.temp_dir, 'target')
    link_path = os.path.join(self.temp_dir, 'link_to_target')
    file_utils.WriteFile(target_path, 'target')
    file_utils.ForceSymlink(target_path, link_path)
    self.assertEqual(target_path, os.path.realpath(link_path))
    self.assertEqual('target', file_utils.ReadLines(link_path)[0])

  def testForceOverwrite(self):
    # An existing regular file at the link path is replaced.
    target_path = os.path.join(self.temp_dir, 'target')
    link_path = os.path.join(self.temp_dir, 'link_to_target')
    file_utils.WriteFile(target_path, 'target')
    file_utils.WriteFile(link_path, 'something else')
    file_utils.ForceSymlink(target_path, link_path)
    self.assertEqual(target_path, os.path.realpath(link_path))
    self.assertEqual('target', file_utils.ReadLines(link_path)[0])

  def testRelativeSymlink(self):
    # Relative targets resolve against the link's directory.
    absolute_target_path = os.path.join(self.temp_dir, 'target')
    relative_target_path = 'target'
    link_path = os.path.join(self.temp_dir, 'link_to_target')
    file_utils.WriteFile(absolute_target_path, 'target')
    file_utils.ForceSymlink(relative_target_path, link_path)
    self.assertEqual(absolute_target_path, os.path.realpath(link_path))
    self.assertEqual('target', file_utils.ReadLines(link_path)[0])
class AtomicCopyTest(unittest.TestCase):
  """Tests for file_utils.AtomicCopy (copy that never leaves a partial dest)."""

  def setUp(self):
    self.temp_dir = tempfile.mkdtemp()

  def tearDown(self):
    shutil.rmtree(self.temp_dir)

  def testNoSource(self):
    # A missing source raises and creates nothing.
    self.assertRaisesRegex(
        IOError, 'Missing source', file_utils.AtomicCopy,
        '/foo/non_exist_source', '/foo/non_exist_dest')
    self.assertFalse(os.path.exists('/foo/non_exist_source'))
    self.assertFalse(os.path.exists('/foo/non_exist_dest'))

  def testNormal(self):
    source_path = os.path.join(self.temp_dir, 'source')
    dest_path = os.path.join(self.temp_dir, 'dest')
    file_utils.WriteFile(source_path, 'source')
    self.assertFalse(os.path.exists(dest_path))
    file_utils.AtomicCopy(source_path, dest_path)
    self.assertTrue(os.path.exists(dest_path))
    self.assertEqual('source', file_utils.ReadLines(dest_path)[0])

  def testOverwrite(self):
    source_path = os.path.join(self.temp_dir, 'source')
    dest_path = os.path.join(self.temp_dir, 'dest')
    file_utils.WriteFile(source_path, 'source')
    file_utils.WriteFile(dest_path, 'dest')
    file_utils.AtomicCopy(source_path, dest_path)
    # dest is overwritten.
    self.assertEqual('source', file_utils.ReadLines(dest_path)[0])

  @mock.patch('shutil.copy2')
  def testCopyFailed(self, copy2_mock):
    source_path = os.path.join(self.temp_dir, 'source')
    dest_path = os.path.join(self.temp_dir, 'dest')
    file_utils.WriteFile(source_path, 'source')
    file_utils.WriteFile(dest_path, 'dest')
    copy2_mock.side_effect = IOError
    self.assertRaises(IOError, file_utils.AtomicCopy, source_path,
                      dest_path)
    # Verify that dest file is unchanged after a failed copy.
    self.assertEqual('dest', file_utils.ReadLines(dest_path)[0])
    # copy2 goes to a temp path first, hence mock.ANY for the second arg.
    copy2_mock.assert_called_once_with(source_path, mock.ANY)
class FileHashTest(unittest.TestCase):
def setUp(self):
self.test_string = 'FileHash test'
f = tempfile.NamedTemporaryFile('w', delete=False)
self.temp_file = f.name
f.write(self.test_string)
f.close()
def tearDown(self):
os.unlink(self.temp_file)
def testFileHash(self):
self.assertEqual(file_utils.FileHash(self.temp_file, 'md5').hexdigest(),
file_utils.MD5InHex(self.temp_file))
self.assertEqual('5e8c0fb0a780eff4947e1d76cfc5ee27',
file_utils.MD5InHex(self.temp_file))
self.assertEqual('XowPsKeA7/SUfh12z8XuJw==',
file_utils.MD5InBase64(self.temp_file))
self.assertEqual('e7c60cc7247d49ffcac5f7db0176ad7ad5f9795f',
file_utils.SHA1InHex(self.temp_file))
self.assertEqual('58YMxyR9Sf/KxffbAXatetX5eV8=',
file_utils.SHA1InBase64(self.temp_file))
def testMultiBlockHash(self):
with open(self.temp_file, 'rb') as f:
with mock.patch('builtins.open', mock.mock_open()) as m:
m_file = m.return_value
m_file.read.side_effect = f.read
# Test with 1 block.
block_size = len(self.test_string)
one_ret = file_utils.FileHash(
self.temp_file, 'md5', block_size=block_size).hexdigest()
m_file.read.assert_has_calls([mock.call(block_size)] * 2)
f.seek(0)
# Test with 2 blocks.
block_size = len(self.test_string) // 2 + 1
two_ret = file_utils.FileHash(
self.temp_file, 'md5', block_size=block_size).hexdigest()
m_file.read.assert_has_calls([mock.call(block_size)] * 3)
f.seek(0)
self.assertEqual(one_ret, two_ret)
def testLegacyMatchesMD5InHex(self):
# Legacy method calculates the hash all at once.
old_hash = hashlib.md5(open(self.temp_file, 'rb').read()).hexdigest()
new_hash = file_utils.MD5InHex(self.temp_file)
self.assertEqual(old_hash, new_hash)
def testLegacyMatchesSHA1InBase64(self):
# Legacy method calculates the hash all at once.
old_hash = base64.standard_b64encode(hashlib.sha1(
open(self.temp_file, 'rb').read()).digest()).decode('utf-8')
new_hash = file_utils.SHA1InBase64(self.temp_file)
self.assertEqual(old_hash, new_hash)
class FileLockTest(unittest.TestCase):
  """Tests for file_utils.FileLock (flock-based inter-process lock).

  NOTE: several tests rely on sleeps to order the processes; they are
  timing-sensitive by design.
  """

  def setUp(self):
    self.temp_file = file_utils.CreateTemporaryFile()

  def tearDown(self):
    os.unlink(self.temp_file)

  def testFileLockMultiProcess(self):
    # A lock held by another process makes a non-blocking Acquire fail
    # with EAGAIN ("Resource temporarily unavailable").
    def Target():
      file_utils.FileLock(self.temp_file).Acquire()
      time.sleep(2)
    p = multiprocessing.Process(target=Target)
    p.start()
    time.sleep(0.5)
    self.assertRaisesRegex(
        IOError, r'Resource temporarily unavailable',
        file_utils.FileLock(self.temp_file).Acquire)
    p.terminate()

  def testFileLockMultiProcessWithTimeout(self):
    def Target(idle_secs):
      lock = file_utils.FileLock(self.temp_file)
      lock.Acquire()
      time.sleep(idle_secs)
      lock.Release()

    # One process hold lock for 1 second, and another wait for the lock for at
    # most 3 seconds.
    p = multiprocessing.Process(target=lambda: Target(1))
    p.start()
    time.sleep(0.5)
    lock = file_utils.FileLock(self.temp_file, timeout_secs=3)
    # These two Acquire() and Release() calls should not raise exception.
    lock.Acquire()
    lock.Release()
    p.terminate()

    # One process hold lock for 3 seconds, and another wait for the lock for at
    # most 1 second.
    p = multiprocessing.Process(target=lambda: Target(3))
    p.start()
    time.sleep(0.5)
    lock = file_utils.FileLock(self.temp_file, timeout_secs=1)
    self.assertRaisesRegex(
        file_utils.FileLockTimeoutError,
        r'Could not acquire file lock of .* in 1 second\(s\)', lock.Acquire)
    p.terminate()

  def testFileLockSingleProcess(self):
    # Lock and unlock a file twice.
    with file_utils.FileLock(self.temp_file):
      pass
    lock = file_utils.FileLock(self.temp_file)
    # These two Acquire() and Release() calls should not raise exception.
    lock.Acquire()
    lock.Release()

    # Try to grab lock on a locked file.
    file_utils.FileLock(self.temp_file).Acquire()
    self.assertRaisesRegex(
        IOError, r'Resource temporarily unavailable',
        file_utils.FileLock(self.temp_file).Acquire)

  def testFileLockSingleProcessWithTimeout(self):
    file_utils.FileLock(self.temp_file).Acquire()
    self.assertRaisesRegex(
        file_utils.FileLockTimeoutError,
        r'Could not acquire file lock of .* in 1 second\(s\)',
        file_utils.FileLock(self.temp_file, timeout_secs=1).Acquire)

  def testLocksReleaseFileDescriptor(self):
    # Regression test: Release() must close its fd, or this loop exhausts
    # the process's descriptor limit.
    for unused_i in range(3333):
      c = file_utils.FileLock(self.temp_file)
      c.Acquire()
      c.Release()
class FileLockContextManagerTest(unittest.TestCase):
    """Tests for file_utils.FileLockContextManager thread interactions."""

    def setUp(self):
        self.temp_file = file_utils.CreateTemporaryFile()
        self.manager = file_utils.FileLockContextManager(self.temp_file, 'w')

    def tearDown(self):
        os.unlink(self.temp_file)

    def testMultithreadClose(self):
        """Close() must be safe while another thread holds the manager."""
        start_event = threading.Event()

        def Target():
            with self.manager as f:
                # Signal the main thread only after the lock is held so
                # Close() races with an in-progress write.
                start_event.set()
                f.write('!' * 1024 * 1024)
        t = threading.Thread(target=Target)
        t.start()
        start_event.wait()
        # Must not raise or deadlock even though Target may still be writing.
        self.manager.Close()
class ReadWriteFileTest(unittest.TestCase):
    """WriteFile/ReadFile round-trip, including newline and NUL bytes."""

    def runTest(self):
        payload = 'abc\n\0'
        with file_utils.UnopenedTemporaryFile() as tmp:
            file_utils.WriteFile(tmp, payload)
            self.assertEqual(payload, file_utils.ReadFile(tmp))
class GlobSingleFileTest(unittest.TestCase):
    """GlobSingleFile returns the unique match and rejects 0 or >1 matches."""

    def runTest(self):
        with file_utils.TempDirectory() as d:
            for name in ('a', 'b'):
                file_utils.TouchFile(os.path.join(d, name))
            # Exactly one match: returned directly.
            single = file_utils.GlobSingleFile(os.path.join(d, '[a]'))
            self.assertEqual(os.path.join(d, 'a'), single)
            # Two matches: rejected.
            self.assertRaisesRegex(
                ValueError,
                r"Expected one match for .+/\* but got \['.+/(a|b)', '.+/(a|b)'\]",
                file_utils.GlobSingleFile, os.path.join(d, '*'))
            # Zero matches: rejected.
            self.assertRaisesRegex(
                ValueError, r'Expected one match for .+/nomatch but got \[\]',
                file_utils.GlobSingleFile, os.path.join(d, 'nomatch'))
class HashFilesTest(unittest.TestCase):
    """Tests for file_utils.HashFiles over a small fixture tree."""

    # Expected SHA1 digests of the fixture files, keyed by relative path.
    _SHA1 = {
        'a': 'fbd313f05f277535c6f0bb2e9b0cff43cebef360',
        'b': '1ac13620623e6ff9049a7a261e04dda284b2c52a',
        'c': 'eef64cf8244577e292e46fc6a12e64261239d972',
        'd/e': '585a50860871f4df30be233ace89b3c83f776c9b',
        'd/f': '025b55bbf9d628147696b63970edca695109e9ba'
    }

    # Expected CRC32 checksums of the same files.
    _CRC32 = {
        'a': 2937989080,
        'b': 907507298,
        'c': 1091585780,
        'd/e': 2218600652,
        'd/f': 489978230
    }

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix='HashFilesTest.')
        for relpath in ('a', 'b', 'c', 'd/e', 'd/f'):
            path = os.path.join(self.tmpdir, relpath)
            file_utils.TryMakeDirs(os.path.dirname(path))
            file_utils.WriteFile(path, 'Contents of %s' % relpath)
        # ...and create a symlink cc -> c (it should be skipped)
        os.symlink('c', os.path.join(self.tmpdir, 'cc'))

    def tearDown(self):
        shutil.rmtree(self.tmpdir)

    def testDefault(self):
        self.assertEqual(dict(self._SHA1), file_utils.HashFiles(self.tmpdir))

    def testSimpleHash(self):
        crc32 = lambda data: binascii.crc32(data) & 0xffffffff
        self.assertEqual(
            dict(self._CRC32),
            file_utils.HashFiles(self.tmpdir, hash_function=crc32))

    def testFilter(self):
        # Get checksum only everything but 'c'.
        expected = {k: v for k, v in self._SHA1.items() if k != 'c'}
        self.assertEqual(expected, file_utils.HashFiles(
            self.tmpdir,
            path_filter=lambda path: path != os.path.join(self.tmpdir, 'c')))
class AtomicWriteTest(unittest.TestCase):
    """Unittests for AtomicWrite."""

    def setUp(self):
        self.tmp_dir = tempfile.mkdtemp(prefix='AtomicWriteTest.')
        # Remember where we were so tearDown can restore it.
        self.orig_cwd = os.getcwd()
        os.chdir(self.tmp_dir)

    def tearDown(self):
        os.chdir(self.orig_cwd)
        shutil.rmtree(self.tmp_dir)

    def testCurrentDirectory(self):
        # Writing a bare filename in the CWD must succeed.
        with file_utils.AtomicWrite('dummy'):
            pass

    def testRelativePathWithDirectory(self):
        """Tests using a relative path with a file contained in a subdirectory."""
        subdir = 'subdir'
        content = 'Hello World!'
        os.mkdir(subdir)
        target = os.path.join(subdir, 'atomic_write_file')
        with file_utils.AtomicWrite(target) as f:
            f.write(content)
        self.assertEqual(content, file_utils.ReadOneLine(target))

    def testNonExistentDirectoryPath(self):
        """Tests using a path to a directory that doesn't exist."""
        with self.assertRaises(AssertionError):
            with file_utils.AtomicWrite('dir/'):
                pass

    def testExistingDirectoryPath(self):
        """Tests using a path to a directory that does exist."""
        subdir = 'subdir'
        os.mkdir(subdir)
        with self.assertRaises(OSError):
            with file_utils.AtomicWrite(subdir):
                pass
class SymlinkRelativeTest(unittest.TestCase):
    """Unittests for SymlinkRelative."""

    def setUp(self):
        self.temp_dir = tempfile.mkdtemp()
        self.target = os.path.join(self.temp_dir, 'foo', 'target')
        self.link_path = None
        file_utils.TryMakeDirs(os.path.dirname(self.target))
        file_utils.TouchFile(self.target)

    def tearDown(self):
        if os.path.isdir(self.temp_dir):
            shutil.rmtree(self.temp_dir)

    def SymlinkRelativeAndVerify(self, **kwargs):
        """Create the link with the given kwargs and check it resolves."""
        file_utils.TryMakeDirs(os.path.dirname(self.link_path))
        file_utils.SymlinkRelative(self.target, self.link_path, **kwargs)
        self.assertEqual(os.path.realpath(self.link_path), self.target)

    def _AssertLinkIsAbsolute(self, expect_absolute):
        if expect_absolute:
            self.assertTrue(os.path.isabs(os.readlink(self.link_path)))
        else:
            self.assertFalse(os.path.isabs(os.readlink(self.link_path)))

    def testNormal(self):
        self.link_path = os.path.join(self.temp_dir, 'bar', 'link')
        self.SymlinkRelativeAndVerify()
        self._AssertLinkIsAbsolute(False)

    def testForce(self):
        self.link_path = os.path.join(self.temp_dir, 'link')
        file_utils.TouchFile(self.link_path)
        # Without force, an existing file at link_path is an error.
        with self.assertRaises(OSError):
            self.SymlinkRelativeAndVerify()
        self.SymlinkRelativeAndVerify(force=True)

    def testBaseBothInside(self):
        self.link_path = os.path.join(self.temp_dir, 'bar', 'link')
        self.SymlinkRelativeAndVerify(base=self.temp_dir)
        self._AssertLinkIsAbsolute(False)

    def testBaseLinkNotInside(self):
        self.link_path = os.path.join(self.temp_dir, 'bar', 'link')
        self.SymlinkRelativeAndVerify(base=os.path.join(self.temp_dir, 'foo'))
        self._AssertLinkIsAbsolute(True)

    def testBaseTargetNotInside(self):
        self.link_path = os.path.join(self.temp_dir, 'bar', 'link')
        self.SymlinkRelativeAndVerify(base=os.path.join(self.temp_dir, 'bar'))
        self._AssertLinkIsAbsolute(True)

    def testBaseBothNotInside(self):
        self.link_path = os.path.join(self.temp_dir, 'fux', 'link')
        self.SymlinkRelativeAndVerify(base=os.path.join(self.temp_dir, 'f'))
        self._AssertLinkIsAbsolute(True)

    def testTargetAlreadyRelative(self):
        self.link_path = os.path.join(self.temp_dir, 'bar', 'link')
        file_utils.TryMakeDirs(os.path.dirname(self.link_path))
        file_utils.SymlinkRelative('../foo/target', self.link_path)
        self.assertEqual(os.path.realpath(self.link_path), self.target)
        self._AssertLinkIsAbsolute(False)
if __name__ == '__main__':
    # DEBUG level so lock/timing diagnostics are visible when run directly.
    logging.basicConfig(level=logging.DEBUG)
    unittest.main()
|
config_store_client_tests.py | #!/usr/bin/env python3
#
# Copyright (c) 2014-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from builtins import range
from builtins import object
from openr.utils import socket
from openr.utils.serializer import serialize_thrift_object
from openr.clients import config_store_client
from openr.PersistentStore import ttypes as ps_types
from openr.LinkMonitor import ttypes as lm_types
import zmq
import unittest
from multiprocessing import Process
# Canned backing data shared by the fake server and the client assertions.
# NOTE(review): module-level mutable state — the ConfigStore handlers below
# mutate it in place.
store_db = {'key1': serialize_thrift_object(lm_types.DumpLinksReply(
                thisNodeName='node1')),
            'key2': serialize_thrift_object(lm_types.DumpLinksReply(
                thisNodeName='node2'))}
class ConfigStore(object):
    """Minimal in-process stand-in for the persistent config store server.

    Binds a zmq REP socket on `url` and answers LOAD/ERASE/STORE requests
    against the module-level `store_db` dict.
    """

    def __init__(self, zmq_ctx, url):
        self._cs_server_socket = socket.Socket(zmq_ctx, zmq.REP)
        self._cs_server_socket.bind(url)
        self._store_db = store_db

    def process_request(self):
        """Serve exactly one request/response cycle on the REP socket."""
        req = self._cs_server_socket.recv_thrift_obj(ps_types.StoreRequest)
        # Fix: use an elif chain (requestType has exactly one value) and go
        # through self._store_db consistently — the original mixed the global
        # `store_db` with the instance alias for ERASE/STORE.
        if req.requestType == ps_types.StoreRequestType.LOAD:
            if req.key in self._store_db:
                resp = ps_types.StoreResponse(success=1, key=req.key,
                                              data=self._store_db[req.key])
            else:
                resp = ps_types.StoreResponse(success=0, key=req.key)
        elif req.requestType == ps_types.StoreRequestType.ERASE:
            if req.key in self._store_db:
                resp = ps_types.StoreResponse(success=1, key=req.key)
                del self._store_db[req.key]
            else:
                resp = ps_types.StoreResponse(success=0, key=req.key)
        elif req.requestType == ps_types.StoreRequestType.STORE:
            self._store_db[req.key] = req.data
            resp = ps_types.StoreResponse(success=1, key=req.key)
        else:
            # Fix: an unknown request type used to crash with NameError on
            # `resp`; answer with a failure instead so the REP socket stays
            # in a valid state.
            resp = ps_types.StoreResponse(success=0, key=req.key)
        self._cs_server_socket.send_thrift_obj(resp)
class TestConfigStoreClient(unittest.TestCase):
    """End-to-end test: ConfigStoreClient against the fake ConfigStore.

    NOTE(review): the endpoint is zmq `inproc://` but server and client run
    in separate multiprocessing.Process children sharing a forked context —
    inproc endpoints are normally per-context/per-process; confirm this test
    actually exercises the transport as intended.
    """

    def test(self):
        # LOAD key1 + LOAD key3 + ERASE key1 + LOAD key1 + STORE key5 +
        # LOAD key5 = 6 requests served by the fake server.
        num_req = 6
        ctx = zmq.Context()

        def _cs_server():
            cs_server = ConfigStore(ctx, "inproc://openr_config_store_cmd")
            for _ in range(num_req):
                cs_server.process_request()

        def _cs_client():
            cs_client_inst = config_store_client.ConfigStoreClient(
                ctx, "inproc://openr_config_store_cmd")
            # Existing key loads; missing key raises.
            self.assertEqual(cs_client_inst.load('key1'), store_db['key1'])
            with self.assertRaises(Exception):
                cs_client_inst.load('key3')
            # After erase, the key is gone.
            self.assertTrue(cs_client_inst.erase('key1'))
            with self.assertRaises(Exception):
                cs_client_inst.load('key1')
            # Store then load a new value round-trips.
            value = serialize_thrift_object(lm_types.DumpLinksReply(
                thisNodeName='node5'))
            self.assertTrue(cs_client_inst.store('key5', value))
            self.assertEqual(cs_client_inst.load('key5'), value)

        p = Process(target=_cs_server)
        p.start()
        q = Process(target=_cs_client)
        q.start()
        p.join()
        q.join()
|
video.py | import os
import time
from queue import Queue, Empty, Full
from threading import Thread
import ffmpeg
import numpy
from .constant import PX_RGB, PX_BGR, DEFAULT_QUALITY, THUMBNAIL_RESOLUTION, SD_RESOLUTION
from .image import ENCODE_JPEG, ENCODE_PNG, imwrite
from .image import imencode, imdecode
from .tool import show_image, destroy_windows
from ..multiprocess.threading import QueueOverflow
# Options pixel format. `ffmpeg -pix_fmts` get more options
RGB24 = "rgb24"
BGR24 = "bgr24"
YUV420P = "yuv420p"
YUV422P = "yuv422p"
YUV444P = "yuv444p"

# Option encode format. `ffmpeg -codecs` get more options
H264_ENCODER = "libx264"
H265_ENCODER = "libx265"

# LOGGING levels, passed to ffmpeg's -loglevel flag.
# Show nothing at all; be silent.
# Fix: ffmpeg spells this level "quiet"; the old value "quite" was not a
# valid -loglevel and never matched the `log_level != 'quiet'` checks below.
LOG_QUITE = "quiet"
LOG_QUIET = LOG_QUITE  # correctly-spelled alias; prefer this in new code
# Only show fatal errors which could lead the process to crash, such as an assertion failure.
# This is not currently used for anything.
LOG_ERROR = "error"
# Show all warnings and errors. Any message related to possibly incorrect or unexpected events will be shown.
LOG_WARNING = "warning"
# Show informative messages during processing. This is in addition to warnings and errors.
LOG_INFO = "info"
# Show everything, including debugging information.
LOG_DEBUG = "debug"
class CaptureError(Exception):
    """
    CaptureError contain all error of VideoCapture

    Error message is error detail if msg is ffmpeg._run.Error

    Parameters
    ----------
    msg: str | Exception | ffmpeg.Error
        Error message
    code: str
        One of the START_ERROR/STOP_ERROR class constants; prefixes the
        message with its DESCRIPTION text.
    """
    START_ERROR = "START_ERROR"
    STOP_ERROR = "STOP_ERROR"
    DESCRIPTION = {
        START_ERROR: "Reader was started!",
        STOP_ERROR: "Reader wasn't started!"
    }

    def __init__(self, msg=None, code=None):
        # Unwrap ffmpeg errors: the last stderr line carries the reason.
        if isinstance(msg, ffmpeg.Error) and hasattr(msg, 'stderr'):
            reason = msg.stderr.decode().strip().split("\n")[-1]
            msg = f"{msg.__repr__()}\noutput: {msg.stdout}\nreason: {reason}"
        if code and code in self.DESCRIPTION:
            msg = f"{code}: {self.DESCRIPTION[code]}{f'| {msg}' if msg else ''}"
        # Fix: start the MRO walk at CaptureError; the original
        # `super(Exception, self)` deliberately skipped Exception.__init__
        # and went straight to BaseException for no benefit.
        super(CaptureError, self).__init__(msg)
class Frame(object):
    """A single raw frame: the raw byte buffer plus its (width, height)."""

    def __init__(self, buffer, size):
        self.size = size
        self.buffer = buffer

    def __bool__(self):
        # A Frame is always truthy; readers signal "no frame" with None.
        return True

    def __bytes__(self):
        return self.buffer

    def tobytes(self):
        """Return the raw frame bytes."""
        return self.buffer

    def decode(self):
        """Reshape the raw buffer into a (height, width, 3) uint8 ndarray."""
        width, height = self.size
        flat = numpy.frombuffer(self.buffer, dtype=numpy.uint8)
        return flat.reshape((height, width, 3))

    def encode(self, encode_type=ENCODE_JPEG, quality=DEFAULT_QUALITY):
        """Compress the decoded frame to JPEG/PNG bytes."""
        return imencode(self.decode(), encode_type=encode_type, quality=quality)

    def save(self, img_path, encode_type=ENCODE_JPEG, quality=DEFAULT_QUALITY, over_write=False):
        """Encode the frame and write it to `img_path`.

        Raises FileExistsError when the file exists and over_write is False.
        """
        if os.path.isfile(img_path) and not over_write:
            raise FileExistsError
        encoded = self.encode(encode_type=encode_type, quality=quality)
        with open(img_path, "wb") as img:
            img.write(encoded)
class BufferReader(object):
    """Iterate fixed-size chunks of an encoded ffmpeg output stream.

    A background thread pumps `chunk_size` reads from the ffmpeg process's
    stdout into an unbounded queue; iteration yields those chunks.
    """

    def __init__(self, stream, chunk_size):
        self.source_stream = stream
        self.stream = None
        self.chunk_size = chunk_size
        # prepare pool frame
        self.pool_frames = Queue()
        self.thread = None

    def __iter__(self):
        # Fix: Thread.start() returns None, so the original
        # `self.thread = Thread(...).start()` lost the thread handle.
        self.thread = Thread(target=self.read_buffer)
        self.thread.start()
        return self

    def __next__(self):
        return self.pool_frames.get()

    def read_buffer(self):
        """Background loop: read chunks from ffmpeg stdout into the queue."""
        self.stream = ffmpeg.run_async(self.source_stream, pipe_stdout=True)
        while self.stream.poll() is None:
            buffer = self.stream.stdout.read(self.chunk_size)
            self.pool_frames.put(buffer)
class VideoIterator(object):
    """
    VideoCapture iterator

    Parameters
    ----------
    stream: ffmpeg.OutputStream
        Output stream
    size: tuple[int, int]
        Size of source's frame
    cache_frames: int
        The number of frame is cached in memory.
    is_stream: bool
        True, if output stream from camera device or RTSP link
    auto_stop: int
        (Default: infinite) If process wasn't read frame in seconds, reader would automatic stopped.
    """
    # Sentinel pushed into the queue to tell consumers the stream ended.
    # A real raw frame is always `width*height*3` bytes, so it cannot
    # collide with this 4-byte marker.
    __STOP_BYTES = b'U0U='

    def __init__(self, stream, size, cache_frames=30, is_stream=False, auto_stop=None):
        # prepare reader
        assert isinstance(stream, ffmpeg.nodes.OutputStream)
        self.source_stream = stream
        self.stream = None
        self.size = size
        # prepare pool frame: live streams keep only the newest frame
        # (QueueOverflow(1)); files buffer up to cache_frames.
        self.pool_frames = QueueOverflow(1) if is_stream else Queue(cache_frames)
        self.thread = None
        # One raw RGB frame is width * height * 3 bytes.
        self.read_byte_size = self.size[0] * self.size[1] * 3
        # metadata
        self.counter = 0
        self.start_time = 0
        self.end_time = 0
        self.auto_stop = auto_stop

    def __iter__(self):
        if not self.thread:
            self.start()
        return self

    def __next__(self):
        frame = self.get_frame()
        if not frame:
            raise StopIteration
        return frame

    def fps(self):
        # Effective read rate since start().
        # NOTE(review): `//` followed by round() is redundant, and this
        # raises ZeroDivisionError if called before any frame was read
        # (end_time == start_time) — confirm callers guard against that.
        return int(round(self.counter // (self.end_time - self.start_time)))

    def start(self):
        """Spawn the background reader thread; error if already running."""
        if self.thread and self.thread.is_alive():
            raise CaptureError(code=CaptureError.START_ERROR)
        # clear queue (drop frames left over from a previous run)
        with self.pool_frames.mutex:
            self.pool_frames.queue.clear()
        self.thread = Thread(target=self.__read_buffer)
        self.thread.start()
        # metadata
        self.start_time = time.time()
        self.end_time = self.start_time
        self.counter = 0

    def get_frame(self, time_out=10):
        """Return the next Frame, or None on stop sentinel / timeout."""
        try:
            buffer = self.pool_frames.get(timeout=time_out)
            self.end_time = time.time()
            if buffer == self.__STOP_BYTES:
                return None
        except Empty:
            return None
        self.counter += 1
        return Frame(buffer, self.size)

    def __read_buffer(self):
        """Background loop: pump raw frames from ffmpeg stdout into the queue."""
        self.stream = ffmpeg.run_async(self.source_stream, pipe_stdout=True)
        while self.stream.poll() is None:
            buffer = self.stream.stdout.read(self.read_byte_size)
            # Stop if the consumer has been idle longer than auto_stop.
            if self.auto_stop and time.time() - self.end_time > self.auto_stop:
                break
            if not buffer:
                continue
            try:
                self.pool_frames.put(buffer, timeout=self.auto_stop)
            except Full:
                break
        # Stop record: make room for the sentinel, then push it so a blocked
        # get_frame() wakes up and sees end-of-stream.
        try:
            self.pool_frames.get_nowait()
        except IndexError:
            pass
        except Empty:
            pass
        self.pool_frames.put(self.__STOP_BYTES)
        self.stream.kill()
        if self.stream.stdout:
            self.stream.stdout.close()
        self.stream = None
        self.thread = None

    def stop(self):
        # NOTE(review): after a natural end-of-stream, __read_buffer sets
        # self.thread = None, so stop() (and the `with` exit below) raises
        # STOP_ERROR — confirm that is intended.
        if not self.thread:
            raise CaptureError(code=CaptureError.STOP_ERROR)
        self.stream.kill()

    __enter__ = (lambda self: self)
    __exit__ = (lambda self, exc_type, exc_val, exc_tb: self.stop())
class VideoInfo(object):
    """Probe a source with ffprobe and expose basic video-stream metadata.

    Attributes: src, fps (rounded), width, height, rotation (str or None).

    Raises
    ------
    CaptureError
        If probing fails or the source has no video stream.
    """

    def __init__(self, src, transport="tcp"):
        # check source
        super().__init__()
        from fractions import Fraction  # local import; only needed here
        opts = {
            "probesize": 32
        }
        # stream
        if src.startswith("rtsp"):
            opts["rtsp_transport"] = transport
        try:
            info_streams = ffmpeg.probe(src, **opts)
            self.info = next(stream for stream in info_streams['streams'] if stream['codec_type'] == "video")
        except ffmpeg.Error as e:
            raise CaptureError(e) from None
        except StopIteration:
            raise CaptureError("No video stream from source!") from None
        # meta-data
        self.src = src
        # Fix: parse the "num/den" rate strings with Fraction instead of
        # eval() — same rounded result, but no code execution on ffprobe
        # output. Fraction("0/0") raises ZeroDivisionError just like eval.
        try:
            self.fps = round(Fraction(self.info['avg_frame_rate']))
        except ZeroDivisionError:
            self.fps = round(Fraction(self.info['r_frame_rate']))
        self.height = self.info['height']
        self.width = self.info['width']
        # Rotation tag is optional; None when absent.
        self.rotation = self.info.get('tags', {}).get('rotate')
class VideoCapture(object):
    """
    VideoCapture implement FFmpeg - High Performance Video IO.

    Parameters
    ----------
    src: str | int
        Support local camera device (int), RTSP, Video.
    transport: str
        Transport protocol of RTSP streaming. If source link is RTSP link.

    Raises
    ------
    CaptureError
        error during initial capture

    Examples
    --------
    Create capture and show using OpenCV.

    >>> capture = VideoCapture(0)  # capture camera 0
    # capture = VideoCapture("rtsp://****")
    # capture = VideoCapture("file_path")
    >>> with capture.read(realtime=True) as reader:
    >>>     for frame in reader:
    >>>         frame_ndarray = frame.decode()
    >>>         if not show_image(frame_ndarray, 'asd', windows_size=FHD_RESOLUTION):
    >>>             break

    OR

    >>> reader = capture.read(realtime=True)
    >>> for frame in reader:
    >>>     frame_ndarray = frame.decode()
    >>>     if not show_image(frame_ndarray, 'asd', windows_size=FHD_RESOLUTION):
    >>>         break
    >>> reader.close()
    """

    def __init__(self, src, transport="tcp"):
        self.is_stream = False
        # options
        opts = {
            'an': None  # drop audio; only video is read
        }
        # camera index -> v4l2 device path
        if isinstance(src, int):
            src = f"/dev/video{src}"
            self.is_stream = True
        # stream
        if src.startswith("rtsp"):
            opts["rtsp_transport"] = transport
            opts["re"] = None
            self.is_stream = True
        # source metadata
        self.__meta = VideoInfo(src, transport=transport)
        # create input cmd
        self.__input_stream = ffmpeg.input(src, **opts)

    def __repr__(self):
        return f"VideoCapture implement FFmpeg - High Performance Video IO.\n" \
               f"Source: {self.source}\n" \
               f"Size: {self.size}\n" \
               f"FPS: {self.fps}"

    def __iter__(self):
        """
        Create iterator with default options of <video:func:read()>

        Returns
        -------
        VideoIterator
            Frame fetcher from capture.
        """
        return iter(self.read(auto_stop=3))

    @property
    def source(self):
        return self.__meta.src

    @property
    def size(self):
        # (width, height) of the probed source.
        return self.__meta.width, self.__meta.height

    @property
    def fps(self):
        return self.__meta.fps

    def _output_size(self, output_size, keep_ratio):
        """Resolve the requested output size against source metadata.

        When keep_ratio, the width is recomputed from the requested height so
        the source aspect ratio is preserved; a 90/270 rotation tag swaps the
        axes.
        """
        if not output_size:
            output_size = self.size
        if keep_ratio:
            output_size = (
                int(round(self.__meta.width * (output_size[1] / self.__meta.height))),
                output_size[1]
            )
        if self.__meta.rotation in ['90', '270']:
            output_size = output_size[::-1]
        return output_size

    def _fps_options(self, options, fps):
        """Fill the frame-rate option: manual `fps`, vfr for live streams,
        or the source rate for files."""
        if fps > 0:
            # manual set
            options["r"] = fps
        elif self.is_stream:
            # sync with time
            options["vsync"] = "vfr"
        else:
            # same source file
            options["r"] = self.fps
        return options

    def read2pipe(self, encoder=H264_ENCODER, chunk_size=128, output_size=None, keep_ratio=True, duration=0, fps=0,
                  pix_fmt=RGB24, log_level=LOG_ERROR):
        """
        Generate BufferReader which yields encoded chunks of `chunk_size` bytes.

        Parameters
        ---------
        encoder: str
            (Default: H264_ENCODER) Encode codec.
        chunk_size: int
            Bytes yielded per iteration.
        output_size: tuple[int, int]
            Output size of stream
        keep_ratio: bool
            If True, width will change to fix with height ratio. w *= h_new / h_old
        duration: int
            Limited stream duration if set.
        fps: int
            (Default: None|0 - infinite) Limited stream FPS, which no effect with the video source.
        pix_fmt: str
            (Default: RGB24) Format of each pixel in frame.
        log_level: LogLevel
            Log level of ffmpeg

        Returns
        -------
        BufferReader
            Chunk fetcher.

        Raises
        ------
        CaptureError
            If output_size <= (-1, -1)
        """
        input_stream = self.__input_stream
        output_size = self._output_size(output_size, keep_ratio)
        output_options = {
            "c:v": encoder,
            "preset": "veryfast",
            "vprofile": "baseline",
            # NOTE(review): ffmpeg's HEVC muxer is named "hevc", not "h265"
            # — confirm the H265 path was ever exercised.
            "format": 'h264' if encoder == H264_ENCODER else "h265",
            "pix_fmt": pix_fmt,
            "loglevel": log_level,
            'probesize': 32
        }
        if output_size != self.size:
            output_options["s"] = f'{output_size[0]}x{output_size[1]}'
        # handle FPS
        self._fps_options(output_options, fps)
        if duration:
            output_options['t'] = duration
        capture = ffmpeg.output(
            input_stream,
            'pipe:',
            **output_options
        )
        return BufferReader(capture, chunk_size)

    def read(self, output_size=None, keep_ratio=True, duration=0, fps=0, pix_fmt=RGB24, auto_stop=None,
             log_level=LOG_ERROR):
        """
        Generate VideoIterator which yield once frame by frame.

        Parameters
        ---------
        output_size: tuple[int, int]
            Output size of stream
        keep_ratio: bool
            If True, width will change to fix with height ratio. w *= h_new / h_old
        duration: int
            Limited stream duration if set.
        fps: int
            (Default: None|0 - infinite) Limited stream FPS, which no effect with the video source.
        pix_fmt: str
            (Default: RGB24) Format of each pixel in frame.
        auto_stop: int
            (Default: infinite) If process wasn't read frame in seconds, reader would automatic stopped.
        log_level: LogLevel
            Log level of ffmpeg

        Returns
        -------
        VideoIterator
            Frame fetcher.

        Raises
        ------
        CaptureError
            If output_size <= (-1, -1)
        """
        input_stream = self.__input_stream
        output_size = self._output_size(output_size, keep_ratio)
        output_options = {
            "format": 'rawvideo',
            "pix_fmt": pix_fmt,
            "loglevel": log_level,
            'probesize': 32
        }
        if output_size != self.size:
            output_options["s"] = f'{output_size[0]}x{output_size[1]}'
        # handle FPS
        self._fps_options(output_options, fps)
        if duration:
            output_options['t'] = duration
        capture = ffmpeg.output(
            input_stream,
            'pipe:',
            **output_options
        )
        return VideoIterator(
            capture,
            output_size,
            is_stream=self.is_stream,
            auto_stop=auto_stop
        )

    def write_images(self, folder_path, prefix="", output_size=None, keep_ratio=True, duration=0, pix_fmt=RGB24,
                     encode_type=ENCODE_JPEG, quality=DEFAULT_QUALITY, over_write=False, log_level=LOG_ERROR):
        """
        Read all frame from source and write it to images.

        Same manual way:
        Get reader with capture.read and write frame into file.

        Parameters
        ----------
        folder_path: str
            Path of output folder
        prefix: str
            prefix of each image's name
        output_size: tuple[int, int]
            Output size of stream
        keep_ratio: bool
            If True, width will change to fix with height ratio. w *= h_new / h_old
        duration: int
            Limited stream duration if set.
        pix_fmt: str
            (Default: RGB24) Format of each pixel in frame.
        encode_type: int
            ENCODE_JPEG (default) | ENCODE_PNG. Output encode format of images
        quality: int
            0 -> (default) 95 -> 100. Quality of images
        over_write: bool
            force write data in existed file.
        log_level: LogLevel
            Log level of ffmpeg
        """
        if not os.path.isdir(folder_path):
            os.mkdir(folder_path)
        ext = ".jpg"
        if encode_type == ENCODE_PNG:
            ext = ".png"
        reader = self.read(output_size=output_size, keep_ratio=keep_ratio, duration=duration, pix_fmt=pix_fmt,
                           log_level=log_level)
        for idx, frame in enumerate(reader):
            file_path = f"{os.path.abspath(folder_path)}/{prefix}{idx}{ext}"
            imwrite(frame.decode(), file_path, encode_type=encode_type, quality=quality, over_write=over_write)
        reader.stop()

    def write(self, output, over_write=False, encoder=H265_ENCODER, pix_fmt=YUV420P,
              output_size=None, keep_ratio=True, fps=-1, duration=0,
              preview=False, preview_size=SD_RESOLUTION,
              log_level=LOG_ERROR):
        """
        Capture input stream to file. With preview options

        Preview mode can down speed processing 1.5x -> 2x.

        Parameters
        ----------
        output: str
            Support: file's path | pipeline ("pipe:") | URL
        over_write: bool
            Overwrite existed file if True.
        encoder: str
            (Default: H265_ENCODER) Encode codec.
        pix_fmt: str
            (Default: YUV420P) Pixel format
        output_size: tuple[int, int]
            Output size of stream
        keep_ratio: bool
            If True, width will change to fix with height ratio. w *= h_new / h_old
        fps: int
            (Default: -1 - autoset) FPS of output video
        duration: int
            (Default: 0 - infinite) Limited record time if set.
        preview: bool
            Show frame during process. If True.
        preview_size: tuple[int, int]
            Window previewer size.
        log_level: LogLevel
            Log level of ffmpeg
        """
        # Fix: the overwrite guard only makes sense for file outputs; the
        # original tested `output.startswith("pipe") and os.path.isfile(...)`,
        # which can never both be true, so the FileExistsError was dead code.
        if not output.startswith("pipe") and os.path.isfile(output) and not over_write:
            raise FileExistsError
        if not output_size:
            output_size = self.size
        if keep_ratio:
            output_size = (
                int(round(self.__meta.width * (output_size[1] / self.__meta.height))),
                output_size[1]
            )
        # file output settings
        output_options = {
            "pix_fmt": pix_fmt,
            "loglevel": log_level,
            "c:v": encoder,
            "crf": 27,
            'preset': 'veryfast',
            's': f'{output_size[0]}x{output_size[1]}'
        }
        if duration:
            output_options['t'] = duration
        if encoder == H265_ENCODER:
            # x265 logs through its own flag; -1 silences it entirely.
            output_options["x265-params"] = f"log-level={log_level if log_level != 'quiet' else -1}"
        # handle FPS
        self._fps_options(output_options, fps)
        capture_output = self.__input_stream \
            .output(output, **output_options) \
            .overwrite_output()
        # pipe output settings if preview.
        if preview:
            preview_output_size = THUMBNAIL_RESOLUTION
            if keep_ratio:
                preview_output_size = (
                    int(round(self.__meta.width * (preview_output_size[1] / self.__meta.height))),
                    preview_output_size[1]
                )
            pipe_output_opts = {
                "format": "rawvideo",
                "pix_fmt": RGB24,
                "loglevel": log_level,
                's': f'{preview_output_size[0]}x{preview_output_size[1]}'
            }
            self._fps_options(pipe_output_opts, fps)
            if duration:
                pipe_output_opts['t'] = duration
            # Merge the file writer and the raw preview pipe into one graph
            # so the source is decoded only once.
            pipe_output = self.__input_stream.output('pipe:', **pipe_output_opts)
            capture_output = ffmpeg.merge_outputs(pipe_output, capture_output)
            capture_output = VideoIterator(
                capture_output,
                preview_output_size,
                is_stream=self.is_stream
            )
            window_name = f"Preview - {self.__meta.src}"
            for frame in capture_output:
                if not show_image(frame.decode(), window_name, windows_size=preview_size):
                    break
            destroy_windows(window_name)
            capture_output.stop()
        else:
            capture_output.run()
def images2video(file_path, images_list, fps,
                 pix_fmt=YUV420P, encoder=H265_ENCODER,
                 output_size=None, keep_ratio=True,
                 over_write=False, duration=0, log_level=LOG_ERROR):
    """
    Make video file from sequence images.

    *images_list: list path file of images.

    Tip: `fast way get list of images`.
        1) Get all file path by glob.glob
        2) Sort with dpsutil.sort.natsorted

    Parameters
    ----------
    file_path: str
        Path of output video
    images_list: list[str]
        List of images which used to re-sequence to video
    fps: int
        FPS of output video
    encoder: str
        (Default: H265_ENCODER) Encode codec.
    pix_fmt: str
        (Default: YUV420P) Pixel format
    output_size: tuple[int, int]
        Output size of output video
    keep_ratio: bool
        If True, width will change to fix with height ratio. w *= h_new / h_old
    over_write: bool
        Overwrite existed file if True.
    duration: int
        (Default: 0 - infinite) Limited record time if set.
    log_level: LogLevel
        Log level of ffmpeg

    Raises
    ------
    FileExistsError
        If file_path exists and over_write is False.
    ValueError
        If images_list is empty.
    """
    if os.path.isfile(file_path) and not over_write:
        raise FileExistsError
    if not images_list:
        # Fix: fail with a clear message instead of IndexError below.
        raise ValueError("images_list is empty")
    # Probe the first image for the source dimensions.
    img_info = ffmpeg.probe(images_list[0])['streams'][0]
    origin_size = (img_info['width'], img_info['height'])
    if not output_size:
        output_size = origin_size
    if keep_ratio:
        output_size = (
            int(round(origin_size[0] * (output_size[1] / origin_size[1]))),
            output_size[1]
        )
    output_options = {
        "pix_fmt": pix_fmt,
        # Fix: honor the `encoder` parameter; H265_ENCODER was hard-coded.
        "c:v": encoder,
        # Fix: log levels are plain strings — `log_level.value` raised
        # AttributeError.
        'loglevel': log_level
    }
    if encoder == H265_ENCODER:
        output_options["x265-params"] = f"log-level={log_level if log_level != 'quiet' else -1}"
    if duration:
        output_options['t'] = duration
    capture_output = ffmpeg.input("pipe:", format='rawvideo', pix_fmt=RGB24, framerate=fps,
                                  s=f'{output_size[0]}x{output_size[1]}') \
        .output(file_path, **output_options) \
        .overwrite_output() \
        .run_async(pipe_stdin=True)
    try:
        # Stream each decoded image as raw RGB into ffmpeg's stdin.
        for image_path in images_list:
            with open(image_path, "rb") as image_file:
                img = imdecode(image_file.read())
            capture_output.stdin.write(img.tobytes())
    finally:
        # Always close stdin so ffmpeg can finalize the file.
        capture_output.stdin.close()
        capture_output.wait()
__all__ = ['images2video', 'VideoCapture', 'PX_RGB', 'PX_BGR', 'ENCODE_JPEG', 'ENCODE_PNG']
|
op_util.py | # Copyright 2017-2022 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import csv
import importlib
import io
import logging
import os
import re
import struct
import sys
import threading
import time
import six
import yaml
from guild import _api
from guild import config
from guild import guildfile
from guild import file_util
from guild import flag_util
from guild import log as loglib
from guild import main
from guild import op_cmd as op_cmd_lib
from guild import op_dep
from guild import run as runlib
from guild import util
from guild import var
from guild import vcs_util
from guild import yaml_util
log = logging.getLogger("guild")

# Limits used when selecting default source code files for a run.
MAX_DEFAULT_SOURCECODE_FILE_SIZE = 1024 * 1024
MAX_DEFAULT_SOURCECODE_COUNT = 100

# Command templates expanded when building the operation process command.
DEFAULT_EXEC = "${python_exe} -um guild.op_main ${main_args} -- ${flag_args}"
STEPS_EXEC = "${python_exe} -um guild.steps_main"

# Patterns for splitting and resolving ${...} references in run labels.
LABEL_TOKENS_P = re.compile(r"(\${.+?})")
LABEL_FLAG_REF_P = re.compile(r"\${(.+?)}")

RUN_OUTPUT_STREAM_BUFFER = 4096

RESTART_NEEDED_STATUS = ("pending",)

DEFAULT_PROC_POLL_INTERVAL = 5
DEFAULT_PROC_KILL_DELAY = 30

NoCurrentRun = _api.NoCurrentRun

# Python 2/3 shim: `bytes('')` raises TypeError on Python 3 only.
try:
    bytes('')
except TypeError:
    # Python 3: iterating a bytes object yields ints, so LF is the ordinal
    # of "\n" and joining a list of ints is the bytes() constructor.
    LF = 10
    BYTES_JOIN = bytes
else:
    # Python 2: bytes is str, so compare/join with byte strings directly.
    LF = b"\n"
    BYTES_JOIN = lambda l: b"".join(l)
###################################################################
# Error classes
###################################################################
class ArgValueError(ValueError):
    """Error for an invalid argument value; keeps the offending `arg`."""

    def __init__(self, arg):
        super(ArgValueError, self).__init__(arg)
        # The offending argument, kept for callers to report.
        self.arg = arg
class FlagError(Exception):
    """Base class for flag-related errors in this module."""
    pass
class MissingRequiredFlags(FlagError):
    """Raised when required flags were not provided; `missing` lists them."""

    def __init__(self, missing):
        super(MissingRequiredFlags, self).__init__(missing)
        # The flag definitions/names that were required but absent.
        self.missing = missing
class InvalidFlagChoice(FlagError):
    """Raised when a flag value is not one of the flag's declared choices."""

    def __init__(self, val, flag):
        super(InvalidFlagChoice, self).__init__(val, flag)
        # Keep both the rejected value and the flag it was given for.
        self.flag = flag
        self.val = val
class InvalidFlagValue(FlagError):
    """Raised when a flag value fails validation; `msg` explains why."""

    def __init__(self, value, flag, msg):
        super(InvalidFlagValue, self).__init__(value, flag, msg)
        # Keep the rejected value, the flag, and the validation message.
        self.msg = msg
        self.flag = flag
        self.value = value
class OpDefLookupError(LookupError):
    """Base class for errors resolving an operation definition from a spec."""
    pass
class InvalidOpSpec(OpDefLookupError):
    """Raised when an operation spec string cannot be parsed."""

    def __init__(self, opspec):
        super(InvalidOpSpec, self).__init__(opspec)
        # The unparseable spec, kept for error reporting.
        self.opspec = opspec
class NoSuchModel(OpDefLookupError):
    """Raised when the model referenced by `opspec` cannot be found."""

    def __init__(self, opspec):
        super(NoSuchModel, self).__init__(opspec)
        # The spec whose model part did not resolve.
        self.opspec = opspec
class NoSuchOperation(OpDefLookupError):
    """Raised when `model` has no operation named `op_name`."""

    def __init__(self, model, op_name):
        super(NoSuchOperation, self).__init__(model, op_name)
        # Keep the resolved model and the missing operation name.
        self.op_name = op_name
        self.model = model
class CwdGuildfileError(OpDefLookupError):
    """Wraps a guildfile error hit while loading the cwd Guild file."""

    def __init__(self, guildfile_error):
        super(CwdGuildfileError, self).__init__(guildfile_error)
        # Surface the underlying error's path and message directly.
        self.path = guildfile_error.path
        self.msg = guildfile_error.msg
class MultipleMatchingModels(OpDefLookupError):
    """Raised when a model reference is ambiguous; `matches` lists candidates."""

    def __init__(self, model_ref, matches):
        super(MultipleMatchingModels, self).__init__(model_ref, matches)
        # Keep the ambiguous reference and every model it matched.
        self.matches = matches
        self.model_ref = model_ref
class NoMatchingModel(OpDefLookupError):
    """Raised when a model reference matches no known model."""

    def __init__(self, model_ref):
        super(NoMatchingModel, self).__init__(model_ref)
        # The unmatched reference, kept for error reporting.
        self.model_ref = model_ref
class ModelOpProxyError(Exception):
    """Raised when a model-proxy opspec is invalid (see `_try_model_proxy`)."""

    def __init__(self, opspec, msg):
        super(ModelOpProxyError, self).__init__(opspec, msg)
        self.opspec = opspec
        self.msg = msg
class NoSuchFlagError(FlagError):
    """Raised for a flag value whose name is not defined by the operation."""

    def __init__(self, flag_name):
        super(NoSuchFlagError, self).__init__(flag_name)
        self.flag_name = flag_name
class InvalidOpDef(ValueError):
    """Raised for a structurally invalid opdef (e.g. no exec/main/steps)."""

    def __init__(self, opdef, msg):
        super(InvalidOpDef, self).__init__(opdef, msg)
        self.opdef = opdef
        self.msg = msg
    def __str__(self):
        return "invalid definition for %s: %s" % (self.opdef.fullname, self.msg)
class OpCmdError(Exception):
    """Error related to an operation command (not raised in this section)."""

    pass
class BatchFileError(Exception):
    """Raised when trials cannot be read from a batch file."""

    def __init__(self, path, msg):
        super(BatchFileError, self).__init__(path, msg)
        self.path = path
        self.msg = msg
    def __str__(self):
        return "cannot read trials for %s: %s" % (self.path, self.msg)
class ProcessError(Exception):
    """Error related to a run process (not raised in this section)."""

    pass
###################################################################
# Run output
###################################################################
class RunOutput(object):
    """Tees a run process's stdout/stderr to disk, the console, and a callback.

    Raw output bytes are appended to the run's `output` file. For each
    complete line, a packed (millisecond-timestamp, stream-type) entry
    is appended to the run's `output.index` file, where stream type is
    0 for stdout and 1 for stderr.
    """

    def __init__(self, run, quiet=False, output_cb=None):
        """Creates a run output object.

        `quiet` suppresses echoing output to sys.stdout/sys.stderr.
        `output_cb`, when provided, receives each complete line via its
        `write()` method and is closed via `close()`.

        Run output is not automatically opened. Use `open(proc)` to
        open output for a process.
        """
        assert run
        self._run = run
        self._quiet = quiet
        self._output_cb = output_cb
        # Serializes writes from the stdout and stderr tee threads.
        self._output_lock = threading.Lock()
        self._open = False
        self._proc = None
        self._output = None
        self._index = None
        self._out_tee = None
        self._err_tee = None
    @property
    def closed(self):
        # True when output is not currently open.
        return not self._open
    def open(self, proc):
        """Opens output.

        When open, threads are started for reading from proc.stdout
        and proc.stderr and writing to sys.stdout and sys.stderr
        respectively.

        Generates an error if run output is already open.
        """
        self._assert_closed()
        if proc.stdout is None:
            raise RuntimeError("proc stdout must be a PIPE")
        self._proc = proc
        self._output = self._open_output()
        self._index = self._open_index()
        self._out_tee = threading.Thread(target=self._out_tee_run)
        self._out_tee.start()
        # stderr tee is only started when the proc captures stderr.
        if proc.stderr:
            self._err_tee = threading.Thread(target=self._err_tee_run)
            self._err_tee.start()
        self._open = True
    def _assert_closed(self):
        # Guards against double-open; all handles must be unset.
        if self._open:
            raise RuntimeError("already open")
        assert self._proc is None
        assert self._output is None
        assert self._index is None
        assert self._out_tee is None
        assert self._err_tee is None
    def _open_output(self):
        # Binary append target for raw output bytes.
        path = self._run.guild_path("output")
        return open(path, "wb")
    def _open_index(self):
        # Binary target for per-line (timestamp, stream type) entries.
        path = self._run.guild_path("output.index")
        return open(path, "wb")
    def _out_tee_run(self):
        # Stream type 0 = stdout.
        assert self._proc
        self._gen_tee_run(self._proc.stdout, sys.stdout, 0)
    def _err_tee_run(self):
        # Stream type 1 = stderr.
        assert self._proc
        self._gen_tee_run(self._proc.stderr, sys.stderr, 1)
    def _gen_tee_run(self, input_stream, output_stream, stream_type):
        """Reads from input_stream until EOF, teeing bytes to the console

        stream (unless quiet), the output file, and per-line index
        entries. Runs on a dedicated thread per stream.
        """
        assert self._output
        assert self._index
        # Bind globals/attributes to locals - this loop is hot.
        os_read = os.read
        os_write = os.write
        input_fileno = input_stream.fileno()
        if not self._quiet and hasattr(output_stream, "fileno"):
            try:
                stream_fileno = output_stream.fileno()
            except io.UnsupportedOperation:
                # e.g. output_stream is a StringIO-like replacement.
                stream_fileno = None
        else:
            stream_fileno = None
        output_fileno = self._output.fileno()
        index_fileno = self._index.fileno()
        lock = self._output_lock
        line = []
        while True:
            buf = os_read(input_fileno, RUN_OUTPUT_STREAM_BUFFER)
            if not buf:
                # EOF - flush any unterminated final line.
                if line:
                    self._output_eol(index_fileno, line, stream_type)
                break
            with lock:
                if stream_fileno is not None:
                    os_write(stream_fileno, buf)
                os_write(output_fileno, buf)
                for b in buf:
                    # On Python 3, b is an int; values below 9 are
                    # non-printable control bytes and are dropped.
                    if b < 9:  # non-printable
                        continue
                    line.append(b)
                    if b == LF:
                        self._output_eol(index_fileno, line, stream_type)
                        del line[:]
    def _output_eol(self, index_fileno, line, stream_type):
        """Records an end-of-line: writes an index entry and notifies the

        output callback with the line's bytes.
        """
        line_bytes = BYTES_JOIN(line)
        # Index entry: 8-byte ms timestamp + 1-byte stream type.
        entry = struct.pack("!QB", int(time.time() * 1000), stream_type)
        os.write(index_fileno, entry)
        if self._output_cb:
            try:
                self._output_cb.write(line_bytes)
            except Exception:
                # A failing callback is dropped so it cannot break the tee.
                log.exception("error in output callback (will be " "removed)")
                self._output_cb = None
    def wait(self):
        """Wait for run output reader threads to exit.

        This call will block until the reader threads exit. Reader
        threads exit when the underlying streams they read from are
        closed. If these streams do not close, this call will not
        return. Streams close when their associated OS process
        terminates or they're otherwise explicitly closed.
        """
        self._assert_open()
        self._out_tee.join()
        if self._err_tee:
            self._err_tee.join()
    def _assert_open(self):
        # Guards operations that require open state.
        if not self._open:
            raise RuntimeError("not open")
        assert self._proc
        assert self._output
        assert self._index
        assert self._out_tee
        assert not self._proc.stderr or self._err_tee
    def close(self):
        # Takes the output lock so no tee thread writes during close.
        lock = self._acquire_output_lock()
        try:
            self._close()
        finally:
            lock.release()
    def _acquire_output_lock(self, timeout=60):
        """Polling version of acquire to support timeouts on Python 2."""
        timeout_at = time.time() + timeout
        while time.time() < timeout_at:
            if self._output_lock.acquire(False):
                return self._output_lock
            time.sleep(1)
        raise RuntimeError("timeout")
    def _close(self):
        # Closes files and callback and resets state; tee threads must
        # have exited (see asserts below) - call wait() first.
        self._assert_open()
        self._output.close()
        self._index.close()
        if self._output_cb:
            try:
                self._output_cb.close()
            except Exception:
                log.exception("closing output callback")
        assert not self._out_tee.is_alive()
        assert not self._err_tee or not self._err_tee.is_alive()
        self._proc = None
        self._output = None
        self._index = None
        self._out_tee = None
        self._err_tee = None
        self._open = False
    def wait_and_close(self):
        # Convenience: wait for readers to finish, then close.
        self.wait()
        self.close()
###################################################################
# OpDef for spec
###################################################################
def opdef_for_opspec(opspec):
    """Returns an opdef for opspec.

    Tries a normal model-based lookup first, falling back to a model
    proxy when available. Raises OpDefLookupError subclasses on failure.
    """
    try:
        return _model_opdef(opspec)
    except OpDefLookupError:
        opdef = _try_model_proxy(opspec)
        if not opdef:
            raise
        return opdef
def _model_opdef(opspec):
    """Resolves opspec to a model operation def, tagging it with the model ref.

    Raises NoSuchOperation when the model doesn't define the operation.
    """
    model, op_name = _model_op(opspec)
    opdef = _opdef_for_model_op(model, op_name)
    if not opdef:
        raise NoSuchOperation(model, op_name)
    opdef.set_modelref(model.reference)
    return opdef
def _try_model_proxy(opspec):
    """Returns an opdef via the model proxy scheme or None if unsupported.

    Raises ModelOpProxyError for a proxy-supported but invalid opspec.
    """
    from guild import model_proxy
    if not opspec:
        return None
    try:
        model, op_name = model_proxy.resolve_model_op(opspec)
    except model_proxy.NotSupported:
        return None
    except model_proxy.OpSpecError as e:
        raise ModelOpProxyError(opspec, str(e))
    else:
        opdef = model.modeldef.get_operation(op_name)
        if opdef:
            opdef.set_modelref(model.reference)
        return opdef
def _model_op(opspec):
    """Returns (model, op_name) for opspec.

    Raises NoSuchModel when the model ref doesn't resolve.
    """
    model_ref, op_name = _parsed_opspec(opspec)
    model = _resolve_model(model_ref)
    if not model:
        raise NoSuchModel(opspec)
    return model, op_name
def _parsed_opspec(opspec):
    """Parses opspec into (model_ref, op_name), raising InvalidOpSpec on failure."""
    parsed = parse_opspec(opspec)
    if parsed is None:
        raise InvalidOpSpec(opspec)
    return parsed
###################################################################
# Opdef for model paths
###################################################################
def opdef_model_paths(opdef):
    """Returns Python paths for an opdef: its own paths plus model parent dirs."""
    return _opdef_paths(opdef) + _model_parent_paths(opdef.modeldef)
def _opdef_paths(opdef):
    """Returns paths for opdef relative to its guildfile dir.

    Precedence: explicit `python_path` entries, then the sourcecode
    root, then the guildfile dir itself. Empty when there's no
    guildfile dir.
    """
    if not opdef.guildfile.dir:
        return []
    abs_gf_dir = os.path.abspath(opdef.guildfile.dir)
    if opdef.python_path is not None:
        return [os.path.join(abs_gf_dir, p) for p in opdef.python_path]
    if opdef.sourcecode and opdef.sourcecode.root:
        return [os.path.join(abs_gf_dir, opdef.sourcecode.root)]
    return [abs_gf_dir]
def _model_parent_paths(modeldef):
    """Returns absolute dirs of a model's parent definitions."""
    return [os.path.abspath(parent.dir) for parent in modeldef.parents]
###################################################################
# Parse opspec
###################################################################
def parse_opspec(spec):
    """Parses spec, returning (model_ref, op_name) or None if unparseable.

    Each supported opspec form is tried in turn; the first match wins.
    """
    return util.find_apply(
        [
            _empty_spec,
            _op_spec,
            _model_op_spec,
            _package_model_op_spec,
            _package_op_spec,
        ],
        spec,
    )
def _empty_spec(spec):
if spec:
return None
return None, None
def _op_spec(spec):
if "/" in spec or ":" in spec:
return None
return None, spec
def _model_op_spec(spec):
m = re.match(r"([^/:]*):([^/:]+)$", spec)
if not m:
return None
return m.groups()
def _package_model_op_spec(spec):
m = re.match(r"([^/:]*/[^/:?]*):([^/:]+)$", spec)
if not m:
return None
return m.groups()
def _package_op_spec(spec):
m = re.match(r"([^/:]+/):?([^/:]+)$", spec)
if not m:
return None
return m.groups()
def _resolve_model(model_ref):
    """Resolves model_ref to a model, trying the cwd guildfile then the system."""
    return util.find_apply(
        [
            _resolve_cwd_model,
            _resolve_system_model,
        ],
        model_ref,
    )
def _resolve_cwd_model(model_ref):
    """Resolves model_ref against the cwd Guild file, or None if there isn't one."""
    from guild import model as modellib  # expensive
    cwd_guildfile = _cwd_guildfile()
    if not cwd_guildfile:
        return None
    # Temporarily limit the model path to the cwd guildfile dir.
    with modellib.SetPath([cwd_guildfile.dir], clear_cache=True):
        return _match_one_model(model_ref, cwd_guildfile)
def _cwd_guildfile():
    """Returns the guildfile for the cwd, or None if it defines no models.

    Raises CwdGuildfileError for other guildfile errors.
    """
    try:
        return guildfile.for_dir(config.cwd())
    except guildfile.NoModels:
        return None
    except guildfile.GuildfileError as e:
        raise CwdGuildfileError(e)
def _resolve_system_model(model_ref):
    """Resolves model_ref against all installed models."""
    return _match_one_model(model_ref)
def _match_one_model(model_ref, cwd_guildfile=None):
    """Returns the single model matching model_ref, or None.

    With multiple matches, an exact name match wins; otherwise
    MultipleMatchingModels is raised (via _complete_match_one_model).
    """
    matches = list(_iter_matching_models(model_ref, cwd_guildfile))
    if len(matches) == 1:
        return matches[0]
    if len(matches) > 0 and model_ref:
        return _complete_match_one_model(model_ref, matches)
    return None
def _iter_matching_models(model_ref, cwd_guildfile):
    """Yields models matching model_ref.

    With an empty/None model_ref, yields the cwd guildfile's default
    model (stopping there) and any anonymous (unnamed) models.
    """
    from guild import model as modellib  # expensive
    for model in modellib.iter_models():
        if model_ref:
            if _match_model_ref(model_ref, model):
                yield model
        else:
            if cwd_guildfile and _is_default_cwd_model(model, cwd_guildfile):
                yield model
                # Default model is authoritative - stop looking.
                break
            if not model.name:
                yield model
def _is_default_cwd_model(model, cwd_guildfile):
default_model = cwd_guildfile.default_model
return (
default_model
and default_model.guildfile.dir == model.modeldef.guildfile.dir
and default_model.name == model.name
)
def _match_model_ref(model_ref, model):
if "/" in model_ref:
return model_ref in model.fullname
else:
return model_ref in model.name
def _complete_match_one_model(model_ref, matches):
    """Disambiguates multiple matches via exact name; raises otherwise."""
    complete_match = _model_by_name(model_ref, matches)
    if complete_match:
        return complete_match
    raise MultipleMatchingModels(model_ref, matches)
def _model_by_name(name, models):
for model in models:
if model.name == name:
return model
return None
def _maybe_no_model_error(model_ref):
    """Raises NoMatchingModel for a non-empty model_ref; no-op otherwise."""
    if model_ref:
        raise NoMatchingModel(model_ref)
def _opdef_for_model_op(model, op_name):
    """Returns the named opdef, or the model's default op when op_name is empty."""
    if op_name:
        return model.modeldef.get_operation(op_name)
    return model.modeldef.default_operation
###################################################################
# Run support
###################################################################
def init_run(path=None):
    """Returns a new Run for path, generating an id/path when not given."""
    if not path:
        run_id = runlib.mkid()
        path = os.path.join(var.runs_dir(), run_id)
    else:
        # Path-derived id keeps id and directory name in sync.
        run_id = os.path.basename(path)
    return runlib.Run(run_id, path)
def set_run_marker(run, marker):
    """Creates an empty marker file under the run's guild dir."""
    open(run.guild_path(marker), "w").close()
def clear_run_marker(run, marker):
    """Removes the named marker file if it exists."""
    util.ensure_deleted(run.guild_path(marker))
def set_run_pending(run):
    """Marks run as PENDING, clearing any STAGED marker."""
    set_run_marker(run, "PENDING")
    clear_run_marker(run, "STAGED")
def clear_run_pending(run):
    """Clears the run's PENDING marker."""
    clear_run_marker(run, "PENDING")
def write_sourcecode_digest(run, sourcecode_root):
    """Writes a digest of the run's copied source code as a run attr."""
    src = os.path.join(run.dir, sourcecode_root)
    digest = file_util.files_digest(src)
    run.write_attr("sourcecode_digest", digest)
def write_vcs_commit(opdef, run):
    """Writes the opdef project's VCS commit as a run attr, best effort.

    A missing commit is silently ignored; read errors are logged.
    """
    if not opdef.guildfile.dir:
        return
    try:
        commit, status = vcs_util.commit_for_dir(opdef.guildfile.dir)
    except vcs_util.NoCommit:
        pass
    except vcs_util.CommitReadError as e:
        log.warning("error reading VCS commit: %s", e)
    else:
        run.write_attr("vcs_commit", _format_vcs_commit(commit, status))
def _format_vcs_commit(commit, status):
if status:
return commit + "*"
return commit
def set_run_started(run):
    """Writes the current timestamp as the run's 'started' attr."""
    started = runlib.timestamp()
    run.write_attr("started", started)
def set_run_staged(run):
    """Marks run as STAGED (clearing PENDING) and records the start time."""
    set_run_marker(run, "STAGED")
    clear_run_pending(run)
    set_run_started(run)
###################################################################
# Run labels
###################################################################
def run_label(label_template, flag_vals):
    """Returns a run label for template and flag vals.

    When no template is given, the default NAME=VALUE label is used.
    """
    default_label = _default_run_label(flag_vals)
    if label_template is None:
        return default_label
    return _render_label_template(label_template, flag_vals, default_label)
def _default_run_label(flag_vals):
    """Returns a default run label for a map of flag values.

    The default label is a string of space-separated NAME=VALUE flag
    assignments; None-valued flags are omitted.
    """
    non_null = {name: val for name, val in flag_vals.items() if val is not None}
    return " ".join(
        flag_util.flag_assigns(non_null, truncate_floats=True, shorten_paths=True)
    )
def _render_label_template(label_template, flag_vals, default_label):
    """Returns a rendered label template.

    `label_template` is a string containing flag references. Flag
    references are resolved with values defined in `flag_vals`.
    `default_label` is provided as an additional supported value,
    which may be referenced using the name 'default_label' in the
    template.
    """
    formatted_vals = _render_template_formatted_vals(flag_vals, default_label)
    return _render_label_template_formatted(label_template, formatted_vals)
def _render_template_formatted_vals(flag_vals, default_label):
    """Returns a map of template names to FormattedValue-wrapped flag vals.

    'default_label' is included as a plain string; None-valued flags
    are omitted.
    """
    formatted_vals = {
        "default_label": default_label,
    }
    formatted_vals.update(
        {
            name: FormattedValue(val)
            for name, val in flag_vals.items()
            if val is not None
        }
    )
    return formatted_vals
class FormattedValue(object):
    """Wraps a flag value, formatting it lazily on str() and caching the result."""

    def __init__(self, value):
        self._value = value
        # Cached formatted string; computed on first str().
        self._str = None
    @property
    def wrapped_value(self):
        """The unformatted, wrapped value."""
        return self._value
    @wrapped_value.setter
    def wrapped_value(self, value):
        self._value = value
        # Invalidate the cached formatted string.
        self._str = None
    def __str__(self):
        if self._str is None:
            self._str = flag_util.format_flag(
                self._value, truncate_floats=True, shorten_paths=True
            )
        return self._str
def _render_label_template_formatted(label_template, formatted_vals):
    """Renders a label template with formatted values.

    `formatted_vals` is a map of names to formatted values. A
    formatted value is a value wrapped as a `FormattedValue` instance.

    This function supports value filters in the form
    ``${NAME|FILTER:ARG1,ARG2}``, which require values to be
    wrapped with `FormattedValue`.
    """
    tokens = LABEL_TOKENS_P.split(label_template)
    return "".join([_rendered_str(_render_token(t, formatted_vals)) for t in tokens])
def _render_token(token, vals):
    """Renders one template token.

    Non-reference tokens pass through unchanged. A `${NAME|T1|T2}`
    reference is resolved from vals and run through each transform.
    """
    m = LABEL_FLAG_REF_P.match(token)
    if not m:
        return token
    ref_parts = m.group(1).split("|")
    name = ref_parts[0]
    transforms = ref_parts[1:]
    val = vals.get(name)
    for t in transforms:
        val = _apply_template_transform(t, val)
    return val
def _apply_template_transform(t, val):
    """Applies the template transform t (e.g. 'default:x', 'basename') to val.

    Unwraps FormattedValue inputs first. Unknown transforms log a
    warning and yield the string '#error#'.
    """
    if hasattr(val, "wrapped_value"):
        val = val.wrapped_value
    # Transform syntax is NAME[:ARG].
    parts = t.split(":", 1)
    if len(parts) == 1:
        name, arg = parts[0], None
    else:
        name, arg = parts
    if name[:1] == "%":
        return _t_python_format(val, name)
    elif name == "default":
        return _t_default(val, arg)
    elif name == "basename":
        if arg:
            # Fixed garbled message ("argment to baseline").
            log.warning("ignoring argument to basename in %r", t)
        return _t_basename(val)
    elif name == "unquote":
        return _t_unquote(val)
    else:
        log.warning("unsupported template transform: %r", t)
        return "#error#"
def _t_python_format(val, fmt):
try:
return fmt % val
except ValueError as e:
log.warning("error formatting %r with %r: %s", val, fmt, e)
return val
except TypeError:
# Silently ignore type errors. ValueErrors (logged above)
# indicate an invalid formatting string, which is of
# interest. Running into an unexpected value type should let
# that value pass through.
return val
def _t_default(val, arg):
if val is None:
return arg or ""
return val
def _t_basename(val):
    """Returns the basename of a path value, '' for falsy values."""
    if not val:
        return ""
    # Trailing separators are stripped so 'a/b/' yields 'b'.
    return os.path.basename(util.strip_trailing_sep(val))
def _t_unquote(val):
    """Removes one pair of surrounding single quotes from a string value."""
    if (
        isinstance(val, six.string_types)
        and len(val) >= 2
        and val[0] == "'"
        and val[-1] == "'"
    ):
        return val[1:-1]
    return val
def _rendered_str(s):
if s is None:
return ""
return str(s)
###################################################################
# Source code support
###################################################################
def sourcecode_select_for_opdef(opdef):
    """Returns a FileSelect of source code files for opdef."""
    root = _opdef_sourcecode_root(opdef)
    rules = _select_rules_for_opdef(opdef)
    return file_util.FileSelect(root, rules)
def _opdef_sourcecode_root(opdef):
return opdef.sourcecode.root or opdef.modeldef.sourcecode.root
def _select_rules_for_opdef(opdef):
    """Returns source code select rules for opdef.

    When disabled, everything is excluded; otherwise base defaults are
    extended by model-level and then op-level config rules.
    """
    if _sourcecode_disabled(opdef):
        return [file_util.exclude("*")]
    root = _opdef_select_rules_root(opdef)
    return (
        _base_sourcecode_select_rules()
        + _sourcecode_config_rules(opdef.modeldef.sourcecode, root)
        + _sourcecode_config_rules(opdef.sourcecode, root)
    )
def _opdef_select_rules_root(opdef):
    """Returns the directory select rules are resolved against."""
    root_base = opdef.guildfile.dir
    sourcecode_root = opdef_sourcecode_root(opdef)
    if not sourcecode_root:
        return root_base
    return os.path.join(root_base, sourcecode_root)
def _sourcecode_disabled(opdef):
op_config = opdef.sourcecode
model_config = opdef.modeldef.sourcecode
return op_config.disabled or model_config.disabled and not op_config.specs
def opdef_sourcecode_root(opdef):
    """Returns the source code root for opdef (op config, else model config)."""
    for root in (opdef.sourcecode.root, opdef.modeldef.sourcecode.root):
        if root:
            return root
    return opdef.modeldef.sourcecode.root
def _base_sourcecode_select_rules():
    """Returns the default source code select rules.

    Excludes caches, dot/venv/build dirs, then includes text files up
    to the default size and count limits.
    """
    return [
        _rule_exclude_pycache_dirs(),
        _rule_exclude_dot_dirs(),
        _rule_exclude_nocopy_dirs(),
        _rule_exclude_venv_dirs(),
        _rule_exclude_venv_dirs_win(),
        _rule_exclude_build_dirs(),
        _rule_exclude_egg_info_dirs(),
        _rule_include_limited_text_files(),
    ]
def _rule_exclude_pycache_dirs():
    """Excludes Python __pycache__ directories."""
    return file_util.exclude("__pycache__", type="dir")
def _rule_exclude_dot_dirs():
    """Excludes hidden (dot-prefixed) directories."""
    return file_util.exclude(".*", type="dir")
def _rule_exclude_nocopy_dirs():
    """Excludes directories containing a '.guild-nocopy' sentinel file."""
    return file_util.exclude("*", type="dir", sentinel=".guild-nocopy")
def _rule_exclude_venv_dirs():
    """Excludes POSIX virtualenv dirs (identified by 'bin/activate')."""
    return file_util.exclude("*", type="dir", sentinel="bin/activate")
def _rule_exclude_venv_dirs_win():
    """Excludes Windows virtualenv dirs (identified by 'Scripts/activate')."""
    return file_util.exclude("*", type="dir", sentinel="Scripts/activate")
def _rule_exclude_build_dirs():
    """Excludes 'build' directories."""
    return file_util.exclude("build", type="dir")
def _rule_exclude_egg_info_dirs():
    """Excludes setuptools '*.egg-info' directories."""
    return file_util.exclude("*.egg-info", type="dir")
def _rule_include_limited_text_files():
    """Includes text files subject to default size and match-count limits."""
    return file_util.include(
        "*",
        type="text",
        size_lt=MAX_DEFAULT_SOURCECODE_FILE_SIZE + 1,
        max_matches=MAX_DEFAULT_SOURCECODE_COUNT,
    )
def _sourcecode_config_rules(config, root):
    """Returns select rules for user-configured sourcecode specs."""
    return [_rule_for_select_spec(spec, root) for spec in config.specs]
def _rule_for_select_spec(spec, root):
    """Maps a config spec to an include or exclude file_util rule."""
    if spec.type == "include":
        return _file_util_rule(file_util.include, spec, root)
    elif spec.type == "exclude":
        return _file_util_rule(file_util.exclude, spec, root)
    else:
        # Specs are validated upstream - any other type is a bug.
        assert False, spec.type
def _file_util_rule(rule_f, spec, root):
    """Builds a rule via rule_f with the spec's (possibly globbed) patterns."""
    patterns = _spec_patterns(spec, root)
    return rule_f(patterns, type=spec.patterns_type)
def _spec_patterns(spec, root):
    """Returns patterns for spec.

    If spec patterns_type is not specified, applies a glob to any
    existing patterns that reference directories relative to root. For
    example, if a pattern is 'foo' and root is '/' and the directory
    '/foo' exists, the pattern is returned as 'foo/*'. This is a
    convenience so that un-globbed directories match all files as a
    user might expect.
    """
    if spec.patterns_type:
        return spec.patterns
    return [_apply_dir_glob(root, p) for p in spec.patterns]
def _apply_dir_glob(root, pattern):
if os.path.isdir(os.path.join(root, pattern)):
pattern = os.path.join(pattern, "*")
return pattern
def copy_sourcecode(sourcecode_src, sourcecode_select, dest_dir, handler_cls=None):
    """Copies selected source code from sourcecode_src to dest_dir.

    `handler_cls` defaults to SourceCodeCopyHandler, which warns on
    files skipped by the default limits.
    """
    handler_cls = handler_cls or SourceCodeCopyHandler
    file_util.copytree(
        dest_dir, sourcecode_select, sourcecode_src, handler_cls=handler_cls
    )
class SourceCodeCopyHandler(file_util.FileCopyHandler):
    """Handler to log warnings when source code files are skipped.

    Only logs warnings when the default rules are in effect.
    """

    # Ensures the max-matches warning is logged at most once per copy.
    _warned_max_matches = False
    _warning_help_suffix = (
        " To control which files are copied, define 'sourcecode' "
        "for the operation in a Guild file."
    )
    def ignore(self, path, rule_results):
        """Called for each skipped file; warns for default-limit skips."""
        fullpath = os.path.join(self.src_root, path)
        if self._default_rules_in_effect(rule_results):
            assert len(rule_results) == 1, rule_results
            (_path, failed_test), _rule = rule_results[0]
            if failed_test.name == "max matches":
                self._warn_max_matches()
            elif failed_test.name == "size":
                self._warn_max_size(fullpath)
    @staticmethod
    def _default_rules_in_effect(results):
        # The default rule set is identified by a single include rule
        # carrying the default size and match-count limits.
        return (
            len(results) == 1
            and results[0][1].result is True
            and results[0][1].size_lt == MAX_DEFAULT_SOURCECODE_FILE_SIZE + 1
            and results[0][1].max_matches == MAX_DEFAULT_SOURCECODE_COUNT
        )
    def _warn_max_matches(self):
        if self._warned_max_matches:
            return
        log.warning(
            "Found more than %i source code files but will only "
            "copy %i as a safety measure.%s",
            MAX_DEFAULT_SOURCECODE_COUNT,
            MAX_DEFAULT_SOURCECODE_COUNT,
            self._warning_help_suffix,
        )
        self._warned_max_matches = True
    def _warn_max_size(self, path):
        log.warning(
            "Skipping potential source code file %s because it's " "too big.%s",
            path,
            self._warning_help_suffix,
        )
###################################################################
# Op command support
###################################################################
def op_cmd_for_opdef(opdef, extra_cmd_env=None):
    """Returns tuple of op cmd for opdef and associated run attrs.

    Some operations require additional information from the opdef,
    which is returned as the second element of the two-tuple.
    """
    cmd_args, run_attrs = _op_cmd_args_and_run_attrs(opdef)
    cmd_env = _op_cmd_env(opdef, extra_cmd_env or {})
    cmd_flags = _op_cmd_flags(opdef)
    cmd_flags_dest = opdef.flags_dest or "args"
    op_cmd = op_cmd_lib.OpCmd(cmd_args, cmd_env, cmd_flags, cmd_flags_dest)
    return op_cmd, run_attrs
def _op_cmd_args_and_run_attrs(opdef):
    """Returns (exec_args, run_attrs) for opdef.

    Expands '${main_args}', marks '${flag_args}', and resolves other
    supported placeholders in the exec template.
    """
    main_args = split_cmd(opdef.main or "")
    exec_str, run_attrs = _opdef_exec_and_run_attrs(opdef)
    exec_args = split_cmd(exec_str)
    _apply_main_args(main_args, exec_args)
    _apply_flag_args_marker(exec_args)
    _apply_other_args(exec_args, opdef)
    return exec_args, run_attrs
def split_cmd(cmd):
    """Splits a command string into args; lists pass through unchanged."""
    if isinstance(cmd, list):
        return cmd
    return util.shlex_split(cmd or "")
def _opdef_exec_and_run_attrs(opdef):
    """Returns exec template for opdef with required run attrs for opdef.

    If exec is specified explicitly, it's returned, otherwise main or
    steps are used to generate a template. Precedence is
    exec > main > steps, with warnings for ignored settings.

    Raises InvalidOpDef if none of exec, main, or steps is defined.
    """
    if opdef.exec_:
        if opdef.main:
            log.warning(
                "operation 'exec' and 'main' both specified, " "ignoring 'main'"
            )
        if opdef.steps:
            log.warning(
                "operation 'exec' and 'steps' both specified, " "ignoring 'steps'"
            )
        return opdef.exec_, None
    elif opdef.main:
        if opdef.steps:
            log.warning(
                "operation 'main' and 'steps' both specified, " "ignoring 'steps'"
            )
        return DEFAULT_EXEC, None
    elif opdef.steps:
        return STEPS_EXEC, _run_attrs_for_steps(opdef)
    else:
        raise InvalidOpDef(opdef, "must define either exec, main, or steps")
def _run_attrs_for_steps(opdef):
return {
"steps": opdef.steps,
}
def _apply_main_args(main_args, exec_args):
    """Splices main_args into exec_args in place at each '${main_args}' marker.

    NOTE(review): after a splice, the index advances by len(main_args)
    plus one, so the element immediately following the spliced-in args
    is never examined - presumably intentional (e.g. to avoid
    re-expansion), but worth confirming.
    """
    i = 0
    while i < len(exec_args):
        if exec_args[i] == "${main_args}":
            exec_args[i : i + 1] = main_args
            i += len(main_args)
        i += 1
def _apply_flag_args_marker(exec_args):
for i, val in enumerate(exec_args):
if val == "${flag_args}":
exec_args[i] = "__flag_args__"
def _apply_other_args(args, opdef):
repl = [
("${project_dir}", opdef.guildfile.dir),
]
for i, val in enumerate(args):
for pattern, text in repl:
if val and text:
args[i] = val.replace(pattern, text)
def _op_cmd_env(opdef, extra_env):
    """Returns the process env for opdef, merged with extra_env.

    Guild-specific vars (GUILD_PLUGINS, PROJECT_DIR, FLAGS_DEST,
    HANDLE_KEYBOARD_INTERRUPT) are applied last.
    """
    env = dict(opdef.env or {})
    env.update(extra_env or {})
    env["GUILD_PLUGINS"] = _op_plugins(opdef)
    env["PROJECT_DIR"] = opdef.guildfile.dir or ""
    if opdef.flags_dest:
        env["FLAGS_DEST"] = opdef.flags_dest
    if opdef.handle_keyboard_interrupt:
        env["HANDLE_KEYBOARD_INTERRUPT"] = "1"
    return env
def _op_plugins(opdef):
    """Returns a sorted, comma-separated list of plugin names enabled for opdef.

    A plugin is included if it's configured for the operation (see
    `_project_plugins`) and reports itself enabled for the opdef.
    """
    from guild import plugin as pluginlib  # expensive
    project_plugins = _project_plugins(opdef)
    op_plugins = []
    for name, plugin in pluginlib.iter_plugins():
        if not _plugin_selected(plugin, project_plugins):
            log.debug("plugin '%s' not configured for operation", name)
            continue
        enabled, reason = plugin.enabled_for_op(opdef)
        if not enabled:
            log.debug(
                "plugin '%s' configured for operation but cannot be enabled%s",
                name,
                " (%s)" % reason if reason else "",
            )
            continue
        log.debug(
            "plugin '%s' enabled for operation%s",
            name,
            " (%s)" % reason if reason else "",
        )
        op_plugins.append(name)
    return ",".join(sorted(op_plugins))
def _project_plugins(opdef):
if opdef.plugins is not None:
return opdef.plugins or []
return opdef.modeldef.plugins or []
def _plugin_selected(plugin, selected):
for name in selected:
if name == plugin.name or name in plugin.provides:
return True
return False
def _op_cmd_flags(opdef):
    """Returns a map of flag name to CmdFlag for opdef's flags."""
    return {flagdef.name: _flag_cmd_for_flagdef(flagdef) for flagdef in opdef.flags}
def _flag_cmd_for_flagdef(flagdef):
    """Returns a CmdFlag describing how flagdef is passed on the command line."""
    return op_cmd_lib.CmdFlag(
        arg_name=flagdef.arg_name,
        arg_skip=_flagdef_arg_skip(flagdef),
        arg_switch=flagdef.arg_switch,
        arg_split=flagdef.arg_split,
        env_name=flagdef.env_name,
    )
def _flagdef_arg_skip(flagdef):
if flagdef.arg_skip is not None:
return flagdef.arg_skip
return flagdef.opdef.default_flag_arg_skip
###################################################################
# Flag vals for opdef
###################################################################
def flag_vals_for_opdef(opdef, user_flag_vals=None, force=False):
    """Returns (flag_vals, resource_flagdefs) for opdef.

    `user_flag_vals` is an optional map of user-provided values.
    `force` suppresses validation errors (unknown flags, invalid
    values, missing required flags).

    Raises NoSuchFlagError, InvalidFlagValue, InvalidFlagChoice, or
    MissingRequiredFlags when validation fails and force is false.
    """
    # Normalize None to {} - dict(None) raises TypeError and
    # _apply_choice_vals below calls .get() on user_flag_vals.
    user_flag_vals = user_flag_vals or {}
    flag_vals = dict(user_flag_vals)
    _apply_default_flag_vals(opdef.flags, flag_vals)
    _apply_coerce_flag_vals(opdef.flags, force, flag_vals)
    resource_flagdefs = _resource_flagdefs(opdef, flag_vals)
    _apply_coerce_flag_vals(resource_flagdefs, force, flag_vals)
    _apply_default_flag_vals(resource_flagdefs, flag_vals)
    all_flagdefs = opdef.flags + resource_flagdefs
    if not force:
        _check_no_such_flags(flag_vals, all_flagdefs)
        _check_flag_vals(flag_vals, all_flagdefs)
        _check_required_flags(flag_vals, all_flagdefs)
    _apply_choice_vals(opdef.flags, user_flag_vals, flag_vals)
    return flag_vals, resource_flagdefs
def _apply_coerce_flag_vals(flagdefs, force, vals):
    """Coerces vals in place per matching flagdef types.

    Coercion errors are suppressed (value left as-is) when force is
    true; otherwise InvalidFlagValue is raised.
    """
    flagdef_lookup = {flagdef.name: flagdef for flagdef in flagdefs}
    for name, val in vals.items():
        try:
            coerced = _coerced_flag_value(name, val, flagdef_lookup)
        except InvalidFlagValue:
            if not force:
                raise
        else:
            vals[name] = coerced
def _coerced_flag_value(name, val, flagdefs):
    """Returns val coerced per its flagdef, or unchanged if flag is unknown.

    Raises InvalidFlagValue when coercion fails.
    """
    flagdef = flagdefs.get(name)
    if not flagdef:
        return val
    try:
        return coerce_flag_value(val, flagdef)
    except (ValueError, TypeError) as e:
        raise InvalidFlagValue(val, flagdef, str(e))
def coerce_flag_value(val, flagdef):
    """Coerces a flag value based on flagdef settings."""
    # None, untyped/auto flags, and flag functions pass through.
    if (
        val is None
        or not flagdef
        or not flagdef.type
        or flagdef.type == "auto"
        or flag_util.is_flag_function(val)
    ):
        return val
    if isinstance(val, list):
        # Coerce each element of a list value.
        return [coerce_flag_value(x, flagdef) for x in val]
    elif flagdef.arg_split:
        return _coerce_flag_val_split_parts(val, flagdef)
    else:
        return _coerce_typed_flag_value(val, flagdef)
def _coerce_typed_flag_value(val, flagdef):
    """Coerces val per flagdef.type; unknown types warn and pass through.

    Raises ValueError for uncoercible values (e.g. a float for an
    'int' flag).
    """
    assert flagdef.type is not None
    if flagdef.type == "string":
        return _try_coerce_flag_val(val, str, flagdef)
    elif flagdef.type == "int":
        if isinstance(val, float):
            # Disallow silent truncation of floats.
            raise ValueError("invalid value for type 'int'")
        return _try_coerce_flag_val(val, int, flagdef)
    elif flagdef.type == "float":
        return _try_coerce_flag_val(val, float, flagdef)
    elif flagdef.type == "boolean":
        return _try_coerce_flag_val(val, bool, flagdef)
    elif flagdef.type == "number":
        if isinstance(val, (float, int)):
            return val
        # Try int first, then float.
        return _try_coerce_flag_val(val, (int, float), flagdef)
    elif flagdef.type in ("path", "existing-path"):
        return _resolve_rel_path(val)
    else:
        log.warning(
            "unknown flag type '%s' for %s - cannot coerce",
            flagdef.type,
            flagdef.name,
        )
        return val
def _coerce_flag_val_split_parts(val, flagdef):
    """Coerces each part of a splittable flag value and rejoins them."""
    assert flagdef.type is not None
    encoded = _ensure_encoded_flag_val(val)
    parts = flag_util.split_encoded_flag_val(encoded, flagdef.arg_split)
    coerced = [_coerce_typed_flag_value(part, flagdef) for part in parts]
    return flag_util.join_splittable_flag_vals(coerced, flagdef.arg_split)
def _ensure_encoded_flag_val(val):
    """Returns val as an encoded (string) flag value."""
    if isinstance(val, six.string_types):
        return val
    return flag_util.encode_flag_val(val)
def _try_coerce_flag_val(val, funs, flagdef):
    """Applies each of funs to val, returning the first success.

    Raises ValueError naming flagdef.type when all conversions fail.
    """
    if not isinstance(funs, tuple):
        funs = (funs,)
    for f in funs:
        try:
            return f(val)
        except ValueError as e:
            log.debug("value error applying %s to %r: %s", f, val, e)
    raise ValueError("invalid value for type '%s'" % flagdef.type)
def _resolve_rel_path(val):
val = os.path.expanduser(val)
if val and not os.path.isabs(val):
return os.path.abspath(val)
return val
def _resource_flagdefs(opdef, flag_vals):
    """Returns proxy flagdefs for opdef's resource dependencies."""
    return list(_iter_resource_flagdefs(opdef, flag_vals))
def _iter_resource_flagdefs(opdef, flag_vals):
    """Yields a proxy flagdef per resource that exposes a flag.

    Uses the resource's flag name when defined, else the name of a
    resource backed by an operation source.
    """
    for resdef in iter_opdef_resources(opdef, flag_vals):
        if resdef.flag_name:
            yield _ResourceFlagDefProxy(resdef.flag_name, opdef)
        else:
            op_name = _required_operation_name(resdef)
            if op_name:
                yield _ResourceFlagDefProxy(op_name, opdef)
def iter_opdef_resources(opdef, flag_vals=None):
    """Yields resource defs for opdef's dependencies.

    Dependencies that fail to resolve are silently skipped.
    """
    flag_vals = flag_vals or {}
    for dep in opdef.dependencies:
        try:
            resdef, _location = op_dep.resource_def(dep, flag_vals)
        except op_dep.OpDependencyError:
            pass
        else:
            yield resdef
def _required_operation_name(resdef):
    """Returns resdef's name when any of its sources is an operation."""
    for source in resdef.sources:
        if op_dep.is_operation_source(source):
            return resdef.name
    return None
def _ResourceFlagDefProxy(name, opdef):
    """Returns a synthetic string-typed FlagDef for a resource flag."""
    data = {
        # Resource flags are passed via resolution, not as args.
        "arg-skip": True,
        "type": "string",
        "null-label": "unspecified",
    }
    return guildfile.FlagDef(name, data, opdef)
def _check_no_such_flags(flag_vals, flagdefs):
flagdef_names = {flagdef.name for flagdef in flagdefs}
for name in flag_vals:
if name not in flagdef_names:
raise NoSuchFlagError(name)
def _check_flag_vals(vals, flagdefs):
    """Validates each flagdef's value from vals (choices, type, range)."""
    for flag in flagdefs:
        val = vals.get(flag.name)
        _check_flag_val(val, flag)
def _check_flag_val(val, flagdef):
    """Validates val for flagdef, recursing into lists and split values."""
    if isinstance(val, list):
        for x in val:
            _check_flag_val(x, flagdef)
    elif flagdef.arg_split and val is not None:
        _check_splittable_flag_val(val, flagdef)
    else:
        _check_flag_val_(val, flagdef)
def _check_splittable_flag_val(val, flagdef):
    """Splits an encoded value by arg_split and validates each decoded part."""
    assert flagdef.arg_split is not None
    encoded = _ensure_encoded_flag_val(val)
    split_val = [
        flag_util.decode_flag_val(part)
        for part in flag_util.split_encoded_flag_val(encoded, flagdef.arg_split)
    ]
    for x in split_val:
        _check_flag_val_(x, flagdef)
def _check_flag_val_(val, flagdef):
    """Applies the choice, type, and range checks to a single value.

    Flag functions (e.g. search specs) are exempt from validation.
    """
    if flag_util.is_flag_function(val):
        return
    _check_flag_choice(val, flagdef)
    _check_flag_type(val, flagdef)
    _check_flag_range(val, flagdef)
def _check_flag_choice(val, flag):
if not val or flag.allow_other or not flag.choices:
return
for choice in flag.choices:
if choice.alias and val == choice.alias:
return
if choice.value == val:
return
raise InvalidFlagChoice(val, flag)
def _check_flag_type(val, flag):
    """Raises InvalidFlagValue for an 'existing-path' flag whose path is missing."""
    if flag.type == "existing-path":
        if val and not os.path.exists(val):
            raise InvalidFlagValue(val, flag, "%s does not exist" % val)
def _check_flag_range(val, flag):
    """Raises InvalidFlagValue when val is outside the flag's min/max bounds."""
    if val is None:
        return
    if flag.min is not None and val < flag.min:
        raise InvalidFlagValue(val, flag, "out of range (less than min %s)" % flag.min)
    if flag.max is not None and val > flag.max:
        raise InvalidFlagValue(
            val, flag, "out of range (greater than max %s)" % flag.max
        )
def _apply_choice_vals(flagdefs, user_vals, target_vals):
    """Resolves choice aliases and applies choice-defined flags to target_vals.

    When a flag's value matches a choice alias, the alias is replaced
    by the canonical choice value. Any flags defined by the matching
    choice are applied unless the user explicitly provided them.
    """
    for flagdef in flagdefs:
        if not flagdef.choices:
            continue
        flag_val = target_vals.get(flagdef.name)
        if flag_val is None:
            continue
        for choice in flagdef.choices:
            # Match against the alias when defined, else the value.
            if (choice.alias or choice.value) != flag_val:
                continue
            if choice.alias:
                target_vals[flagdef.name] = choice.value
            if choice.flags:
                _apply_choice_flags(choice.flags, user_vals, target_vals)
def _apply_choice_flags(choice_flags, user_vals, target_vals):
for flag_name, flag_val in choice_flags.items():
if user_vals.get(flag_name) is None:
target_vals[flag_name] = flag_val
def _check_required_flags(vals, flagdefs):
    """Raises MissingRequiredFlags when any required flag lacks a value."""
    missing = _missing_flags(vals, flagdefs)
    if missing:
        raise MissingRequiredFlags(missing)
def _missing_flags(vals, flagdefs):
    """Returns required flagdefs whose value is None or ''."""
    return [
        flag
        for flag in flagdefs
        if flag.required and _flag_missing(vals.get(flag.name))
    ]
def _flag_missing(val):
if val is None or val == "":
return True
return False
def _apply_default_flag_vals(flagdefs, flag_vals):
"""Applies default values to flag_vals.
Skips flag values that are already defined in flag_vals.
"""
for flagdef in flagdefs:
if flagdef.name not in flag_vals:
flag_vals[flagdef.name] = flagdef.default
def flag_assigns(flags, skip_none=False):
    """Returns sorted NAME=VALUE assignment strings for flags.

    None-valued flags are omitted when skip_none is true.
    """
    return [
        flag_assign(name, val)
        for name, val in sorted(flags.items())
        if not skip_none or val is not None
    ]
def flag_assign(name, val):
    """Returns a single NAME=VALUE assignment with the value flag-formatted."""
    return "%s=%s" % (name, flag_util.format_flag(val))
def parse_flag_assigns(args, opdef=None):
    """Parse "NAME=VAL" args into a dict of decoded flag values.

    Each arg is user-expanded first; when opdef is given, its flag type
    declarations steer value decoding. Later duplicates win.
    """
    flag_types = _flag_types_for_opdef(opdef) if opdef else None
    assigns = {}
    for arg in args:
        name, val = parse_flag_arg(os.path.expanduser(arg), flag_types)
        assigns[name] = val
    return assigns
def _flag_types_for_opdef(opdef):
    """Return flag-name -> type for opdef, opdef flags overriding resources."""
    merged = {}
    merged.update(_resource_flagdef_types(opdef))
    merged.update(_opdef_flagdef_types(opdef))
    return merged
def _resource_flagdef_types(opdef):
    """Return name -> type for resource flagdefs that declare a type."""
    types = {}
    for flagdef in _resource_flagdefs(opdef, {}):
        if flagdef.type:
            types[flagdef.name] = flagdef.type
    return types
def _opdef_flagdef_types(opdef):
return {flagdef.name: flagdef.type for flagdef in opdef.flags if flagdef.type}
def parse_flag_arg(arg, flag_types=None):
    """Split a "NAME=VAL" arg into (name, decoded value).

    Raises ArgValueError when arg contains no '='.
    """
    name, sep, encoded = arg.partition("=")
    if not sep:
        raise ArgValueError(arg)
    flag_type = flag_types.get(name) if flag_types else None
    return name, flag_util.decode_flag_val(encoded, flag_type)
def args_to_flags(args):
    """Returns `flags, other_args` for `args`.

    `other_args` is a list of args that cannot be converted to flag
    values.

    If args contains `--` then all args before the last occurring `--`
    are included in `other_args`.

    Uses `yaml_util.decode_yaml()` to decode flag arg values.
    """
    flags = {}
    flag_args, other_args = split_args_for_flags(args)
    # `name` tracks the most recent option name seen; a name with no
    # following value becomes a boolean switch (see _maybe_switch).
    name = None
    for arg in flag_args:
        if arg[:2] == "--":
            # Long option: flush any pending switch, start a new name.
            _maybe_switch(flags, name)
            name = arg[2:]
        elif arg[:1] == "-":
            # Could be a negative number rather than a short option.
            maybe_num = yaml_util.decode_yaml(arg)
            if isinstance(maybe_num, (int, float)):
                _set_or_append_flag(flags, name, maybe_num)
            elif len(arg) == 2:
                # "-x": short switch/option name.
                _maybe_switch(flags, name)
                name = arg[1]
            elif len(arg) > 2:
                # "-xVAL": short option with attached value.
                _maybe_switch(flags, name)
                name = arg[1]
                _set_or_append_flag(flags, name, arg[2:])
        elif name is not None:
            # Plain value following an option name.
            _set_or_append_flag(flags, name, yaml_util.decode_yaml(arg))
        else:
            # Value with no preceding option - not a flag.
            other_args.append(arg)
    # Flush a trailing valueless option as a boolean switch.
    _maybe_switch(flags, name)
    return flags, other_args
def _maybe_switch(flags, name):
if name is not None and name not in flags:
flags[name] = True
def _set_or_append_flag(flags, name, val):
try:
existing = flags[name]
except KeyError:
flags[name] = val
else:
if isinstance(existing, list):
existing.append(val)
else:
flags[name] = [existing, val]
def split_args_for_flags(args):
    """Returns `split_args, other_args` for `args`.

    Split occurs using the last occurrence of `--` in `args`. If `args`
    does not contain `--` returns `args, []`.
    """
    for idx in reversed(range(len(args))):
        if args[idx] == "--":
            return args[idx + 1 :], args[:idx]
    return args, []
def global_dest(global_name, flags):
    """Nest *flags* under the dotted path *global_name*.

    E.g. global_dest("a.b", {"x": 1}) -> {"a": {"b": {"x": 1}}}.
    """
    dest = {}
    node = dest
    for part in global_name.split("."):
        node = node.setdefault(part, {})
    node.update(flags)
    return dest
def flags_desc(flags, truncate_floats=False, delim=", "):
formatted = flag_util.flag_assigns(flags, truncate_floats)
return delim.join(formatted)
###################################################################
# Op deps IO
###################################################################
def op_deps_as_data(deps):
    """Serialize op dependencies to plain data; None yields []."""
    if not deps:
        return []
    return [_op_dep_as_data(dep) for dep in deps]
def _op_dep_as_data(dep):
    """Serialize one op dependency, including optional location/config."""
    data = _resdef_data(dep.resdef)
    location = dep.res_location
    if location:
        data["location"] = location
    config = dep.config
    if config:
        data["config"] = config
    return data
def _resdef_data(resdef):
data = dict(resdef._data)
data["name"] = resdef.name
return data
def op_deps_for_data(data):
    """Rebuild op dependencies from serialized data; None yields []."""
    items = data or []
    return [_op_dep_for_data(item) for item in items]
def _op_dep_for_data(data):
    """Rebuild one OpDependency from its serialized dict."""
    return op_dep.OpDependency(
        _resdef_from_data(data), data.get("location"), data.get("config")
    )
def _resdef_from_data(data):
    """Rebuild a ResourceDef from serialized data using a proxy modeldef."""
    return guildfile.ResourceDef(data.get("name"), data, _ModelDefProxy())
class _ModelDefProxy(object):
    # Minimal stand-in for a guildfile model def, used when rebuilding
    # resource defs outside a real guildfile context.
    name = ""
    guildfile = None
    # NOTE(review): class-level mutable default - all instances share this
    # list; safe only if it is never mutated.
    parents = []
###################################################################
# Trials support
###################################################################
def trials_for_batch_files(files):
    """Return the concatenated trials read from each batch file."""
    all_trials = []
    for batch_file in files:
        for trial in _read_trials(batch_file):
            all_trials.append(trial)
    return all_trials
def _read_trials(path):
    """Dispatch batch-file reading on file extension.

    JSON/YAML extensions use the YAML reader; "" and ".csv" use the CSV
    reader; anything else raises BatchFileError.
    """
    ext = os.path.splitext(path)[1].lower()
    if ext in (".json", ".yml", ".yaml"):
        return _yaml_trials(path)
    if ext in ("", ".csv"):
        return _csv_trials(path)
    raise BatchFileError(path, "unsupported extension")
def _yaml_trials(path):
    """Read trials from a YAML (or JSON) batch file.

    Raises BatchFileError when the file cannot be read or parsed.

    Fix: the original passed an unclosed ``open()`` handle to
    ``yaml.safe_load``, leaking the file descriptor.
    """
    try:
        with open(path, "r") as f:
            data = yaml.safe_load(f)
    except Exception as e:
        raise BatchFileError(path, str(e))
    return _coerce_trials_data(data, path)
def _coerce_trials_data(data, path):
if not isinstance(data, list):
if not isinstance(data, dict):
raise BatchFileError(
path,
"invalid data type for trials: expected list or dict"
", got %s" % type(data).__name__,
)
data = [data]
for item in data:
if not isinstance(item, dict):
raise BatchFileError(
path, "invalid data type for trial %r: expected dict" % item
)
return data
def _csv_trials(path):
    """Read trials from a CSV batch file.

    The first row supplies flag names; each subsequent row becomes a
    trial dict of decoded flag values. An empty file yields [].

    Fix: the original passed an unclosed ``open()`` handle to
    ``csv.reader``, leaking the file descriptor.
    """
    with open(path, "r") as f:
        reader = csv.reader(f)
        try:
            flag_names = next(reader)
        except StopIteration:
            return []
        # Rows must be consumed while the file is still open.
        return [dict(zip(flag_names, _flag_vals(row))) for row in reader]
def _flag_vals(row):
    """Decode each CSV cell in row into a flag value."""
    vals = []
    for cell in row:
        vals.append(flag_util.decode_flag_val(cell))
    return vals
###################################################################
# Restart support
###################################################################
def restart_needed(run, flags):
    """Return True when run's status or flags require a restart."""
    if run.status in RESTART_NEEDED_STATUS:
        return True
    return run.get("flags") != flags
def run_params_for_restart(run, user_specified_params=None):
    """Returns params for use in run command for a restart of run.

    The set of applicable params in the run "run_params" attribute are
    considered. If user_specified_params contains a non-default value
    (i.e. the user has indicated she wants to use a specific value)
    that param will not be included in the result. If
    user_specified_params is None (default) then all applicable params
    for a restart that are defined in run are returned.
    """
    # Note about applicable run params:
    #
    # A limited number of params could possibly apply to args - those
    # are listed here. This list has to be maintained as new args are
    # added to the run command. Params must be included where the user
    # would reasonably assume applicability and never in cases where
    # the use of the parameter would be clearly surprising to the user
    # (e.g. reusing the 'yes' param, which would alter the expected
    # behavior of the command on a restart).
    #
    # Params that are saved as run attrs or otherwise available under
    # the run guild path (e.g. opspec, label, flags) should NOT be
    # returned in this value in the interest of eliminating redundancy
    # and potential mismatch bugs. Anyone needing those values MUST
    # read them via run attrs or applicable run interface
    # (e.g. opref in the case of opspec).
    #
    applicable_run_params = [
        "force_flags",
        "gpus",
        "max_trials",
        "maximize",
        "minimize",
        "no_gpus",
        "opt_flags",
        "optimizer",
        "random_seed",
    ]
    # Imported here to avoid a circular import at module load time.
    from guild.commands.run import run as run_cmd
    run_params = run.get("run_params", {})
    if not isinstance(run_params, dict):
        # NOTE(review): silently returns None on malformed run_params -
        # callers must tolerate a None result.
        return
    # Click context built with no args yields each param's default value.
    baseline_params = run_cmd.make_context("", []).params
    result = {}
    for name in run_params:
        val = _coerce_run_param(name, run_params[name])
        if name not in applicable_run_params:
            continue
        if user_specified_params is None:
            result[name] = val
            continue
        try:
            user_specified_val = user_specified_params[name]
        except KeyError:
            result[name] = val
            continue
        if user_specified_val != baseline_params[name]:
            # User explicitly set this param - don't override it.
            continue
        result[name] = val
    return result
def _coerce_run_param(name, val):
"""Ensures that named param is valid for the run command."""
if name == "flags":
return tuple(val)
return val
###################################################################
# Wait for proc
###################################################################
def wait_for_proc(p, stop_after_min, poll_interval=None, kill_delay=None):
    """Wait for process p, terminating it after stop_after_min minutes.

    Returns the process return code. If the deadline passes before the
    process exits, the process is terminated (see _terminate).
    """
    poll_interval = poll_interval or DEFAULT_PROC_POLL_INTERVAL
    kill_delay = kill_delay or DEFAULT_PROC_KILL_DELAY
    started = time.time()
    stop_at = time.time() + stop_after_min * 60
    while time.time() < stop_at:
        returncode = p.poll()
        if returncode is not None:
            # Process exited on its own.
            return returncode
        time.sleep(poll_interval)
    elapsed = (time.time() - started) / 60
    log.info("Stopping process early (pid %i) - %.1f minute(s) elapsed", p.pid, elapsed)
    return _terminate(p, poll_interval, kill_delay)
def _terminate(p, poll_interval, kill_delay):
    """Terminate process p, escalating to kill after kill_delay seconds.

    Returns the process return code. Raises ProcessError when the
    return code is neither 0 nor -15 (SIGTERM).
    """
    kill_at = time.time() + kill_delay
    p.terminate()
    # Give the process kill_delay seconds to exit gracefully.
    while p.poll() is None and time.time() < kill_at:
        time.sleep(poll_interval)
    if p.poll() is None:
        log.warning("Process did not terminate (pid %i), killing", p.pid)
        p.kill()
        time.sleep(poll_interval)
    returncode = p.poll()
    if returncode not in (0, -15):
        raise ProcessError("Process did not terminate gracefully (pid %i)" % p.pid)
    return returncode
###################################################################
# Other utils
###################################################################
def split_batch_files(flag_args):
    """Split args into (batch file paths, remaining args).

    Args prefixed with "@" are batch files; the prefix is stripped.
    """
    batch_files = []
    remaining = []
    for arg in flag_args:
        if arg.startswith("@"):
            batch_files.append(arg[1:])
        else:
            remaining.append(arg)
    return batch_files, remaining
def find_matching_runs(opref, flag_vals, include_pending=False):
    """Return all runs matching opref and flag_vals."""
    matching = []
    for run in var.runs():
        if is_matching_run(run, opref, flag_vals, include_pending):
            matching.append(run)
    return matching
def is_matching_run(run, opref, flag_vals, include_pending=False):
    """True when run has the same opref and flags (and is not pending,
    unless include_pending is set)."""
    if run.opref != opref:
        return False
    if run.get("flags") != flag_vals:
        return False
    return include_pending or run.status != "pending"
def op_flag_encoder(flag_encoder):
    """Resolve a "MODULE:FUNCTION" flag encoder spec to a callable.

    Returns None when the spec is empty, malformed, or cannot be
    resolved; failures are logged as warnings.

    Fix: warning messages said "flag decoder" although this resolves an
    encoder.
    """
    if not flag_encoder:
        return None
    parts = flag_encoder.split(":")
    if len(parts) != 2:
        log.warning("invalid flag encoder %r - must be MODULE:FUNCTION", flag_encoder)
        return None
    mod_name, fun_name = parts
    try:
        mod = importlib.import_module(mod_name)
    except Exception as e:
        if log.getEffectiveLevel() <= logging.DEBUG:
            log.exception("importing %s", mod_name)
        else:
            log.warning("cannot load flag encoder %r: %s", flag_encoder, e)
        return None
    fun = getattr(mod, fun_name, None)
    if fun is None:
        log.warning(
            "cannot load flag encoder %r: no such attribute in %s",
            flag_encoder,
            mod_name,
        )
        return None
    return fun
def write_proc_lock(pid, run):
    """Write pid to the run's LOCK file, marking the run as owned."""
    lock_path = run.guild_path("LOCK")
    with open(lock_path, "w") as f:
        f.write(str(pid))
def delete_proc_lock(run):
    """Remove the run's LOCK file; a missing file is not an error."""
    lock_path = run.guild_path("LOCK")
    try:
        os.remove(lock_path)
    except OSError:
        pass
def init_logging(default_level=logging.WARN):
    """Initialize logging from LOG_LEVEL / LOG_FORMAT env vars.

    Skipped entirely when LOG_INIT_SKIP=1 is set.
    """
    if os.getenv("LOG_INIT_SKIP") == "1":
        return
    log_level = int(os.getenv("LOG_LEVEL", default_level))
    log_format = os.getenv("LOG_FORMAT", "%(levelname)s: [%(name)s] %(message)s")
    loglib.init_logging(log_level, {"_": log_format})
def current_run():
    """Thin wrapper returning the current run via the internal API."""
    return _api.current_run()
def handle_system_exit(e):
    """Thin wrapper delegating SystemExit handling to guild.main."""
    main.handle_system_exit(e)
|
mipass.py | #!/usr/bin/env python
'''A password keeping service.
.. moduleauthor:: Lenx Wei <lenx.wei@gmail.com>
A server provides password cache and lookup service.
Depending on pycrypto, python-daemon
Password format::
id_md5_hashed_and_hex = rnd,pass_aes_encrypted_by_master_key1_and_hex
rnd is used to generate the IV, sha256 using the master key1
master key::
master_hash = rnd,(rnd,master_key2)_md5_first_2_bytes_and_hex
master key1 = sha256(rand, master key)^1024
master key2 = sha256(rand, master key)^1032
'''
from Crypto.Hash import MD5, SHA256
from Crypto.Cipher import AES
from binascii import b2a_hex, a2b_hex
import socket
import threading
import SocketServer
import os
import stat
import time
import sys
import random
# Global verbosity switch - when True, requests/responses are echoed.
verbose = False
# Configuration file holding the master hash and encrypted passwords.
conf_fn = "~/.missh"
conf_fn = os.path.expanduser(conf_fn)
# Unix domain socket used by the keeping service.
unixsock = "~/.missh.sock"
unixsock = os.path.expanduser(unixsock)
# NOTE(review): module-level server placeholder; start_service binds a
# local `server` instead, so this is never updated - confirm intent.
server = 0
# Inactivity timer; reset on each handled request.
ti = None
# if critical_error is True, the server must exit after informing the clients.
critical_error = False
# utility functions
def rand():
    """Return a random integer in [0, 2**32) as a decimal string.

    NOTE(review): uses the non-cryptographic `random` module; the value
    seeds IV derivation - confirm this is acceptable for the threat model.
    """
    value = random.randrange(2 ** 32)
    return str(value)
def kill_self():
    """Remove the service socket and hard-kill this process (SIGKILL).

    NOTE(review): os.remove raises if the socket is already gone, in
    which case the kill below never runs - confirm callers tolerate this.
    """
    os.remove(unixsock)
    # sys.exit(0)
    os.kill(os.getpid(), 9)
def remove_remark(line):
    """Strip a trailing '#' comment and surrounding whitespace from line."""
    comment_start = line.find("#")
    if comment_start >= 0:
        line = line[:comment_start]
    return line.strip()
def get_key_val(line):
    """Split "key = value" into (lowercased key, value); ("", "") if no '='."""
    key, sep, val = line.partition("=")
    if not sep:
        return "", ""
    return key.strip().lower(), val.strip()
def get_resp_val(line):
    """Return the stripped text after the first ':', or None if absent."""
    marker = line.find(":")
    if marker < 0:
        return None
    return line[marker + 1:].strip()
def is_resp_err(line):
    '''check whether the response is err or not.
    :param line: the whole response line.
    :returns: True when sth is wrong, False when ok.
    '''
    if line is None or line == "":
        return True
    return line.lower().startswith("error:")
def get_header(s):
    '''
    Split the line using the first ' '.
    :param s: the input string.
    :returns: (lowercased header, tail); tail is '' when no split occurs.
    '''
    head, sep, tail = s.partition(' ')
    if sep and head:
        return head.lower(), tail
    return s.lower(), ''
def mi_getseq(enc):
    # Return the seq prefix (text before the first ',') of an encrypted
    # value. NOTE(review): returns int 0 on failure but a str on success -
    # callers must handle both types. Python 2 code (print statement).
    pos = enc.find(',')
    if pos >= 0:
        seq = enc[:pos]
        return seq
    print "bad seq:", enc
    return 0
def gen_AES_param(seq, key):
    # Derive the AES key and IV from (key, seq):
    #   k  = SHA256(key)            (32 bytes)
    #   iv = SHA256(key || seq)[:16] (hash state continues after digest)
    m = SHA256.new(key)
    k = m.digest()  # 32bytes
    m.update(seq)
    iv = m.digest()[:16]
    return k, iv
def mi_decrypt(enc, key):
    '''decrypt an encrypted password.
    :param enc: the encrypted password, "seq,hexbody"
    :param key: the key
    :returns: the plain if succeeds, None otherwise
    '''
    seq = None
    pos = enc.find(',')
    if pos >= 0:
        seq = enc[:pos]
    # NOTE(review): an enc starting with ',' yields seq == "" which
    # passes this check - confirm empty seq is intended to be accepted.
    if seq == None:
        print "bad seq:", enc
        return None
    try:
        body = a2b_hex(enc[pos + 1:])
    except:
        print "bad enc:", enc
        return None
    k, iv = gen_AES_param(seq, key)
    obj = AES.new(k, AES.MODE_CBC, iv)
    try:
        plain = obj.decrypt(body)
    except Exception, err:
        print str(err)
        return None
    # Padding added by mi_encrypt is '\n', so strip trailing newlines.
    return plain.rstrip('\n')
def mi_encrypt(seq, plain, key):
    """encrypt a plain password.
    algorithm::
      key1 = SHA256(key)
      iv = SHA256(key, seq)
      body = plain padded using '\\n' to be aligned with 32bytes
      enc = AES.CBC(body, key1, iv)
      encrypted password = seq,enc
    :param seq: a rand string, used to generate the IV
    :param plain: the plain password
    :param key: a string, as the key
    :returns: the encrypted password

    NOTE(review): because padding uses '\\n', any trailing newlines in
    the plaintext itself are lost on decrypt (mi_decrypt rstrips them).
    """
    if plain == None:
        print "null password"
        return "0,"
    k, iv = gen_AES_param(seq, key)
    obj = AES.new(k, AES.MODE_CBC, iv)
    # Pad to a 32-byte boundary; always at least one '\n' is NOT
    # guaranteed (an exact multiple gets no padding beyond alignment).
    body = plain + '\n' * (32 - (len(plain) - 1) % 32 - 1)
    enc = obj.encrypt(body)
    return "%s,%s" % (seq, b2a_hex(enc))
def get_seq(s):
    """Split "seq,rest" into (seq, rest).

    When no ',' is found (or it is at index 0) the seq is "" and the
    tail preserves the original slicing behavior (s[pos + 1:]).
    """
    pos = s.find(",")
    if pos > 0:
        return s[:pos], s[pos + 1:]
    return "", s[pos + 1:]
# Database states (see pass_db.state):
# opened, master key not yet verified
ms_start = 0
# opened with a verified master key
ms_got_master = 1
# configuration missing or invalid
ms_void_cfg = 2
# configuration file
class pass_db:
    '''
    the password database
    Notice: use self.master_lock to keep threading safety.
    '''
    # NOTE(review): these are class attributes used as instance defaults;
    # password_enc in particular is a shared mutable dict across
    # instances - safe only while a single pass_db exists per process.
    fn = None
    master1 = None
    master_hash = None
    master_lock = None
    password_enc = {}  # id:pass, as in file
    timeout = 120  # in min
    init_ok = False
    def __init__(self, fn):
        '''constructor.
        read cfg from fn.
        :param fn: the configuration file name
        '''
        self.fn = fn
        self.init_ok = False
        self.read_cfg()
        self.master_lock = threading.RLock()
    def fname(self):
        # Accessor for the configuration file path.
        return self.fn
    def read_cfg(self):
        '''
        read configuration from file
        '''
        line_cnt = 0
        if not os.path.exists(self.fn):
            return
        try:
            f = open(self.fn)
            for line in f:
                line_cnt = line_cnt + 1
                # strip and remove remarks
                line = remove_remark(line)
                if line == '':
                    continue
                # fetch the key and value
                try:
                    key, val = get_key_val(line)
                    if(key == "master"):
                        self.master_hash = val
                        # print "master:",val
                    elif(key == "timeout"):
                        self.timeout = int(val)
                        # print "timeout:", val
                    elif(key == ""):
                        # NOTE(review): raising a string is invalid in
                        # modern Python 2 (raises TypeError instead); the
                        # bare except below catches either way.
                        raise "no key"
                    else:
                        self.password_enc[key] = val
                except:
                    print "error config line #%d : %s" % (line_cnt, line)
                    continue
            self.init_ok = self.master_hash != None
            # print "init:", self.init_ok
            f.close()
        except:
            print "bad configuration file:", self.fn
            return
    def get_master_hash(self, master, old=None):
        '''master hash.
        only store master_hash in cfg.
        algorithm::
          hex(MD5(master))[:3]
        :param master: the master key
        :returns: the master1 key, the hash

        NOTE(review): the docstring says MD5/first-3 but the code uses
        iterated SHA256 and 4 hex chars - the docstring looks stale.
        '''
        assert master != None
        m = SHA256.new()
        if old != None:
            # Reuse the stored seq so the hash is comparable.
            seq = get_seq(old)[0]
        else:
            seq = rand()
        m.update(seq)
        m.update(master)
        # 1024 total iterations produce master1; 8 more produce the hash.
        for i in xrange(1023):
            m.update(m.digest())
        master1 = m.digest()
        for i in xrange(8):
            m.update(m.digest())
        return master1, seq + "," + m.hexdigest()[:4]
    def check_id(self, id):
        """validate an id that it should only contain alpha or number characters.
        :param id: the id
        :returns: True or False
        """
        return id.isalnum();
    def set_pass(self, id, pwd):
        """set password for id
        self.master should be valid.
        :param id: the user id
        :param pwd: the new password
        :returns: True if succeeds, False otherwise
        """
        assert self.master1 != None
        if not self.check_id(id):
            return False
        with self.master_lock:
            self.password_enc[id] = mi_encrypt(rand(), pwd, self.master1)
            return self.write_cfg()
    def get_pass(self, id):
        """get password of id
        self.master1 should be valid.
        :param id: the user id
        :returns: the password, None if not existed
        """
        assert self.master1 != None
        with self.master_lock:
            enc = self.password_enc.get(id)
            if enc != None:
                return mi_decrypt(enc, self.master1)
            return None
    def set_master(self, master):
        """set a new master key.
        self.master1 should be valid when some passwords already exist.
        :param master: the new master key
        :returns: True if succeeds, False otherwise
        """
        new_pass = {}
        if len(self.password_enc) > 0 and self.master1 == None:
            return False
        master1, master_hash = self.get_master_hash(master)
        with self.master_lock:
            # Re-encrypt every stored password under the new key.
            for i in self.password_enc:
                new_pass[i] = mi_encrypt(rand(), mi_decrypt(self.password_enc[i], self.master1), master1)
                print new_pass[i]
            self.master1 = master1
            self.master_hash = master_hash
            self.password_enc = new_pass
            self.init_ok = True
            return self.write_cfg()
    def check_master(self, master):
        ''' Check whether the master is correct or not.
        If master is correct, remember the master key.
        :param master: the key to be tested
        :returns: 1 if succeeds, 0 otherwise
        '''
        with self.master_lock:
            master1, master_hash = self.get_master_hash(master, self.master_hash)
            # A None stored hash means first use - accept and remember.
            if self.master_hash == None or master_hash == self.master_hash:
                self.master1 = master1
                self.master_hash = master_hash
                return 1
            return 0
    def write_cfg(self):
        '''write the new configuration file.
        using two files to replace each other, in order to control the potential leakage.
        '''
        if(self.master_hash == None):
            print "Error: can't write cfg without a master password"
            return False
        new_fn = self.fn + ".new"
        old_fn = self.fn + ".old"
        try:
            os.rename(old_fn, new_fn)
        except:
            pass
        with self.master_lock:
            # write to new_fn
            try:
                try:
                    f = open(new_fn, 'r+b')
                except:
                    f = open(new_fn, 'wb')
                f.write("# don't edit this file manually. please use 'missh -c'.\n")
                f.write("timeout = %d\n" % self.timeout)
                f.write("master = %s\n" % self.master_hash)
                f.write("\n")
                for i in self.password_enc:
                    f.write("%s = %s\n" % (i, self.password_enc[i]))
                f.truncate()
                f.flush()
                os.fsync(f.fileno())
                f.close()
            except Exception, e:
                print str(e)
                print "Error: can't write to %s." % new_fn
                # NOTE(review): this binds a LOCAL critical_error - the
                # module-level flag is never set (missing `global
                # critical_error`), so _check_error never fires. Bug?
                critical_error = True
                return False
            try:
                os.rename(self.fn, old_fn)
            except:
                # print "Error: can't rotate %s" % self.fn
                # critical_error = True
                # return False
                pass
            try:
                os.rename(new_fn, self.fn)
            except:
                print "Error: can't replace %s" % self.fn
                # NOTE(review): same local-binding issue as above.
                critical_error = True
                return False
            return True
    def state(self):
        """return the current state.
        There are 3 possible states:
        * ms_void_cfg : the database is not correctly initialized.
        * ms_start : the database is opened, but the master key is not known.
        * ms_got_master : the database is opened with a correct master key.
        """
        if self.init_ok == False:
            return ms_void_cfg
        elif self.master1 == None:
            return ms_start
        else:
            return ms_got_master
db = None
class master_handler(SocketServer.BaseRequestHandler):
    """The unix socket service for pass_db.
    """
    # Per-connection receive buffer and end-of-stream flag (class
    # attributes used as instance defaults).
    data = ""
    fin = False
    def state(self):
        # Delegate to the module-level database.
        return db.state()
    def _recv_line(self):
        """
        return a line without '\n' from the buffer
        """
        while(1):
            pos = self.data.find('\n')
            if pos >= 0:
                s = self.data[:pos]
                self.data = self.data[pos + 1:]
                return s
            d = self.request.recv(1024)
            if len(d) == 0:
                # connection closed
                s = self.data
                self.data = ""
                self.fin = True
                return s
            self.data = self.data + d
    def _check_error(self):
        # Inform the client and shut the service down when a critical
        # write failure was flagged.
        # NOTE(review): critical_error is only ever assigned locally in
        # pass_db.write_cfg, so this path may be dead - confirm.
        if critical_error:
            self._send("Error: updating the config file %s failed. The password service must stop. Please check its privilege.\n" % db.fname())
            time.sleep(1)
            kill_self()
    def _send(self, msg):
        if verbose:
            print "-> %s" % msg
        self.request.sendall(msg)
    def handle(self):
        """handle input.
        messages:
        * state
          * state: [0|1|2]
        * version
          * version 0.0.1
        * check_master master_key
          * Error: no valid config file
          * Error: bad master key
          * check_master: ok
        * set_master master_key
          * set_master: master_key
          * Error: updating the config file %s failed. The password service must stop. Please check its privilege.
          * Error: need old master key for existing passwords
        * get_pass id
          * Error: 'id' doesn't exist
          * Error: no master key
          * Error: invalid id '%s'
          * get_pass: %s
        * set_pass id
          * Error: bad input '%s'
          * Error: no master key
          * Error: updating the config file %s failed. The password service must stop. Please check its privilege.
          * Error: invalid id '%s'
          * set_pass: %s, %s
        * get_timeout
          * get_timeout: %d
        * set_timeout %d
          * Error: bad timeout '%d'
          * set_timeout: %d
        * kill
          * kill: pid
        * unknown_header: %s
        """
        # [a:master:server:handle]
        global ti
        while not self.fin:
            # Reset the inactivity self-destruct timer on every request.
            ti.cancel()
            ti = threading.Timer(db.timeout * 60, kill_self)
            ti.start()
            line = self._recv_line()
            header, body = get_header(line)
            if verbose:
                print '<- %s %s' % (header, body)
            if header == 'state':
                self._send('state: %s\n' % self.state())
                continue
            elif header == 'version':
                self._send('version: 0.0.1\n')
                continue
            if header == 'get_timeout':
                self._send('get_timeout: %d\n' % db.timeout)
                continue
            if header == 'set_timeout':
                try:
                    to = int(body)
                except:
                    to = -1
                if to <= 0:
                    self._send('Error: bad timeout %s\n' % body)
                else:
                    if db.timeout != to:
                        db.timeout = to
                        db.write_cfg()
                    self._send('set_timeout: %d\n' % db.timeout)
                continue
            elif header == "get_pass":
                if self.state() == ms_got_master:
                    pwd = db.get_pass(body)
                    if pwd == None:
                        self._send("Error: '%s' doesn't exist\n" % body)
                    else:
                        self._send("get_pass: %s\n" % pwd)
                else:
                    self._send("Error: no master key\n")
                continue
            elif header == "check_master":
                if self.state() == ms_void_cfg:
                    self._send("Error: no valid config file\n")
                    continue
                r = db.check_master(body)
                if r:
                    self._send("check_master: ok\n")
                else:
                    self._send("Error: bad master key\n")
                continue
            elif header == 'set_master':
                if db.set_master(body):
                    if db.write_cfg():
                        self._send("set_master: %s\n" % body)
                    else:
                        self._check_error()
                        assert 0  # should never arrive here
                else:
                    self._send("Error: needs old master key for existing passwords\n")
                continue
            elif header == 'set_pass':
                # Body is "id,password".
                pos = body.find(",")
                if(pos <= 0):
                    self._send("Error: bad input '%s'\n" % body)
                    continue
                if self.state() != ms_got_master:
                    self._send("Error: no master key\n")
                    continue
                id = body[:pos]
                pwd = body[pos + 1:]
                if not db.set_pass(id, pwd):
                    self._check_error()
                    self._send("Error: invalid id '%s'\n" % id)
                    continue
                self._send("set_pass: %s, %s\n" % (id, pwd))
                continue
            elif header == 'kill':
                self._send("kill: %d\n" % os.getpid())
                try:
                    os.remove(unixsock)  # [FIXME]
                except:
                    pass
                kill_self()
                break
            elif header == '':
                continue
            print "unknown_header:'%s'" % header
class master_server(SocketServer.ThreadingMixIn, SocketServer.UnixStreamServer):
    # Threaded unix-socket server; each client gets a master_handler.
    pass
def start_service(unixsock):
    """Start the password keeping service.
    :param unixsock: the socket file
    """
    global db, ti
    db = pass_db(conf_fn)
    # Remove a stale socket from a previous run, if any.
    try:
        os.remove(unixsock)
    except:
        pass
    # NOTE(review): `server` here is a local that shadows the module-level
    # `server = 0` - confirm nothing relies on the module global.
    server = master_server(unixsock, master_handler)
    # Restrict the socket to the owning user.
    os.chmod(unixsock, stat.S_IREAD | stat.S_IWRITE)
    # Self-destruct after the configured idle timeout.
    ti=threading.Timer(db.timeout*60, kill_self)
    ti.start()
    server.serve_forever()
# # main loop
# server_thread = threading.Thread(target=server.serve_forever)
# server_thread.daemon = True
# server_thread.start()
# time.sleep(db.timeout * 60)
# # todo: check time, delay if needed
#
# server.shutdown()
# os.remove(unixsock)
# os._exit(0)
def start_service_daemon(unixsock):
    '''Start the service daemon.
    :param unixsock: the socket filename
    '''
    try:
        pid = os.fork()
        if(pid > 0):
            # Parent: give the child a moment to create the socket.
            time.sleep(1)
        else:
            # Child: detach and run the service.
            import daemon
            with daemon.DaemonContext():
                start_service(unixsock)
    except:
        print "Error: can't start the master service."
def url_hash(url):
    """generate the id used in the password keeping service.
    algorithm::
      hex(SHA256(url))[:32]
    """
    assert url is not None
    digest = SHA256.new()
    digest.update(url)
    return digest.hexdigest()[:32]
# [a:master:client]
# [a:master:client]
class client:
    """The client to communicate with the password keeping service.
    """
    sock_fn = ""
    connected = False
    master_status = 0  # 0 unconnected, 1 got master, -1 have an unknown master, -2 need to set a master
    # Per-connection receive buffer.
    data = ""
    def __init__(self, unixsock):
        self.sock_fn = unixsock
    def _recv_line(self):
        '''get a line from the receive buffer.
        :returns: the input line
        '''
        assert self.connected
        while(1):
            pos = self.data.find('\n')
            if pos >= 0:
                s = self.data[:pos]
                self.data = self.data[pos + 1:]
                return s
            d = self.sock.recv(1024)
            if len(d) == 0:
                # connection closed
                s = self.data
                self.data = ""
                self.close()
                return s
            self.data = self.data + d
    def _connect(self, try_hard):
        '''Connect to the password keeping service without retrying.
        '''
        if self.connected:
            return
        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        try:
            self.sock.connect(self.sock_fn)
            self.connected = True
            # Query the service state to learn the master-key status.
            # print "get state"
            self.sock.sendall("state\n")
            response = self._recv_line()
            state = get_resp_val(response)
            if state == str(ms_got_master):
                self.master_status = 1
            elif state == str(ms_void_cfg):
                self.master_status = -2
            else:
                self.master_status = -1
            # print "resp:", response
        except Exception, err:
            if verbose and try_hard:
                print "Error:", str(err)
            self.sock.close()
            if verbose and try_hard:
                print "Error: can't connect to the password keeping service."
            self.connected = False
            pass
    def connect(self, try_hard=True):
        '''Connect to the password keeping service.
        :param try_hard: True means that if connecting failed, try to start the service again and retry.

        NOTE(review): on repeated failure this recurses into connect()
        with try_hard still True - there is no retry limit, so a
        persistently failing service start can recurse indefinitely.
        '''
        self._connect(try_hard)
        if try_hard and not self.connected:
            try:
                start_service_daemon(self.sock_fn)
            except:
                pass
            self.connect()
        if not self.connected:
            print "Error: can't connect to the master password service."
            sys.exit(2)
    def need_master(self):
        '''test whether a master key is needed.
        :returns: 0 unconnected, 1 got master, -1 have an unknown master, -2 need to set a master
        '''
        if not self.connected:
            self.connect()
        return self.master_status
    def kill(self):
        '''kill the service.
        :returns: success or not, the response.
        '''
        if not self.connected:
            self.connect(False)
        if self.connected:
            self.sock.sendall("kill\n")
            resp = self._recv_line()
            self.close()
            return not is_resp_err(resp), get_resp_val(resp)
        else:
            return False, "No password keeping service found."
    def check_master(self, master):
        '''check and set the master key.
        :param master: the master key.
        :returns: success or not, the response.
        '''
        global verbose
        if not self.connected:
            self.connect()
        self.sock.sendall("check_master %s\n" % master)
        resp = self._recv_line()
        if verbose:
            print resp
        if not is_resp_err(resp):
            self.master_status = 1
        return not is_resp_err(resp), get_resp_val(resp)
    def set_master(self, master):
        '''set the master key.
        :param master: the master key.
        :returns: success or not, the response.
        '''
        if not self.connected:
            self.connect()
        self.sock.sendall("set_master %s\n" % master)
        resp = self._recv_line()
        if not is_resp_err(resp):
            self.master_status = 1
        return not is_resp_err(resp), get_resp_val(resp)
    def set_pass(self, url, password):
        '''set the password for url.
        :param url: the service.
        :param password: the password.
        :returns: success or not, the response.
        '''
        if not self.connected:
            self.connect()
        self.sock.sendall("set_pass %s,%s\n" % (url_hash(url), password))
        resp = self._recv_line()
        # print "set_pass, resp:", resp
        return not is_resp_err(resp), get_resp_val(resp)
    def get_pass(self, url):
        '''get the password.
        :param url: url of the service.
        :returns: success or not, the response.
        '''
        global verbose
        if not self.connected:
            self.connect()
        self.sock.sendall("get_pass %s\n" % url_hash(url))
        resp = self._recv_line()
        if verbose:
            print resp
        return not is_resp_err(resp), get_resp_val(resp)
    def get_timeout(self):
        '''get the timeout.
        :returns: success or not, the response.
        '''
        global verbose
        if not self.connected:
            self.connect()
        self.sock.sendall("get_timeout\n")
        resp = self._recv_line()
        if verbose:
            print resp
        return not is_resp_err(resp), get_resp_val(resp)
    def set_timeout(self, to):
        '''set the timeout.
        :param to: the new timeout.
        :returns: success or not, the response.
        '''
        if not self.connected:
            self.connect()
        self.sock.sendall("set_timeout %s\n" % to)
        resp = self._recv_line()
        # print "set_pass, resp:", resp
        return not is_resp_err(resp), get_resp_val(resp)
    def close(self):
        '''close the service.
        '''
        # print "closed"
        if self.connected:
            try:
                self.sock.close()
            except:
                pass
            self.connected = False
|
corehandlers.py | """
socket server request handlers leveraged by core servers.
"""
import logging
import os
import shlex
import shutil
import sys
import threading
import time
from itertools import repeat
import socketserver
from builtins import range
from queue import Queue, Empty
from core import utils
from core.api.tlv import coreapi, dataconversion, structutils
from core.config import ConfigShim
from core.emulator.data import ConfigData, ExceptionData
from core.emulator.data import EventData
from core.emulator.data import FileData
from core.emulator.emudata import InterfaceData
from core.emulator.emudata import LinkOptions
from core.emulator.emudata import NodeOptions
from core.emulator.enumerations import ConfigDataTypes
from core.emulator.enumerations import ConfigFlags
from core.emulator.enumerations import ConfigTlvs
from core.emulator.enumerations import EventTlvs
from core.emulator.enumerations import EventTypes
from core.emulator.enumerations import ExceptionTlvs
from core.emulator.enumerations import ExecuteTlvs
from core.emulator.enumerations import FileTlvs
from core.emulator.enumerations import LinkTlvs
from core.emulator.enumerations import LinkTypes
from core.emulator.enumerations import MessageFlags
from core.emulator.enumerations import MessageTypes
from core.emulator.enumerations import NodeTlvs
from core.emulator.enumerations import NodeTypes
from core.emulator.enumerations import RegisterTlvs
from core.emulator.enumerations import SessionTlvs
from core.location.mobility import BasicRangeModel
from core.nodes import nodeutils
from core.services.coreservices import ServiceManager
from core.services.coreservices import ServiceShim
class CoreHandler(socketserver.BaseRequestHandler):
"""
The CoreHandler class uses the RequestHandler class for servicing requests.
"""
    def __init__(self, request, client_address, server):
        """
        Create a CoreRequestHandler instance.

        :param request: request object
        :param str client_address: client address
        :param CoreServer server: core server instance
        :raises ValueError: when the configured thread count is < 1
        """
        self.done = False
        # Dispatch table: message type value -> handler method.
        self.message_handlers = {
            MessageTypes.NODE.value: self.handle_node_message,
            MessageTypes.LINK.value: self.handle_link_message,
            MessageTypes.EXECUTE.value: self.handle_execute_message,
            MessageTypes.REGISTER.value: self.handle_register_message,
            MessageTypes.CONFIG.value: self.handle_config_message,
            MessageTypes.FILE.value: self.handle_file_message,
            MessageTypes.INTERFACE.value: self.handle_interface_message,
            MessageTypes.EVENT.value: self.handle_event_message,
            MessageTypes.SESSION.value: self.handle_session_message,
        }
        self.message_queue = Queue()
        self.node_status_request = {}
        self._shutdown_lock = threading.Lock()
        self._sessions_lock = threading.Lock()
        # Worker threads drain message_queue (see handler_thread);
        # started before the base class begins handling the request.
        self.handler_threads = []
        num_threads = int(server.config["numthreads"])
        if num_threads < 1:
            raise ValueError("invalid number of threads: %s" % num_threads)
        logging.debug("launching core server handler threads: %s", num_threads)
        for _ in range(num_threads):
            thread = threading.Thread(target=self.handler_thread)
            self.handler_threads.append(thread)
            thread.start()
        self.master = False
        self.session = None
        # core emulator
        self.coreemu = server.coreemu
        # Prevent the client socket fd from leaking into exec'd children.
        utils.close_onexec(request.fileno())
        # Base-class init invokes setup()/handle()/finish().
        socketserver.BaseRequestHandler.__init__(self, request, client_address, server)
    def setup(self):
        """
        Client has connected, set up a new connection.

        :return: nothing
        """
        logging.debug("new TCP connection: %s", self.client_address)
    def finish(self):
        """
        Client has disconnected, end this request handler and disconnect
        from the session. Shutdown sessions that are not running.

        :return: nothing
        """
        logging.debug("finishing request handler")
        logging.debug("remaining message queue size: %s", self.message_queue.qsize())
        # give some time for message queue to deplete
        timeout = 10
        wait = 0
        while not self.message_queue.empty():
            logging.debug("waiting for message queue to empty: %s seconds", wait)
            time.sleep(1)
            wait += 1
            if wait == timeout:
                logging.warning("queue failed to be empty, finishing request handler")
                break
        logging.info("client disconnected: notifying threads")
        # Signal worker threads to stop, then wait (bounded) for each.
        self.done = True
        for thread in self.handler_threads:
            logging.info("waiting for thread: %s", thread.getName())
            thread.join(timeout)
            if thread.isAlive():
                logging.warning("joining %s failed: still alive after %s sec", thread.getName(), timeout)
        logging.info("connection closed: %s", self.client_address)
        if self.session:
            # remove client from session broker and shutdown if there are no clients
            self.remove_session_handlers()
            self.session.broker.session_clients.remove(self)
            if not self.session.broker.session_clients and not self.session.is_active():
                logging.info("no session clients left and not active, initiating shutdown")
                self.coreemu.delete_session(self.session.id)
        return socketserver.BaseRequestHandler.finish(self)
    def session_message(self, flags=0):
        """
        Build CORE API Sessions message based on current session info.

        :param int flags: message flags
        :return: packed session message, or None when there are no sessions
        """
        # parallel lists: index i of each list describes the same session;
        # each is later "|"-joined into one TLV string
        id_list = []
        name_list = []
        file_list = []
        node_count_list = []
        date_list = []
        thumb_list = []
        num_sessions = 0
        with self._sessions_lock:
            for _id in self.coreemu.sessions:
                session = self.coreemu.sessions[_id]
                num_sessions += 1
                id_list.append(str(_id))
                # missing values become empty strings so the joined lists
                # keep their positions aligned
                name = session.name
                if not name:
                    name = ""
                name_list.append(name)
                file_name = session.file_name
                if not file_name:
                    file_name = ""
                file_list.append(file_name)
                node_count_list.append(str(session.get_node_count()))
                # NOTE(review): reaches into the session's private _state_time
                date_list.append(time.ctime(session._state_time))
                thumb = session.thumbnail
                if not thumb:
                    thumb = ""
                thumb_list.append(thumb)
        session_ids = "|".join(id_list)
        names = "|".join(name_list)
        files = "|".join(file_list)
        node_counts = "|".join(node_count_list)
        dates = "|".join(date_list)
        thumbs = "|".join(thumb_list)
        if num_sessions > 0:
            tlv_data = b""
            # only pack TLVs that carry data
            if len(session_ids) > 0:
                tlv_data += coreapi.CoreSessionTlv.pack(SessionTlvs.NUMBER.value, session_ids)
            if len(names) > 0:
                tlv_data += coreapi.CoreSessionTlv.pack(SessionTlvs.NAME.value, names)
            if len(files) > 0:
                tlv_data += coreapi.CoreSessionTlv.pack(SessionTlvs.FILE.value, files)
            if len(node_counts) > 0:
                tlv_data += coreapi.CoreSessionTlv.pack(SessionTlvs.NODE_COUNT.value, node_counts)
            if len(dates) > 0:
                tlv_data += coreapi.CoreSessionTlv.pack(SessionTlvs.DATE.value, dates)
            if len(thumbs) > 0:
                tlv_data += coreapi.CoreSessionTlv.pack(SessionTlvs.THUMB.value, thumbs)
            message = coreapi.CoreSessionMessage.pack(flags, tlv_data)
        else:
            message = None
        return message
def handle_broadcast_event(self, event_data):
"""
Callback to handle an event broadcast out from a session.
:param core.emulator.data.EventData event_data: event data to handle
:return: nothing
"""
logging.debug("handling broadcast event: %s", event_data)
tlv_data = structutils.pack_values(coreapi.CoreEventTlv, [
(EventTlvs.NODE, event_data.node),
(EventTlvs.TYPE, event_data.event_type),
(EventTlvs.NAME, event_data.name),
(EventTlvs.DATA, event_data.data),
(EventTlvs.TIME, event_data.time),
(EventTlvs.SESSION, event_data.session)
])
message = coreapi.CoreEventMessage.pack(0, tlv_data)
try:
self.sendall(message)
except IOError:
logging.exception("error sending event message")
def handle_broadcast_file(self, file_data):
"""
Callback to handle a file broadcast out from a session.
:param core.emulator.data.FileData file_data: file data to handle
:return: nothing
"""
logging.debug("handling broadcast file: %s", file_data)
tlv_data = structutils.pack_values(coreapi.CoreFileTlv, [
(FileTlvs.NODE, file_data.node),
(FileTlvs.NAME, file_data.name),
(FileTlvs.MODE, file_data.mode),
(FileTlvs.NUMBER, file_data.number),
(FileTlvs.TYPE, file_data.type),
(FileTlvs.SOURCE_NAME, file_data.source),
(FileTlvs.SESSION, file_data.session),
(FileTlvs.DATA, file_data.data),
(FileTlvs.COMPRESSED_DATA, file_data.compressed_data),
])
message = coreapi.CoreFileMessage.pack(file_data.message_type, tlv_data)
try:
self.sendall(message)
except IOError:
logging.exception("error sending file message")
def handle_broadcast_config(self, config_data):
"""
Callback to handle a config broadcast out from a session.
:param core.emulator.data.ConfigData config_data: config data to handle
:return: nothing
"""
logging.debug("handling broadcast config: %s", config_data)
message = dataconversion.convert_config(config_data)
try:
self.sendall(message)
except IOError:
logging.exception("error sending config message")
def handle_broadcast_exception(self, exception_data):
"""
Callback to handle an exception broadcast out from a session.
:param core.emulator.data.ExceptionData exception_data: exception data to handle
:return: nothing
"""
logging.debug("handling broadcast exception: %s", exception_data)
tlv_data = structutils.pack_values(coreapi.CoreExceptionTlv, [
(ExceptionTlvs.NODE, exception_data.node),
(ExceptionTlvs.SESSION, exception_data.session),
(ExceptionTlvs.LEVEL, exception_data.level),
(ExceptionTlvs.SOURCE, exception_data.source),
(ExceptionTlvs.DATE, exception_data.date),
(ExceptionTlvs.TEXT, exception_data.text)
])
message = coreapi.CoreExceptionMessage.pack(0, tlv_data)
try:
self.sendall(message)
except IOError:
logging.exception("error sending exception message")
def handle_broadcast_node(self, node_data):
"""
Callback to handle an node broadcast out from a session.
:param core.emulator.data.NodeData node_data: node data to handle
:return: nothing
"""
logging.debug("handling broadcast node: %s", node_data)
message = dataconversion.convert_node(node_data)
try:
self.sendall(message)
except IOError:
logging.exception("error sending node message")
def handle_broadcast_link(self, link_data):
"""
Callback to handle an link broadcast out from a session.
:param core.emulator.data.LinkData link_data: link data to handle
:return: nothing
"""
logging.debug("handling broadcast link: %s", link_data)
per = ""
if link_data.per is not None:
per = str(link_data.per)
dup = ""
if link_data.dup is not None:
dup = str(link_data.dup)
tlv_data = structutils.pack_values(coreapi.CoreLinkTlv, [
(LinkTlvs.N1_NUMBER, link_data.node1_id),
(LinkTlvs.N2_NUMBER, link_data.node2_id),
(LinkTlvs.DELAY, link_data.delay),
(LinkTlvs.BANDWIDTH, link_data.bandwidth),
(LinkTlvs.PER, per),
(LinkTlvs.DUP, dup),
(LinkTlvs.JITTER, link_data.jitter),
(LinkTlvs.MER, link_data.mer),
(LinkTlvs.BURST, link_data.burst),
(LinkTlvs.SESSION, link_data.session),
(LinkTlvs.MBURST, link_data.mburst),
(LinkTlvs.TYPE, link_data.link_type),
(LinkTlvs.GUI_ATTRIBUTES, link_data.gui_attributes),
(LinkTlvs.UNIDIRECTIONAL, link_data.unidirectional),
(LinkTlvs.EMULATION_ID, link_data.emulation_id),
(LinkTlvs.NETWORK_ID, link_data.network_id),
(LinkTlvs.KEY, link_data.key),
(LinkTlvs.INTERFACE1_NUMBER, link_data.interface1_id),
(LinkTlvs.INTERFACE1_NAME, link_data.interface1_name),
(LinkTlvs.INTERFACE1_IP4, link_data.interface1_ip4),
(LinkTlvs.INTERFACE1_IP4_MASK, link_data.interface1_ip4_mask),
(LinkTlvs.INTERFACE1_MAC, link_data.interface1_mac),
(LinkTlvs.INTERFACE1_IP6, link_data.interface1_ip6),
(LinkTlvs.INTERFACE1_IP6_MASK, link_data.interface1_ip6_mask),
(LinkTlvs.INTERFACE2_NUMBER, link_data.interface2_id),
(LinkTlvs.INTERFACE2_NAME, link_data.interface2_name),
(LinkTlvs.INTERFACE2_IP4, link_data.interface2_ip4),
(LinkTlvs.INTERFACE2_IP4_MASK, link_data.interface2_ip4_mask),
(LinkTlvs.INTERFACE2_MAC, link_data.interface2_mac),
(LinkTlvs.INTERFACE2_IP6, link_data.interface2_ip6),
(LinkTlvs.INTERFACE2_IP6_MASK, link_data.interface2_ip6_mask),
(LinkTlvs.OPAQUE, link_data.opaque)
])
message = coreapi.CoreLinkMessage.pack(link_data.message_type, tlv_data)
try:
self.sendall(message)
except IOError:
logging.exception("error sending Event Message")
    def register(self):
        """
        Return a Register Message

        Advertises this daemon's capabilities (execute/emulation server plus
        every configurable manager and model of the current session) to the
        connected GUI.

        :return: register message data
        """
        logging.info("GUI has connected to session %d at %s", self.session.id, time.ctime())
        tlv_data = b""
        tlv_data += coreapi.CoreRegisterTlv.pack(RegisterTlvs.EXECUTE_SERVER.value, "core-daemon")
        tlv_data += coreapi.CoreRegisterTlv.pack(RegisterTlvs.EMULATION_SERVER.value, "core-daemon")
        # one (config_type, name) TLV pair per configurable session component
        tlv_data += coreapi.CoreRegisterTlv.pack(self.session.broker.config_type, self.session.broker.name)
        tlv_data += coreapi.CoreRegisterTlv.pack(self.session.location.config_type, self.session.location.name)
        tlv_data += coreapi.CoreRegisterTlv.pack(self.session.mobility.config_type, self.session.mobility.name)
        for model_name in self.session.mobility.models:
            model_class = self.session.mobility.models[model_name]
            tlv_data += coreapi.CoreRegisterTlv.pack(model_class.config_type, model_class.name)
        tlv_data += coreapi.CoreRegisterTlv.pack(self.session.services.config_type, self.session.services.name)
        tlv_data += coreapi.CoreRegisterTlv.pack(self.session.emane.config_type, self.session.emane.name)
        for model_name in self.session.emane.models:
            model_class = self.session.emane.models[model_name]
            tlv_data += coreapi.CoreRegisterTlv.pack(model_class.config_type, model_class.name)
        tlv_data += coreapi.CoreRegisterTlv.pack(self.session.options.config_type, self.session.options.name)
        tlv_data += coreapi.CoreRegisterTlv.pack(self.session.metadata.config_type, self.session.metadata.name)
        return coreapi.CoreRegMessage.pack(MessageFlags.ADD.value, tlv_data)
def sendall(self, data):
"""
Send raw data to the other end of this TCP connection
using socket"s sendall().
:param data: data to send over request socket
:return: data sent
"""
return self.request.sendall(data)
def receive_message(self):
"""
Receive data and return a CORE API message object.
:return: received message
:rtype: core.api.tlv.coreapi.CoreMessage
"""
try:
header = self.request.recv(coreapi.CoreMessage.header_len)
except IOError as e:
raise IOError("error receiving header (%s)" % e)
if len(header) != coreapi.CoreMessage.header_len:
if len(header) == 0:
raise EOFError("client disconnected")
else:
raise IOError("invalid message header size")
message_type, message_flags, message_len = coreapi.CoreMessage.unpack_header(header)
if message_len == 0:
logging.warning("received message with no data")
data = b""
while len(data) < message_len:
data += self.request.recv(message_len - len(data))
if len(data) > message_len:
error_message = "received message length does not match received data (%s != %s)" % (
len(data), message_len)
logging.error(error_message)
raise IOError(error_message)
try:
message_class = coreapi.CLASS_MAP[message_type]
message = message_class(message_flags, header, data)
except KeyError:
message = coreapi.CoreMessage(message_flags, header, data)
message.message_type = message_type
logging.exception("unimplemented core message type: %s", message.type_str())
return message
def queue_message(self, message):
"""
Queue an API message for later processing.
:param message: message to queue
:return: nothing
"""
logging.debug("queueing msg (queuedtimes = %s): type %s", message.queuedtimes, MessageTypes(
message.message_type))
self.message_queue.put(message)
def handler_thread(self):
"""
CORE API message handling loop that is spawned for each server
thread; get CORE API messages from the incoming message queue,
and call handlemsg() for processing.
:return: nothing
"""
while not self.done:
try:
message = self.message_queue.get(timeout=1)
self.handle_message(message)
except Empty:
pass
def handle_message(self, message):
"""
Handle an incoming message; dispatch based on message type,
optionally sending replies.
:param message: message to handle
:return: nothing
"""
if self.session and self.session.broker.handle_message(message):
logging.debug("message not being handled locally")
return
logging.debug("%s handling message:\n%s", threading.currentThread().getName(), message)
if message.message_type not in self.message_handlers:
logging.error("no handler for message type: %s", message.type_str())
return
message_handler = self.message_handlers[message.message_type]
try:
# TODO: this needs to be removed, make use of the broadcast message methods
replies = message_handler(message)
self.dispatch_replies(replies, message)
except:
logging.exception("%s: exception while handling message: %s", threading.currentThread().getName(), message)
def dispatch_replies(self, replies, message):
"""
Dispatch replies by CORE to message msg previously received from the client.
:param list replies: reply messages to dispatch
:param message: message for replies
:return: nothing
"""
for reply in replies:
message_type, message_flags, message_length = coreapi.CoreMessage.unpack_header(reply)
try:
reply_message = coreapi.CLASS_MAP[message_type](
message_flags,
reply[:coreapi.CoreMessage.header_len],
reply[coreapi.CoreMessage.header_len:]
)
except KeyError:
# multiple TLVs of same type cause KeyError exception
reply_message = "CoreMessage (type %d flags %d length %d)" % (
message_type, message_flags, message_length)
logging.debug("sending reply:\n%s", reply_message)
try:
self.sendall(reply)
except IOError:
logging.exception("error dispatching reply")
    def handle(self):
        """
        Handle a new connection request from a client. Dispatch to the
        recvmsg() method for receiving data into CORE API messages, and
        add them to an incoming message queue.

        :return: nothing
        """
        # use port as session id
        port = self.request.getpeername()[1]
        # TODO: add shutdown handler for session
        self.session = self.coreemu.create_session(port, master=False)
        logging.debug("created new session for client: %s", self.session.id)
        # TODO: hack to associate this handler with this sessions broker for broadcasting
        # TODO: broker needs to be pulled out of session to the server/handler level
        if self.master:
            logging.debug("session set to master")
            self.session.master = True
        self.session.broker.session_clients.append(self)
        # add handlers for various data
        self.add_session_handlers()
        # set initial session state
        self.session.set_state(EventTypes.DEFINITION_STATE)
        # receive loop: runs until the client disconnects or a socket error
        while True:
            try:
                message = self.receive_message()
            except EOFError:
                logging.info("client disconnected")
                break
            except IOError:
                logging.exception("error receiving message")
                break
            message.queuedtimes = 0
            self.queue_message(message)
            # delay is required for brief connections, allow session joining
            if message.message_type == MessageTypes.SESSION.value:
                time.sleep(0.125)
            # broadcast node/link messages to other connected clients
            if message.message_type not in [MessageTypes.NODE.value, MessageTypes.LINK.value]:
                continue
            for client in self.session.broker.session_clients:
                # skip ourselves; the originator already has this message
                if client == self:
                    continue
                logging.debug("BROADCAST TO OTHER CLIENT: %s", client)
                client.sendall(message.raw_message)
def send_exception(self, level, source, text, node=None):
"""
Sends an exception for display within the GUI.
:param core.emulator.enumerations.ExceptionLevel level: level for exception
:param str source: source where exception came from
:param str text: details about exception
:param int node: node id, if related to a specific node
:return:
"""
exception_data = ExceptionData(
session=str(self.session.id),
node=node,
date=time.ctime(),
level=level.value,
source=source,
text=text
)
self.handle_broadcast_exception(exception_data)
def add_session_handlers(self):
logging.debug("adding session broadcast handlers")
self.session.event_handlers.append(self.handle_broadcast_event)
self.session.exception_handlers.append(self.handle_broadcast_exception)
self.session.node_handlers.append(self.handle_broadcast_node)
self.session.link_handlers.append(self.handle_broadcast_link)
self.session.file_handlers.append(self.handle_broadcast_file)
self.session.config_handlers.append(self.handle_broadcast_config)
def remove_session_handlers(self):
logging.debug("removing session broadcast handlers")
self.session.event_handlers.remove(self.handle_broadcast_event)
self.session.exception_handlers.remove(self.handle_broadcast_exception)
self.session.node_handlers.remove(self.handle_broadcast_node)
self.session.link_handlers.remove(self.handle_broadcast_link)
self.session.file_handlers.remove(self.handle_broadcast_file)
self.session.config_handlers.remove(self.handle_broadcast_config)
    def handle_node_message(self, message):
        """
        Node Message handler

        ADD creates a node, DELETE removes one (optionally broadcasting the
        removal), and neither flag means an in-place update.

        :param core.api.tlv.coreapi.CoreNodeMessage message: node message
        :return: replies to node message
        """
        replies = []
        # ADD and DELETE are mutually exclusive
        if message.flags & MessageFlags.ADD.value and message.flags & MessageFlags.DELETE.value:
            logging.warning("ignoring invalid message: add and delete flag both set")
            return ()
        node_type = None
        node_type_value = message.get_tlv(NodeTlvs.TYPE.value)
        if node_type_value is not None:
            node_type = NodeTypes(node_type_value)
        node_id = message.get_tlv(NodeTlvs.NUMBER.value)
        node_options = NodeOptions(
            name=message.get_tlv(NodeTlvs.NAME.value),
            model=message.get_tlv(NodeTlvs.MODEL.value)
        )
        node_options.set_position(
            x=message.get_tlv(NodeTlvs.X_POSITION.value),
            y=message.get_tlv(NodeTlvs.Y_POSITION.value)
        )
        # geographic TLVs arrive as strings; convert only when present
        lat = message.get_tlv(NodeTlvs.LATITUDE.value)
        if lat is not None:
            lat = float(lat)
        lon = message.get_tlv(NodeTlvs.LONGITUDE.value)
        if lon is not None:
            lon = float(lon)
        alt = message.get_tlv(NodeTlvs.ALTITUDE.value)
        if alt is not None:
            alt = float(alt)
        node_options.set_location(lat=lat, lon=lon, alt=alt)
        node_options.icon = message.get_tlv(NodeTlvs.ICON.value)
        node_options.canvas = message.get_tlv(NodeTlvs.CANVAS.value)
        node_options.opaque = message.get_tlv(NodeTlvs.OPAQUE.value)
        # services arrive "|"-delimited in a single TLV
        services = message.get_tlv(NodeTlvs.SERVICES.value)
        if services:
            node_options.services = services.split("|")
        if message.flags & MessageFlags.ADD.value:
            node = self.session.add_node(node_type, node_id, node_options)
            if node:
                # STRING flag: client wants status replies for this node
                if message.flags & MessageFlags.STRING.value:
                    self.node_status_request[node.id] = True
                if self.session.state == EventTypes.RUNTIME_STATE.value:
                    self.send_node_emulation_id(node.id)
        elif message.flags & MessageFlags.DELETE.value:
            with self._shutdown_lock:
                result = self.session.delete_node(node_id)
                # if we deleted a node broadcast out its removal
                if result and message.flags & MessageFlags.STRING.value:
                    tlvdata = b""
                    tlvdata += coreapi.CoreNodeTlv.pack(NodeTlvs.NUMBER.value, node_id)
                    flags = MessageFlags.DELETE.value | MessageFlags.LOCAL.value
                    replies.append(coreapi.CoreNodeMessage.pack(flags, tlvdata))
        # node update
        else:
            self.session.update_node(node_id, node_options)
        return replies
    def handle_link_message(self, message):
        """
        Link Message handler

        Unpacks both endpoints' interface TLVs and the link options, then
        adds, deletes, or updates the link depending on the message flags.

        :param core.api.tlv.coreapi.CoreLinkMessage message: link message to handle
        :return: link message replies
        """
        node_one_id = message.get_tlv(LinkTlvs.N1_NUMBER.value)
        node_two_id = message.get_tlv(LinkTlvs.N2_NUMBER.value)
        # endpoint one interface data
        interface_one = InterfaceData(
            _id=message.get_tlv(LinkTlvs.INTERFACE1_NUMBER.value),
            name=message.get_tlv(LinkTlvs.INTERFACE1_NAME.value),
            mac=message.get_tlv(LinkTlvs.INTERFACE1_MAC.value),
            ip4=message.get_tlv(LinkTlvs.INTERFACE1_IP4.value),
            ip4_mask=message.get_tlv(LinkTlvs.INTERFACE1_IP4_MASK.value),
            ip6=message.get_tlv(LinkTlvs.INTERFACE1_IP6.value),
            ip6_mask=message.get_tlv(LinkTlvs.INTERFACE1_IP6_MASK.value),
        )
        # endpoint two interface data
        interface_two = InterfaceData(
            _id=message.get_tlv(LinkTlvs.INTERFACE2_NUMBER.value),
            name=message.get_tlv(LinkTlvs.INTERFACE2_NAME.value),
            mac=message.get_tlv(LinkTlvs.INTERFACE2_MAC.value),
            ip4=message.get_tlv(LinkTlvs.INTERFACE2_IP4.value),
            ip4_mask=message.get_tlv(LinkTlvs.INTERFACE2_IP4_MASK.value),
            ip6=message.get_tlv(LinkTlvs.INTERFACE2_IP6.value),
            ip6_mask=message.get_tlv(LinkTlvs.INTERFACE2_IP6_MASK.value),
        )
        link_type = None
        link_type_value = message.get_tlv(LinkTlvs.TYPE.value)
        if link_type_value is not None:
            link_type = LinkTypes(link_type_value)
        # link-wide effects options (delay, bandwidth, loss, ...)
        link_options = LinkOptions(_type=link_type)
        link_options.delay = message.get_tlv(LinkTlvs.DELAY.value)
        link_options.bandwidth = message.get_tlv(LinkTlvs.BANDWIDTH.value)
        link_options.session = message.get_tlv(LinkTlvs.SESSION.value)
        link_options.per = message.get_tlv(LinkTlvs.PER.value)
        link_options.dup = message.get_tlv(LinkTlvs.DUP.value)
        link_options.jitter = message.get_tlv(LinkTlvs.JITTER.value)
        link_options.mer = message.get_tlv(LinkTlvs.MER.value)
        link_options.burst = message.get_tlv(LinkTlvs.BURST.value)
        link_options.mburst = message.get_tlv(LinkTlvs.MBURST.value)
        link_options.gui_attributes = message.get_tlv(LinkTlvs.GUI_ATTRIBUTES.value)
        link_options.unidirectional = message.get_tlv(LinkTlvs.UNIDIRECTIONAL.value)
        link_options.emulation_id = message.get_tlv(LinkTlvs.EMULATION_ID.value)
        link_options.network_id = message.get_tlv(LinkTlvs.NETWORK_ID.value)
        link_options.key = message.get_tlv(LinkTlvs.KEY.value)
        link_options.opaque = message.get_tlv(LinkTlvs.OPAQUE.value)
        # ADD creates, DELETE removes, neither flag updates in place
        if message.flags & MessageFlags.ADD.value:
            self.session.add_link(node_one_id, node_two_id, interface_one, interface_two, link_options)
        elif message.flags & MessageFlags.DELETE.value:
            self.session.delete_link(node_one_id, node_two_id, interface_one.id, interface_two.id)
        else:
            self.session.update_link(node_one_id, node_two_id, interface_one.id, interface_two.id, link_options)
        return ()
    def handle_execute_message(self, message):
        """
        Execute Message handler

        Runs a command on a node (or locally with the LOCAL flag); TIME
        schedules it as an event, TTY echoes back a terminal command, and
        STRING/TEXT request status/output in the reply.

        :param core.api.tlv.coreapi.CoreExecMessage message: execute message to handle
        :return: reply messages
        """
        node_num = message.get_tlv(ExecuteTlvs.NODE.value)
        execute_num = message.get_tlv(ExecuteTlvs.NUMBER.value)
        execute_time = message.get_tlv(ExecuteTlvs.TIME.value)
        command = message.get_tlv(ExecuteTlvs.COMMAND.value)
        # local flag indicates command executed locally, not on a node
        if node_num is None and not message.flags & MessageFlags.LOCAL.value:
            raise ValueError("Execute Message is missing node number.")
        if execute_num is None:
            raise ValueError("Execute Message is missing execution number.")
        # a TIME TLV means "schedule this for later" rather than run now
        if execute_time is not None:
            self.session.add_event(execute_time, node=node_num, name=None, data=command)
            return ()
        try:
            node = self.session.get_node(node_num)
            # build common TLV items for reply
            tlv_data = b""
            if node_num is not None:
                tlv_data += coreapi.CoreExecuteTlv.pack(ExecuteTlvs.NODE.value, node_num)
            tlv_data += coreapi.CoreExecuteTlv.pack(ExecuteTlvs.NUMBER.value, execute_num)
            tlv_data += coreapi.CoreExecuteTlv.pack(ExecuteTlvs.COMMAND.value, command)
            if message.flags & MessageFlags.TTY.value:
                if node_num is None:
                    raise NotImplementedError
                # echo back exec message with cmd for spawning interactive terminal
                if command == "bash":
                    command = "/bin/bash"
                res = node.termcmdstring(command)
                tlv_data += coreapi.CoreExecuteTlv.pack(ExecuteTlvs.RESULT.value, res)
                reply = coreapi.CoreExecMessage.pack(MessageFlags.TTY.value, tlv_data)
                return reply,
            else:
                logging.info("execute message with cmd=%s", command)
                # execute command and send a response
                if message.flags & MessageFlags.STRING.value or message.flags & MessageFlags.TEXT.value:
                    # shlex.split() handles quotes within the string
                    if message.flags & MessageFlags.LOCAL.value:
                        status, res = utils.cmd_output(command)
                    else:
                        status, res = node.cmd_output(command)
                    logging.info("done exec cmd=%s with status=%d res=(%d bytes)", command, status, len(res))
                    # TEXT carries the output, STRING carries the exit status
                    if message.flags & MessageFlags.TEXT.value:
                        tlv_data += coreapi.CoreExecuteTlv.pack(ExecuteTlvs.RESULT.value, res)
                    if message.flags & MessageFlags.STRING.value:
                        tlv_data += coreapi.CoreExecuteTlv.pack(ExecuteTlvs.STATUS.value, status)
                    reply = coreapi.CoreExecMessage.pack(0, tlv_data)
                    return reply,
                # execute the command with no response
                else:
                    if message.flags & MessageFlags.LOCAL.value:
                        utils.mute_detach(command)
                    else:
                        node.cmd(command, wait=False)
        except KeyError:
            # node does not exist (yet) in the session
            logging.exception("error getting object: %s", node_num)
            # XXX wait and queue this message to try again later
            # XXX maybe this should be done differently
            if not message.flags & MessageFlags.LOCAL.value:
                time.sleep(0.125)
                self.queue_message(message)
        return ()
    def handle_register_message(self, message):
        """
        Register Message Handler

        Either executes a script/XML scenario named by the EXECUTE_SERVER TLV,
        or (GUI TLV) registers the connected GUI as master and replies with
        this daemon's capabilities and session list.

        :param core.api.tlv.coreapi.CoreRegMessage message: register message to handle
        :return: reply messages
        """
        replies = []
        # execute a Python script or XML file
        execute_server = message.get_tlv(RegisterTlvs.EXECUTE_SERVER.value)
        if execute_server:
            try:
                logging.info("executing: %s", execute_server)
                # STRING flag: caller wants the new session id reported back,
                # so snapshot existing session ids for the later diff
                if message.flags & MessageFlags.STRING.value:
                    old_session_ids = set(self.coreemu.sessions.keys())
                sys.argv = shlex.split(execute_server)
                file_name = sys.argv[0]
                if os.path.splitext(file_name)[1].lower() == ".xml":
                    session = self.coreemu.create_session(master=False)
                    try:
                        session.open_xml(file_name, start=True)
                    # NOTE(review): bare except, but it re-raises after cleanup
                    except:
                        self.coreemu.delete_session(session.id)
                        raise
                else:
                    # run the script in a background thread; it is expected to
                    # create its own session via the provided coreemu
                    thread = threading.Thread(
                        target=utils.execute_file,
                        args=(file_name, {"__file__": file_name, "coreemu": self.coreemu})
                    )
                    thread.daemon = True
                    thread.start()
                    # allow time for session creation
                    time.sleep(0.25)
                if message.flags & MessageFlags.STRING.value:
                    # the new session id is whichever id did not exist before
                    new_session_ids = set(self.coreemu.sessions.keys())
                    new_sid = new_session_ids.difference(old_session_ids)
                    try:
                        sid = new_sid.pop()
                        logging.info("executed: %s as session %d", execute_server, sid)
                    except KeyError:
                        logging.info("executed %s with unknown session ID", execute_server)
                        return replies
                    logging.debug("checking session %d for RUNTIME state", sid)
                    session = self.coreemu.sessions.get(sid)
                    retries = 10
                    # wait for session to enter RUNTIME state, to prevent GUI from
                    # connecting while nodes are still being instantiated
                    while session.state != EventTypes.RUNTIME_STATE.value:
                        logging.debug("waiting for session %d to enter RUNTIME state", sid)
                        time.sleep(1)
                        retries -= 1
                        if retries <= 0:
                            logging.debug("session %d did not enter RUNTIME state", sid)
                            return replies
                    tlv_data = coreapi.CoreRegisterTlv.pack(RegisterTlvs.EXECUTE_SERVER.value, execute_server)
                    tlv_data += coreapi.CoreRegisterTlv.pack(RegisterTlvs.SESSION.value, "%s" % sid)
                    message = coreapi.CoreRegMessage.pack(0, tlv_data)
                    replies.append(message)
            except Exception as e:
                # report the failure to the client as an Exception message
                logging.exception("error executing: %s", execute_server)
                tlv_data = coreapi.CoreExceptionTlv.pack(ExceptionTlvs.LEVEL.value, 2)
                tlv_data += coreapi.CoreExceptionTlv.pack(ExceptionTlvs.TEXT.value, str(e))
                message = coreapi.CoreExceptionMessage.pack(0, tlv_data)
                replies.append(message)
            return replies
        gui = message.get_tlv(RegisterTlvs.GUI.value)
        if gui is None:
            logging.debug("ignoring Register message")
        else:
            # register capabilities with the GUI
            self.master = True
            # find the session containing this client and set the session to master
            for _id in self.coreemu.sessions:
                session = self.coreemu.sessions[_id]
                if self in session.broker.session_clients:
                    logging.debug("setting session to master: %s", session.id)
                    session.master = True
                    break
            replies.append(self.register())
            replies.append(self.session_message())
        return replies
def handle_config_message(self, message):
"""
Configuration Message handler
:param core.api.tlv.coreapi.CoreConfMessage message: configuration message to handle
:return: reply messages
"""
# convert config message to standard config data object
config_data = ConfigData(
node=message.get_tlv(ConfigTlvs.NODE.value),
object=message.get_tlv(ConfigTlvs.OBJECT.value),
type=message.get_tlv(ConfigTlvs.TYPE.value),
data_types=message.get_tlv(ConfigTlvs.DATA_TYPES.value),
data_values=message.get_tlv(ConfigTlvs.VALUES.value),
captions=message.get_tlv(ConfigTlvs.CAPTIONS.value),
bitmap=message.get_tlv(ConfigTlvs.BITMAP.value),
possible_values=message.get_tlv(ConfigTlvs.POSSIBLE_VALUES.value),
groups=message.get_tlv(ConfigTlvs.GROUPS.value),
session=message.get_tlv(ConfigTlvs.SESSION.value),
interface_number=message.get_tlv(ConfigTlvs.INTERFACE_NUMBER.value),
network_id=message.get_tlv(ConfigTlvs.NETWORK_ID.value),
opaque=message.get_tlv(ConfigTlvs.OPAQUE.value)
)
logging.debug("configuration message for %s node %s", config_data.object, config_data.node)
message_type = ConfigFlags(config_data.type)
replies = []
# handle session configuration
if config_data.object == "all":
replies = self.handle_config_all(message_type, config_data)
elif config_data.object == self.session.options.name:
replies = self.handle_config_session(message_type, config_data)
elif config_data.object == self.session.location.name:
self.handle_config_location(message_type, config_data)
elif config_data.object == self.session.metadata.name:
replies = self.handle_config_metadata(message_type, config_data)
elif config_data.object == self.session.broker.name:
self.handle_config_broker(message_type, config_data)
elif config_data.object == self.session.services.name:
replies = self.handle_config_services(message_type, config_data)
elif config_data.object == self.session.mobility.name:
self.handle_config_mobility(message_type, config_data)
elif config_data.object in self.session.mobility.models:
replies = self.handle_config_mobility_models(message_type, config_data)
elif config_data.object == self.session.emane.name:
replies = self.handle_config_emane(message_type, config_data)
elif config_data.object in self.session.emane.models:
replies = self.handle_config_emane_models(message_type, config_data)
else:
raise Exception("no handler for configuration: %s", config_data.object)
for reply in replies:
self.handle_broadcast_config(reply)
return []
def handle_config_all(self, message_type, config_data):
replies = []
if message_type == ConfigFlags.RESET:
node_id = config_data.node
self.session.location.reset()
self.session.services.reset()
self.session.mobility.config_reset(node_id)
self.session.emane.config_reset(node_id)
else:
raise Exception("cant handle config all: %s" % message_type)
return replies
def handle_config_session(self, message_type, config_data):
replies = []
if message_type == ConfigFlags.REQUEST:
type_flags = ConfigFlags.NONE.value
config = self.session.options.get_configs()
config_response = ConfigShim.config_data(0, None, type_flags, self.session.options, config)
replies.append(config_response)
elif message_type != ConfigFlags.RESET and config_data.data_values:
values = ConfigShim.str_to_dict(config_data.data_values)
for key in values:
value = values[key]
self.session.options.set_config(key, value)
return replies
def handle_config_location(self, message_type, config_data):
if message_type == ConfigFlags.RESET:
self.session.location.reset()
else:
if not config_data.data_values:
logging.warning("location data missing")
else:
values = [float(x) for x in config_data.data_values.split("|")]
# Cartesian coordinate reference point
refx, refy = values[0], values[1]
refz = 0.0
lat, lon, alt = values[2], values[3], values[4]
# xyz point
self.session.location.refxyz = (refx, refy, refz)
# geographic reference point
self.session.location.setrefgeo(lat, lon, alt)
self.session.location.refscale = values[5]
logging.info("location configured: %s = %s scale=%s", self.session.location.refxyz,
self.session.location.refgeo, self.session.location.refscale)
logging.info("location configured: UTM%s", self.session.location.refutm)
def handle_config_metadata(self, message_type, config_data):
replies = []
if message_type == ConfigFlags.REQUEST:
node_id = config_data.node
metadata_configs = self.session.metadata.get_configs()
if metadata_configs is None:
metadata_configs = {}
data_values = "|".join(["%s=%s" % (x, metadata_configs[x]) for x in metadata_configs])
data_types = tuple(ConfigDataTypes.STRING.value for _ in metadata_configs)
config_response = ConfigData(
message_type=0,
node=node_id,
object=self.session.metadata.name,
type=ConfigFlags.NONE.value,
data_types=data_types,
data_values=data_values
)
replies.append(config_response)
elif message_type != ConfigFlags.RESET and config_data.data_values:
values = ConfigShim.str_to_dict(config_data.data_values)
for key in values:
value = values[key]
self.session.metadata.set_config(key, value)
return replies
    def handle_config_broker(self, message_type, config_data):
        """
        Configure distributed emulation servers on the session broker.

        The value string has the form "server:ip:port,server:ip:port,...".
        REQUEST and RESET messages are ignored; any other message registers
        each listed server with the broker.

        :param message_type: ConfigFlags value of the incoming message
        :param config_data: config data carrying the server list
        :return: nothing
        """
        if message_type not in [ConfigFlags.REQUEST, ConfigFlags.RESET]:
            session_id = config_data.session
            if not config_data.data_values:
                logging.info("emulation server data missing")
            else:
                values = config_data.data_values.split("|")
                # string of "server:ip:port,server:ip:port,..."
                server_strings = values[0]
                server_list = server_strings.split(",")
                for server in server_list:
                    server_items = server.split(":")
                    name, host, port = server_items[:3]
                    # empty fields mean "unspecified"
                    if host == "":
                        host = None
                    if port == "":
                        port = None
                    else:
                        port = int(port)
                    if session_id is not None:
                        # receive session ID and my IP from master
                        self.session.broker.session_id_master = int(session_id.split("|")[0])
                        self.session.broker.myip = host
                        # do not open a client connection back to ourselves
                        host = None
                        port = None
                    # this connects to the server immediately; maybe we should wait
                    # or spin off a new "client" thread here
                    self.session.broker.addserver(name, host, port)
                    self.session.broker.setupserver(name)
    def handle_config_services(self, message_type, config_data):
        """
        Handle service configuration messages.

        REQUEST without an opaque value replies with the full catalog of
        available services grouped by service group; REQUEST with an opaque
        "service:<name>[:<file>]" value replies with (or broadcasts the file
        of) one service on one node. RESET clears all service state. Other
        messages store default services per node type or custom per-node
        service values.

        :param message_type: ConfigFlags value of the incoming message
        :param config_data: config data for the services request
        :return: list of reply ConfigData objects (may be empty)
        """
        replies = []
        node_id = config_data.node
        opaque = config_data.opaque
        if message_type == ConfigFlags.REQUEST:
            session_id = config_data.session
            opaque = config_data.opaque
            logging.debug("configuration request: node(%s) session(%s) opaque(%s)", node_id, session_id, opaque)
            # send back a list of available services
            if opaque is None:
                type_flag = ConfigFlags.NONE.value
                data_types = tuple(repeat(ConfigDataTypes.BOOL.value, len(ServiceManager.services)))
                # sort groups by name and map services to groups
                groups = set()
                group_map = {}
                for name in ServiceManager.services:
                    service_name = ServiceManager.services[name]
                    group = service_name.group
                    groups.add(group)
                    group_map.setdefault(group, []).append(service_name)
                groups = sorted(groups, key=lambda x: x.lower())
                # define tlv values in proper order
                captions = []
                possible_values = []
                values = []
                group_strings = []
                # 1-based index ranges, e.g. "GroupName:1-5"
                start_index = 1
                logging.info("sorted groups: %s", groups)
                for group in groups:
                    services = sorted(group_map[group], key=lambda x: x.name.lower())
                    logging.info("sorted services for group(%s): %s", group, services)
                    end_index = start_index + len(services) - 1
                    group_strings.append("%s:%s-%s" % (group, start_index, end_index))
                    start_index += len(services)
                    for service_name in services:
                        captions.append(service_name.name)
                        values.append("0")
                        # "1" marks services that require customization
                        if service_name.custom_needed:
                            possible_values.append("1")
                        else:
                            possible_values.append("")
                # format for tlv
                captions = "|".join(captions)
                possible_values = "|".join(possible_values)
                values = "|".join(values)
                groups = "|".join(group_strings)
            # send back the properties for this service
            else:
                if not node_id:
                    return replies
                node = self.session.get_node(node_id)
                if node is None:
                    logging.warning("request to configure service for unknown node %s", node_id)
                    return replies
                services = ServiceShim.servicesfromopaque(opaque)
                if not services:
                    return replies
                servicesstring = opaque.split(":")
                if len(servicesstring) == 3:
                    # a file request: e.g. "service:zebra:quagga.conf"
                    file_name = servicesstring[2]
                    service_name = services[0]
                    file_data = self.session.services.get_service_file(node, service_name, file_name)
                    self.session.broadcast_file(file_data)
                    # short circuit this request early to avoid returning response below
                    return replies
                # the first service in the list is the one being configured
                service_name = services[0]
                # send back:
                # dirs, configs, startindex, startup, shutdown, metadata, config
                type_flag = ConfigFlags.UPDATE.value
                data_types = tuple(repeat(ConfigDataTypes.STRING.value, len(ServiceShim.keys)))
                service = self.session.services.get_service(node_id, service_name, default_service=True)
                values = ServiceShim.tovaluelist(node, service)
                captions = None
                possible_values = None
                groups = None
            config_response = ConfigData(
                message_type=0,
                node=node_id,
                object=self.session.services.name,
                type=type_flag,
                data_types=data_types,
                data_values=values,
                captions=captions,
                possible_values=possible_values,
                groups=groups,
                session=session_id,
                opaque=opaque
            )
            replies.append(config_response)
        elif message_type == ConfigFlags.RESET:
            self.session.services.reset()
        else:
            data_types = config_data.data_types
            values = config_data.data_values
            error_message = "services config message that I don't know how to handle"
            if values is None:
                logging.error(error_message)
            else:
                if opaque is None:
                    values = values.split("|")
                    # store default services for a node type in self.defaultservices[]
                    if data_types is None or data_types[0] != ConfigDataTypes.STRING.value:
                        logging.info(error_message)
                        return None
                    # first value is the node-type key, the rest are its services
                    key = values.pop(0)
                    self.session.services.default_services[key] = values
                    logging.debug("default services for type %s set to %s", key, values)
                elif node_id:
                    services = ServiceShim.servicesfromopaque(opaque)
                    if services:
                        service_name = services[0]
                        # set custom service for node
                        self.session.services.set_service(node_id, service_name)
                        # set custom values for custom service
                        service = self.session.services.get_service(node_id, service_name)
                        if not service:
                            raise ValueError("custom service(%s) for node(%s) does not exist", service_name, node_id)
                        values = ConfigShim.str_to_dict(values)
                        for name in values:
                            value = values[name]
                            ServiceShim.setvalue(service, name, value)
        return replies
def handle_config_mobility(self, message_type, _):
if message_type == ConfigFlags.RESET:
self.session.mobility.reset()
def handle_config_mobility_models(self, message_type, config_data):
replies = []
node_id = config_data.node
object_name = config_data.object
interface_id = config_data.interface_number
values_str = config_data.data_values
if interface_id is not None:
node_id = node_id * 1000 + interface_id
logging.debug("received configure message for %s nodenum: %s", object_name, node_id)
if message_type == ConfigFlags.REQUEST:
logging.info("replying to configure request for model: %s", object_name)
typeflags = ConfigFlags.NONE.value
model_class = self.session.mobility.models.get(object_name)
if not model_class:
logging.warning("model class does not exist: %s", object_name)
return []
config = self.session.mobility.get_model_config(node_id, object_name)
config_response = ConfigShim.config_data(0, node_id, typeflags, model_class, config)
replies.append(config_response)
elif message_type != ConfigFlags.RESET:
# store the configuration values for later use, when the node
if not object_name:
logging.warning("no configuration object for node: %s", node_id)
return []
parsed_config = {}
if values_str:
parsed_config = ConfigShim.str_to_dict(values_str)
self.session.mobility.set_model_config(node_id, object_name, parsed_config)
if self.session.state == EventTypes.RUNTIME_STATE.value:
try:
node = self.session.get_node(node_id)
if object_name == BasicRangeModel.name:
node.updatemodel(parsed_config)
except KeyError:
logging.error("skipping mobility configuration for unknown node: %s", node_id)
return replies
def handle_config_emane(self, message_type, config_data):
replies = []
node_id = config_data.node
object_name = config_data.object
interface_id = config_data.interface_number
values_str = config_data.data_values
if interface_id is not None:
node_id = node_id * 1000 + interface_id
logging.debug("received configure message for %s nodenum: %s", object_name, node_id)
if message_type == ConfigFlags.REQUEST:
logging.info("replying to configure request for %s model", object_name)
typeflags = ConfigFlags.NONE.value
config = self.session.emane.get_configs()
config_response = ConfigShim.config_data(0, node_id, typeflags, self.session.emane.emane_config, config)
replies.append(config_response)
elif message_type != ConfigFlags.RESET:
if not object_name:
logging.info("no configuration object for node %s", node_id)
return []
if values_str:
config = ConfigShim.str_to_dict(values_str)
self.session.emane.set_configs(config)
# extra logic to start slave Emane object after nemid has been configured from the master
if message_type == ConfigFlags.UPDATE and self.session.master is False:
# instantiation was previously delayed by setup returning Emane.NOT_READY
self.session.instantiate()
return replies
def handle_config_emane_models(self, message_type, config_data):
replies = []
node_id = config_data.node
object_name = config_data.object
interface_id = config_data.interface_number
values_str = config_data.data_values
if interface_id is not None:
node_id = node_id * 1000 + interface_id
logging.debug("received configure message for %s nodenum: %s", object_name, node_id)
if message_type == ConfigFlags.REQUEST:
logging.info("replying to configure request for model: %s", object_name)
typeflags = ConfigFlags.NONE.value
model_class = self.session.emane.models.get(object_name)
if not model_class:
logging.warning("model class does not exist: %s", object_name)
return []
config = self.session.emane.get_model_config(node_id, object_name)
config_response = ConfigShim.config_data(0, node_id, typeflags, model_class, config)
replies.append(config_response)
elif message_type != ConfigFlags.RESET:
# store the configuration values for later use, when the node
if not object_name:
logging.warning("no configuration object for node: %s", node_id)
return []
parsed_config = {}
if values_str:
parsed_config = ConfigShim.str_to_dict(values_str)
self.session.emane.set_model_config(node_id, object_name, parsed_config)
return replies
    def handle_file_message(self, message):
        """
        File Message handler. Routes the file by its type TLV: "service:"
        files become custom service files, "hook:" files become state hooks,
        and untyped files are written to the host or attached to a node.

        :param core.api.tlv.coreapi.CoreFileMessage message: file message to handle
        :return: reply messages (always an empty tuple)
        :raises NotImplementedError: for messages without the ADD flag
        """
        if message.flags & MessageFlags.ADD.value:
            node_num = message.get_tlv(FileTlvs.NODE.value)
            file_name = message.get_tlv(FileTlvs.NAME.value)
            file_type = message.get_tlv(FileTlvs.TYPE.value)
            source_name = message.get_tlv(FileTlvs.SOURCE_NAME.value)
            data = message.get_tlv(FileTlvs.DATA.value)
            compressed_data = message.get_tlv(FileTlvs.COMPRESSED_DATA.value)
            if compressed_data:
                logging.warning("Compressed file data not implemented for File message.")
                return ()
            # a message may carry either a source path or inline data, not both
            if source_name and data:
                logging.warning("ignoring invalid File message: source and data TLVs are both present")
                return ()
            # some File Messages store custom files in services,
            # prior to node creation
            if file_type is not None:
                if file_type.startswith("service:"):
                    _, service_name = file_type.split(':')[:2]
                    self.session.services.set_service_file(node_num, service_name, file_name, data)
                    return ()
                elif file_type.startswith("hook:"):
                    # hook type encodes the session state number: "hook:<state>"
                    _, state = file_type.split(':')[:2]
                    if not state.isdigit():
                        logging.error("error setting hook having state '%s'", state)
                        return ()
                    state = int(state)
                    self.session.add_hook(state, file_name, source_name, data)
                    return ()
            # writing a file to the host
            if node_num is None:
                if source_name is not None:
                    shutil.copy2(source_name, file_name)
                else:
                    with open(file_name, "w") as open_file:
                        open_file.write(data)
                return ()
            self.session.add_node_file(node_num, source_name, file_name, data)
        else:
            raise NotImplementedError
        return ()
def handle_interface_message(self, message):
"""
Interface Message handler.
:param message: interface message to handle
:return: reply messages
"""
logging.info("ignoring Interface message")
return ()
    def handle_event_message(self, message):
        """
        Event Message handler. State-change events (<= SHUTDOWN_STATE) drive
        the session state machine; service/mobility/file/scheduled events are
        dispatched to their respective subsystems.

        :param core.api.tlv.coreapi.CoreEventMessage message: event message to handle
        :return: reply messages (always an empty tuple)
        :raises NotImplementedError: when the event type TLV is missing or a
            scheduled event lacks the ADD flag
        :raises KeyError: for a node-scoped event naming an unknown node
        """
        event_data = EventData(
            node=message.get_tlv(EventTlvs.NODE.value),
            event_type=message.get_tlv(EventTlvs.TYPE.value),
            name=message.get_tlv(EventTlvs.NAME.value),
            data=message.get_tlv(EventTlvs.DATA.value),
            time=message.get_tlv(EventTlvs.TIME.value),
            session=message.get_tlv(EventTlvs.SESSION.value)
        )
        if event_data.event_type is None:
            raise NotImplementedError("Event message missing event type")
        event_type = EventTypes(event_data.event_type)
        node_id = event_data.node
        logging.debug("handling event %s at %s", event_type.name, time.ctime())
        # session state changes are the events at or below SHUTDOWN_STATE
        if event_type.value <= EventTypes.SHUTDOWN_STATE.value:
            if node_id is not None:
                try:
                    node = self.session.get_node(node_id)
                except KeyError:
                    raise KeyError("Event message for unknown node %d" % node_id)
                # configure mobility models for WLAN added during runtime
                if event_type == EventTypes.INSTANTIATION_STATE and nodeutils.is_node(node, NodeTypes.WIRELESS_LAN):
                    self.session.start_mobility(node_ids=(node.id,))
                    return ()
                logging.warning("dropping unhandled Event message with node number")
                return ()
            self.session.set_state(event_type)
        if event_type == EventTypes.DEFINITION_STATE:
            # clear all session objects in order to receive new definitions
            self.session.clear()
        elif event_type == EventTypes.INSTANTIATION_STATE:
            if len(self.handler_threads) > 1:
                # TODO: sync handler threads here before continuing
                time.sleep(2.0)  # XXX
            # done receiving node/link configuration, ready to instantiate
            self.session.instantiate()
            # after booting nodes attempt to send emulation id for nodes waiting on status
            for _id in self.session.nodes:
                self.send_node_emulation_id(_id)
        elif event_type == EventTypes.RUNTIME_STATE:
            if self.session.master:
                logging.warning("Unexpected event message: RUNTIME state received at session master")
            else:
                # master event queue is started in session.checkruntime()
                self.session.start_events()
        elif event_type == EventTypes.DATACOLLECT_STATE:
            self.session.data_collect()
        elif event_type == EventTypes.SHUTDOWN_STATE:
            if self.session.master:
                logging.warning("Unexpected event message: SHUTDOWN state received at session master")
        elif event_type in {EventTypes.START, EventTypes.STOP, EventTypes.RESTART, EventTypes.PAUSE,
                            EventTypes.RECONFIGURE}:
            handled = False
            name = event_data.name
            if name:
                # TODO: register system for event message handlers,
                # like confobjs
                if name.startswith("service:"):
                    self.handle_service_event(event_data)
                    handled = True
                elif name.startswith("mobility:"):
                    self.session.mobility_event(event_data)
                    handled = True
            if not handled:
                logging.warning("Unhandled event message: event type %s ", event_type.name)
        elif event_type == EventTypes.FILE_OPEN:
            filename = event_data.name
            self.session.open_xml(filename, start=False)
            # push the newly loaded scenario back to the GUI
            self.send_objects()
            return ()
        elif event_type == EventTypes.FILE_SAVE:
            filename = event_data.name
            self.session.save_xml(filename)
        elif event_type == EventTypes.SCHEDULED:
            etime = event_data.time
            node = event_data.node
            name = event_data.name
            data = event_data.data
            if etime is None:
                logging.warning("Event message scheduled event missing start time")
                return ()
            if message.flags & MessageFlags.ADD.value:
                self.session.add_event(float(etime), node=node, name=name, data=data)
            else:
                raise NotImplementedError
        else:
            logging.warning("unhandled event message: event type %s", event_type)
        return ()
def handle_service_event(self, event_data):
"""
Handle an Event Message used to start, stop, restart, or validate
a service on a given node.
:param core.emulator.enumerations.EventData event_data: event data to handle
:return: nothing
"""
event_type = event_data.event_type
node_id = event_data.node
name = event_data.name
try:
node = self.session.get_node(node_id)
except KeyError:
logging.warning("ignoring event for service '%s', unknown node '%s'", name, node_id)
return
fail = ""
unknown = []
services = ServiceShim.servicesfromopaque(name)
for service_name in services:
service = self.session.services.get_service(node_id, service_name, default_service=True)
if not service:
unknown.append(service_name)
continue
if event_type == EventTypes.STOP.value or event_type == EventTypes.RESTART.value:
status = self.session.services.stop_service(node, service)
if status:
fail += "Stop %s," % service.name
if event_type == EventTypes.START.value or event_type == EventTypes.RESTART.value:
status = self.session.services.startup_service(node, service)
if status:
fail += "Start %s(%s)," % service.name
if event_type == EventTypes.PAUSE.value:
status = self.session.services.validate_service(node, service)
if status:
fail += "%s," % service.name
if event_type == EventTypes.RECONFIGURE.value:
self.session.services.service_reconfigure(node, service)
fail_data = ""
if len(fail) > 0:
fail_data += "Fail:" + fail
unknown_data = ""
num = len(unknown)
if num > 0:
for u in unknown:
unknown_data += u
if num > 1:
unknown_data += ", "
num -= 1
logging.warning("Event requested for unknown service(s): %s", unknown_data)
unknown_data = "Unknown:" + unknown_data
event_data = EventData(
node=node_id,
event_type=event_type,
name=name,
data=fail_data + ";" + unknown_data,
time="%s" % time.time()
)
self.session.broadcast_event(event_data)
    def handle_session_message(self, message):
        """
        Session Message handler. With no flags, modifies properties of the
        listed sessions; with the STRING flag alone, replies with the session
        list; with ADD, joins this client to an existing session; with
        DELETE, shuts sessions down.

        :param core.api.tlv.coreapi.CoreSessionMessage message: session message to handle
        :return: reply messages
        """
        session_id_str = message.get_tlv(SessionTlvs.NUMBER.value)
        session_ids = coreapi.str_to_list(session_id_str)
        name_str = message.get_tlv(SessionTlvs.NAME.value)
        names = coreapi.str_to_list(name_str)
        file_str = message.get_tlv(SessionTlvs.FILE.value)
        files = coreapi.str_to_list(file_str)
        thumb = message.get_tlv(SessionTlvs.THUMB.value)
        user = message.get_tlv(SessionTlvs.USER.value)
        logging.debug("SESSION message flags=0x%x sessions=%s" % (message.flags, session_id_str))
        if message.flags == 0:
            # modify properties (name/file/thumbnail/user) of listed sessions
            for index, session_id in enumerate(session_ids):
                session_id = int(session_id)
                # id 0 refers to this handler's current session
                if session_id == 0:
                    session = self.session
                else:
                    session = self.coreemu.sessions.get(session_id)
                if session is None:
                    logging.warning("session %s not found", session_id)
                    continue
                logging.info("request to modify to session: %s", session.id)
                if names is not None:
                    session.name = names[index]
                if files is not None:
                    session.file_name = files[index]
                if thumb:
                    session.set_thumbnail(thumb)
                if user:
                    session.set_user(user)
        elif message.flags & MessageFlags.STRING.value and not message.flags & MessageFlags.ADD.value:
            # status request flag: send list of sessions
            return self.session_message(),
        else:
            # handle ADD or DEL flags
            for session_id in session_ids:
                session_id = int(session_id)
                session = self.coreemu.sessions.get(session_id)
                if session is None:
                    logging.info("session %s not found (flags=0x%x)", session_id, message.flags)
                    continue
                if message.flags & MessageFlags.ADD.value:
                    # connect to the first session that exists
                    logging.info("request to connect to session %s", session_id)
                    # remove client from session broker and shutdown if needed
                    self.remove_session_handlers()
                    self.session.broker.session_clients.remove(self)
                    if not self.session.broker.session_clients and not self.session.is_active():
                        self.coreemu.delete_session(self.session.id)
                    # set session to join
                    self.session = session
                    # add client to session broker and set master if needed
                    if self.master:
                        self.session.master = True
                    self.session.broker.session_clients.append(self)
                    # add broadcast handlers
                    logging.info("adding session broadcast handlers")
                    self.add_session_handlers()
                    if user:
                        self.session.set_user(user)
                    if message.flags & MessageFlags.STRING.value:
                        self.send_objects()
                elif message.flags & MessageFlags.DELETE.value:
                    # shut down the specified session(s)
                    logging.info("request to terminate session %s", session_id)
                    self.coreemu.delete_session(session_id)
                else:
                    logging.warning("unhandled session flags for session %s", session_id)
        return ()
def send_node_emulation_id(self, node_id):
"""
Node emulation id to send.
:param int node_id: node id to send
:return: nothing
"""
if node_id in self.node_status_request:
tlv_data = b""
tlv_data += coreapi.CoreNodeTlv.pack(NodeTlvs.NUMBER.value, node_id)
tlv_data += coreapi.CoreNodeTlv.pack(NodeTlvs.EMULATION_ID.value, node_id)
reply = coreapi.CoreNodeMessage.pack(MessageFlags.ADD.value | MessageFlags.LOCAL.value, tlv_data)
try:
self.sendall(reply)
except IOError:
logging.exception("error sending node emulation id message: %s", node_id)
del self.node_status_request[node_id]
    def send_objects(self):
        """
        Return API messages that describe the current session: all nodes and
        links, mobility and emane model configs, service customizations and
        files, hook scripts, and the session options/metadata.
        """
        # find all nodes and links
        nodes_data = []
        links_data = []
        with self.session._nodes_lock:
            for node_id in self.session.nodes:
                node = self.session.nodes[node_id]
                node_data = node.data(message_type=MessageFlags.ADD.value)
                if node_data:
                    nodes_data.append(node_data)
                node_links = node.all_link_data(flags=MessageFlags.ADD.value)
                for link_data in node_links:
                    links_data.append(link_data)
        # send all nodes first, so that they will exist for any links
        for node_data in nodes_data:
            self.session.broadcast_node(node_data)
        for link_data in links_data:
            self.session.broadcast_link(link_data)
        # send mobility model info
        for node_id in self.session.mobility.nodes():
            mobility_configs = self.session.mobility.get_all_configs(node_id)
            for model_name in mobility_configs:
                config = mobility_configs[model_name]
                model_class = self.session.mobility.models[model_name]
                logging.debug("mobility config: node(%s) class(%s) values(%s)", node_id, model_class, config)
                config_data = ConfigShim.config_data(0, node_id, ConfigFlags.UPDATE.value, model_class, config)
                self.session.broadcast_config(config_data)
        # send emane model info
        for node_id in self.session.emane.nodes():
            emane_configs = self.session.emane.get_all_configs(node_id)
            for model_name in emane_configs:
                config = emane_configs[model_name]
                model_class = self.session.emane.models[model_name]
                logging.debug("emane config: node(%s) class(%s) values(%s)", node_id, model_class, config)
                config_data = ConfigShim.config_data(0, node_id, ConfigFlags.UPDATE.value, model_class, config)
                self.session.broadcast_config(config_data)
        # service customizations
        service_configs = self.session.services.all_configs()
        for node_id, service in service_configs:
            opaque = "service:%s" % service.name
            data_types = tuple(repeat(ConfigDataTypes.STRING.value, len(ServiceShim.keys)))
            node = self.session.get_node(node_id)
            values = ServiceShim.tovaluelist(node, service)
            config_data = ConfigData(
                message_type=0,
                node=node_id,
                object=self.session.services.name,
                type=ConfigFlags.UPDATE.value,
                data_types=data_types,
                data_values=values,
                session=str(self.session.id),
                opaque=opaque
            )
            self.session.broadcast_config(config_data)
            # per-service custom files are sent as File messages
            for file_name, config_data in self.session.services.all_files(service):
                file_data = FileData(
                    message_type=MessageFlags.ADD.value,
                    node=node_id,
                    name=str(file_name),
                    type=opaque,
                    data=str(config_data)
                )
                self.session.broadcast_file(file_data)
        # TODO: send location info
        # send hook scripts
        for state in sorted(self.session._hooks.keys()):
            for file_name, config_data in self.session._hooks[state]:
                file_data = FileData(
                    message_type=MessageFlags.ADD.value,
                    name=str(file_name),
                    type="hook:%s" % state,
                    data=str(config_data)
                )
                self.session.broadcast_file(file_data)
        # send session configuration
        session_config = self.session.options.get_configs()
        config_data = ConfigShim.config_data(0, None, ConfigFlags.UPDATE.value, self.session.options, session_config)
        self.session.broadcast_config(config_data)
        # send session metadata
        metadata_configs = self.session.metadata.get_configs()
        if metadata_configs:
            data_values = "|".join(["%s=%s" % (x, metadata_configs[x]) for x in metadata_configs])
            data_types = tuple(ConfigDataTypes.STRING.value for _ in self.session.metadata.get_configs())
            config_data = ConfigData(
                message_type=0,
                object=self.session.metadata.name,
                type=ConfigFlags.NONE.value,
                data_types=data_types,
                data_values=data_values
            )
            self.session.broadcast_config(config_data)
        logging.info("informed GUI about %d nodes and %d links", len(nodes_data), len(links_data))
class CoreUdpHandler(CoreHandler):
    """
    Datagram (UDP) variant of the CORE API handler. Reuses CoreHandler's
    TLV message handlers, but each request is a single datagram with no
    per-client connection state or message queue.
    """

    def __init__(self, request, client_address, server):
        """
        Create a CoreUdpHandler for one incoming datagram.

        :param request: (data, socket) pair provided by the UDP server
        :param client_address: address of the sending client
        :param server: the owning socketserver instance
        """
        # map message type -> handler method inherited from CoreHandler
        self.message_handlers = {
            MessageTypes.NODE.value: self.handle_node_message,
            MessageTypes.LINK.value: self.handle_link_message,
            MessageTypes.EXECUTE.value: self.handle_execute_message,
            MessageTypes.REGISTER.value: self.handle_register_message,
            MessageTypes.CONFIG.value: self.handle_config_message,
            MessageTypes.FILE.value: self.handle_file_message,
            MessageTypes.INTERFACE.value: self.handle_interface_message,
            MessageTypes.EVENT.value: self.handle_event_message,
            MessageTypes.SESSION.value: self.handle_session_message,
        }
        self.master = False
        self.session = None
        # note: deliberately skips CoreHandler.__init__ (no TCP state needed)
        socketserver.BaseRequestHandler.__init__(self, request, client_address, server)

    def setup(self):
        """
        Client has connected, set up a new connection.

        :return: nothing
        """
        pass

    def receive_message(self):
        """
        Parse the datagram in self.request into a CORE API message.

        :return: the parsed message, or None for an empty payload or an
            unimplemented message type
        :raises IOError: when the datagram is truncated or its length does
            not match the header
        """
        data = self.request[0]
        header = data[:coreapi.CoreMessage.header_len]
        if len(header) < coreapi.CoreMessage.header_len:
            raise IOError("error receiving header (received %d bytes)" % len(header))
        message_type, message_flags, message_len = coreapi.CoreMessage.unpack_header(header)
        if message_len == 0:
            logging.warning("received message with no data")
            return
        if len(data) != coreapi.CoreMessage.header_len + message_len:
            logging.error("received message length does not match received data (%s != %s)",
                          len(data), coreapi.CoreMessage.header_len + message_len)
            raise IOError
        try:
            message_class = coreapi.CLASS_MAP[message_type]
            message = message_class(message_flags, header, data[coreapi.CoreMessage.header_len:])
            return message
        except KeyError:
            message = coreapi.CoreMessage(message_flags, header, data[coreapi.CoreMessage.header_len:])
            message.msgtype = message_type
            logging.exception("unimplemented core message type: %s", message.type_str())

    def handle(self):
        """
        Dispatch one received message to its session(s). Messages with no
        session TLV are routed to the busiest running session, if any.

        :return: nothing
        """
        message = self.receive_message()
        sessions = message.session_numbers()
        message.queuedtimes = 0
        if sessions:
            for session_id in sessions:
                session = self.server.mainserver.coreemu.sessions.get(session_id)
                if session:
                    logging.debug("session handling message: %s", session.session_id)
                    self.session = session
                    self.handle_message(message)
                    self.broadcast(message)
                else:
                    logging.error("session %d in %s message not found.", session_id, message.type_str())
        else:
            # no session specified, find an existing one
            session = None
            node_count = 0
            # pick the running session with the most nodes
            for session_id in self.server.mainserver.coreemu.sessions:
                current_session = self.server.mainserver.coreemu.sessions[session_id]
                current_node_count = current_session.get_node_count()
                if current_session.state == EventTypes.RUNTIME_STATE.value and current_node_count > node_count:
                    node_count = current_node_count
                    session = current_session
            if session or message.message_type == MessageTypes.REGISTER.value:
                self.session = session
                self.handle_message(message)
                self.broadcast(message)
            else:
                logging.error("no active session, dropping %s message.", message.type_str())

    def broadcast(self, message):
        """
        Forward a node or link message to all clients of the session.

        :param message: message to broadcast; other types are ignored
        :return: nothing
        """
        if not isinstance(message, (coreapi.CoreNodeMessage, coreapi.CoreLinkMessage)):
            return

        for client in self.session.broker.session_clients:
            try:
                client.sendall(message.raw_message)
            except IOError:
                logging.error("error broadcasting")

    def finish(self):
        """
        Handler has finished; defer to the base class implementation.

        :return: nothing
        """
        return socketserver.BaseRequestHandler.finish(self)

    def queuemsg(self, msg):
        """
        UDP handlers are short-lived and do not have message queues.

        :param bytes msg: message to queue
        :return:
        """
        raise Exception("Unable to queue %s message for later processing using UDP!" % msg)

    def sendall(self, data):
        """
        Use sendto() on the connectionless UDP socket.

        :param data:
        :return:
        """
        self.request[1].sendto(data, self.client_address)
import logging.config
import random
import socket
import struct
import threading
import xml.etree.ElementTree as ElementTree
import os
import re
from coapthon import defines
from coapthon.client.helperclient import HelperClient
from coapthon.layers.blocklayer import BlockLayer
from coapthon.layers.cachelayer import CacheLayer
from coapthon.layers.forwardLayer import ForwardLayer
from coapthon.layers.messagelayer import MessageLayer
from coapthon.layers.observelayer import ObserveLayer
from coapthon.layers.resourcelayer import ResourceLayer
from coapthon.messages.message import Message
from coapthon.messages.request import Request
from coapthon.resources.remoteResource import RemoteResource
from coapthon.resources.resource import Resource
from coapthon.serializer import Serializer
from coapthon.utils import Tree, create_logging
__author__ = 'Giacomo Tanganelli'

# create a default logging.conf on first run so fileConfig() below succeeds
if not os.path.isfile("logging.conf"):
    create_logging()

# module-level logger, configured from logging.conf
logger = logging.getLogger(__name__)
logging.config.fileConfig("logging.conf", disable_existing_loggers=False)
class CoAP(object):
"""
Implementation of the Reverse Proxy
"""
    def __init__(self, server_address, xml_file, multicast=False, starting_mid=None, cache=False, sock=None):
        """
        Initialize the Reverse Proxy.

        :param server_address: Server address for incoming connections
        :param xml_file: the xml file that describe remote servers
        :param multicast: if the ip is a multicast address
        :param starting_mid: used for testing purposes
        :param cache: if a cache must be used
        :param sock: if a socket has been created externally, it can be used directly
        """
        # event used to signal shutdown to the background purge thread
        self.stopped = threading.Event()
        self.stopped.clear()
        self.to_be_stopped = []
        # NOTE: rebinds self.purge from the method to the thread object;
        # the Thread target is resolved to the bound method first
        self.purge = threading.Thread(target=self.purge)
        self.purge.start()
        # protocol layers
        self._messageLayer = MessageLayer(starting_mid)
        self._blockLayer = BlockLayer()
        self._observeLayer = ObserveLayer()
        self._forwardLayer = ForwardLayer(self)
        self.resourceLayer = ResourceLayer(self)
        self.cache_enable = cache
        if self.cache_enable:
            self._cacheLayer = CacheLayer(defines.REVERSE_PROXY)
        else:
            self._cacheLayer = None
        # Resource directory
        root = Resource('root', self, visible=False, observable=False, allow_children=True)
        root.path = '/'
        self.root = Tree()
        self.root["/"] = root
        self._serializer = None
        self.server_address = server_address
        self.multicast = multicast
        self.file_xml = xml_file
        # resource name -> (host, port) of the remote server it proxies
        self._mapping = {}
        addrinfo = socket.getaddrinfo(self.server_address[0], None)[0]
        if sock is not None:

            # Use given socket, could be a DTLS socket
            self._socket = sock

        elif self.multicast:  # pragma: no cover

            # Create a socket
            # self._socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, 255)
            # self._socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_LOOP, 1)

            # Join group
            if addrinfo[0] == socket.AF_INET:  # IPv4
                self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)

                # Allow multiple copies of this program on one machine
                # (not strictly needed)
                self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                self._socket.bind((defines.ALL_COAP_NODES, self.server_address[1]))
                mreq = struct.pack("4sl", socket.inet_aton(defines.ALL_COAP_NODES), socket.INADDR_ANY)
                self._socket.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
                # separate unicast socket for direct replies
                self._unicast_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                self._unicast_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                self._unicast_socket.bind(self.server_address)
            else:
                self._socket = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM, socket.IPPROTO_UDP)

                # Allow multiple copies of this program on one machine
                # (not strictly needed)
                self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                self._socket.bind((defines.ALL_COAP_NODES_IPV6, self.server_address[1]))
                # join the IPv6 all-CoAP-nodes multicast group on any interface
                addrinfo_multicast = socket.getaddrinfo(defines.ALL_COAP_NODES_IPV6, 5683)[0]
                group_bin = socket.inet_pton(socket.AF_INET6, addrinfo_multicast[4][0])
                mreq = group_bin + struct.pack('@I', 0)
                self._socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, mreq)
                self._unicast_socket = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
                self._unicast_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                self._unicast_socket.bind(self.server_address)

        else:
            # plain unicast socket, address family chosen from server_address
            if addrinfo[0] == socket.AF_INET:  # IPv4
                self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            else:
                self._socket = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
                self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

            self._socket.bind(self.server_address)

        self.parse_config()
def parse_config(self):
"""
Parse the xml file with remote servers and discover resources on each found server.
"""
tree = ElementTree.parse(self.file_xml)
root = tree.getroot()
for server in root.findall('server'):
destination = server.text
name = server.get("name")
self.discover_remote(destination, name)
def discover_remote(self, destination, name):
"""
Discover resources on remote servers.
:param destination: the remote server (ip, port)
:type destination: tuple
:param name: the name of the remote server
:type name: String
"""
assert (isinstance(destination, str))
if destination.startswith("["):
split = destination.split("]", 1)
host = split[0][1:]
port = int(split[1][1:])
else:
split = destination.split(":", 1)
host = split[0]
port = int(split[1])
server = (host, port)
client = HelperClient(server)
response = client.discover()
client.stop()
self.discover_remote_results(response, name)
def discover_remote_results(self, response, name):
    """
    Create a new remote server resource for each valid discover response.
    :param response: the response to the discovery request
    :param name: the server name
    """
    host, port = response.source
    if response.code == defines.Codes.CONTENT.number:
        # Register a container resource for the remote server and remember
        # its network address so requests can be forwarded later.
        resource = Resource('server', self, visible=True, observable=False, allow_children=True)
        self.add_resource(name, resource)
        self._mapping[name] = (host, port)
        self.parse_core_link_format(response.payload, name, (host, port))
    else:
        # BUG FIX: response.source is a (host, port) tuple; concatenating it
        # to a str raised TypeError and masked the real error. Format it.
        logger.error("Server: " + str(response.source) + " isn't valid.")
def parse_core_link_format(self, link_format, base_path, remote_server):
    """
    Parse discovery results (CoRE Link Format payload).
    :param link_format: the payload of the response to the discovery request
    :param base_path: the base path used to create child resources discovered on the remote server
    :param remote_server: the (ip, port) of the remote server
    """
    # Consume the payload one "<path>;attr;attr,..." entry at a time.
    while len(link_format) > 0:
        # "<...>" — the target path of the next link entry
        pattern = "<([^>]*)>;"
        result = re.match(pattern, link_format)
        path = result.group(1)
        path = path.split("/")
        # first path segment after the leading "/"
        path = path[1:][0]
        link_format = link_format[result.end(1) + 2:]
        # everything up to the next "," (or "<") is the attribute list
        pattern = "([^<,])*"
        result = re.match(pattern, link_format)
        attributes = result.group(0)
        dict_att = {}
        if len(attributes) > 0:
            attributes = attributes.split(";")
            for att in attributes:
                a = att.split("=")
                if len(a) > 1:
                    dict_att[a[0]] = a[1]
                else:
                    # valueless attribute: store the name as its own value
                    dict_att[a[0]] = a[0]
        # advance past the parsed entry (and its trailing separator)
        link_format = link_format[result.end(0) + 1:]
        # TODO handle observing
        resource = RemoteResource('server', remote_server, path, coap_server=self, visible=True, observable=False,
                                  allow_children=True)
        resource.attributes = dict_att
        self.add_resource(base_path + "/" + path, resource)
    logger.info(self.root.dump())
def purge(self):
    """Periodically drop stale transactions until the server is stopped."""
    stop_event = self.stopped
    while not stop_event.isSet():
        # Wake once per exchange lifetime and let the message layer clean up.
        stop_event.wait(timeout=defines.EXCHANGE_LIFETIME)
        self._messageLayer.purge()
def listen(self, timeout=10):
    """
    Listen for incoming messages. Timeout is used to check if the server must be switched off.
    :param timeout: Socket Timeout in seconds
    """
    self._socket.settimeout(float(timeout))
    while not self.stopped.isSet():
        try:
            data, client_address = self._socket.recvfrom(4096)
        except socket.timeout:
            # periodic wake-up so the stop flag is re-checked
            continue
        try:
            self.receive_datagram((data, client_address))
        except RuntimeError:
            # NOTE: Python-2 print statement — this module targets Python 2.
            print "Exception with Executor"
    self._socket.close()
def close(self):
    """Shut the server down: raise the stop flag, wake every waiting thread, release the socket."""
    logger.info("Stop server")
    self.stopped.set()
    for pending_event in self.to_be_stopped:
        pending_event.set()
    self._socket.close()
def receive_datagram(self, args):
    """
    Handle messages coming from the udp socket.
    :param args: (data, client_address)
    """
    data, client_address = args
    serializer = Serializer()
    message = serializer.deserialize(data, client_address)
    if isinstance(message, int):
        # deserialization failed: the int is the CoAP error code to reject with
        logger.error("receive_datagram - BAD REQUEST")
        rst = Message()
        rst.destination = client_address
        rst.type = defines.Types["RST"]
        rst.code = message
        self.send_datagram(rst)
        return
    logger.debug("receive_datagram - " + str(message))
    if isinstance(message, Request):
        transaction = self._messageLayer.receive_request(message)
        if transaction.request.duplicated and transaction.completed:
            # retransmitted request already answered: replay the response
            logger.debug("message duplicated,transaction completed")
            transaction = self._observeLayer.send_response(transaction)
            transaction = self._blockLayer.send_response(transaction)
            transaction = self._messageLayer.send_response(transaction)
            self.send_datagram(transaction.response)
            return
        elif transaction.request.duplicated and not transaction.completed:
            # still processing the original: just acknowledge the duplicate
            logger.debug("message duplicated,transaction NOT completed")
            self._send_ack(transaction)
            return
        # arm the separate-response timer in case processing outlasts ACK_TIMEOUT
        transaction.separate_timer = self._start_separate_timer(transaction)
        transaction = self._blockLayer.receive_request(transaction)
        if transaction.block_transfer:
            # mid block-wise transfer: answer immediately, no forwarding needed
            self._stop_separate_timer(transaction.separate_timer)
            transaction = self._messageLayer.send_response(transaction)
            self.send_datagram(transaction.response)
            return
        transaction = self._observeLayer.receive_request(transaction)
        """
        call to the cache layer to check if there's a cached response for the request
        if not, call the forward layer
        """
        if self._cacheLayer is not None:
            transaction = self._cacheLayer.receive_request(transaction)
            if transaction.cacheHit is False:
                print transaction.request
                transaction = self._forwardLayer.receive_request_reverse(transaction)
                print transaction.response
            transaction = self._observeLayer.send_response(transaction)
            transaction = self._blockLayer.send_response(transaction)
            transaction = self._cacheLayer.send_response(transaction)
        else:
            transaction = self._forwardLayer.receive_request_reverse(transaction)
            transaction = self._observeLayer.send_response(transaction)
            transaction = self._blockLayer.send_response(transaction)
        self._stop_separate_timer(transaction.separate_timer)
        transaction = self._messageLayer.send_response(transaction)
        if transaction.response is not None:
            # confirmable responses get a retransmission task
            if transaction.response.type == defines.Types["CON"]:
                self._start_retrasmission(transaction, transaction.response)
            self.send_datagram(transaction.response)
    elif isinstance(message, Message):
        # empty message (ACK/RST) for a pending transaction
        transaction = self._messageLayer.receive_empty(message)
        if transaction is not None:
            transaction = self._blockLayer.receive_empty(message, transaction)
            self._observeLayer.receive_empty(message, transaction)
    else:  # pragma: no cover
        logger.error("Received response from %s", message.source)
def send_datagram(self, message):
    """
    Serialize *message* and push it out through the UDP socket.
    No-op once the server has been stopped.
    :type message: Message
    :param message: the message to send
    """
    if self.stopped.isSet():
        return
    host, port = message.destination
    logger.debug("send_datagram - " + str(message))
    raw = Serializer().serialize(message)
    self._socket.sendto(raw, (host, port))
def add_resource(self, path, resource):
    """
    Helper function to add resources to the resource directory during server initialization.
    :param path: the path for the new created resource
    :type resource: Resource
    :param resource: the resource to be added
    :return: False if an intermediate path segment does not exist, True otherwise.
    """
    assert isinstance(resource, Resource)
    path = path.strip("/")
    paths = path.split("/")
    actual_path = ""
    i = 0
    # Walk the path one segment at a time, verifying each ancestor exists.
    for p in paths:
        i += 1
        actual_path += "/" + p
        try:
            res = self.root[actual_path]
        except KeyError:
            res = None
        if res is None:
            # a missing *intermediate* node means the parent chain is broken
            if len(paths) != i:
                return False
            # last segment: register the new resource here
            resource.path = actual_path
            self.root[actual_path] = resource
    # NOTE(review): if the final path already exists, the existing resource is
    # kept (not overwritten) and True is still returned — confirm intended.
    return True
def _start_retrasmission(self, transaction, message):
    """
    Start the retransmission task for a confirmable message.
    :type transaction: Transaction
    :param transaction: the transaction that owns the message that needs retransmission
    :type message: Message
    :param message: the message that needs the retransmission task
    """
    with transaction:
        if message.type != defines.Types['CON']:
            return
        # first retry delay is randomised in [ACK_TIMEOUT, ACK_TIMEOUT * ACK_RANDOM_FACTOR]
        initial_delay = random.uniform(defines.ACK_TIMEOUT, (defines.ACK_TIMEOUT * defines.ACK_RANDOM_FACTOR))
        transaction.retransmit_stop = threading.Event()
        self.to_be_stopped.append(transaction.retransmit_stop)
        transaction.retransmit_thread = threading.Thread(target=self._retransmit,
                                                         args=(transaction, message, initial_delay, 0))
        transaction.retransmit_thread.start()
def _retransmit(self, transaction, message, future_time, retransmit_count):
    """
    Thread function to retransmit the message in the future
    :param transaction: the transaction that owns the message that needs retransmission
    :param message: the message that needs the retransmission task
    :param future_time: the amount of time to wait before a new attempt
    :param retransmit_count: the number of retransmissions
    """
    with transaction:
        # Retry until acknowledged/rejected, the retry budget is spent,
        # or the server shuts down.
        while retransmit_count < defines.MAX_RETRANSMIT and (not message.acknowledged and not message.rejected) \
                and not self.stopped.isSet():
            # wait for an ACK/RST (or server stop); on timeout, retransmit
            transaction.retransmit_stop.wait(timeout=future_time)
            if not message.acknowledged and not message.rejected and not self.stopped.isSet():
                retransmit_count += 1
                # exponential backoff: double the wait before the next attempt
                future_time *= 2
                self.send_datagram(message)
        if message.acknowledged or message.rejected:
            message.timeouted = False
        else:
            logger.warning("Give up on message {message}".format(message=message.line_print))
            message.timeouted = True
            # a timed-out notification means the observer is unreachable: drop it
            if message.observe is not None:
                self._observeLayer.remove_subscriber(message)
        try:
            self.to_be_stopped.remove(transaction.retransmit_stop)
        except ValueError:
            # already removed (e.g. by close()); nothing to do
            pass
        transaction.retransmit_stop = None
        transaction.retransmit_thread = None
def _start_separate_timer(self, transaction):
    """
    Start a thread to handle separate mode.
    :type transaction: Transaction
    :param transaction: the transaction that is in processing
    :rtype : the Timer object
    """
    # If processing outlasts ACK_TIMEOUT, the timer fires an empty ACK.
    separate_timer = threading.Timer(defines.ACK_TIMEOUT, self._send_ack, (transaction,))
    separate_timer.start()
    return separate_timer
@staticmethod
def _stop_separate_timer(timer):
    """
    Cancel the separate-response timer once a real answer has been provided to the client.
    :param timer: The Timer object
    """
    timer.cancel()
def _send_ack(self, transaction):
    """
    Send an empty ACK for the transaction's request, unless it was already acknowledged.
    :param transaction: the transaction that owns the request
    """
    if transaction.request.acknowledged:
        return
    empty_ack = Message()
    empty_ack.type = defines.Types['ACK']
    empty_ack = self._messageLayer.send_empty(transaction, transaction.request, empty_ack)
    self.send_datagram(empty_ack)
|
engine.py | # encoding: UTF-8
# ้่พพไฟกๆๆฐ่กๆ
ๅๅธๅจ
# ๅๅฏ่ตไบง
import copy
import json
import traceback
from threading import Thread
from datetime import datetime, timedelta
from time import sleep
from logging import ERROR
from pytdx.exhq import TdxExHq_API
from copy import deepcopy
from vnpy.event import EventEngine
from vnpy.trader.constant import Exchange
from vnpy.trader.engine import BaseEngine, MainEngine
from vnpy.trader.event import EVENT_TIMER
from vnpy.trader.object import TickData, SubscribeRequest
from vnpy.trader.utility import get_trading_date, get_underlying_symbol, load_json, get_real_symbol_by_exchange
from vnpy.data.tdx.tdx_common import TDX_FUTURE_HOSTS, get_future_contracts
from vnpy.component.base import (
NIGHT_MARKET_23,
NIGHT_MARKET_SQ2,
MARKET_DAY_ONLY)
from vnpy.amqp.producer import publisher
from vnpy.gateway.ctp.ctp_gateway import CtpMdApi, symbol_exchange_map
APP_NAME = 'Idx_Publisher'
class IndexTickPublisherV2(BaseEngine):
    """
    Index tick publishing service (CTP based).

    Subscribes every listed contract through the CTP market-data interface,
    builds an open-interest-weighted index tick per underlying and publishes
    it to rabbitMQ.

    NOTE(review): this block was reconstructed from a corrupted dump in which
    comments and f-strings were split across lines; runtime strings were
    rejoined, logic kept unchanged.
    """

    # ----------------------------------------------------------------------
    def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
        """"""
        super(IndexTickPublisherV2, self).__init__(
            main_engine, event_engine, APP_NAME)
        self.main_engine = main_engine
        self.event_engine = event_engine
        self.create_logger(logger_name=APP_NAME)
        self.gateway_name = 'CTP'
        self.last_minute = None
        self.registerEvent()
        self.connection_status = False  # connection status
        # ctp md api
        self.subscribed_symbols = set()  # subscribed contract codes
        self.md_api = None  # API connection session object
        self.last_tick_dt = {}  # last tick time per underlying
        self.instrument_count = 50000
        self.has_qry_instrument = False
        # rabbitmq settings from vt_setting.json
        self.conf = {}
        self.pub = None
        self.status = {}
        # NOTE(review): duplicate initialisation — subscribed_symbols was
        # already created above; kept for behavioural parity.
        self.subscribed_symbols = set()  # subscribed contract codes
        self.ticks = {}
        self.dt = datetime.now()
        # local /vnpy/data/tdx/future_contracts.json
        self.all_contracts = get_future_contracts()
        # short (underlying) symbols that should be subscribed
        self.selected_underly_symbols = load_json('subscribe_symbols.json', auto_save=False)
        # underlying symbol <=> number of its real contracts
        self.underly_symbols_num_dict = {}

    def write_error(self, content: str):
        """Log *content* at ERROR level."""
        self.write_log(msg=content, level=ERROR)

    def create_publisher(self, conf):
        """Create the rabbitmq message publisher (idempotent)."""
        if self.pub:
            return
        try:
            self.write_log(f'创建发布器:{conf}')
            # message publisher
            self.pub = publisher(host=conf.get('host', 'localhost'),
                                 port=conf.get('port', 5672),
                                 user=conf.get('user', 'admin'),
                                 password=conf.get('password', 'admin'),
                                 channel_number=conf.get('channel_number', 1),
                                 queue_name=conf.get('queue_name', ''),
                                 routing_key=conf.get('routing_key', 'default'),
                                 exchange=conf.get('exchange', 'x_fanout_idx_tick'))
            self.write_log(f'创建发布器成功')
        except Exception as ex:
            self.write_log(u'创建tick发布器异常:{}'.format(str(ex)))

    # ----------------------------------------------------------------------
    def registerEvent(self):
        """Register event listeners."""
        self.event_engine.register(EVENT_TIMER, self.process_timer_event)

    def process_timer_event(self, event):
        """Timer event: run check_status at most once per minute."""
        self.dt = datetime.now()
        if self.last_minute and self.dt.minute == self.last_minute:
            return
        self.last_minute = self.dt.minute
        self.check_status()

    def check_status(self):
        """Periodic health check: refresh the contract universe and (re)subscribe."""
        if not self.md_api:
            self.status.update({'con': False})
            self.write_log(f'行情接口未连接')
            return
        dt_now = datetime.now()
        # scan the contract configuration file
        for underly_symbol, info in self.all_contracts.items():
            # if subscribe_symbols.json lists specific underlyings,
            # skip everything else, e.g. ['RB','IF']
            if len(self.selected_underly_symbols) > 0 and underly_symbol not in self.selected_underly_symbols:
                continue
            self.write_log(f'定时检查{underly_symbol}')
            # during night hours, skip day-session-only markets
            if dt_now.hour < 4 or dt_now.hour > 20:
                if underly_symbol in MARKET_DAY_ONLY:
                    continue
            # all currently listed contracts of this underlying
            symbols = info.get('symbols', {})
            total_oi = 0
            if len(symbols) > 0:
                total_oi = sum([v for v in symbols.values()])
            # exchange of the underlying
            exchange = info.get('exchange', 'LOCAL')
            # locally recorded ticks for this underlying
            tick_dict = self.ticks.get(underly_symbol, {})
            for symbol in list(symbols.keys()):
                # full symbol => gateway symbol, e.g. ZC2109 => ZC109, RB2110 => rb2110
                vn_symbol = get_real_symbol_by_exchange(symbol, Exchange(exchange))
                if symbol.replace(underly_symbol, '') < dt_now.strftime('%Y%m%d'):
                    self.write_log(f'移除早于当月的合约{symbol}')
                    symbols.pop(symbol, None)
                    continue
                cur_oi = symbols.get(symbol,0)
                if cur_oi < max(total_oi * 0.03,100):
                    self.write_log(f'{symbol} 上一交易日持仓量:{cur_oi} 小于合约总持仓量{total_oi}的3% {max(total_oi * 0.03,100)},不纳入指数计算范围')
                    symbols.pop(symbol, None)
                    continue
                # symbol with exchange suffix
                vt_symbol = f'{vn_symbol}.{exchange}'
                # symbol_exchange_map is a module-level map used by the ctp md
                # api, so keep the symbol -> exchange relation updated here
                if vn_symbol not in symbol_exchange_map:
                    symbol_exchange_map.update({vn_symbol: Exchange(exchange)})
                # contract not producing ticks yet: (re)issue the subscription
                if vt_symbol not in tick_dict:
                    req = SubscribeRequest(
                        symbol=vn_symbol,
                        exchange=Exchange(exchange)
                    )
                    self.subscribe(req)
            # underlying symbol <=> count of real contracts
            self.underly_symbols_num_dict.update({underly_symbol: len(symbols.keys())})

    def connect(self, *args, **kwargs):
        """
        Connect the ctp market-data feed and the rabbitmq publisher.
        :param args:
        :param kwargs: md_address/userid/password/brokerid + rabbit_config
        :return:
        """
        self.write_log(f'connect({kwargs}')
        # connect to the ctp market-data server
        md_address = kwargs.get('md_address')
        userid = kwargs.get('userid')
        password = kwargs.get('password')
        brokerid = kwargs.get('brokerid')
        if not self.md_api:
            self.write_log(f'创建ctp行情服务器{md_address}')
            self.md_api = CtpMdApi(gateway=self)
            self.md_api.connect(address=md_address,
                                userid=userid,
                                password=password,
                                brokerid=brokerid)
        # connect rabbit MQ
        rabbit_config = kwargs.get('rabbit_config', {})
        self.write_log(f'创建rabbitMQ 消息推送桩,{rabbit_config}')
        self.conf.update(rabbit_config)
        self.create_publisher(self.conf)

    def subscribe(self, req: SubscribeRequest):
        """Subscribe a contract through the md api (deduplicated)."""
        self.write_log(f'engine:订阅合约: {req.vt_symbol}')
        if req.vt_symbol not in self.subscribed_symbols:
            self.subscribed_symbols.add(req.vt_symbol)
            if self.md_api:
                self.md_api.subscribe(req)

    def on_tick(self, tick):
        """Tick arrival: aggregate per-underlying ticks into an index tick and publish it."""
        # drop ticks whose timestamp deviates too far from local time
        if abs((tick.datetime - self.dt).total_seconds()) > 20:
            return
        short_symbol = get_underlying_symbol(tick.symbol).upper()
        # fetch/initialise the per-underlying tick store
        tick_dict = self.ticks.get(short_symbol, None)
        if tick_dict is None:
            tick_dict = {tick.symbol: tick}
            self.ticks.update({short_symbol: tick_dict})
            return
        # time of the previous index computation for this underlying
        last_dt = self.last_tick_dt.get(short_symbol, tick.datetime)
        # recompute the index at most once per second
        if last_dt and tick.datetime.second != last_dt.second:
            all_amount = 0
            all_interest = 0
            all_volume = 0
            all_ask1 = 0
            all_bid1 = 0
            last_price = 0
            ask_price_1 = 0
            bid_price_1 = 0
            mi_tick = None
            # not enough contract ticks accumulated yet: skip this round
            n = self.underly_symbols_num_dict.get(short_symbol, 1)
            if len(tick_dict) < min(n*0.8, 3) :
                self.write_log(f'{short_symbol}合约数据{len(tick_dict)}不足{n} 0.8,暂不合成指数')
                return
            # accumulate OI/amount/volume over all contracts and track the
            # main contract (largest open interest)
            for t in tick_dict.values():
                all_interest += t.open_interest
                all_amount += t.last_price * t.open_interest
                all_volume += t.volume
                all_ask1 += t.ask_price_1 * t.open_interest
                all_bid1 += t.bid_price_1 * t.open_interest
                if mi_tick is None or mi_tick.open_interest < t.open_interest:
                    mi_tick = t
            # OI-weighted last price
            if all_interest > 0 and all_amount > 0:
                last_price = round(float(all_amount / all_interest), 4)
            # OI-weighted ask 1
            if all_ask1 > 0 and all_interest > 0:
                ask_price_1 = round(float(all_ask1 / all_interest), 4)
            # OI-weighted bid 1
            if all_bid1 > 0 and all_interest > 0:
                bid_price_1 = round(float(all_bid1 / all_interest), 4)
            if mi_tick and last_price > 0:
                if self.pub:
                    d = copy.copy(mi_tick.__dict__)
                    # datetime => string
                    if isinstance(mi_tick.datetime, datetime):
                        d.update({'datetime': mi_tick.datetime.strftime('%Y-%m-%d %H:%M:%S.%f')})
                    # enum => string
                    d.update({'exchange': mi_tick.exchange.value})
                    d.update({'symbol': f'{short_symbol}99', 'vt_symbol': f'{short_symbol}99.{mi_tick.exchange.value}'})
                    # overwrite with the synthesised index figures
                    d.update({'open_interest': all_interest, 'volume': all_volume,
                              'last_price': last_price, 'ask_price_1': ask_price_1, 'bid_price_1': bid_price_1})
                    # print('{} {}:{}'.format(d.get('datetime'), d.get("vt_symbol"), d.get('last_price')))
                    d = json.dumps(d)
                    self.pub.pub(d)
            # remember when this index tick was produced
            self.last_tick_dt.update({short_symbol: tick.datetime})
        # always store the latest tick of this contract
        tick_dict.update({tick.symbol: tick})
        self.ticks.update({short_symbol: tick_dict})

    def on_custom_tick(self, tick):
        pass
class IndexTickPublisher(BaseEngine):
    # Index tick publishing service (TDX based).
    # Pulls index ticks from the pytdx interface and publishes them to rabbitMQ.
    #
    # NOTE(review): reconstructed from a corrupted dump in which comments and
    # strings were split across lines; runtime strings were rejoined.

    # ----------------------------------------------------------------------
    def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
        """"""
        super(IndexTickPublisher, self).__init__(
            main_engine, event_engine, APP_NAME)
        self.main_engine = main_engine
        self.event_engine = event_engine
        self.create_logger(logger_name=APP_NAME)
        self.last_minute = None
        self.registerEvent()
        self.req_interval = 0.5  # minimum request interval, 500 ms
        self.req_id = 0  # request sequence number
        self.connection_status = False  # connection status
        self.symbol_exchange_dict = {}  # tdx symbol -> vn exchange
        self.symbol_market_dict = {}  # tdx symbol -> tdx market id
        self.symbol_tick_dict = {}  # tdx symbol -> last tick
        # self.queue = Queue()  # request queue
        self.pool = None  # thread pool
        self.req_thread = None  # polling thread
        self.ip_list = TDX_FUTURE_HOSTS
        # tdx api
        self.fail_ip_dict = {}  # failed server configs, "ip:port" -> cooldown minutes
        self.best_ip = None
        self.best_port = None
        self.best_name = None
        self.api = None  # API connection session object
        self.last_tick_dt = None  # time of this session's last tick
        self.last_sort_speed_dt = None
        self.instrument_count = 50000
        self.has_qry_instrument = False
        # rabbitmq settings from vt_setting.json
        self.conf = {}
        self.pub = None

    def write_error(self, content: str):
        """Log *content* at ERROR level."""
        self.write_log(msg=content, level=ERROR)

    def create_publisher(self, conf):
        """Create the rabbitmq message publisher (idempotent)."""
        if self.pub:
            return
        try:
            # message publisher
            self.pub = publisher(host=conf.get('host', 'localhost'),
                                 port=conf.get('port', 5672),
                                 user=conf.get('user', 'admin'),
                                 password=conf.get('password', 'admin'),
                                 channel_number=conf.get('channel_number', 1),
                                 queue_name=conf.get('queue_name', ''),
                                 routing_key=conf.get('routing_key', 'default'),
                                 exchange=conf.get('exchange', 'x_fanout_idx_tick'))
        except Exception as ex:
            self.write_log(u'创建tick发布器异常:{}'.format(str(ex)))

    # ----------------------------------------------------------------------
    def registerEvent(self):
        """Register event listeners."""
        self.event_engine.register(EVENT_TIMER, self.process_timer_event)

    def process_timer_event(self, event):
        """Timer event: once per minute, age the failed-IP counters and run a status check."""
        dt = datetime.now()
        if dt.minute == self.last_minute:
            return
        # NOTE(review): self.last_minute is never updated here — confirm intended.
        # decrement each failed IP's cooldown counter
        for k in list(self.fail_ip_dict.keys()):
            c = self.fail_ip_dict.get(k, 0)
            if c <= 0:
                self.fail_ip_dict.pop(k, None)
            else:
                c -= 1
                self.fail_ip_dict.update({k: c})
        self.check_status()

    # ----------------------------------------------------------------------
    def ping(self, ip, port=7709):
        """
        Ping a market-data server.
        :param ip:
        :param port:
        :return: round-trip time in ms (10000 ms on failure / no response)
        """
        apix = TdxExHq_API()
        __time1 = datetime.now()
        try:
            with apix.connect(ip, port):
                if apix.get_instrument_count() > 10000:
                    _timestamp = (datetime.now() - __time1).total_seconds() * 1000
                    self.write_log('服务器{}:{},耗时:{}ms'.format(ip, port, _timestamp))
                    return _timestamp
                else:
                    self.write_log(u'该服务器IP {}无响应.'.format(ip))
                    return timedelta(seconds=10).total_seconds() * 1000
        except Exception as ex:
            self.write_error(u'tdx ping服务器{}，异常的响应{}'.format(ip, str(ex)))
            return timedelta(seconds=10).total_seconds() * 1000

    def sort_ip_speed(self):
        """
        Ping every known server and sort the list by round-trip speed.
        :return:
        """
        speed_result = []
        for x in self.ip_list:
            speed = self.ping(x['ip'], x['port'])
            x.update({'speed': speed})
            speed_result.append(copy.copy(x))
        # fastest servers first
        self.ip_list = sorted(speed_result, key=lambda s: s['speed'])
        self.write_log(u'服务器访问速度排序:{}'.format(self.ip_list))

    # ----------------------------------------------------------------------
    def select_best_ip(self):
        """
        Select a market-data server.
        :return: (ip, port, server name) or (None, None, None)
        """
        self.write_log(u'选择通达信行情服务器')
        # re-measure speeds at most once per minute
        if self.last_sort_speed_dt is None or (datetime.now() - self.last_sort_speed_dt).total_seconds() > 60:
            self.sort_ip_speed()
            self.last_sort_speed_dt = datetime.now()
        valid_ip_list = [x for x in self.ip_list if x.get('speed', 10000) < 10000]
        if len(valid_ip_list) == 0:
            self.write_error(u'未能找到合适速度的行情服务器')
            return None, None, None
        for server in valid_ip_list:
            ip = server.get('ip')
            port = server.get('port')
            name = server.get('name', '{}:{}'.format(ip, port))
            if '{}:{}'.format(ip, port) in self.fail_ip_dict:
                self.write_log(u'{}:{}属于上次异常IP地址，忽略'.format(ip, port))
                continue
            return ip, port, name
        return None, None, None

    def connect(self, rabbit_config: dict):
        """
        Connect the tdx market-data server and the rabbitmq publisher.
        :param rabbit_config: rabbitmq connection settings
        :return:
        """
        if self.connection_status:
            # NOTE(review): "or" here looks like it should be "and" — confirm.
            if self.api is not None or getattr(self.api, "client", None) is not None:
                self.write_log(u'当前已经连接,不需要重新连接')
                return
        self.write_log(u'开始通达信行情服务器')
        try:
            self.api = TdxExHq_API(heartbeat=True, auto_retry=True, raise_exception=True)
            # pick the fastest healthy server
            self.best_ip, self.best_port, self.best_name = self.select_best_ip()
            if self.best_ip is None or self.best_port is None:
                self.write_error(u'未能选择到服务器')
            self.write_log(u'api 选择 {}: {}:{}'.format(self.best_name, self.best_ip, self.best_port))
            self.api.connect(self.best_ip, self.best_port)
            # probe the connection by fetching the instrument count
            c = self.api.get_instrument_count()
            if c is None or c < 10:
                err_msg = u'该服务器IP {}/{}无响应'.format(self.best_ip, self.best_port)
                self.fail_ip_dict.update({'{}:{}'.format(self.best_ip, self.best_port): 10})
                self.write_error(err_msg)
            else:
                self.write_log(u'创建tdx连接')
                self.last_tick_dt = datetime.now()
                self.connection_status = True
                self.instrument_count = c
        except Exception as ex:
            self.write_error(u'连接服务器tdx异常:{},{}'.format(str(ex), traceback.format_exc()))
            self.fail_ip_dict.update({'{}:{}'.format(self.best_ip, self.best_port): 10})
            return
        # refresh symbol_exchange_dict / symbol_market_dict
        self.write_log(u'查询合约')
        self.qry_instrument()
        self.conf.update(rabbit_config)
        self.create_publisher(self.conf)
        self.req_thread = Thread(target=self.run)
        self.req_thread.start()

    def reconnect(self):
        """
        Re-select a server and rebuild the tdx api session.
        :return:
        """
        try:
            self.best_ip, self.best_port, self.best_name = self.select_best_ip()
            self.api = TdxExHq_API(heartbeat=True, auto_retry=True)
            self.api.connect(self.best_ip, self.best_port)
            # probe the connection
            c = self.api.get_instrument_count()
            if c is None or c < 10:
                err_msg = u'该服务器IP {}/{}无响应'.format(self.best_ip, self.best_port)
                self.fail_ip_dict.update({'{}:{}'.format(self.best_ip, self.best_port): 10})
                self.write_error(err_msg)
            else:
                self.write_log(u'重新创建tdx连接')
                sleep(1)
        except Exception as ex:
            self.write_error(u'重新连接服务器异常:{},{}'.format(str(ex), traceback.format_exc()))
            self.fail_ip_dict.update({'{}:{}'.format(self.best_ip, self.best_port): 10})
            return

    def close(self):
        """Quit the API, the polling thread and the publisher."""
        self.write_log(u'退出tdx API')
        self.connection_status = False
        if self.req_thread is not None:
            self.write_log(u'退出请求线程')
            self.req_thread.join()
        if self.pub:
            self.write_log(u'退出rabbitMQ 发布器')
            self.pub.exit()

    def check_status(self):
        """Health check: log the freshest tick and reconnect when the feed looks dead."""
        self.write_log(u'检查tdx接口状态')
        if len(self.symbol_tick_dict) > 0:
            # BUG FIX: dict.keys() is a view object in Python 3 and cannot be
            # indexed; take an arbitrary key with next(iter(...)) instead.
            k = next(iter(self.symbol_tick_dict))
            tick = self.symbol_tick_dict.get(k, None)
            if tick:
                self.write_log(f'{tick.vt_symbol}: {tick.datetime}, price:{tick.last_price}')
        else:
            self.write_log(f'目前没有收到tick')
        # reconnect when never connected, api missing, or no tick for > 60s
        over_time = self.last_tick_dt is None or (datetime.now() - self.last_tick_dt).total_seconds() > 60
        if not self.connection_status or self.api is None or over_time:
            self.write_log(u'tdx还没有启动连接，就启动连接')
            self.close()
            self.api = None
            self.reconnect()
        else:
            self.write_log(u'tdx接口状态正常')

    def qry_instrument(self):
        """
        Query / refresh contract information (once per session).
        :return:
        """
        if not self.connection_status:
            self.write_error(u'tdx连接状态为断开，不能查询和更新合约信息')
            return
        if self.has_qry_instrument:
            self.write_error(u'已经查询过一次合约信息，不再查询')
            return
        # fetch all contracts in pages of 500
        num = self.api.get_instrument_count()
        if not isinstance(num, int):
            return
        all_contacts = sum(
            [self.api.get_instrument_info((int(num / 500) - i) * 500, 500) for i in range(int(num / 500) + 1)], [])
        # [{"category":category,"market": int,"code":sting,"name":string,"desc":string},{}]
        # keep only the continuous "L9" contracts; map symbol -> market id / exchange
        for tdx_contract in all_contacts:
            tdx_symbol = tdx_contract.get('code', None)
            if tdx_symbol is None or tdx_symbol[-2:] not in ['L9']:
                continue
            tdx_market_id = tdx_contract.get('market')
            self.symbol_market_dict[tdx_symbol] = tdx_market_id
            if tdx_market_id == 47:  # CFFEX
                self.symbol_exchange_dict[tdx_symbol] = Exchange.CFFEX
            elif tdx_market_id == 28:  # CZCE
                self.symbol_exchange_dict[tdx_symbol] = Exchange.CZCE
            elif tdx_market_id == 29:  # DCE
                self.symbol_exchange_dict[tdx_symbol] = Exchange.DCE
            elif tdx_market_id == 30:  # SHFE + INE
                self.symbol_exchange_dict[tdx_symbol] = Exchange.SHFE
            elif tdx_market_id == 60:  # main-contract board
                self.write_log(u'主力合约:{}'.format(tdx_contract))
        self.has_qry_instrument = True

    def run(self):
        # version 3: poll the index board directly
        try:
            last_dt = datetime.now()
            self.write_log(u'开始运行tdx,{}'.format(last_dt))
            while self.connection_status:
                try:
                    self.process_index_req()
                except BrokenPipeError as bex:
                    self.write_error(u'BrokenPipeError{},重试重连tdx[{}]'.format(str(bex), 0))
                    self.fail_ip_dict.update({'{}:{}'.format(self.best_ip, self.best_port): 10})
                    self.reconnect()
                    sleep(5)
                    break
                except Exception as ex:
                    self.write_error(u'tdx exception:{},{}'.format(str(ex), traceback.format_exc()))
                    self.fail_ip_dict.update({'{}:{}'.format(self.best_ip, self.best_port): 10})
                    self.reconnect()
                sleep(self.req_interval)
                dt = datetime.now()
                if last_dt.minute != dt.minute:
                    self.write_log('tdxcheck point. {},last_tick_dt:{}'.format(dt, self.last_tick_dt))
                    last_dt = dt
        except Exception as ex:
            self.write_error(u'tdx pool.run exception:{},{}'.format(str(ex), traceback.format_exc()))
        self.write_error(u'tdx 线程 {}退出'.format(datetime.now()))

    def process_index_req(self):
        """Poll the tdx index board and publish each valid index tick."""
        # all quotes from the tdx index board
        rt_list = self.api.get_instrument_quote_list(42, 3, 0, 100)
        if rt_list is None or len(rt_list) == 0:
            self.write_log(u'tdx:get_instrument_quote_list() rt_list为空')
            return
        # feed is alive: record the last update time of this session
        self.last_tick_dt = datetime.now()
        for d in list(rt_list):
            tdx_symbol = d.get('code', None)
            if tdx_symbol.endswith('L9'):
                vn_symbol = tdx_symbol.replace('L9', '99').upper()
            else:
                vn_symbol = tdx_symbol.upper()
            tick_datetime = datetime.now()
            # millisecond fix: if two ticks land in the same second, mark the
            # second one with .500 ms so timestamps stay distinct
            last_tick = self.symbol_tick_dict.get(vn_symbol, None)
            if (last_tick is not None) and tick_datetime.replace(microsecond=0) == last_tick.datetime:
                tick_datetime = tick_datetime.replace(microsecond=500)
            else:
                tick_datetime = tick_datetime.replace(microsecond=0)
            # tdx reports INE energy symbols under SHFE; correct the exchange
            if vn_symbol in ['NR99', 'SC99']:
                exchange = Exchange.INE
            else:
                exchange = self.symbol_exchange_dict.get(tdx_symbol, Exchange.LOCAL)
            tick = TickData(
                gateway_name='tdx',
                symbol=vn_symbol,
                datetime=tick_datetime,
                exchange=exchange
            )
            tick.pre_close = float(d.get('ZuoJie', 0.0))
            tick.high_price = float(d.get('ZuiGao', 0.0))
            tick.open_price = float(d.get('JinKai', 0.0))
            tick.low_price = float(d.get('ZuiDi', 0.0))
            tick.last_price = float(d.get('MaiChu', 0.0))
            tick.volume = int(d.get('XianLiang', 0))
            tick.open_interest = d.get('ChiCangLiang')
            tick.time = tick.datetime.strftime('%H:%M:%S.%f')[0:12]
            tick.date = tick.datetime.strftime('%Y-%m-%d')
            tick.trading_day = get_trading_date(tick_datetime)
            # the index has no limit prices; approximate with +-10% of pre-close
            tick.limit_up = tick.pre_close * 1.1
            tick.limit_down = tick.pre_close * 0.9
            # only one depth level is available
            tick.bid_price_1 = float(d.get('MaiRuJia', 0.0))
            tick.bid_volume_1 = int(d.get('MaiRuLiang', 0))
            tick.ask_price_1 = float(d.get('MaiChuJia', 0.0))
            tick.ask_volume_1 = int(d.get('MaiChuLiang', 0))
            underlying_symbol = vn_symbol.replace('99', '').upper()
            # drop ticks outside trading hours
            if tick.exchange is Exchange.CFFEX:
                if tick.datetime.hour not in [9, 10, 11, 13, 14, 15]:
                    continue
                if tick.datetime.hour == 9 and tick.datetime.minute < 15:
                    continue
                # midday break 11:30~12:00
                if tick.datetime.hour == 11 and tick.datetime.minute >= 30:
                    continue
                if tick.datetime.hour == 15 and tick.datetime.minute >= 15 and underlying_symbol in ['T', 'TF', 'TS']:
                    continue
                if tick.datetime.hour == 15 and underlying_symbol in ['IH', 'IF', 'IC']:
                    continue
            else:  # DCE / CZCE / SHFE / INE
                # non-trading hours
                if tick.datetime.hour in [3, 4, 5, 6, 7, 8, 12, 15, 16, 17, 18, 19, 20]:
                    continue
                # morning break 10:15~10:30
                if tick.datetime.hour == 10 and 15 <= tick.datetime.minute < 30:
                    continue
                # midday break 11:30~12:00
                if tick.datetime.hour == 11 and tick.datetime.minute >= 30:
                    continue
                # afternoon break 13:00~13:30
                if tick.datetime.hour == 13 and tick.datetime.minute < 30:
                    continue
                # overnight break 2:30~3:00
                if tick.datetime.hour == 2 and tick.datetime.minute >= 30:
                    continue
                # night sessions that close at 23:00
                if underlying_symbol in NIGHT_MARKET_23:
                    if tick.datetime.hour in [23, 0, 1, 2]:
                        continue
                # night sessions that close at 1:00
                if underlying_symbol in NIGHT_MARKET_SQ2:
                    if tick.datetime.hour in [1, 2]:
                        continue
                # day-session-only contracts during the night session
                if underlying_symbol in MARKET_DAY_ONLY and (tick.datetime.hour < 9 or tick.datetime.hour > 16):
                    # self.write_log(u'drop day-only contract tick during night session')
                    continue
            self.symbol_tick_dict[tick.symbol] = tick
            if self.pub:
                d = copy.copy(tick.__dict__)
                if isinstance(tick.datetime, datetime):
                    d.update({'datetime': tick.datetime.strftime('%Y-%m-%d %H:%M:%S.%f')})
                d.update({'exchange': tick.exchange.value})
                d = json.dumps(d)
                self.pub.pub(d)
|
Chap10_Example10.33.py | from threading import *
class abc:
    """Toy ticket-booking demo: a shared seat counter protected by a lock."""

    def __init__(self, seat_available):
        self.seat_available = seat_available  # seats still free
        self.mylock = Lock()  # serialises concurrent reservations

    def abc_reserveseat(self, seat_required):
        """Reserve *seat_required* seats for the calling thread, if available."""
        # BUG FIX: acquire()/release() without try/finally would leave the lock
        # held forever if the body raised; "with" releases it on any exit path.
        with self.mylock:
            print("Number of seats remaining : ", self.seat_available)
            if self.seat_available >= seat_required:
                print(f"{current_thread().name} was alloted the seat No. L{self.seat_available}")
                self.seat_available = self.seat_available - 1
            else:
                print("All the seats are booked now Sorry !")
# Demo driver: three rider threads race to book the two available seats.
obj_abc = abc(2)
myt1 = Thread(target=obj_abc.abc_reserveseat, args=(1,), name='Saurabh')
myt2 = Thread(target=obj_abc.abc_reserveseat, args=(1,), name='Nilesh')
myt3 = Thread(target=obj_abc.abc_reserveseat, args=(1,), name='Divya')
myt1.start()
myt2.start()
myt3.start()
# Wait for every reservation attempt to finish before the final message.
myt1.join()
myt2.join()
myt3.join()
print("Main Thread")
|
power_monitoring.py | import random
import threading
import time
from statistics import mean
from typing import Optional
from cereal import log
from common.params import Params, put_nonblocking
from common.realtime import sec_since_boot
from selfdrive.hardware import HARDWARE
from selfdrive.swaglog import cloudlog
from selfdrive.statsd import statlog
import os
CAR_VOLTAGE_LOW_PASS_K = 0.091 # LPF gain for 5s tau (dt/tau / (dt/tau + 1))
# A C2 uses about 1W while idling, and 30h seems like a good shutoff for most cars
# While driving, a battery charges completely in about 30-60 minutes
CAR_BATTERY_CAPACITY_uWh = 30e6
CAR_CHARGING_RATE_W = 45
VBATT_PAUSE_CHARGING = 11.0 # Lower limit on the LPF car battery voltage
VBATT_INSTANT_PAUSE_CHARGING = 7.0 # Lower limit on the instant car battery voltage measurements to avoid triggering on instant power loss
MAX_TIME_OFFROAD_S = 0.1*3600 # NOTE(review): 6 minutes offroad before shutdown — far shorter than the 30h mentioned above; confirm this fork value is intentional
MIN_ON_TIME_S = 3600 # minimum uptime (s) before the low-battery shutdown rule applies
class PowerMonitoring:
    """Tracks device power usage and models the car battery's remaining charge.

    While onroad (ignition on) the car battery is credited at a fixed charge
    rate; while offroad the device's draw is integrated — read directly when
    the hardware reports it, otherwise estimated via pulsed discharge
    measurements.  The integrated state drives the charging-pause and
    shutdown decisions below.
    """

    def __init__(self):
        self.params = Params()
        self.last_measurement_time = None  # Used for integration delta
        self.last_save_time = 0  # Used for saving current value in a param
        self.power_used_uWh = 0  # Integrated power usage in uWh since going into offroad
        self.next_pulsed_measurement_time = None
        self.car_voltage_mV = 12e3  # Low-passed version of peripheralState voltage
        self.car_voltage_instant_mV = 12e3  # Last value of peripheralState voltage
        self.integration_lock = threading.Lock()  # guards integration state shared with the pulse thread
        self.is_oneplus = os.path.isfile('/ONEPLUS')

        car_battery_capacity_uWh = self.params.get("CarBatteryCapacity")
        if car_battery_capacity_uWh is None:
            car_battery_capacity_uWh = 0

        # Reset capacity if it's low
        self.car_battery_capacity_uWh = max((CAR_BATTERY_CAPACITY_uWh / 10), int(car_battery_capacity_uWh))

    # Calculation tick
    def calculate(self, peripheralState, ignition):
        """Advance the power model by one tick from a peripheralState message."""
        try:
            now = sec_since_boot()

            # If peripheralState is None, we're probably not in a car, so we don't care
            if peripheralState is None or peripheralState.pandaType == log.PandaState.PandaType.unknown:
                with self.integration_lock:
                    self.last_measurement_time = None
                    self.next_pulsed_measurement_time = None
                    self.power_used_uWh = 0
                return

            # Low-pass battery voltage
            self.car_voltage_instant_mV = peripheralState.voltage
            self.car_voltage_mV = ((peripheralState.voltage * CAR_VOLTAGE_LOW_PASS_K) + (self.car_voltage_mV * (1 - CAR_VOLTAGE_LOW_PASS_K)))
            statlog.gauge("car_voltage", self.car_voltage_mV / 1e3)

            # Cap the car battery power and save it in a param every 10-ish seconds
            self.car_battery_capacity_uWh = max(self.car_battery_capacity_uWh, 0)
            self.car_battery_capacity_uWh = min(self.car_battery_capacity_uWh, CAR_BATTERY_CAPACITY_uWh)
            if now - self.last_save_time >= 10:
                put_nonblocking("CarBatteryCapacity", str(int(self.car_battery_capacity_uWh)))
                self.last_save_time = now

            # First measurement, set integration time
            with self.integration_lock:
                if self.last_measurement_time is None:
                    self.last_measurement_time = now
                    return

            if ignition:
                # If there is ignition, we integrate the charging rate of the car
                with self.integration_lock:
                    self.power_used_uWh = 0
                    integration_time_h = (now - self.last_measurement_time) / 3600
                    if integration_time_h < 0:
                        raise ValueError(f"Negative integration time: {integration_time_h}h")
                    self.car_battery_capacity_uWh += (CAR_CHARGING_RATE_W * 1e6 * integration_time_h)
                    self.last_measurement_time = now
            else:
                # No ignition, we integrate the offroad power used by the device
                is_uno = peripheralState.pandaType == log.PandaState.PandaType.uno
                # Get current power draw somehow
                current_power = HARDWARE.get_current_power_draw()  # pylint: disable=assignment-from-none
                if current_power is not None:
                    pass
                elif (self.next_pulsed_measurement_time is not None) and (self.next_pulsed_measurement_time <= now):
                    # TODO: Figure out why this is off by a factor of 3/4???
                    FUDGE_FACTOR = 1.33

                    # Turn off charging for about 10 sec in a thread that does not get killed on SIGINT, and perform measurement here to avoid blocking thermal
                    def perform_pulse_measurement(now):
                        try:
                            HARDWARE.set_battery_charging(False)
                            time.sleep(5)

                            # Measure for a few sec to get a good average
                            voltages = []
                            currents = []
                            for _ in range(6):
                                voltages.append(HARDWARE.get_battery_voltage())
                                currents.append(HARDWARE.get_battery_current())
                                time.sleep(1)
                            current_power = ((mean(voltages) / 1000000) * (mean(currents) / 1000000))

                            self._perform_integration(now, current_power * FUDGE_FACTOR)

                            # Enable charging again
                            HARDWARE.set_battery_charging(True)
                        except Exception:
                            cloudlog.exception("Pulsed power measurement failed")

                    # Start pulsed measurement and return
                    threading.Thread(target=perform_pulse_measurement, args=(now,)).start()
                    self.next_pulsed_measurement_time = None
                    return
                elif self.next_pulsed_measurement_time is None and not is_uno:
                    # On a charging EON with black panda, or drawing more than 400mA out of a white/grey one
                    # Only way to get the power draw is to turn off charging for a few sec and check what the discharging rate is
                    # We shouldn't do this very often, so make sure it has been some long-ish random time interval
                    self.next_pulsed_measurement_time = now + random.randint(120, 180)
                    return
                else:
                    # Do nothing
                    return

                # Do the integration
                self._perform_integration(now, current_power)
        except Exception:
            cloudlog.exception("Power monitoring calculation failed")

    def _perform_integration(self, t: float, current_power: float) -> None:
        """Integrate *current_power* over the time since the last measurement,
        charging power_used_uWh and draining the modelled car battery."""
        with self.integration_lock:
            try:
                if self.last_measurement_time:
                    integration_time_h = (t - self.last_measurement_time) / 3600
                    power_used = (current_power * 1000000) * integration_time_h
                    if power_used < 0:
                        raise ValueError(f"Negative power used! Integration time: {integration_time_h} h Current Power: {power_used} uWh")
                    self.power_used_uWh += power_used
                    self.car_battery_capacity_uWh -= power_used
                    self.last_measurement_time = t
            except Exception:
                cloudlog.exception("Integration failed")

    # Get the power usage
    def get_power_used(self) -> int:
        """Device power drawn since going offroad, in uWh."""
        return int(self.power_used_uWh)

    def get_car_battery_capacity(self) -> int:
        """Modelled remaining car battery energy, in uWh."""
        return int(self.car_battery_capacity_uWh)

    # See if we need to disable charging
    def should_disable_charging(self, ignition: bool, in_car: bool, offroad_timestamp: Optional[float]) -> bool:
        """True when device charging should pause to protect the car battery."""
        if offroad_timestamp is None:
            return False

        now = sec_since_boot()
        disable_charging = False
        disable_charging |= (now - offroad_timestamp) > MAX_TIME_OFFROAD_S
        disable_charging |= (self.car_voltage_mV < (VBATT_PAUSE_CHARGING * 1e3)) and (self.car_voltage_instant_mV > (VBATT_INSTANT_PAUSE_CHARGING * 1e3))
        disable_charging |= (self.car_battery_capacity_uWh <= 0)
        disable_charging &= not ignition
        disable_charging &= (not self.params.get_bool("DisablePowerDown"))
        disable_charging &= in_car
        disable_charging |= self.params.get_bool("ForcePowerDown")
        return disable_charging

    # See if we need to shutdown
    def should_shutdown(self, peripheralState, ignition, in_car, offroad_timestamp, started_seen):
        """True when the device should power off entirely (charging already
        disabled, or the device battery is nearly empty and discharging)."""
        if offroad_timestamp is None:
            return False

        now = sec_since_boot()
        panda_charging = (peripheralState.usbPowerMode != log.PeripheralState.UsbPowerMode.client)
        BATT_PERC_OFF = 3 if self.is_oneplus else 10

        should_shutdown = False
        # Wait until we have shut down charging before powering down
        should_shutdown |= (not panda_charging and self.should_disable_charging(ignition, in_car, offroad_timestamp))
        should_shutdown |= ((HARDWARE.get_battery_capacity() < BATT_PERC_OFF) and (not HARDWARE.get_battery_charging()) and ((now - offroad_timestamp) > 60))
        should_shutdown &= started_seen or (now > MIN_ON_TIME_S)
        return should_shutdown
|
__init__.py | """Support for functionality to download files."""
import logging
import os
import re
import threading
import requests
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.util import sanitize_filename
_LOGGER = logging.getLogger(__name__)

# Service-call data keys.
ATTR_FILENAME = 'filename'
ATTR_SUBDIR = 'subdir'
ATTR_URL = 'url'
ATTR_OVERWRITE = 'overwrite'

CONF_DOWNLOAD_DIR = 'download_dir'
DOMAIN = 'downloader'
# Fired on the bus as "<DOMAIN>_<event>" when a download fails/finishes.
DOWNLOAD_FAILED_EVENT = 'download_failed'
DOWNLOAD_COMPLETED_EVENT = 'download_completed'
SERVICE_DOWNLOAD_FILE = 'download_file'

# Schema for downloader.download_file service calls.
SERVICE_DOWNLOAD_FILE_SCHEMA = vol.Schema({
    vol.Required(ATTR_URL): cv.url,
    vol.Optional(ATTR_SUBDIR): cv.string,
    vol.Optional(ATTR_FILENAME): cv.string,
    vol.Optional(ATTR_OVERWRITE, default=False): cv.boolean,
})

# Component configuration: a single required download directory.
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Required(CONF_DOWNLOAD_DIR): cv.string,
    }),
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
    """Register the downloader.download_file service.

    Validates the configured download directory, then serves each call by
    streaming the requested URL to disk on a worker thread.  Returns True
    on success, False when the download directory does not exist.
    """
    download_path = config[DOMAIN][CONF_DOWNLOAD_DIR]

    # If path is relative, we assume relative to HASS config dir
    if not os.path.isabs(download_path):
        download_path = hass.config.path(download_path)

    if not os.path.isdir(download_path):
        _LOGGER.error(
            "Download path %s does not exist. File Downloader not active",
            download_path)
        return False

    def download_file(service):
        """Start thread to download file specified in the URL."""
        def do_download():
            """Download the file (runs on a worker thread)."""
            try:
                url = service.data[ATTR_URL]
                subdir = service.data.get(ATTR_SUBDIR)
                filename = service.data.get(ATTR_FILENAME)
                overwrite = service.data.get(ATTR_OVERWRITE)
                if subdir:
                    subdir = sanitize_filename(subdir)
                final_path = None
                req = requests.get(url, stream=True, timeout=10)
                if req.status_code != 200:
                    _LOGGER.warning(
                        "downloading '%s' failed, status_code=%d",
                        url,
                        req.status_code)
                else:
                    # Derive a filename: explicit > content-disposition > URL
                    # basename > fixed fallback.
                    if filename is None and \
                            'content-disposition' in req.headers:
                        match = re.findall(r"filename=(\S+)",
                                           req.headers['content-disposition'])
                        if match:
                            filename = match[0].strip("'\" ")
                    if not filename:
                        filename = os.path.basename(url).strip()
                    if not filename:
                        filename = 'ha_download'
                    # Remove stuff to ruin paths
                    filename = sanitize_filename(filename)
                    # Do we want to download to subdir, create if needed
                    if subdir:
                        subdir_path = os.path.join(download_path, subdir)
                        # Ensure subdir exist
                        if not os.path.isdir(subdir_path):
                            os.makedirs(subdir_path)
                        final_path = os.path.join(subdir_path, filename)
                    else:
                        final_path = os.path.join(download_path, filename)
                    path, ext = os.path.splitext(final_path)
                    # If file exist append a number.
                    # We test filename, filename_2..
                    if not overwrite:
                        tries = 1
                        final_path = path + ext
                        while os.path.isfile(final_path):
                            tries += 1
                            # Bug fix: ``ext`` from os.path.splitext already
                            # includes the leading dot, so the old
                            # "{}_{}.{}" format produced "file_2..jpg".
                            final_path = "{}_{}{}".format(path, tries, ext)
                    _LOGGER.debug("%s -> %s", url, final_path)
                    with open(final_path, 'wb') as fil:
                        for chunk in req.iter_content(1024):
                            fil.write(chunk)
                    _LOGGER.debug("Downloading of %s done", url)
                    hass.bus.fire(
                        "{}_{}".format(DOMAIN, DOWNLOAD_COMPLETED_EVENT), {
                            'url': url,
                            'filename': filename
                        })
            except requests.exceptions.ConnectionError:
                _LOGGER.exception("ConnectionError occurred for %s", url)
                hass.bus.fire(
                    "{}_{}".format(DOMAIN, DOWNLOAD_FAILED_EVENT), {
                        'url': url,
                        'filename': filename
                    })
                # Remove file if we started downloading but failed
                if final_path and os.path.isfile(final_path):
                    os.remove(final_path)

        threading.Thread(target=do_download).start()

    hass.services.register(DOMAIN, SERVICE_DOWNLOAD_FILE, download_file,
                           schema=SERVICE_DOWNLOAD_FILE_SCHEMA)
    return True
|
main.py | import kivy
from kivy.app import App
from kivy.uix.floatlayout import FloatLayout
from kivy.core.audio import SoundLoader
from kivy.clock import Clock
from functools import partial
import numpy as np
import cv2
import sys,os, cPickle
from threading import Thread
from levels import main_game,original_game
import datetime
import MUSE_server as mps
from kivy.config import Config
import keyboard
kivy.require('1.9.0')
def SelfReport(path):
    """Poll the keyboard for fatigue self-reports; dump per-round scores on quit.

    Pressing 'a' records the current round as fatigued.  When the global
    ``quit`` flag is set, the distinct reported rounds are expanded into a
    cumulative fatigue score per round and written to ``path + '.csv'``.
    """
    global server,round_set,user_id, round_id, quit
    fatigue_history = []
    fatigue = []  # 0-based round indices at which the user reported fatigue
    round_id = None
    # NOTE(review): busy-wait loop with no sleep pegs a CPU core; also assumes
    # 'a' is never pressed before the first round sets round_id (otherwise
    # round_id - 1 raises TypeError).  Confirm acceptable for this experiment.
    while (True): # making a loop
        if keyboard.is_pressed('a'): # if key 'a' is pressed
            fatigue.append(round_id-1)
        if quit:
            # Collapse repeated key presses, then expand the sorted report
            # rounds into one cumulative fatigue score per round.
            list_fatigue = sorted(list(set(fatigue)))
            fatigue_score = list(np.arange(len(list_fatigue)))
            for i in range(len(list_fatigue)):
                if i == 0:
                    fatigue_history += [fatigue_score[i]] * (list_fatigue[i])
                elif i < len(list_fatigue)-1:
                    fatigue_history += [fatigue_score[i]] * (list_fatigue[i]-list_fatigue[i-1])
                else:
                    fatigue_history += [fatigue_score[-1]]*(round_id-len(fatigue_history))
            if not list_fatigue:
                # No reports at all: every round scores 0.
                fatigue_history = [0]*round_id
            with open(path + '.csv', 'w') as f:
                for i in fatigue_history:
                    f.write((str(i)+'\n'))
                f.close()  # redundant inside ``with`` but harmless
            break
def readMuse(path):
    """Stream Muse EEG data, rotating the output file on each round change.

    Runs until the global ``quit`` flag is set, then stops the server.
    """
    global server,round_set,user_id, round_id, quit
    intro = open('test', 'w')  # NOTE(review): handle never closed; verify intent
    server = mps.initialize(intro)
    server.start()
    round_id = None
    prev_round = round_id
    while(True):
        if round_id != prev_round:
            # New round: point the server's output at a fresh per-round file
            # named "<round_set>_<round_id>".
            eeg_name = ('/').join((path,str(round_set) + "_" + str(round_id)))
            out = open(eeg_name, 'w')
            server.f = out
            prev_round = round_id
        if quit:
            server.stop()
            break
def readFrames(path):
    """Capture webcam frames, saving every 10th frame to disk per round.

    On quit, dumps all kept frames to a .npy archive and exits the process.
    """
    global round_set,user_id, round_id, quit,modality, store_data_path
    frameCounter=1
    cap = cv2.VideoCapture(0)
    frame_struct = []  # frames kept in memory, grouped by round_set
    while(True):
        ret, frame = cap.read()
        if frameCounter%10 == 0:
            #cv2.imshow('frame',frame)
            if round_set > len(frame_struct)-1:
                frame_struct.append([])
            frame_struct[round_set].append(frame)
            # Filename encodes round set, round id, frame index and wall time.
            fname = str(round_set)+"_"+str(round_id)+"_"+str(frameCounter)+"_"+str(datetime.datetime.time(datetime.datetime.now()))+".jpg"
            cv2.imwrite(os.path.join(path, fname), frame)
        frameCounter+=1
        if quit:
            filename = store_data_path+"/images/user_"+user_id+'_'+modality+"/data"
            np.save(filename,frame_struct)
            break
    # When everything done, release the capture
    cap.release()
    sys.exit()  # NOTE(review): hard-exits the whole process from this thread
    #cv2.destroyAllWindows()
# Initialize Variables
class WisconsinGame(FloatLayout):
    """Kivy widget implementing a Wisconsin card-sorting style game.

    Each trial shows a stimulus (color/shape/number) in the chosen modality;
    the player answers via buttons b1..b5 within a countdown.  Every
    ``level_change_ratio`` trials the hidden sorting rule and level change.
    Per-trial results accumulate in ``self.data`` and are written out on
    termination.
    """

    def __init__(self, **kwargs):
        super(WisconsinGame, self).__init__(**kwargs)
        global round_set,round_id,modality,game_type
        round_set = 0
        self.trial = 0
        round_id = self.trial
        # 'o' selects the original rule sequence, anything else the main one.
        if game_type =='o':
            self.levels_all = original_game()
        else:
            self.levels_all = main_game()
        #print "LEVELS:",self.levels_all
        self.countdown_time = 6 #seconds (1 -->7)
        self.level_change_ratio = 4 #trials to change the level
        self.total_trials = len(self.levels_all) * self.level_change_ratio#66 #total trials
        self.score = 0
        self.score_total = 0
        self.question_in_level = 0
        self.data = []  # one row per trial, written to CSV on termination
        self.choice = ""
        self.clock = 0
        self.correct = 0
        #self.errors = 0
        self.rule_change_round = 1
        self.persistent_errors = 0
        self.non_persistent_errors = 0
        self.error_persistant = 0
        global response_given
        response_given = False
        self.valid_response = False
        # Full stimulus vocabulary; per-level subsets go into self.commands.
        self.commands_all = {}
        self.commands_all['color'] = ["red","green","blue","yellow","magenta"]
        self.commands_all['number'] = ["one","two","three","four","five"]
        self.commands_all['shape'] = ["circle","triangle","cross","star","heart"]
        self.commands = {}
        self.ids['b1'].disabled = True
        self.ids['b2'].disabled = True
        self.ids['b3'].disabled = True
        self.ids['b4'].disabled = True
        self.ids['b5'].disabled = True
        self.level = 0 #gamelevel
        self.prev_level = None
        self.prev_ids = None
        self.buttons_disabled = []
        self.level_change()
        self.major_modality = modality#self.modalities[np.random.randint(3)]
        self.major_stimuli = np.random.permutation(self.commands.keys())[0]
        self.stimuli_type = ''
        self.next_round("")

    #Evaluate answer according to current major modality
    def check_result(self):
        """Score self.choice against the current stimulus; update error stats."""
        if self.stimuli_type in ['red','circle','one'] and self.choice == "b1":
            self.correct += 1
            self.valid_response = True
            self.error_persistant = 0
        elif self.stimuli_type in ['green','triangle','two'] and self.choice == "b2":
            self.correct += 1
            self.valid_response = True
            self.error_persistant = 0
        elif self.stimuli_type in ['blue','cross','three'] and self.choice == "b3":
            self.correct += 1
            self.valid_response = True
            self.error_persistant = 0
        elif self.stimuli_type in ['yellow','star','four'] and self.choice == "b4":
            self.correct += 1
            self.valid_response = True
            self.error_persistant = 0
        elif self.stimuli_type in ['magenta','heart','five'] and self.choice == "b5":
            self.correct += 1
            self.valid_response = True
            self.error_persistant = 0
        else:
            self.valid_response = False
            #self.errors+=1
            # Errors right after a rule change are "non-persistent";
            # later ones count as persistent (stuck on the old rule).
            if self.trial == self.rule_change_round +1 or self.trial == self.rule_change_round:
                self.non_persistent_errors +=1
                self.error_persistant = 1
            else:
                self.persistent_errors +=1
                self.error_persistant = 2
        if self.valid_response == True and self.question_in_level >2:
            #print "Clock",self.clock
            # Score rewards higher levels, fast answers and early questions.
            self.score = float(self.level+1)/float((self.clock+1)*(self.question_in_level-2))
            self.score_total += self.score
            #print "SCORE", self.score
            #print "SCORE TOTAL", self.score_total
        else:
            self.score = 0

    #Change object's opacity
    def chage_opacity(self,wid_id,_):
        """Clock callback: hide widget *wid_id*."""
        self.ids[wid_id].opacity = 0

    #Give audiovisual feedback
    def feedback(self):
        """Flash and play the correct/wrong cue for about one second."""
        if self.valid_response == True:
            self.ids['feedback'].source = "../AppData/correct.png"
            sound = SoundLoader.load('../AppData/'+'correct.wav')
            sound.play()
        else:
            self.ids['feedback'].source = "../AppData/wrong.png"
            sound = SoundLoader.load('../AppData/'+'wrong.wav')
            sound.play()
        self.ids['feedback'].opacity = 1
        Clock.schedule_once(partial(self.chage_opacity,'feedback'), 1)

    #Capture when a button is pressed
    def button_pressed(self):
        """Flag that the player answered within the countdown."""
        global response_given
        response_given = True

    #Start countdouwn
    def countdown(self,_):
        """Per-second Clock callback: time out the trial when no answer comes.

        Returning False cancels the scheduled interval.
        """
        global response_given
        if self.clock > self.countdown_time and response_given==False:
            self.ids['b1'].disabled = True
            self.ids['b2'].disabled = True
            self.ids['b3'].disabled = True
            self.ids['b4'].disabled = True
            self.ids['b5'].disabled = True
            self.ids['b5'].disabled = True# give error when predifined time has passed and terminate current timer
            self.valid_response = False
            self.persistent_errors += 1
            self.error_persistant = 2
            self.feedback()
            #self.data.append([self.trial, self.question_in_level,self.level+1, self.score, self.major_modality,self.major_stimuli, self.stimuli_type,self.valid_response, self.error_persistant ,self.clock+1, self.choice,self.perm[0],self.perm[1],self.perm[2],self.audio,self.text,self.visual,self.correct,self.non_persistent_errors, self.persistent_errors])
            self.data.append([self.trial, self.question_in_level,self.level+1, self.score,self.major_stimuli, self.stimuli_type, self.valid_response, self.error_persistant ,self.clock+1, self.correct,self.non_persistent_errors, self.persistent_errors])
            if self.trial == self.total_trials:
                #if not self.levels_all:
                Clock.schedule_once(self.log_and_terminate, 1.5)
            Clock.schedule_once(self.next_round, 1.5)
            return False
        if response_given==True: #terminate timer when answer is given in time
            return False
        self.clock += 1
        #print self.clock

    def level_change(self):
        """Pop the next level and restrict commands/buttons accordingly."""
        #print self.commands_all
        # CHANGE LEVELS RANDOMLY
        #self.level = 0
        #while self.level == 0: #disable level0
        #self.level = np.random.randint(5)
        # CHOOSE FROM PRE-SELCTED LEVEL SEQUENCES
        self.level = self.levels_all.pop(0)
        # Keep the same button permutation when the level repeats.
        if self.prev_level == self.level:
            ids = self.prev_ids
        else:
            ids = np.random.permutation(5)
        self.prev_ids = ids
        self.prev_level = self.level
        self.commands = dict(self.commands_all)
        # Level N keeps N+1 active stimuli/buttons; the rest are disabled.
        if self.level == 3:
            self.commands['color'] = [self.commands_all['color'][ids[0]],self.commands_all['color'][ids[1]],self.commands_all['color'][ids[2]],self.commands_all['color'][ids[3]]]
            self.commands['shape'] = [self.commands_all['shape'][ids[0]],self.commands_all['shape'][ids[1]],self.commands_all['shape'][ids[2]],self.commands_all['shape'][ids[3]]]
            self.commands['number'] = [self.commands_all['number'][ids[0]],self.commands_all['number'][ids[1]],self.commands_all['number'][ids[2]],self.commands_all['number'][ids[3]]]
            self.ids['b'+str(ids[4]+1)].disabled = True
            self.buttons_disabled = ['b'+str(ids[4]+1)]
        elif self.level == 2:
            self.commands['color'] = [self.commands_all['color'][ids[0]],self.commands_all['color'][ids[1]],self.commands_all['color'][ids[2]]]
            self.commands['shape'] = [self.commands_all['shape'][ids[0]],self.commands_all['shape'][ids[1]],self.commands_all['shape'][ids[2]]]
            self.commands['number'] = [self.commands_all['number'][ids[0]],self.commands_all['number'][ids[1]],self.commands_all['number'][ids[2]]]
            self.ids['b'+str(ids[3]+1)].disabled = True
            self.ids['b'+str(ids[4]+1)].disabled = True
            self.buttons_disabled = ['b'+str(ids[3]+1),'b'+str(ids[4]+1)]
        elif self.level == 1 :
            self.commands['color'] = [self.commands_all['color'][ids[0]],self.commands_all['color'][ids[1]]]
            self.commands['shape'] = [self.commands_all['shape'][ids[0]],self.commands_all['shape'][ids[1]]]
            self.commands['number'] = [self.commands_all['number'][ids[0]],self.commands_all['number'][ids[1]]]
            self.ids['b'+str(ids[2]+1)].disabled = True
            self.ids['b'+str(ids[3]+1)].disabled = True
            self.ids['b'+str(ids[4]+1)].disabled = True
            self.buttons_disabled = ['b'+str(ids[2]+1),'b'+str(ids[3]+1),'b'+str(ids[4]+1)]
        elif self.level == 0 :
            self.commands['color'] = [self.commands_all['color'][ids[0]]]
            self.commands['shape'] = [self.commands_all['shape'][ids[0]]]
            self.commands['number'] = [self.commands_all['number'][ids[0]]]
            self.ids['b'+str(ids[1]+1)].disabled = True
            self.ids['b'+str(ids[2]+1)].disabled = True
            self.ids['b'+str(ids[3]+1)].disabled = True
            self.ids['b'+str(ids[4]+1)].disabled = True
            self.buttons_disabled = ['b'+str(ids[1]+1),'b'+str(ids[2]+1),'b'+str(ids[3]+1),'b'+str(ids[4]+1)]
        #print
        #print "LEVEL: ",self.level+1
        #print self.commands
        #print

    #Draw next round
    def next_round(self,_):
        """Advance to the next trial: pick stimuli, reset timer, update UI."""
        global response_given, round_id, round_set
        response_given = False
        self.trial += 1
        round_id = self.trial #[1,#total_trials]
        self.question_in_level += 1 #[1,#trials_in_a_round]
        #Initialize countdown
        self.clock = 0
        Clock.schedule_interval(self.countdown, 1)
        # Change Stimuli
        if self.trial%self.level_change_ratio == 1 and self.trial > 1:# and self.valid_response == True:
            #print "CHANGE STIMULI"
            self.major_stimuli = np.random.permutation(self.commands.keys())[0]
            self.ids['b1'].disabled = False
            self.ids['b2'].disabled = False
            self.ids['b3'].disabled = False
            self.ids['b4'].disabled = False
            self.ids['b5'].disabled = False
            self.question_in_level = 1
            self.buttons_disabled = []
            #global round_set
            round_set += 1 #[1,#number_of_rounds]
            self.rule_change_round = self.trial
            self.level_change()
        # Re-apply the per-level button availability every trial.
        for i in ["b1","b2","b3","b4","b5"]:
            if i in self.buttons_disabled:
                self.ids[i].disabled = True
            else:
                self.ids[i].disabled = False
        #update stimulis by securing that each stimuli will represent a different button if possible
        mix_the_command = np.random.permutation(self.level+1)
        if len(mix_the_command) >= 3: # in levels 3 and 4 all stimulis are different
            self.color = self.commands['color'][mix_the_command[0]]
            self.shape = self.commands['shape'][mix_the_command[1]]
            self.number = self.commands['number'][mix_the_command[2]]
        elif len(mix_the_command) == 2: #in level two, two of the randomly chosen stimulis represent the same button
            rand = np.random.rand()
            if rand <= 0.33:
                self.color = self.commands['color'][mix_the_command[0]]
                self.shape = self.commands['shape'][mix_the_command[0]]
                self.number = self.commands['number'][mix_the_command[1]]
            elif rand <= 0.66:
                self.color = self.commands['color'][mix_the_command[1]]
                self.shape = self.commands['shape'][mix_the_command[0]]
                self.number = self.commands['number'][mix_the_command[0]]
            else:
                self.color = self.commands['color'][mix_the_command[0]]
                self.shape = self.commands['shape'][mix_the_command[1]]
                self.number = self.commands['number'][mix_the_command[0]]
        elif len(mix_the_command) == 1:#in level 1 all stimuli represent the same button
            self.color = self.commands['color'][mix_the_command[0]]
            self.shape = self.commands['shape'][mix_the_command[0]]
            self.number = self.commands['number'][mix_the_command[0]]
        # Present the instruction in the configured modality.
        instruction = ('_').join((self.shape,self.color,self.number))
        if self.major_modality == 'v':
            self.ids['instruction'].source ='../AppData/wisconsin_visual/'+instruction+'.jpg'
        elif self.major_modality == 't':
            self.ids['instruction'].source ='../AppData/wisconsin_textual/'+instruction+'.jpg'
        else :
            self.ids['instruction'].source ='../AppData/wisconsin_auditory/black.jpg'
            sound = SoundLoader.load('../AppData/wisconsin_auditory/'+instruction+'.wav')
            sound.play()
        # The answer key for this trial follows the hidden major stimulus.
        if self.major_stimuli =="number":
            self.stimuli_type = self.number
        elif self.major_stimuli == "shape":
            self.stimuli_type = self.shape
        else:
            self.stimuli_type = self.color
        #print self.stimuli_type, self.trial
        #print "Round:",self.trial
        #print self.major_modality
        #print self.major_stimuli
        #print self.stimuli_type

    #Terminate session function
    def log_and_terminate(self,_):
        """Write per-trial data and leaderboard files, then exit the process."""
        global user_id, email,quit,modality,store_data_path
        quit = True
        path_save = store_data_path+'user_performance/'
        if not os.path.exists(path_save):
            os.makedirs(path_save)
        path_leaderboard = store_data_path + "leaderbord.csv"
        leaderbord_pickle = store_data_path+"leaderbord"
        if not os.path.exists(path_save):
            os.makedirs(path_save)
        with open(path_save + 'user_'+ user_id+'_'+modality+'_'+str(self.score_total)+'.csv','w') as f:
            f.write("Round\tQuestion\tLevel\t Score\tStimuli\tStimuli Type\tResponse\tPersistence\tTime\tCorrect\tNON-PER Errors\tPER Errorsn\n")
            for sample in self.data:
                f.write((('\t').join([str(i) for i in sample])+'\n'))
            f.close  # NOTE(review): missing parentheses — never called (harmless inside 'with')
        # save cpickle file with correct,correct_red,correct_blue
        d={}
        if os.path.isfile(leaderbord_pickle):
            fo = open(leaderbord_pickle, "rb")
            d = cPickle.load(fo)
            fo.close()
        d[modality+'_'+user_id+'_'+email] = self.score_total
        fo = open(leaderbord_pickle, "wb")
        cPickle.dump(d, fo, protocol=cPickle.HIGHEST_PROTOCOL)
        fo.close()
        with open(path_leaderboard,'w') as f:
            f.write('ID\tMODALITY\tEMAIL\tSCORE\n')
            # Python 2 only: iteritems() and tuple-unpacking lambda parameters.
            for key, value in sorted(d.iteritems(), key=lambda (k,v): (v,k),reverse=True):
                f.write((('\t').join((key.split('_')[1],key.split('_')[0],key.split('_')[2],str(value)+"\n"))))
            f.close  # NOTE(review): missing parentheses — never called (harmless inside 'with')
        sys.exit()

    #App control function
    def on_control(self,choice):
        """Button handler: score the answer, give feedback, queue next round."""
        self.ids['b1'].disabled = True
        self.ids['b2'].disabled = True
        self.ids['b3'].disabled = True
        self.ids['b4'].disabled = True
        self.ids['b5'].disabled = True
        self.choice = choice
        self.check_result()
        self.feedback()
        self.data.append([self.trial, self.question_in_level,self.level+1, self.score,self.major_stimuli, self.stimuli_type, self.valid_response, self.error_persistant ,self.clock+1, self.correct,self.non_persistent_errors, self.persistent_errors])
        # terminate session when round limit has been reached
        if self.trial >= self.total_trials:
            #if not self.levels_all:
            Clock.schedule_once(self.log_and_terminate, 1.5)
        Clock.schedule_once(self.next_round, 1.5)
class WisconsinApp(App):
    """Thin Kivy App wrapper whose root widget is the Wisconsin game board."""

    def build(self):
        root_widget = WisconsinGame()
        return root_widget
def main(game,user,stimuli,data_path):
    """Launch the Wisconsin experiment UI plus its three recorder threads.

    game: 'o' for the original rule sequence, anything else for the main one.
    user: participant email.
    stimuli: instruction modality ('v' visual, 't' textual, other = auditory).
    data_path: root folder for images / EEG / self-report output.
    """
    global user_id, email, modality, round_set, round_id, quit, game_type,store_data_path
    # Config.set('graphics', 'width', str(4000))
    # Config.set('graphics', 'height', str(2000))
    Config.set('graphics', 'width', str(1000))
    Config.set('graphics', 'height', str(1000))
    # Parameter initialization
    store_data_path =data_path
    game_type = game
    round_set = 0
    quit = False
    # Create path to store images if not there
    path_im = store_data_path+ '/images/'
    if not os.path.exists(path_im):
        os.makedirs(path_im)
    path_im = os.path.abspath(path_im)
    path_eeg = store_data_path + '/eeg/'
    if not os.path.exists(path_eeg):
        os.makedirs(path_eeg)
    path_eeg = os.path.abspath(path_eeg)
    path_self = store_data_path + '/fatigue_self_report/'
    if not os.path.exists(path_self):
        os.makedirs(path_self)
    path_self = os.path.abspath(path_self)
    email = user # user email
    modality = stimuli # stimuli to use
    # Find the ID of the current User (count of existing same-modality dirs)
    dirs = [i for i in os.listdir(path_im) if os.path.isdir(path_im + '/' + i) and modality in i]
    user_id = str(len(dirs))
    # Create directory to store current User's images
    foldername = "/user_" + user_id + '_' + modality + "/"
    path_im = path_im + foldername
    if not os.path.exists(path_im):
        os.makedirs(path_im)
    path_eeg = path_eeg + foldername
    if not os.path.exists(path_eeg):
        os.makedirs(path_eeg)
    print "User: " + user_id, "ID: " + email
    # Run game and recording into threads
    thread1 = Thread(target=WisconsinApp().run)
    thread1.start()
    thread2 = Thread(target=readFrames, args=(path_im,))
    thread2.start()
    thread3 = Thread(target=readMuse, args=(path_eeg,))
    thread3.start()
    thread4 = Thread(target=SelfReport, args=(path_self + foldername[:-1],))
    thread4.start()
    thread1.join()
    thread2.join()
    thread3.join()
    thread4.join()
# Script entry point: main-variant game, visual stimuli, default data folder.
# NOTE(review): the trailing '|' below is a data-dump artifact, not valid Python.
if __name__ == '__main__':
    main('m','test_user','v','../../Wisconsin_Unimodal_Data/') |
webapp.py | """
WSGI Middleware for python webservers
"""
import types
import string
import weakref
import sys
import gc
import cgi
import os
from ConfigParser import ConfigParser
import urlparse
import re
import mimetypes
import time
import threading
import traceback
import datetime
from Cookie import SimpleCookie
import uuid
from rfc822 import formatdate
from BaseHTTPServer import BaseHTTPRequestHandler
import logging
from gzip import GzipFile
import StringIO
import websocket
import json
from twentyc.tools.config import dict_conf
# Toggle request-profiling instrumentation.
WSGI_PROFILING = False
# Numeric HTTP status -> (short, long) message tuples from the stdlib.
HTTPresponses = BaseHTTPRequestHandler.responses
# [mount_point, app] pairs in mount order, consulted during URL dispatch.
url_map = []
# app id -> app instance, filled by register_app().
app_map = {}
# Cache of live sessions — presumably keyed by session id; verify against users.
sessionCache = {}
# Module-level debug switch read by dbg_print().
__DBG = False
def urlarg_list(args, key):
    """Return the value for *key* in *args* normalised to a list.

    A missing key yields [None] (dict.get's default wrapped in a list).
    """
    v = args.get(key)
    # isinstance is the idiomatic type check and also accepts list subclasses.
    if not isinstance(v, list):
        v = [v]
    return v
def post_parser_json(data):
    """Decode a JSON POST body into Python objects."""
    parsed = json.loads(data)
    return parsed
# content-type -> callable that parses a raw POST body of that type.
POST_PARSER = {
    "application/json" : post_parser_json
}
def dbg_print(msg):
    """Emit *msg* to stdout when the module debug switch (__DBG) is on."""
    if not __DBG:
        return
    print(msg)
def verify_referer(request):
    """Reject the request (HTTP 403) unless its Referer starts with
    ``protocol://host/`` built from the request itself."""
    referer = (request.get("referer") or "").lower()
    host = request.get("host")
    protocol = request.get("protocol")
    expect_prefix = "%s://%s/" % (protocol.lower(), host.lower())
    if referer.startswith(expect_prefix):
        return
    log.debug("Referer Mismatch: EXPECTED REFERER: %s, PATH: %s, REFERER: %s, REMOTE_ADDR: %s" % (
        expect_prefix,
        request.get("path"),
        referer,
        request.get("remote_addr")
    ))
    raise HTTPError(403)
def error_handler(code, message, traceback, env, config):
    """Render a minimal HTML error response for *code*.

    Override this to display custom error pages for 404s and other codes;
    it should return response content such as HTML.
    """
    if code not in [401]:
        log.debug("\n%s" % traceback)
    requested_path = env.get("PATH_INFO", "")
    return "%s %s<br />%s<br /><pre>%s</pre>" % (str(code), message, requested_path, traceback)
def error_handler_json(code, message, traceback, env, config):
    """JSON variant of error_handler: returns {"meta": {...}} for API clients."""
    if code not in [401]:
        log.debug("\n%s" % traceback)
    # Bug fix: this module imports the stdlib ``json`` module directly, so the
    # encoder is json.dumps — json.json.dumps raises AttributeError.
    return json.dumps({"meta":{"error":message,"error_type":code}})
# content-type -> error handler used to render errors for that response type.
ERROR_HANDLERS = {
    "application/json" : error_handler_json
}
def set_header(request, name, value):
    """Set response header *name* to *value* on *request*.

    Replaces the first existing entry with that name, otherwise appends.
    Values are stringified.
    """
    headers = request.get("headers")
    for idx, (header_name, _header_value) in enumerate(headers):
        if header_name == name:
            headers[idx] = (name, str(value))
            return
    headers.append((name, str(value)))
def on_session_expire(self):
    """Hook bound onto a session object and run when it expires.

    Default implementation does nothing; overwrite if needed.
    """
    pass
def session_validate(self, now_s):
    """Cleanup-loop predicate for a session object: return False to delete it.

    Default implementation keeps every session alive; overwrite if needed.
    """
    return True
def format_path(path, request):
    """Hook to rewrite the requested *path*; the default returns it untouched.

    Overwrite if needed; should return the formatted path string.
    """
    return path
def register_app(app, id, mount_to):
    """Register *app* under unique *id* and mount it in the url map.

    Pass mount_to="" to make the app the root app at /.  Returns the
    registered app for chaining.
    """
    mount_entry = [mount_to, app]
    app_map[id] = app
    url_map.append(mount_entry)
    return app
def get_app_instance(id):
    """Look up the application registered under *id* (None when absent)."""
    return app_map.get(id)
def prepare_request(self, request, environ):
    """Placeholder hook run before request dispatch; intentionally a no-op."""
    return None
class own(object):
    """Decorator class tagging a callable with the id of its owning module.

    Raises when two different modules try to claim the same method.
    """

    def __init__(self, owner_id):
        self.owner_id = owner_id

    def __call__(self, fn):
        if hasattr(fn, "owner_id") and fn.owner_id != self.owner_id:
            # Bug fix: the message used ``self.owner-id`` (a subtraction of an
            # undefined attribute), raising AttributeError instead of the
            # intended conflict diagnostic.
            raise Exception("Two different modules are overriding the same method: %s, (%s, %s)" % (fn, fn.owner_id, self.owner_id))
        fn.owner_id = self.owner_id
        return fn
def json_response(fn):
    """Decorator for app methods: JSON-encode the return value and set a
    text/json content-type header on the request.

    Also calls get_session() so session bookkeeping stays consistent.
    """
    def _fn(self, *args, **kwargs):
        req = self.request_info(**kwargs)
        ses = self.get_session(req)  # kept for its session side effects
        headers = req.get("headers")
        headers.extend([("content-type", "text/json")])
        rv = fn(self, *args, **kwargs)
        t = time.time()
        # Bug fix: the module imports the stdlib ``json`` directly, so the
        # encoder is json.dumps — json.json.dumps raises AttributeError.
        rv = json.dumps(rv)
        t2 = time.time()
        print("Json Parse: %.2f" % (t2 - t))
        return rv
    return _fn
def expose(fn):
    """Decorator marking *fn* as reachable by the web server's URL dispatcher.

    Use in front of functions that you wish to expose.
    """
    if not fn:
        return fn
    fn.exposed = True
    return fn
def get_cookie(req, name, default=None, returnCookie=False):
    """Fetch cookie *name* from the request's inbound cookie jar.

    Returns the cookie's value, or the whole cookie object when
    *returnCookie* is true; *default* when the cookie is absent.
    """
    cookie = req.get("cookies_in", {}).get(name)
    if not cookie:
        return default
    if returnCookie:
        return cookie
    return cookie.value
def valid_wsgi_response(obj):
    """Coerce a handler's return value into something WSGI can iterate.

    Lists, generators and file objects pass through unchanged; unicode and
    anything else is stringified and wrapped in a one-element list.
    """
    t = type(obj)
    # NOTE: ``unicode`` and ``file`` are Python 2 builtins; this module
    # predates Python 3.
    if t == unicode:
        return [str(obj)]
    elif isinstance(obj, list):
        return obj
    elif isinstance(obj, types.GeneratorType):
        return obj
    elif isinstance(obj, file):
        return obj
    else:
        return [str(obj)]
def clear_kwargs(kwargs):
    """
    Remove the framework-internal "__request" and "__environ" keys from
    *kwargs* (if present) and return the dict.
    """
    # fixed: both deletions were in a single try with a bare except, so a
    # missing "__request" key left "__environ" in place
    kwargs.pop("__request", None)
    kwargs.pop("__environ", None)
    return kwargs
# module-level logger shared by every handler in this file
log = logging.getLogger("Vodka WSGI WebApp")
log.setLevel(logging.DEBUG)
def setup_logging(logformat, config):
    """
    Attach a handler to the module logger: syslog when [server] syslog is
    enabled in *config*, otherwise a local "error.log" file.
    """
    from twentyc.syslogfix import UTFFixedSysLogHandler
    if int(config.get("server",{}).get("syslog",0)):
        # syslog target: address and facility come from the [server] section
        syslog_address = config.get("server", {}).get("syslog_address", "/dev/log")
        syslog_facility = config.get("server", {}).get("syslog_facility", "LOG_LOCAL0")
        hdl = UTFFixedSysLogHandler(address=syslog_address, facility=getattr(logging.handlers.SysLogHandler, syslog_facility))
        hdl.setFormatter(logging.Formatter(logformat))
    else:
        # fall back to a plain log file in the working directory
        hdl = logging.FileHandler("error.log")
        hdl.setFormatter(logging.Formatter(logformat))
    log.addHandler(hdl)
# parsed config objects keyed by config file name (filled by get_application)
configs = {}
# one-shot command flags polled by the middleware on each request;
# set via command()
commandQueue = {
    "expireAllSessions" : False
}
def command(cmd, b = True):
    """
    Queue a wsgi command, such as expireAllSessions:

        command('expireAllSessions')

    Only keys already present in commandQueue are accepted; unknown
    commands are silently ignored.
    """
    # fixed: dict.has_key() is Python-2-only; `in` behaves identically on
    # both Python 2 and 3
    if cmd in commandQueue:
        commandQueue[cmd] = b
# extra callables to run when the server process shuts down
shutdown_handlers = []
def shutdown_handler():
    """Stop plugins, run registered shutdown hooks and stop websocket handlers."""
    log.debug("VODKA SHUTTING DOWN")
    stop_plugins()
    for func in shutdown_handlers:
        func()
    for handler in websocket.handlers:
        print "Stopping websocket handler %s:%s" % (handler.host, handler.port)
        handler.stop()
def redirect(environ, location):
    """
    Turn the current request into a 302 redirect to *location* and mark
    it done so later middleware skips its work.

    *environ* is the WSGI environ carrying the request dict; returns the
    (empty) response body.
    """
    req = environ["request"]
    req["headers"].append(("Location", str(location)))
    req["status"] = 302
    req["done"] = True
    return ""
# plugins registered to start/stop with the server process
plugins = []
def add_plugin(Plugin, start=False):
    """
    Register *Plugin* to be started and stopped with the server process,
    optionally starting it right away. Returns the plugin.
    """
    plugins.append(Plugin)
    if not start:
        return Plugin
    Plugin.start()
    Plugin._started = True
    return Plugin
def start_plugins(config):
    """
    Start every registered plugin that is not yet running, handing each
    one the server *config* first. Called automatically when the server
    process starts; you should not need to call it yourself.
    """
    for plg in plugins:
        if plg._started:
            continue
        plg.config = config
        plg.start()
        plg._started = True
def stop_plugins():
    """
    Stop every registered plugin that is currently running. Called
    automatically when the server process is stopped; you should not
    need to call it yourself.
    """
    for plg in plugins:
        if not plg._started:
            continue
        plg.stop()
        plg._started = False
#####################################################################################
# This is the Base Web App, all your web applications should extend this
#####################################################################################
class BaseApp:
    """
    Base Web Application. All your web applications should extend this
    class.
    """
    # these are the standard headers sent with every request
    headers = [
        ("Pragma", "no-cache"),
        ("Cache-Control", "no-cache")
    ]
    def handle_request(self, request, environ):
        """
        Called at the beginning of each request; override as needed.
        """
        pass
    def cleanup_request(self, request, environ):
        """
        Called at the end of each request; override as needed.
        """
        pass
    def request_info(self, **kwargs):
        # the dispatcher passes the request dict as the "__request" kwarg
        return kwargs.get("__request")
    def dispatch(self, environ, request, path, query):
        """
        Dispatch a request to an exposed function of the web application
        or one of its members.

        *path* is the remaining URL split into segments, *query* the
        parsed query/post data. Raises HTTPError(404) for unknown or
        unexposed targets.
        """
        self.handle_request(request, environ)
        # an empty path means the index page
        if not path or not path[0]:
            path = ['index']
        if hasattr(self, path[0]):
            fnc = getattr(self, path[0])
            # only callables explicitly marked with @expose may be served
            if not fnc or not hasattr(fnc, 'exposed') or not fnc.exposed:
                dbg_print("%s not exposed" % str(fnc))
                raise HTTPError(404)
            path.pop(0)
            if hasattr(fnc, 'dispatch'):
                # nested application object: delegate the rest of the path
                environ["dispatch_to"] = fnc
                setattr(fnc, 'cleanup_request', self.cleanup_request)
                return fnc.dispatch(environ, request, path, query)
            else:
                # send default headers
                request.get("headers", []).extend(self.headers)
                return fnc(__environ=environ, __request=request, *path, **query)
        else:
            raise HTTPError(404)
####################################################################################
####################################################################################
####################################################################################
class HTTPError(Exception):
    """
    Raised to abort the current request with an HTTP error status;
    construct it with the numeric status code.
    """
    def __init__(self, value):
        self.value = value
    def __str__(self):
        return "%r" % (self.value,)
    def __int__(self):
        return int(self.value)
class HTTPRedirect(Exception):
    """
    Raised to answer the current request with a redirect; the exception
    argument is the target location.
    """
    def __init__(self, value):
        self.value = value
    def __str__(self):
        return str(self.value)
class HTTPCreated(Exception):
    """
    Raised to answer with "201 Created"; *location* becomes the Location
    header and *content* the response body.
    """
    def __init__(self, location, content=""):
        self.value = location
        self.content = content
    def __str__(self):
        return str(self.value)
class BaseHandler(object):
    """
    Common base for the WSGI middleware classes in this module; provides
    the per-request profiling helpers.
    """
    # whether profiling is enabled (mirrors the module flag WSGI_PROFILING)
    profile = False
    # parsed server config, set by the subclasses
    config = None
    def set_profiling(self):
        # NOTE(review): WSGI_PROFILING is a module-level flag defined
        # elsewhere in this file
        self.profile = WSGI_PROFILING
    def start_profile(self, force=False):
        """
        If self.profile is true, return a timestamp float of current time,
        else return 0.
        """
        if self.profile or force:
            return time.time()
        else:
            return 0
    def end_profile(self, handlerName, environ, t, force=False):
        """
        If self.profile is true, store the time elapsed since *t* for this
        handler in environ["request"]["profile"] and return it, else 0.
        """
        if self.profile or force:
            d = time.time()-t
            # clamp tiny readings to zero for readability
            if d < 0.0001:
                d = 0.0000
            environ["request"]["profile"][handlerName] = d
            return d
        return 0
    def save_profile(self, environ):
        """
        Merge this request's timing data into the module-level `profile`
        aggregate: per-path totals plus a list of recent requests.
        """
        if self.profile:
            overview = profile.get("overview")
            r = environ.get("request", {})
            P = r.get("profile", {})
            P["total"] = time.time() - r.get("profile_start")
            path = r.get("path")
            query = r.get("query_string")
            if not overview.has_key(path):
                # first request seen for this path
                overview[path] = {
                    "num" : 1,
                    "time" : P
                }
            else:
                # accumulate handler timings for this path
                overview = overview.get(path)
                overview["num"] += 1
                for handler, t in P.items():
                    if overview["time"].has_key(handler):
                        overview["time"][handler] += t
                    else:
                        overview["time"][handler] = t
            # keep only the 15 most recent request profiles
            recent = profile.get("recent")
            recent.insert(0, {
                "path" : path,
                "query" : query,
                "time" : P
            })
            if len(recent) > 15:
                recent = recent[0:15]
            profile["recent"] = recent
####################################################################################
# Initial Request Handler
####################################################################################
class RequestHandler(BaseHandler):
    """
    WSGI Middleware to handle the incoming request and set up the request
    object under environ['request']. Innermost handler of the chain.
    """
    def __init__(self, configName):
        self.application = None
        self.config = {}
        # load config
        dbg_print("Loading config: %s" % configName)
        self.config = dict_conf(configName)
        # setup url map from config
        url_map.extend(self.config.get("path",{}).items())
        dbg_print(str(url_map))
    def __call__(self, environ, start_response):
        #profiling time spent in this handler
        self.set_profiling()
        t = self.start_profile()
        #set various environment variables that could be useful
        environ["request"] = {
            "status" : 200,
            "now" : datetime.datetime.now(),
            "cookies_in" : SimpleCookie(environ.get('HTTP_COOKIE')),
            "cookies_out" : {},
            "host" : environ.get("HTTP_HOST", environ.get("host")),
            "user_agent" : environ.get("HTTP_USER_AGENT", ""),
            "protocol" : environ.get("wsgi.url_scheme"),
            "referer" : environ.get("HTTP_REFERER"),
            "uploads" : {},
            "headers" : []
        }
        if self.profile:
            environ["request"]["profile"] = {}
            environ["request"]["profile_start"] = t
        self.end_profile("request-handler", environ, t)
        # the response body is produced by the outer dispatch middleware
        return ""
####################################################################################
# Dispatch Handler
####################################################################################
class UploadSizeException(Exception):
    """
    Raised when an uploaded file is bigger than the upload_max_size limit
    specified in the [server] section of the config.
    """
    pass
class DispatchHandler(BaseHandler):
    """
    WSGI Middleware that dispatches the request either to the file system
    (static files) or to a registered web application, after parsing the
    query string and any POST/PUT body (form data, uploads, or custom
    POST_PARSER content types).
    """
    def __init__(self, application):
        self.application = application
        self.config = application.config
        self.set_profiling()
    def __call__(self, environ, start_response):
        self.application(environ, start_response)
        t_p = self.start_profile()
        path = environ.get("PATH_INFO", "")
        query = environ.get("QUERY_STRING", "")
        query = urlparse.parse_qs(query, keep_blank_values = True)
        GET_DATA = {}
        POST_DATA = {}
        #prepare query dict: collapse single-item value lists to scalars
        for key, items in query.items():
            if len(items) < 2:
                query[key] = items[0]
                GET_DATA[key] = items[0]
        # handle post requests accordingly
        if environ.get("REQUEST_METHOD", "GET") in ["POST","PUT"]:
            # NOTE(review): `input` shadows the builtin of the same name
            input = environ.get("wsgi.input")
            try:
                input_r = input.read(int(environ.get("CONTENT_LENGTH",0)))
            except Exception, inst:
                log.error(traceback.format_exc())
                raise HTTPError(400)
            ctype = environ.get("CONTENT_TYPE","").lower()
            # reject bodies whose content type we cannot parse
            if ctype.find('application/x-www-form-urlencoded') != 0 and ctype and not POST_PARSER.get(ctype):
                raise HTTPError(400)
            if POST_PARSER.get(ctype):
                # custom parser registered for this content type
                try:
                    POST_DATA = POST_PARSER[ctype](input_r)
                except:
                    log.error(traceback.format_exc())
                    raise HTTPError(400)
                query.update(POST_DATA)
            else:
                # parse as a regular form submission via cgi.FieldStorage
                post_query = {}
                post_env = environ.copy()
                post_env['QUERY_STRING'] = ''
                try:
                    form = cgi.FieldStorage(
                        fp=StringIO.StringIO(input_r),
                        environ=post_env,
                        keep_blank_values=True
                    )
                except Exception, inst:
                    log.error(traceback.format_exc())
                    raise HTTPError(400)
                for field in form.keys():
                    if type(form[field]) == list:
                        # repeated field: collect every value
                        post_query[field] = []
                        for fld in form[field]:
                            post_query[field].append(fld.value)
                    else:
                        # file uploads are stashed in request["uploads"]
                        if hasattr(form[field], 'filename'):
                            environ["request"]["uploads"][field] = form[field]
                        post_query[field] = form[field].value
                query.update(post_query)
                POST_DATA = post_query
                dbg_print("post_data: %s - %s" % (str(query), str(post_query)))
        conf = self.config.get("server", {})
        # which environ key holds the client address (proxy setups differ)
        addr_loc = conf.get("remote_addr_var", "REMOTE_ADDR")
        environ["request"].update({
            "method" : environ.get("REQUEST_METHOD"),
            "path" : path,
            "host" : environ.get("HTTP_HOST", ""),
            "user_agent" : environ.get("HTTP_USER_AGENT", ""),
            "remote_addr" : environ.get(addr_loc, ""),
            "query_string" : environ.get("QUERY_STRING", ""),
            "query" : query,
            "get_data" : GET_DATA,
            "post_data" : POST_DATA
        })
        #handle file uploads: enforce the configured size limit
        contentType = environ.get("CONTENT_TYPE","")
        if(contentType and contentType.find("multipart/form-data") > -1):
            s = int(environ.get("CONTENT_LENGTH"))
            ms = int(conf.get("upload_max_size", 1000000))
            if ms < s:
                raise UploadSizeException("Uploaded file too big, maximum size %d allowed" % ms)
        #prepend request specific urlmap if it exists (stored in the session)
        ext_url_map = environ.get("request").get("session").data.get("url_map")
        if ext_url_map:
            umap = list(ext_url_map)
            umap.extend(url_map)
        else:
            umap = url_map
        #dispatch request to either application or static path
        o_path = path
        for map in umap:
            dbg_print("%s : %s" % (map[0], path))
            if re.match(map[0], path):
                if type(map[1]) == str:
                    # static mapping: rewrite path onto the filesystem root
                    path = path.replace(map[0], map[1])
                    path = os.path.join(self.config.get("server",{}).get("root", ""), path)
                    #allow for app specific path formatting
                    path = format_path(path, environ["request"])
                    dbg_print("dispatching to path: %s" % path)
                    if not os.path.exists(path):
                        # optional third entry in the mapping is a fallback target
                        if len(map) > 2:
                            path = o_path.replace(map[0], map[2])
                            if not os.path.exists(path):
                                raise HTTPError(404)
                        else:
                            raise HTTPError(404)
                    dbg_print("path exists")
                    environ["dispatch_to"] = path
                    mimetype = mimetypes.guess_type(path)[0]
                    if not mimetype:
                        mimetype = "text/html"
                    environ["request"]["headers"].append(("Content-type", mimetype))
                    file = open(path, "r")
                    c = file.read()
                    file.close()
                    self.end_profile("dispatch-handler", environ, t_p)
                    return c
                else:
                    # application mapping: strip the mount prefix and delegate
                    environ["request"]["headers"].append(("Content-type", "text/html"))
                    path = path.split("/")
                    path.pop(0)
                    # NOTE(review): `is not ""` is an identity test, not
                    # equality — works by CPython interning only; confirm
                    if map[0] is not "":
                        e = len(map[0].split("/"))
                        while e > 1:
                            path.pop(0)
                            e = e-1
                    environ["dispatch_to"] = map[1]
                    if self.profile:
                        self.end_profile("dispatch-handler-prep", environ, t_p)
                        t_p = self.start_profile()
                        rv = map[1].dispatch(environ, environ.get("request"), path, query)
                        self.end_profile("dispatch-handler", environ, t_p)
                        return rv
                    else:
                        return map[1].dispatch(environ, environ.get("request"), path, query)
        raise HTTPError(404)
####################################################################################
# This is the Error Handler
####################################################################################
class ErrorHandler(BaseHandler):
    """
    WSGI middleware that converts exceptions raised by the inner handlers
    (HTTPError, HTTPRedirect, HTTPCreated, anything else becomes 500)
    into proper HTTP responses.
    """
    def __init__(self, application):
        self.application = application
        self.config = application.config
    def __call__(self, environ, start_response):
        # the Accept header selects a content-type specific error handler
        accept = environ.get("HTTP_ACCEPT")
        try:
            return self.application(environ, start_response)
        except HTTPError, inst:
            errorString = HTTPresponses.get(int(inst), ())[0]
            environ["request"].update({
                "status" : int(inst),
                "headers" : [],
                "done" : True
            })
            hdl = ERROR_HANDLERS.get(accept, error_handler)
            return hdl(int(inst), errorString, traceback.format_exc(), environ, self.config.get("error", {}))
        except HTTPRedirect, location:
            environ["request"]["headers"].append(
                ("Location", str(location))
            )
            environ["request"]["status"] = 302
            environ["request"]["done"] = True
            return ""
        except HTTPCreated, location:
            environ["request"]["headers"].append(
                ("Location", str(location))
            )
            environ["request"]["status"] = 201
            environ["request"]["done"] = True
            return location.content
        except Exception, inst:
            # any unexpected exception becomes a 500
            errorString = HTTPresponses.get(500, ())[0]
            environ["request"].update({
                "status" : 500,
                "headers" : [],
                "done" : True
            })
            try:
                hdl = ERROR_HANDLERS.get(accept, error_handler)
                return hdl(500, errorString, traceback.format_exc(), environ, self.config.get("error", {}))
            except HTTPRedirect, location:
                # the error handler itself may redirect (e.g. to a login page)
                return redirect(environ, str(location))
####################################################################################
# This is the HTTP Cache Header handler
####################################################################################
class HTTPCacheHandler(BaseHandler):
    """
    WSGI middleware that adds HTTP cache headers for static files and
    answers 304 when the client's cached copy is still current.
    """
    def __init__(self, application):
        self.application = application
        self.config = application.config
        self.set_profiling()
    def __call__(self, environ, start_response):
        rv = self.application(environ, start_response)
        t_p = self.start_profile()
        request = environ.get("request", {})
        headers = request.get("headers", [])
        # if request is done skip execution of this module
        if request.get("done", False):
            return rv
        # see where the request was dispatched to, if value is a string it
        # was dispatched to a static file, if target is an object it
        # was dispatched to the application, application caching applies
        if environ.has_key("dispatch_to"):
            dispatch_to = environ.get("dispatch_to")
            if type(dispatch_to) == str:
                # set cache expiry by config (default value 3600) if
                # no cache config is set up for extension
                m = re.search("\.([^\.]+)$", environ.get("PATH_INFO", ""))
                if m:
                    extension = m.group(0)
                else:
                    extension = "html"
                dbg_print("Sending Cache headers for static file (%s)" % extension)
                cacheConfig = self.config.get("cache", {})
                if cacheConfig.has_key(extension):
                    maxAge = int(cacheConfig.get(extension))
                else:
                    maxAge = int(cacheConfig.get(".*", 3600))
                mtime = formatdate(os.path.getmtime(dispatch_to))
                cacheHeaders = [
                    ("Pragma", "cache"),
                    ("Cache-Control", "max-age=%d, must-revalidate" % maxAge)
                ]
                #check if file has been modified and send cache response
                #if possible
                if environ.get('HTTP_IF_MODIFIED_SINCE') == mtime:
                    headers.extend(cacheHeaders)
                    request["status"] = 304
                    self.end_profile("cache-handler", environ, t_p)
                    return ""
                # send last modified time to browser
                headers.append(("Last-Modified", mtime))
        self.end_profile("cache-handler", environ, t_p)
        return rv
####################################################################################
# Session Handler
####################################################################################
class SessionObject():
    """
    Per-client session stored at environ['request']['session'].

    .data is the dict holding the session payload, .expires the expiry
    timestamp, and .forceExpire may be set to True to expire the session
    the next time it is requested.
    """
    def __init__(self, id, expires):
        dbg_print("New session with id %s to expire on %s" % (id, str(expires)))
        self.id = id
        self.expires = expires
        self.data = {}
        self.forceExpire = False
        # hook invoked by the session manager when this session expires
        self.on_expire = on_session_expire
class SessionManager(object):
    """
    Owns the module-level session cache: creates ids, loads sessions and
    expires/deletes stale ones.

    NOTE(review): cleanup() reads self.profile, which is defined on
    BaseHandler — this class appears to be used only mixed into
    SessionHandler; confirm.
    """
    def __init__(self):
        self.cache = sessionCache
    def cleanup(self):
        """Delete sessions that expired, were force-expired or fail validation."""
        dbg_print("cleaning up sessions")
        now = datetime.datetime.now()
        now_s = time.mktime(now.timetuple())
        cleanup = []
        if self.profile:
            profile["sessions"] = len(self.cache.keys())
        for sid, session in self.cache.items():
            if session.expires <= now or session.forceExpire:
                cleanup.append(sid)
            elif not session_validate(session, now_s):
                cleanup.append(sid)
        for sid in cleanup:
            self.del_session(sid)
    def del_session(self, sid):
        """Run the session's on_expire hook, then drop it from the cache."""
        dbg_print("deleting session "+str(sid))
        session = self.load_session(sid)
        if session:
            session.on_expire(session)
        if self.cache.has_key(sid):
            del self.cache[sid]
    def del_all(self):
        """Delete every cached session."""
        dbg_print("deleting all sessions")
        for sid, session in self.cache.items():
            self.del_session(sid)
    def load_session(self, sid):
        """Return a weak proxy to the cached session, or None if unknown."""
        if self.cache.has_key(sid):
            return weakref.proxy(self.cache.get(sid))
        else:
            return None
    def generate_id(self):
        """Return a uuid4 string not yet in use as a session id."""
        id = str(uuid.uuid4())
        while self.cache.has_key(id):
            id = str(uuid.uuid4())
        return id
class SessionHandler(BaseHandler, SessionManager):
    """
    WSGI Middleware for session handling: loads the session referenced by
    the SID cookie, creates a new one when needed, and probes whether the
    client supports cookies at all before storing anything.
    """
    # default cache; replaced by the shared module-level sessionCache in __init__
    cache = {}
    def __init__(self, application):
        global sessionCache
        self.application = application
        self.config = application.config
        self.sesconf = self.config.get("session", {})
        self.cache = sessionCache
    def __call__(self, environ, start_response):
        self.set_profiling()
        rv = self.application(environ, start_response)
        t_p = self.start_profile()
        request = environ.get("request", {})
        headers = request.get("headers", [])
        sesconf = self.sesconf
        # if expireAllSessions command is set, expire all sessions
        if commandQueue.get("expireAllSessions", False):
            self.del_all()
            command("expireAllSessions", False)
        self.end_profile("ses1", environ, t_p)
        # if request is done skip execution of this module
        if request.get("done", False):
            self.end_profile("session-handler", environ, t_p)
            return rv
        # sliding expiry: each request pushes the deadline forward
        expires = request["now"] + datetime.timedelta(seconds=int(sesconf.get("timeout", 60*60*24)))
        # load cookie
        cookie = SimpleCookie(environ.get('HTTP_COOKIE'))
        cookie_name = sesconf.get("cookie_name", "SID")
        cookie_path = sesconf.get("cookie_path", "/")
        cookie_secure = sesconf.get("cookie_secure", "yes")
        # any cookie at all proves the client accepts cookies
        cookie_support = len(cookie.keys())
        data = cookie.get(cookie_name, None)
        self.end_profile("ses2", environ, t_p)
        if data:
            cookie_support = "ok"
            # session id found in cookie, attempt to load session
            request["session"] = self.load_session(data.value)
            if request["session"]:
                # refresh expiry on both the session and the cookie
                request["session"].expires = expires
                cookie[cookie_name]["expires"] = expires.ctime()
                cookie[cookie_name]['path'] = cookie_path
                if(cookie_secure == "yes"):
                    cookie[cookie_name]['secure'] = True
        if (not data or not request["session"]) and cookie_support:
            sid = self.generate_id()
            # session id not found in cookie, create new session and send cookie data
            self.cache[sid] = SessionObject(
                id = sid, expires = expires
            )
            if self.profile:
                profile["sessions"] += 1
            request["session"] = self.load_session(sid)
            request["created_session"] = sid
            cookie[cookie_name] = sid
            cookie[cookie_name]["expires"] = expires.ctime()
            cookie[cookie_name]['path'] = cookie_path
            if(cookie_secure == "yes"):
                cookie[cookie_name]['secure'] = True
        elif not cookie_support:
            # cookie support not established yet: hand out a transient
            # session and a probe cookie, without caching the session
            request["session"] = SessionObject(id=self.generate_id(), expires=expires)
            cookie["cookie_ok"] = "ok"
            cookie["cookie_ok"]["path"] = "/"
            if(cookie_secure == "yes"):
                cookie["cookie_ok"]['secure'] = True
            request["cookies_out"]["cookie_ok"] = cookie
        self.end_profile("ses3", environ, t_p)
        request["cookies_out"][cookie_name] = cookie
        self.end_profile("session-handler", environ, t_p)
        # give every registered app a chance to prepare the request
        for app_id, app in app_map.items():
            if hasattr(app, "prepare_request"):
                app.prepare_request(request, environ)
        return rv
####################################################################################
# Response Handler, sends response
####################################################################################
class ResponseHandler(BaseHandler):
    """
    Outermost WSGI middleware: serializes outgoing cookies to Set-Cookie
    headers, sends the status line and headers via start_response, runs
    the dispatched app's cleanup hook and returns a valid WSGI body.
    """
    def __init__(self, application):
        self.application = application
        self.config = application.config
        # cookie defaults come from the [session] config section
        self.cookie_path = self.config.get("session",{}).get("cookie_path", "/")
        self.cookie_secure = self.config.get("session",{}).get("cookie_secure", True)
    def __call__(self, environ, start_response):
        self.set_profiling()
        t_total = self.start_profile()
        rv = self.application(environ, start_response)
        t_p = self.start_profile()
        request = environ.get("request", {})
        headers = request.get("headers")
        cookies = request.get("cookies_out")
        # set cookie headers
        if cookies:
            for key, value in cookies.items():
                if type(value) != SimpleCookie:
                    # plain value: wrap it in a SimpleCookie with defaults
                    cookie = SimpleCookie()
                    cookie[key] = value
                    cookie[key]["path"] = self.cookie_path
                    if self.cookie_secure == "yes":
                        cookie[key]["secure"] = True
                else:
                    cookie = value
                # fixed: has_key() is Python-2-only; `in` is equivalent
                if key in cookie:
                    headers.append(('Set-Cookie', cookie[key].OutputString()))
        if request.get("created_session"):
            log.debug(
                "NEW SESSION CREATED %s @ %s - %s... on path %s, HTTP_COOKIE: %s" % (
                    request.get("user_agent"),
                    request.get("remote_addr"),
                    request.get("created_session")[:8],
                    request.get("path"),
                    environ.get("HTTP_COOKIE")
                )
            )
        # send status and headers
        status = int(request.get("status", 200))
        status = "%s %s" % (str(status), HTTPresponses.get(status, ('OK'))[0])
        headers = request.get("headers", [])
        # NOTE(review): deliberately disabled branch kept for reference —
        # Content-Length was only meant to be sent when gzip is enabled
        if False and self.config.get("server", {}).get("gzip","no") == "yes":
            headers.append(("Content-Length", str(len(rv))))
        dbg_print("Sending %s : %s" % (request.get("status"), str(headers)))
        t_d = self.end_profile("total-response-time", environ, t_total)
        if t_d:
            headers.append(("Server-Overhead-Time", str(t_d)))
        headers.append(("Server-Time", "%.5f" % time.time()))
        start_response(status, headers)
        # if dispatch went to an app, call its cleanup_request hook
        if hasattr(environ.get("dispatch_to"), "cleanup_request"):
            environ["dispatch_to"].cleanup_request(request, environ)
        self.end_profile("response-handler", environ, t_p)
        self.save_profile(environ)
        # fixed: removed the unreachable duplicate of the three lines above
        # that followed this return in the original
        return valid_wsgi_response(rv)
# send content
####################################################################################
# GzipHandler
####################################################################################
class GzipHandler(BaseHandler):
    """
    WSGI Middleware for GZIP compression: compresses the response body
    when the config enables gzip and the client advertises support for it
    in Accept-Encoding.
    """
    def __init__(self, application, compresslevel=6):
        self.application = application
        self.config = application.config
        self.compresslevel = compresslevel
    def __call__(self, environ, start_response):
        self.set_profiling()
        accept_encoding_header = environ.get("HTTP_ACCEPT_ENCODING", "")
        # pass through untouched when gzip is disabled or unsupported
        if self.config.get("server",{}).get("gzip", "no") != "yes":
            return self.application(environ, start_response)
        if(not self.client_wants_gzip(accept_encoding_header)):
            return self.application(environ, start_response)
        data = "".join(self.application(environ, start_response))
        t_p = self.start_profile()
        req = environ.get("request")
        headers = req.get("headers")
        headers.append(("Content-Encoding", "gzip"))
        headers.append(("Vary", "Accept-Encoding"))
        if self.profile:
            rv = self.gzip_string(data, self.compresslevel)
            self.end_profile("gzip-handler", environ, t_p)
            return rv
        else:
            return self.gzip_string(data, self.compresslevel)
    ##############################################################################
    def gzip_string(self, string, compression_level):
        """Gzip-compress *string* in memory and return the compressed bytes."""
        fake_file = StringIO.StringIO()
        gz_file = GzipFile(None, 'wb', compression_level, fileobj=fake_file)
        gz_file.write(string)
        gz_file.close()
        return fake_file.getvalue()
    ##############################################################################
    def parse_encoding_header(self, header):
        """
        Parse an Accept-Encoding header into {encoding: qvalue}.
        'identity' defaults to 1.0 unless the header overrides it.
        """
        encodings = {'identity':1.0}
        for encoding in header.split(","):
            if(encoding.find(";") > -1):
                # entry carries an explicit quality value, e.g. "gzip;q=0.5"
                encoding, qvalue = encoding.split(";")
                encoding = encoding.strip()
                qvalue = qvalue.split('=', 1)[1]
                if(qvalue != ""):
                    encodings[encoding] = float(qvalue)
                else:
                    encodings[encoding] = 1
            else:
                encodings[encoding] = 1
        return encodings
    ##############################################################################
    def client_wants_gzip(self, accept_encoding_header):
        """Return True when the client prefers gzip (or '*') over identity."""
        encodings = self.parse_encoding_header(accept_encoding_header)
        # Do the actual comparisons
        if('gzip' in encodings):
            return encodings['gzip'] >= encodings['identity']
        elif('*' in encodings):
            return encodings['*'] >= encodings['identity']
        else:
            return False
####################################################################################
# default config file consumed by get_application
configPath = "server.conf"
# aggregated profiling data: per-path overview, live session count and
# the most recent request profiles
profile = {
    "overview" : {},
    "sessions" : 0,
    "recent" : []
}
# server lifecycle states
SERVER_RUNNING = 1
SERVER_SHUTTING_DOWN = 2
serverStatus = SERVER_RUNNING
def clean_up_session():
    """
    Background loop: every 30 seconds drop sessions that expired, were
    force-expired or no longer pass session_validate. Runs for the whole
    process lifetime in a daemon thread.
    """
    while True:
        dbg_print("cleaning up sessions")
        now = datetime.datetime.now()
        now_s = time.mktime(now.timetuple())
        cleanup = []
        for sid, session in sessionCache.items():
            if session.expires <= now or session.forceExpire:
                cleanup.append(sid)
            elif not session_validate(session, now_s):
                cleanup.append(sid)
        for sid in cleanup:
            del sessionCache[sid]
        #log.debug("Cleaned up sessions, %d left" % len(sessionCache.keys()))
        time.sleep(30)
# run the cleanup loop in a daemon thread so it dies with the process
ses_clean_up = threading.Thread(target=clean_up_session)
ses_clean_up.daemon = True
ses_clean_up.start()
def get_application(dummy=False):
    """
    Build and return a new WSGI application object using the module-level
    configPath config file.

    The middleware wrap each other innermost-first, so a request flows
    ResponseHandler -> GzipHandler -> ErrorHandler -> HTTPCacheHandler ->
    DispatchHandler -> SessionHandler -> RequestHandler.
    """
    # fixed: the docstring used to sit after the first statement, making
    # it a dead string expression instead of the function docstring
    config = configPath
    application = RequestHandler(config)
    application = SessionHandler(application)
    application = DispatchHandler(application)
    application = HTTPCacheHandler(application)
    application = ErrorHandler(application)
    application = GzipHandler(application)
    application = ResponseHandler(application)
    # remember the parsed config for this config file
    configs[config] = application.config
    return application
###############################################################################
# P L U G I N
###############################################################################
class Plugin(object):
    """
    Base class for server plugins. Subclasses override start() and
    stop(), which are invoked when the server process starts and stops.
    """
    # parsed server config, injected by start_plugins()
    config = {}
    # lifecycle flag maintained by add_plugin/start_plugins/stop_plugins
    _started = False
    #############################################################################
    def start(self):
        """Called when the server starts; override in subclasses."""
        pass
    #############################################################################
    def stop(self):
        """Called when the server stops; override in subclasses."""
        pass
class TestPlugin(Plugin):
    """Trivial plugin used to verify the plugin start/stop lifecycle."""
    def start(self):
        print "Test plugin started ..."
    def stop(self):
        print "Test plugin stopped ..."
|
ocr.py | #!/usr/bin/env python3
import threading
import ocrmypdf
import os
import sys
import warnings
import inspect
from PIL import Image
warnings.simplefilter('ignore', Image.DecompressionBombWarning)
def start_job(dir_path, currentfile, progressbar_batch, progressbar_singlefile, outputarea, ocrmypdfsettings):
    """Run batch_ocr in a background daemon thread so the GUI stays responsive."""
    worker = threading.Thread(
        target=batch_ocr,
        args=(dir_path, progressbar_batch, progressbar_singlefile, outputarea, ocrmypdfsettings, currentfile),
        daemon=True,
    )
    worker.start()
def ocr_run(file_path, ocrmypdfsettings):
    """
    Run ocrmypdf in-place on file_path with the given settings.

    Returns a human-readable status string which the GUI appends to its
    output area.
    """
    print(ocrmypdfsettings)
    #runs ocrmypdf on given file
    try:
        ocrmypdf.ocr(file_path, file_path, **ocrmypdfsettings, plugins=["ocrmypdfgui.plugin_progressbar"])
        print("OCR complete.\n")
        return "OCR complete.\n"
    except ocrmypdf.exceptions.PriorOcrFoundError:
        print("Prior OCR - Skipping\n")
        return "Prior OCR - Skipping\n"
    except ocrmypdf.exceptions.EncryptedPdfError:
        print("PDF File is encrypted. Skipping.\n")
        return "PDF File is encrypted. Skipping.\n"
    except ocrmypdf.exceptions.BadArgsError:
        # fixed: this branch fell through and returned None, which the GUI
        # then inserted as the literal text "None"
        print("Bad arguments.\n")
        return "Bad arguments.\n"
    except Exception as exc:
        # fixed: was a bare `except:` that also swallowed SystemExit and
        # KeyboardInterrupt and printed only the exception *type*
        print(exc)
        return "Error.\n"
def batch_ocr(dir_path, progressbar_batch, progressbar_singlefile, outputarea, ocrmypdfsettings, currentfile):
    """
    OCR a single PDF (when dir_path is a file) or every PDF found below
    dir_path (when it is a directory), updating the GUI's progress bars,
    output area and current-file label along the way.

    Runs inside the worker thread created by start_job.
    """
    # walks through given path and uses OCR Function on every pdf in path
    progressbar_batch.set(0.0) #resets Progressbar
    progress_precision = 0.0
    if(os.path.isfile(dir_path)==True):
        #Run OCR on single file
        file_ext = os.path.splitext(dir_path)[1]
        if file_ext == '.pdf':
            print("Path:" + dir_path + "\n")
            outputarea.insert("end", "File: " + dir_path + " - ")
            outputarea.see("end")
            currentfile.set("Current File:" + dir_path )
            result = ocr_run(dir_path, ocrmypdfsettings)
            outputarea.insert("end", result)
            outputarea.see("end")
            progressbar_batch.set(100)
    elif(os.path.isdir(dir_path)==True):
        # first pass: count the PDFs so progress can be reported in percent
        number_of_files = 0
        for dir_name, subdirs, file_list in os.walk(dir_path):
            for filename in file_list:
                file_ext = os.path.splitext(filename)[1]
                if file_ext == '.pdf':
                    number_of_files=number_of_files+1
        if number_of_files >0:
            percent = 100/number_of_files
            # second pass: OCR each PDF and advance the batch progress bar
            for dir_name, subdirs, file_list in os.walk(dir_path):
                print(file_list)
                for filename in file_list:
                    file_ext = os.path.splitext(filename)[1]
                    if file_ext == '.pdf':
                        full_path = dir_name + '/' + filename
                        print("Path:" + full_path + "\n")
                        currentfile.set("Current File:" + full_path )
                        outputarea.insert("end", "File: " + full_path + " - ")
                        outputarea.see("end")
                        result = ocr_run(full_path, ocrmypdfsettings)
                        outputarea.insert("end", result)
                        outputarea.see("end")
                        progress_precision = progress_precision + percent
                        print(progress_precision)
                        progressbar_batch.set(round(progress_precision))
                        progressbar_singlefile.set(0.0)
    else:
        print("Error")
def get_api_options(func=None):
    """
    Inspect the keyword-only parameters of *func* (default: ocrmypdf.ocr)
    and return a dict mapping parameter name -> simple type name ("bool",
    "int", "float", "str") or the literal string "typing.Iterable[str]".

    Parameters with any other annotation are omitted. The *func*
    parameter is new and defaults to the original behavior, so existing
    zero-argument callers are unaffected.
    """
    if func is None:
        func = ocrmypdf.ocr
    simple_types = {"bool", "int", "float", "str"}
    iterable_str = "typing.Iterable[str]"
    options = {}
    for param in inspect.signature(func).parameters.values():
        if param.kind != param.KEYWORD_ONLY:
            continue
        # str() of a class annotation looks like "<class 'bool'>";
        # [8:-2] strips the wrapper to get the bare type name
        annotation = str(param.annotation)
        if annotation == iterable_str:
            options[param.name] = annotation
        elif annotation[8:-2] in simple_types:
            options[param.name] = annotation[8:-2]
    return options
|
start_sploit.py | #!/usr/bin/env python3
from urllib.request import Request, urlopen
from urllib.parse import urljoin
from math import ceil
from enum import Enum
from concurrent.futures import ThreadPoolExecutor
import threading
import time
import subprocess
import stat
import re
import random
import os
import logging
import json
import itertools
import binascii
import argparse
import sys
assert sys.version_info >= (3, 4), 'Python < 3.4 is not supported'
os_windows = (os.name == 'nt')
HEADER = r'''
____ _ _ _ _____
| _ \ ___ ___| |_ _ __ _ _ ___| |_(_)_ _____ | ___|_ _ _ __ _ __ ___
| | | |/ _ \/ __| __| '__| | | |/ __| __| \ \ / / _ \ | |_ / _` | '__| '_ ` _ `
| |_| | __/\__ \ |_| | | |_| | (__| |_| |\ V / __/ | _| (_| | | | | | | |
|____/ \___||___/\__|_| \__,_|\___|\__|_| \_/ \___| |_| \__,_|_| |_| |_| |_
Note that this software is highly destructive. Keep it away from children.
'''[1:]
class Style(Enum):
    """
    Bash escape sequences, see:
    https://misc.flogisoft.com/bash/tip_colors_and_formatting
    """
    BOLD = 1
    FG_BLACK = 30
    FG_RED = 31
    FG_GREEN = 32
    FG_YELLOW = 33
    FG_BLUE = 34
    FG_MAGENTA = 35
    FG_CYAN = 36
    FG_LIGHT_GRAY = 37
# foreground colors that stay readable on dark terminals;
# highlight() picks one of these at random when no style is given
BRIGHT_COLORS = [Style.FG_RED, Style.FG_GREEN, Style.FG_BLUE,
                 Style.FG_MAGENTA, Style.FG_CYAN]
def highlight(text, style=None):
    """
    Wrap *text* in ANSI escape codes for the given list of Style members;
    on Windows the text is returned unchanged. When *style* is omitted,
    bold plus a random bright color is used.
    """
    if os_windows:
        return text
    if style is None:
        style = [Style.BOLD, random.choice(BRIGHT_COLORS)]
    codes = ';'.join(str(item.value) for item in style)
    return '\033[{}m'.format(codes) + text + '\033[0m'
# timestamped log lines with the level name highlighted in yellow
log_format = '%(asctime)s {} %(message)s'.format(
    highlight('%(levelname)s', [Style.FG_YELLOW]))
logging.basicConfig(format=log_format, datefmt='%H:%M:%S', level=logging.DEBUG)
def parse_args():
    """Define and parse the farm client's command-line interface."""
    parser = argparse.ArgumentParser(description='Run a sploit on all teams in a loop',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('sploit',
                        help="Sploit executable (should take a victim's host as the first argument)")
    parser.add_argument('-u', '--server-url', metavar='URL',
                        default='http://farm.kolambda.com:5000',
                        help='Server URL')
    parser.add_argument('-a', '--alias', metavar='ALIAS',
                        default=None,
                        help='Sploit alias')
    parser.add_argument('--token', metavar='TOKEN',
                        help='Farm authorization token')
    parser.add_argument('--interpreter', metavar='COMMAND',
                        help='Explicitly specify sploit interpreter (use on Windows, which doesn\'t '
                             'understand shebangs)')
    parser.add_argument('--pool-size', metavar='N', type=int, default=50,
                        help='Maximal number of concurrent sploit instances. '
                             'Too little value will make time limits for sploits smaller, '
                             'too big will eat all RAM on your computer')
    parser.add_argument('--attack-period', metavar='N', type=float, default=120,
                        help='Rerun the sploit on all teams each N seconds '
                             'Too little value will make time limits for sploits smaller, '
                             'too big will miss flags from some rounds')
    parser.add_argument('-v', '--verbose-attacks', metavar='N', type=int, default=1,
                        help="Sploits' outputs and found flags will be shown for the N first attacks")
    # The two targeting restrictions below are mutually exclusive.
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--not-per-team', action='store_true',
                       help='Run a single instance of the sploit instead of an instance per team')
    group.add_argument('--distribute', metavar='K/N',
                       help='Divide the team list to N parts (by address hash modulo N) '
                            'and run the sploits only on Kth part of it (K >= 1)')
    return parser.parse_args()
def fix_args(args):
    """Validate and normalize parsed CLI arguments in place.

    Ensures the sploit file is usable, prefixes the server URL with a scheme
    if missing, and turns --distribute "K/N" into a (k, n) tuple.
    Raises ValueError (or InvalidSploitError via check_sploit) on bad input.
    """
    check_sploit(args)
    if '://' not in args.server_url:
        args.server_url = 'http://' + args.server_url
    if args.distribute is None:
        return
    match = re.fullmatch(r'(\d+)/(\d+)', args.distribute)
    if match is not None:
        k, n = int(match.group(1)), int(match.group(2))
        if n >= 2 and 1 <= k <= n:
            args.distribute = (k, n)
            return
    raise ValueError(
        'Wrong syntax for --distribute, use --distribute K/N (N >= 2, 1 <= K <= N)')
# Script extensions we validate and (on Windows) map to an interpreter.
SCRIPT_EXTENSIONS = {
    '.pl': 'perl',
    '.py': 'python',
    '.rb': 'ruby',
}
def check_script_source(source, interpreter):
    """Return a list of human-readable validation errors for a script sploit.

    Checks for a shebang (unless an explicit interpreter is given or we are
    on Windows) and for some evidence that the script flushes its output.
    """
    errors = []
    needs_shebang = not (os_windows or interpreter or source.startswith('#!'))
    if needs_shebang:
        errors.append(
            'Please use shebang (e.g. {}) as the first line of your script'.format(
                highlight('#!/usr/bin/env python3', [Style.FG_GREEN])))
    if not re.search(r'flush[(=]', source):
        errors.append(
            'Please print the newline and call {} each time after your sploit outputs flags. '
            'In Python 3, you can use {}. '
            'Otherwise, the flags may be lost (if the sploit process is killed) or '
            'sent with a delay.'.format(
                highlight('flush()', [Style.FG_RED]),
                highlight('print(..., flush=True)', [Style.FG_GREEN])))
    return errors
class InvalidSploitError(Exception):
    """Raised when the sploit file fails validation and must not be run."""
    pass
def check_sploit(args):
    """Validate the sploit file: existence, script sanity, executable bit.

    May set args.interpreter on Windows (picked by extension).
    Raises ValueError if the file is missing and InvalidSploitError if the
    script fails validation or is not executable.
    """
    path = args.sploit
    if not os.path.isfile(path):
        raise ValueError('No such file: {}'.format(path))
    extension = os.path.splitext(path)[1].lower()
    is_script = extension in SCRIPT_EXTENSIONS
    if is_script:
        with open(path, 'r', errors='ignore') as f:
            source = f.read()
        errors = check_script_source(source, args.interpreter)
        if errors:
            for message in errors:
                logging.error(message)
            raise InvalidSploitError(
                'Sploit won\'t be run because of validation errors')
        if os_windows and args.interpreter is None:
            # Windows cannot use shebangs, so pick the interpreter by extension.
            args.interpreter = SCRIPT_EXTENSIONS[extension]
            logging.info('Using interpreter `{}`'.format(args.interpreter))
    if not os_windows:
        file_mode = os.stat(path).st_mode
        # TODO: May be check the owner and other X flags properly?
        if not file_mode & stat.S_IXUSR:
            if is_script:
                # Scripts can be fixed automatically; binaries cannot.
                logging.info('Setting the executable bit on `{}`'.format(path))
                os.chmod(path, file_mode | stat.S_IXUSR)
            else:
                raise InvalidSploitError(
                    "The provided file doesn't appear to be executable")
if os_windows:
    # By default, Ctrl+C does not work on Windows if we spawn subprocesses.
    # Here we fix that using WinApi. See https://stackoverflow.com/a/43095532
    import signal
    import ctypes
    from ctypes import wintypes
    kernel32 = ctypes.WinDLL("kernel32", use_last_error=True)
    # BOOL WINAPI HandlerRoutine(
    #   _In_ DWORD dwCtrlType
    # );
    PHANDLER_ROUTINE = ctypes.WINFUNCTYPE(wintypes.BOOL, wintypes.DWORD)
    # A NULL handler pointer: passing it with Add=True makes the process
    # ignore Ctrl+C (used to shield child spawning in launch_sploit).
    win_ignore_ctrl_c = PHANDLER_ROUTINE()  # = NULL
    def _errcheck_bool(result, _, args):
        # ctypes errcheck hook: raise OSError on a FALSE WinAPI return.
        if not result:
            raise ctypes.WinError(ctypes.get_last_error())
        return args
    # BOOL WINAPI SetConsoleCtrlHandler(
    #   _In_opt_ PHANDLER_ROUTINE HandlerRoutine,
    #   _In_     BOOL             Add
    # );
    kernel32.SetConsoleCtrlHandler.errcheck = _errcheck_bool
    kernel32.SetConsoleCtrlHandler.argtypes = (PHANDLER_ROUTINE, wintypes.BOOL)
    @PHANDLER_ROUTINE
    def win_ctrl_handler(dwCtrlType):
        # Translate a console Ctrl+C into our graceful shutdown().
        if dwCtrlType == signal.CTRL_C_EVENT:
            kernel32.SetConsoleCtrlHandler(win_ignore_ctrl_c, True)
            shutdown()
        return False
    kernel32.SetConsoleCtrlHandler(win_ctrl_handler, True)
class APIException(Exception):
    """Raised when the farm server answers with a non-2xx status."""
    pass

SERVER_TIMEOUT = 5  # seconds, for both config fetching and flag posting
def get_config(args):
    """Fetch and decode the farm's JSON config; raise APIException on error."""
    request = Request(urljoin(args.server_url, '/destructivefarm/api/get_config'))
    if args.token is not None:
        request.add_header('X-Token', args.token)
    with urlopen(request, timeout=SERVER_TIMEOUT) as conn:
        if conn.status not in (200, 201):
            raise APIException(conn.read())
        return json.loads(conn.read().decode())
def post_flags(args, flags):
    """POST captured flags to the farm as JSON; raise APIException on error.

    `flags` is a list of {'flag': ..., 'team': ...} dicts from FlagStorage.
    """
    sploit_name = args.alias if args.alias is not None else os.path.basename(args.sploit)
    payload = [{'flag': item['flag'], 'sploit': sploit_name, 'team': item['team']}
               for item in flags]
    request = Request(urljoin(args.server_url, '/destructivefarm/api/post_flags'))
    request.add_header('Content-Type', 'application/json')
    if args.token is not None:
        request.add_header('X-Token', args.token)
    with urlopen(request, data=json.dumps(payload).encode(), timeout=SERVER_TIMEOUT) as conn:
        if conn.status not in (200, 201):
            raise APIException(conn.read())
# Set once to ask every loop (posting, attacking) to stop gracefully.
exit_event = threading.Event()
def once_in_a_period(period):
    """Yield 1, 2, 3, ... at most once per `period` seconds.

    Time spent by the caller inside the loop body counts toward the period.
    Stops as soon as exit_event is set.
    """
    iter_no = 1
    while True:
        began = time.time()
        yield iter_no
        remaining = period - (time.time() - began)
        if remaining > 0:
            exit_event.wait(remaining)
        if exit_event.is_set():
            break
        iter_no += 1
class FlagStorage:
    """
    Thread-safe storage comprised of a set and a post queue.
    Any number of threads may call add(), but only one "consumer thread"
    may call pick_flags() and mark_as_sent().
    """

    def __init__(self):
        self._flags_seen = set()
        self._queue = []
        self._lock = threading.RLock()

    def add(self, flags, team_name):
        """Queue every not-yet-seen flag from `flags`, tagged with the team."""
        with self._lock:
            for flag in flags:
                if flag in self._flags_seen:
                    continue
                self._flags_seen.add(flag)
                self._queue.append({'flag': flag, 'team': team_name})

    def pick_flags(self):
        """Return a snapshot of the pending queue (consumer thread only)."""
        with self._lock:
            return list(self._queue)

    def mark_as_sent(self, count):
        """Drop the first `count` queued flags after a successful post."""
        with self._lock:
            del self._queue[:count]

    @property
    def queue_size(self):
        with self._lock:
            return len(self._queue)
flag_storage = FlagStorage()  # process-wide accumulator of captured flags
POST_PERIOD = 5  # seconds between flag submissions to the server
def run_post_loop(args):
    """Consumer thread: periodically flush collected flags to the farm server.

    On a failed post the flags stay queued and are retried next period; an
    unexpected error kills the whole client via shutdown().
    """
    try:
        for _ in once_in_a_period(POST_PERIOD):
            flags_to_post = flag_storage.pick_flags()
            if flags_to_post:
                try:
                    post_flags(args, flags_to_post)
                    flag_storage.mark_as_sent(len(flags_to_post))
                    logging.info('{} flags posted to the server ({} in the queue)'.format(
                        len(flags_to_post), flag_storage.queue_size))
                except Exception as e:
                    logging.error(
                        "Can't post flags to the server: {}".format(repr(e)))
                    logging.info("The flags will be posted next time")
    except Exception as e:
        logging.critical('Posting loop died: {}'.format(repr(e)))
        shutdown()
# Serializes multi-line sploit-output dumps from concurrent reader threads.
display_output_lock = threading.RLock()
def display_sploit_output(team_name, output_lines):
    """Pretty-print one sploit run's output, each line prefixed by the team."""
    if not output_lines:
        logging.info('{}: No output from the sploit'.format(team_name))
        return
    prefix = highlight(team_name + ': ')
    body = '\n'.join(prefix + line.rstrip() for line in output_lines)
    with display_output_lock:
        print('\n' + body + '\n')
def process_sploit_output(stream, args, team_name, flag_format, attack_no):
    """Reader thread: collect a sploit's stdout, extract and store flags.

    Runs until the sploit's stdout closes (process exit or kill), then shows
    the output for early attacks and logs any flags found.
    """
    try:
        output_lines = []
        instance_flags = set()
        while True:
            line = stream.readline()
            if not line:
                break
            line = line.decode(errors='replace')
            output_lines.append(line)
            # Flags are posted as soon as a line arrives, not at process end.
            line_flags = set(flag_format.findall(line))
            if line_flags:
                flag_storage.add(line_flags, team_name)
                instance_flags |= line_flags
        if attack_no <= args.verbose_attacks and not exit_event.is_set():
            # We don't want to spam the terminal on KeyboardInterrupt
            display_sploit_output(team_name, output_lines)
        if instance_flags:
            logging.info('Got {} flags from "{}": {}'.format(
                len(instance_flags), team_name, instance_flags))
    except Exception as e:
        logging.error('Failed to process sploit output: {}'.format(repr(e)))
class InstanceStorage:
    """
    Dictionary of all running sploit instances plus completion statistics.

    Always acquire instance_lock before using this class. Do not release the
    lock between actually spawning/killing a process and calling
    register_start()/register_stop().
    """

    def __init__(self):
        self._counter = 0
        self.instances = {}
        self.n_completed = 0
        self.n_killed = 0

    def register_start(self, process):
        """Record a freshly spawned process and return its instance id."""
        instance_id = self._counter
        self._counter += 1
        self.instances[instance_id] = process
        return instance_id

    def register_stop(self, instance_id, was_killed):
        """Forget a finished process and update the completion counters."""
        self.instances.pop(instance_id)
        self.n_completed += 1
        if was_killed:
            self.n_killed += 1
instance_storage = InstanceStorage()  # all currently running sploit processes
instance_lock = threading.RLock()  # guards instance_storage and spawn/kill pairs
def launch_sploit(args, team_name, team_addr, attack_no, flag_format):
    """Spawn one sploit process plus a reader thread for its output.

    Returns (process, instance_id).  Callers hold instance_lock (see
    InstanceStorage's contract for register_start()).
    """
    # For sploits written in Python, this env variable forces the interpreter to flush
    # stdout and stderr after each newline. Note that this is not default behavior
    # if the sploit's output is redirected to a pipe.
    env = os.environ.copy()
    env['PYTHONUNBUFFERED'] = '1'
    command = [os.path.abspath(args.sploit)]
    if args.interpreter is not None:
        command = [args.interpreter] + command
    if team_addr is not None:
        command.append(team_addr)
    need_close_fds = (not os_windows)
    if os_windows:
        # On Windows, we block Ctrl+C handling, spawn the process, and
        # then recover the handler. This is the only way to make Ctrl+C
        # intercepted by us instead of our child processes.
        kernel32.SetConsoleCtrlHandler(win_ignore_ctrl_c, True)
    # NOTE(review): bufsize=1 asks for line buffering, which binary-mode pipes
    # do not support (Python warns and falls back) -- confirm intent.
    proc = subprocess.Popen(command,
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                            bufsize=1, close_fds=need_close_fds, env=env)
    if os_windows:
        kernel32.SetConsoleCtrlHandler(win_ignore_ctrl_c, False)
    threading.Thread(target=lambda: process_sploit_output(
        proc.stdout, args, team_name, flag_format, attack_no)).start()
    return proc, instance_storage.register_start(proc)
def run_sploit(args, team_name, team_addr, attack_no, max_runtime, flag_format):
    """Pool task: run the sploit against one team and enforce its time limit."""
    try:
        with instance_lock:
            # Don't spawn anything new once shutdown has begun.
            if exit_event.is_set():
                return
            proc, instance_id = launch_sploit(
                args, team_name, team_addr, attack_no, flag_format)
    except Exception as e:
        if isinstance(e, FileNotFoundError):
            logging.error(
                'Sploit file or the interpreter for it not found: {}'.format(repr(e)))
            logging.error('Check presence of the sploit file and the shebang (use {} for compatibility)'.format(
                highlight('#!/usr/bin/env ...', [Style.FG_GREEN])))
        else:
            logging.error('Failed to run sploit: {}'.format(repr(e)))
        if attack_no == 1:
            # If even the very first attack cannot start, give up entirely.
            shutdown()
        return
    try:
        try:
            proc.wait(timeout=max_runtime)
            need_kill = False
        except subprocess.TimeoutExpired:
            need_kill = True
            if attack_no <= args.verbose_attacks:
                logging.warning(
                    'Sploit for "{}" ({}) ran out of time'.format(team_name, team_addr))
        with instance_lock:
            if need_kill:
                proc.kill()
            instance_storage.register_stop(instance_id, need_kill)
    except Exception as e:
        logging.error('Failed to finish sploit: {}'.format(repr(e)))
def show_time_limit_info(args, config, max_runtime, attack_no):
    """Log time-limit diagnostics: period sanity (first attack only) and the
    running fraction of instances that had to be killed."""
    if attack_no == 1:
        # A round must leave enough time to submit flags before they expire.
        min_attack_period = config['FLAG_LIFETIME'] - \
            config['SUBMIT_PERIOD'] - POST_PERIOD
        if args.attack_period >= min_attack_period:
            logging.warning("--attack-period should be < {:.1f} sec, "
                            "otherwise the sploit will not have time "
                            "to catch flags for each round before their expiration".format(min_attack_period))
    logging.info(
        'Time limit for a sploit instance: {:.1f} sec'.format(max_runtime))
    with instance_lock:
        if instance_storage.n_completed > 0:
            # TODO: Maybe better for 10 last attacks
            logging.info('Total {:.1f}% of instances ran out of time'.format(
                float(instance_storage.n_killed) / instance_storage.n_completed * 100))
# Cap on team names listed in the per-attack log line.
PRINTED_TEAM_NAMES = 5
def get_target_teams(args, teams, attack_no):
    """Return the {team_name: address} mapping to attack this round.

    Honors --not-per-team (single wildcard instance) and --distribute
    (keep only the K-th of N partitions by address CRC32); logs the result.
    """
    if args.not_per_team:
        return {'*': None}
    if args.distribute is not None:
        part_no, part_count = args.distribute
        teams = {name: addr
                 for name, addr in teams.items()
                 if binascii.crc32(addr.encode()) % part_count == part_no - 1}
    if not teams:
        logging.error('There is no teams to attack for this farm client, fix "TEAMS" value '
                      'in your server config or the usage of --distribute')
        return teams
    if attack_no <= args.verbose_attacks:
        names = sorted(teams)
        if len(names) > PRINTED_TEAM_NAMES:
            names = names[:PRINTED_TEAM_NAMES] + ['...']
        logging.info('Sploit will be run on {} teams: {}'.format(
            len(teams), ', '.join(names)))
    return teams
def main(args):
    """Entry point: validate args, then attack all teams once per period."""
    try:
        fix_args(args)
    except (ValueError, InvalidSploitError) as e:
        logging.critical(str(e))
        return
    print(highlight(HEADER))
    logging.info('Connecting to the farm server at {}'.format(args.server_url))
    threading.Thread(target=lambda: run_post_loop(args)).start()
    config = flag_format = None
    pool = ThreadPoolExecutor(max_workers=args.pool_size)
    for attack_no in once_in_a_period(args.attack_period):
        try:
            config = get_config(args)
            flag_format = re.compile(config['FLAG_FORMAT'])
        except Exception as e:
            logging.error(
                "Can't get config from the server: {}".format(repr(e)))
            if attack_no == 1:
                return
            # Keep attacking with the previously fetched config.
            logging.info('Using the old config')
        teams = get_target_teams(args, config['TEAMS'], attack_no)
        if not teams:
            if attack_no == 1:
                return
            continue
        print()
        logging.info('Launching an attack #{}'.format(attack_no))
        # Per-instance budget: the period divided by how many pool batches
        # are needed to cover all teams.
        max_runtime = args.attack_period / ceil(len(teams) / args.pool_size)
        show_time_limit_info(args, config, max_runtime, attack_no)
        for team_name, team_addr in teams.items():
            pool.submit(run_sploit, args, team_name, team_addr,
                        attack_no, max_runtime, flag_format)
def shutdown():
    """Stop the posting loop and kill every running sploit instance."""
    # Stop run_post_loop thread
    exit_event.set()
    # Kill all child processes (so consume_sploit_ouput and run_sploit also will stop)
    with instance_lock:
        for process in list(instance_storage.instances.values()):
            process.kill()
if __name__ == '__main__':
    try:
        main(parse_args())
    except KeyboardInterrupt:
        logging.info('Got Ctrl+C, shutting down')
    finally:
        # Always stop the post loop and kill child sploits on exit.
        shutdown()
|
analyzer.py | # Date: 2021/7/19
# Author: Lan_zhijiang
# Description: AntiDDoSๅฏนๆฐๆฎ่ฟ่กๅ ๅทฅ
import threading
import time
import os
class Analyzer:
    """Refines raw tcpdump-style capture lines into structured records and
    per-connection traffic statistics for AntiDDoS.

    Pipeline: raw_queue (text lines) -> basic_analyze ->
    basic_analyze_result_queue (dicts) -> advanced_analyze ->
    complex_analyze_result (per-connection and "overall" statistics).
    """

    def __init__(self, base):
        # `base` owns the shared logger, queues and settings.
        self.base = base
        self.log = base.log
        self.raw_queue = base.raw_queue
        self.basic_analyze_result_queue = base.basic_analyze_result_queue
        self.complex_analyze_result = base.complex_analyze_result
        self.settings = base.settings

    def basic_analyze(self):
        """Coarsely parse each raw capture line into a structured dict.

        Consumes from self.raw_queue until an empty string arrives; each
        parsed record is pushed onto self.basic_analyze_result_queue.
        """
        self.log.add_log("Analyzer: start basic analyzing", 1)
        count = 0
        line = self.raw_queue.get()
        while line != "":
            time.sleep(0.1)
            self.log.add_log("Analyzer: now: %s" % line, 0)
            count += 1
            output = {
                "id": "",
                "who": {"src": "", "src_port": "", "dst": "", "dst_port": ""},
                "what": {"timestamp": "", "flags": "", "ack": "", "seq": [0, 1], "length": ""},
                "more": {"options": []}
            }
            print("analyzing data-%s" % count)
            output["id"] = count
            # phase 1: "<time> IP <src>.<sport> > <dst>.<dport>: <k1>, <k2>, ..."
            part = line.split(": ")
            part1 = part[0].split(" ", 2)
            part2 = part[1].split(", ")
            part3 = part1[2].split(" > ")
            # Ports are dot-appended to the addresses; split them off.
            a, b = part3[0].rsplit(".", 1), part3[1].rsplit(".", 1)
            output["who"]["src"], output["who"]["src_port"] = a[0], a[1]
            output["who"]["dst"], output["who"]["dst_port"] = b[0], b[1]
            # tcpdump prints only the time of day; stamp it with today's date.
            date = time.strftime("%Y-%m-%d", time.localtime()) + " " + part1[0]
            time_array = time.strptime(date, "%Y-%m-%d %H:%M:%S")
            output["what"]["timestamp"] = str(time.mktime(time_array))
            for j in part2:
                if "Flags" in j:
                    output["what"]["flags"] = j.split(" ")[1]
                elif "seq" in j:
                    output["what"]["seq"] = j.split(" ")[1].split(":")
                elif "ack" in j:
                    output["what"]["ack"] = j.split(" ")[1]
                elif "options" in j:
                    output["more"]["options"] = j.split(" ", 1)[1]
                elif "length" in j:
                    output["what"]["length"] = j.split(" ")[1]
            self.basic_analyze_result_queue.put(output)
            line = self.raw_queue.get()

    def compute_now_speed(self, connection_id):
        """Return the average bytes/sec over a connection's recent-traffic
        window, or None while the window is too small (< 3 samples or < 1 s).

        Side effect: once a speed is computed the oldest sample is dropped,
        so the window keeps sliding.
        """
        recent_traffic = self.complex_analyze_result[connection_id]["recent_traffic"]
        timestamps = list(recent_traffic.keys())
        if len(timestamps) < 2:
            return None
        # Window width in seconds = newest minus oldest timestamp.  (The
        # original indexed the dict with 1/-1, a KeyError for string keys.)
        difference = abs(float(timestamps[-1]) - float(timestamps[0]))
        if difference < 1 or len(recent_traffic) < 3:
            speed = None
        else:
            total = 0.0
            for ts in timestamps:
                total += float(recent_traffic[ts])  # lengths may arrive as str
            speed = total / difference
            # Slide the window by dropping the oldest sample (the original
            # dropped index 1, so the true oldest sample was never evicted).
            del recent_traffic[timestamps[0]]
        return speed

    def advanced_analyze(self):
        """Classify parsed records per connection and maintain live statistics.

        Consumes dicts from basic_analyze_result_queue until an empty string
        arrives, updating self.complex_analyze_result: per-connection totals
        and speed, overall in/out traffic, the recently created connection
        list, and trimming outdated per-connection records.
        """
        self.log.add_log("Analyzer: start advanced analyzing", 1)

        def new_record():
            # Build a FRESH dict per connection.  The original reused a single
            # template dict, so every connection aliased the same nested
            # "records" list and "recent_traffic" dict.
            return {
                "src": "",
                "dst": "",
                "status": "",  # handshaking1/2/3 data_transportation fin rst
                "protocol": "",
                "total_traffic": 0.0,
                "now_speed": "",
                "connection_count": "",
                "handshake_status_count": {
                    "1": "",
                    "2": "",
                    "3": ""
                },
                "records": [],
                "recent_traffic": {}
            }

        now_connection_ids = list(self.complex_analyze_result.keys())
        line = self.basic_analyze_result_queue.get()
        while line != "":
            is_new = False
            time.sleep(0.15)
            src = line["who"]["src"] + ":" + line["who"]["src_port"]
            dst = line["who"]["dst"] + ":" + line["who"]["dst_port"]
            connection_id = src + " > " + dst
            length = float(line["what"]["length"])  # arrives as a string
            timestamp = line["what"]["timestamp"]
            # ---------------classify---------------
            self.log.add_log("Analyzer: AA stage 1, classify c_id-%s" % connection_id, 1)
            if connection_id not in now_connection_ids:
                is_new = True
                now_connection_ids.append(connection_id)
                self.complex_analyze_result[connection_id] = new_record()
            connection = self.complex_analyze_result[connection_id]
            connection["records"].insert(0, line)  # newest record first
            connection["src"] = src
            connection["dst"] = dst
            # ---------------compute base status---------------
            # NOTE(review): assumes base initializes the "overall" entry with
            # numeric counters and the keys used below -- confirm.
            overall = self.complex_analyze_result["overall"]
            connection["total_traffic"] += length
            connection["recent_traffic"][timestamp] = length
            connection["now_speed"] = self.compute_now_speed(connection_id)
            overall["recent_traffic"][timestamp] = length
            # Fixed: the overall speed used to overwrite the per-connection one.
            overall["now_speed"] = self.compute_now_speed("overall")
            overall["total_traffic"] += length
            if dst == self.settings["host"]:  # in traffic
                overall["in_traffic"] += length
            elif src == self.settings["host"]:  # out traffic
                overall["out_traffic"] += length
            # Count current connections per source ip via netstat (single ip).
            if dst == self.settings["host"]:
                connections = os.popen("netstat -ntu | awk '{print $5}' | cut -d: -f1 | uniq -c | sort -n | awk '{print $1}'").read().split("\n")[1:-1]
                # Fixed: the second awk program was missing its closing quote.
                address = os.popen("netstat -ntu | awk '{print $5}' | cut -d: -f1 | uniq -c | sort -n | awk '{print $2}'").read().split("\n")[1:-1]
                if src in address:
                    connection["connection_count"] = connections[address.index(src)]
                else:
                    connection["connection_count"] = None
            else:
                connection["connection_count"] = None
            # overall: connections created within the last 2s get extra attention
            if is_new:
                if not overall["new_connections"]:
                    overall["new_connections"] = []
                    overall["new_connections"].append(connection_id)
                else:
                    oldest_id = overall["new_connections"][0]
                    # Fixed: connection entries have no "what" key; take the
                    # oldest tracked connection's latest record instead.
                    oldest_ts = float(self.complex_analyze_result[oldest_id]["records"][0]["what"]["timestamp"])
                    difference = abs(oldest_ts - float(timestamp))
                    if difference > 2:
                        del overall["new_connections"][0:]
                    else:
                        del overall["new_connections"][0]
                    overall["new_connections"].append(connection_id)
                if len(overall["new_connections"]) >= self.settings["new_connections_limit"]:
                    self.log.add_log("Analyzer: new_connections was over the limit", 2)
                    overall["new_connections_warning"] = True
                    # Fixed: append a copy -- appending the live list and then
                    # clearing it would empty the tracked snapshot as well.
                    overall["under_tracking_connections"].append(list(overall["new_connections"]))
                    del overall["new_connections"][0:]
            # Delete outdated records (older than 20s) once history grows.
            records = connection["records"]
            if len(records) > 60:
                now_timestamp = time.time()
                node = None
                for index in range(len(records)):
                    if now_timestamp - float(records[index]["what"]["timestamp"]) >= 20:
                        node = index
                        break
                # Fixed: deleting by ascending index shifted the list and ran
                # past its end; records are newest-first, so everything from
                # the first outdated index onward goes in one slice delete.
                if node:
                    del records[node:]
            # Fixed: the original never fetched the next item here, so the
            # loop spun forever on the same record.
            line = self.basic_analyze_result_queue.get()

    def run(self):
        """Start both analysis stages, each on its own thread."""
        basic_analyze = threading.Thread(target=self.basic_analyze, args=())
        advanced_analyze = threading.Thread(target=self.advanced_analyze, args=())
        # Fixed: Thread.run() executes the target in the CURRENT thread and
        # blocks; Thread.start() is what actually spawns the worker.
        basic_analyze.start()
        time.sleep(5)
        advanced_analyze.start()
|
locomotion.py | """A model based controller framework."""
from absl import logging
from datetime import datetime
import enum
import ml_collections
import numpy as np
import pandas as pd
import os
import pickle
import pybullet
from pybullet_utils import bullet_client
import threading
import time
from typing import Tuple
from src.convex_mpc_controller import com_velocity_estimator
from src.convex_mpc_controller import offset_gait_generator
from src.convex_mpc_controller import raibert_swing_leg_controller
from src.convex_mpc_controller import torque_stance_leg_controller_mpc
from src.robots import a1
from src.robots import spirit
from src.robots.motors import MotorCommand
from src.robots.motors import MotorControlMode
from src.robots.terrain import randomRockyTerrain, Sloped
from src.controller.leg_controller import LegController
from src.controller.global_planner import GlobalPlanner
from src.controller.local_planner import LocalPlanner
import gbp_python_interface
import lp_python_interface
# class ControllerMode(enum.Enum):
# DOWN = 1
# STAND = 2
# WALK = 3
# TERMINATE = 4
# class GaitType(enum.Enum):
# CRAWL = 1
# TROT = 2
# FLYTROT = 3
def get_sim_conf():
    """Build the simulation ConfigDict shared by the robot and planners."""
    config = ml_collections.ConfigDict()
    config.timestep = 0.001            # physics step, seconds
    config.timestep_per_index = 3
    config.action_repeat = 1
    config.reset_time_s = 3.
    config.standup_time_s = 3.
    config.num_solver_iterations = 30
    config.init_position = (0., 0., 1.0)
    config.init_rack_position = [0., 0., 1]
    config.on_rack = False
    config.debug_flag = True
    return config
class Locomotion(object):
    """Generates the quadruped locomotion.
    The actual effect of this controller depends on the composition of each
    individual subcomponent.
    """
    def __init__(self,
                 use_real_robot: bool = False,
                 show_gui: bool = False,
                 logdir: str = 'logs/'):
        """Initializes the simulation, robot, planners and the control thread.

        Args:
          use_real_robot: Drive real hardware instead of the PyBullet sim.
          show_gui: Open the PyBullet GUI and track the robot with the camera.
          logdir: Directory where run logs are flushed as pickle files.
        """
        self._use_real_robot = use_real_robot
        self._show_gui = show_gui
        self._sim_conf = get_sim_conf()
        self._setup_robot_and_terrain()
        self._setup_planner_and_controller()
        self.reset_robot()
        self.reset_planners()  # sets self._reset_time used just below
        # self._reset_time = 0.0 # timer: calculate the simulation time
        self._global_plan_reset_time = self._reset_time
        self._local_plan_reset_time = self._reset_time
        self._logs = []
        self._logdir = logdir
        self._node_interval = 0.03  # seconds between local-planner updates
        self._global_plan = None
        self._local_plan = None
        # The control loop runs on a background thread for the object's lifetime.
        self.run_thread = threading.Thread(target=self.run)
        self.run_thread.start()
    def _setup_robot_and_terrain(self):
        """Create the PyBullet client, a rocky terrain, and the Spirit robot."""
        # Construct robot
        if self._show_gui:
            p = bullet_client.BulletClient(connection_mode=pybullet.GUI)
        else:
            p = bullet_client.BulletClient(connection_mode=pybullet.DIRECT)
        p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 0)
        p.setAdditionalSearchPath('src/data')
        self.pybullet_client = p
        p.setPhysicsEngineParameter(numSolverIterations=30)
        p.setTimeStep(self._sim_conf.timestep)
        p.setGravity(0, 0, -9.8)
        p.setPhysicsEngineParameter(enableConeFriction=0)
        # Construct terrain:
        self._goal = [3, 3]
        self._terrain = randomRockyTerrain()
        self._terrain.generate(goal=self._goal)
        self._ground_id = self._terrain.terrainBody
        self._terrain_map = self._terrain.sensedHeightMapSquare()
        # pd.DataFrame(self._terrain_map).to_csv('terrain_1.csv')
        # Rotate/transpose the height map (axis-convention fix, presumably
        # for the planners) -- TODO confirm the intended orientation.
        self._terrain_map = np.ascontiguousarray(np.rot90(np.transpose(np.rot90(self._terrain_map)),3))
        # pd.DataFrame(self._terrain_map).to_csv('terrain_2.csv')
        # Construct robot class:
        self._robot = spirit.Spirit(pybullet_client=p,
                                    sim_conf=get_sim_conf(),
                                    motor_control_mode=MotorControlMode.HYBRID)
        if self._show_gui and not self._use_real_robot:
            p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1)
        # self._clock = self._robot.time_since_reset()
    def _setup_planner_and_controller(self):
        """Instantiate the gait pattern, global/local planners and leg controller."""
        self._gait_pattern = lp_python_interface.GaitPattern()
        self._gait_pattern.phase_offset = [0.0, 0.5, 0.5, 0.0]
        self._gait_pattern.duty_cycle = [0.3, 0.3, 0.3, 0.3]
        self._global_planner = GlobalPlanner(pybullet_client=self.pybullet_client,
                                             robot=self._robot,
                                             terrain_map=self._terrain_map)
        self._global_planner.set_goal_point(self._goal)
        self._local_planner = LocalPlanner(pybullet_client=self.pybullet_client,
                                           robot=self._robot,
                                           sim_conf=get_sim_conf(),
                                           terrain_map=self._terrain_map,
                                           origin=[0., 0.],
                                           gait_pattern=self._gait_pattern)
        self._leg_controller = LegController(self._robot)
    def reset_robot(self):
        """Reset the robot in the sim and bring it to a standing pose."""
        self._robot.reset()
        self._robot.stand_up()
        self._robot.change_controller_param([10, 10, 10], [1, 1, 1])
        if self._show_gui and not self._use_real_robot:
            self.pybullet_client.configureDebugVisualizer(
                self.pybullet_client.COV_ENABLE_RENDERING, 1)
    def reset_planners(self):
        """Zero all planner/controller clocks to the robot's current time."""
        # Resetting other components
        self._reset_time = self._robot.time_since_reset  # 0.0
        self._global_plan_reset_time = self._reset_time
        self._local_plan_reset_time = self._reset_time
        self._global_planner.reset(self._reset_time)
        self._local_planner.reset(self._reset_time)
        self._leg_controller.reset(self._reset_time)
    @property
    def time_since_global_planner_update(self):
        # Seconds since the global plan was recomputed (refreshed in update()).
        return self._time_since_global_plan_update
    @property
    def time_since_local_planner_update(self):
        # Seconds since the local plan was recomputed (refreshed in update()).
        return self._time_since_local_plan_update
    def update(self):
        """Refresh the elapsed-time bookkeeping from the robot clock."""
        self._time_since_reset = self._robot.time_since_reset - self._reset_time
        self._time_since_global_plan_update = self._robot.time_since_reset - self._global_plan_reset_time
        self._time_since_local_plan_update = self._robot.time_since_reset - self._local_plan_reset_time
    def get_action(self):
        """Returns the control ouputs (e.g. positions/torques) for all motors."""
        self._leg_controller.receive_local_plan(self._local_plan)
        action = self._leg_controller.get_action(self._time_since_reset)
        return action
    # def get_action(self):
    #   """Returns the control ouputs (e.g. positions/torques) for all motors."""
    #   swing_action = self._swing_controller.get_action()
    #   stance_action, qp_sol = self._stance_controller.get_action()
    #   actions = []
    #   for joint_id in range(self._robot.num_motors):
    #     if joint_id in swing_action:
    #       actions.append(swing_action[joint_id])
    #     else:
    #       assert joint_id in stance_action
    #       actions.append(stance_action[joint_id])
    #   vectorized_action = MotorCommand(
    #       desired_position=[action.desired_position for action in actions],
    #       kp=[action.kp for action in actions],
    #       desired_velocity=[action.desired_velocity for action in actions],
    #       kd=[action.kd for action in actions],
    #       desired_extra_torque=[
    #           action.desired_extra_torque for action in actions
    #       ])
    #   return vectorized_action, dict(qp_sol=qp_sol)
    # def _handle_mode_switch(self):
    #   if self._mode == self._desired_mode:
    #     return
    #   self._mode = self._desired_mode
    #   if self._desired_mode == ControllerMode.DOWN:
    #     logging.info("Entering joint damping mode.")
    #     self._flush_logging()
    #   elif self._desired_mode == ControllerMode.STAND:
    #     logging.info("Standing up.")
    #     self.reset_robot()
    #   else:
    #     logging.info("Walking.")
    #     self.reset_controllers()
    #     self._start_logging()
    def run(self):
        """Main control loop: one global plan, then local plan + leg control."""
        logging.info("main thread started...")
        time.sleep(0.1)
        self._global_plan = self._global_planner.update()
        self._global_plan_reset_time = self._robot.time_since_reset
        self._local_planner.set_global_plan(self._global_plan)
        # NOTE(review): this sleeps ~17 minutes before the control loop
        # starts; looks like leftover debugging -- confirm.
        time.sleep(1000)
        while True:
            self.update()  # update time
            # NOTE(review): float modulo is almost never exactly 0, so this
            # gate may rarely (or never) trigger -- confirm the intent.
            if self._time_since_local_plan_update % self._node_interval == 0:
                self._local_plan = self._local_planner.update()
                self._local_plan_reset_time = self._robot.time_since_reset
            action = self.get_action()
            self._robot.step(action)
            self._update_logging(action)
            time.sleep(0.001)  # nothing to do with simulation step
            # Camera setup:
            if self._show_gui:
                self.pybullet_client.resetDebugVisualizerCamera(
                    cameraDistance=1.0,
                    cameraYaw=30 +
                    self._robot.base_orientation_rpy[2] / np.pi * 180,
                    cameraPitch=-30,
                    cameraTargetPosition=self._robot.base_position,
                )
    @property
    def is_safe(self):
        """True while the base is upright enough and high enough off the ground."""
        # NOTE(review): self._state_estimator is never assigned in this class,
        # so accessing this property raises AttributeError -- confirm.
        rot_mat = np.array(
            self._robot.pybullet_client.getMatrixFromQuaternion(
                self._state_estimator.com_orientation_quat_ground_frame)
        ).reshape((3, 3))
        up_vec = rot_mat[2, 2]
        base_height = self._robot.base_position[2]
        return up_vec > 0.85 and base_height > 0.18
    def _start_logging(self):
        """Discard any buffered telemetry and begin a fresh log."""
        self._logs = []
    def _update_logging(self, action):
        """Append one frame of telemetry to the in-memory log buffer."""
        # NOTE(review): self._swing_controller is never created in this class
        # (superseded by self._leg_controller?), so this raises AttributeError
        # when called from run() -- confirm and fix the frame fields.
        frame = dict(
            desired_speed=(self._swing_controller.desired_speed,
                           self._swing_controller.desired_twisting_speed),
            timestamp=self._time_since_reset,
            base_rpy=self._robot.base_orientation_rpy,
            motor_angles=self._robot.motor_angles,
            base_vel=self._robot.motor_velocities,
            #base_vels_body_frame=self._state_estimator.com_velocity_body_frame,
            base_rpy_rate=self._robot.base_rpy_rate,
            motor_vels=self._robot.motor_velocities,
            motor_torques=self._robot.motor_torques,
            #contacts=self._robot.foot_contacts,
            #desired_grf=qp_sol,
            robot_action=action,
            #gait_generator_phase=self._gait_generator.current_phase.copy(),
            #gait_generator_state=self._gait_generator.leg_state,
            #ground_orientation=self._state_estimator.
            #ground_orientation_world_frame,
        )
        self._logs.append(frame)
    def _flush_logging(self):
        """Pickle the buffered frames into a timestamped file in the log dir."""
        if not os.path.exists(self._logdir):
            os.makedirs(self._logdir)
        filename = 'log_{}.pkl'.format(
            datetime.now().strftime('%Y_%m_%d_%H_%M_%S'))
        pickle.dump(self._logs, open(os.path.join(self._logdir, filename),
                                     'wb'))
        logging.info("Data logged to: {}".format(
            os.path.join(self._logdir, filename)))
|
AWSBucketDump.py | #!/usr/bin/env python
# AWSBucketDump is a tool to quickly enumerate AWS S3 buckets to look for loot.
# It's similar to a subdomain bruteforcer but is made specifically to S3
# buckets and also has some extra features that allow you to grep for
# delicous files as well as download interesting files if you're not
# afraid to quickly fill up your hard drive.
# by Jordan Potti
# @ok_bye_now
from argparse import ArgumentParser
import codecs
import requests
import xmltodict
import sys
import os
import shutil
import traceback
from queue import Queue
from threading import Thread, Lock
# Work queues shared by the bucket-enumeration and download worker threads.
bucket_q = Queue()
download_q = Queue()
grep_list = None  # keywords to flag interesting keys; presumably set from CLI args -- confirm
arguments = None  # parsed CLI namespace; presumably set at startup -- confirm
def fetch(url):
    """Request one bucket URL and dispatch on the HTTP status code."""
    print('Fetching ' + url + '...')
    response = requests.get(url)
    status = response.status_code
    if status in (403, 404):
        status403(url)
    if status == 200 and "Content" in response.text:
        status200(response, grep_list, url)
def bucket_worker():
    """Daemon loop: pull bucket URLs off bucket_q and fetch each one."""
    while True:
        url = bucket_q.get()
        try:
            fetch(url)
        except Exception as exc:
            # Keep the worker alive on any per-bucket failure.
            traceback.print_exc(file=sys.stdout)
            print(exc)
        bucket_q.task_done()
def downloadWorker():
    """Daemon loop: pull collectable file URLs off download_q and download them."""
    print('Download worker running...')
    while True:
        target = download_q.get()
        try:
            downloadFile(target)
        except Exception as exc:
            # Keep the worker alive on any per-file failure.
            traceback.print_exc(file=sys.stdout)
            print(exc)
        download_q.task_done()
# Serializes directory creation across download worker threads.
directory_lock = Lock()

def get_directory_lock():
    """Block until the caller holds the directory-creation lock."""
    directory_lock.acquire()

def release_directory_lock():
    """Release the lock taken by get_directory_lock()."""
    directory_lock.release()
def get_make_directory_return_filename_path(url):
    """Mirror the URL's path under arguments.savedir and return the local file path.

    Intermediate directories are created (thread-safely); the returned path
    ends with the final component of *url*, with trailing whitespace stripped.
    """
    global arguments
    bits = url.split('/')
    directory = arguments.savedir
    # bits[0] is the scheme and bits[1] is empty for 'http://...' URLs; the
    # last element is the file name, so only the middle parts become dirs.
    for i in range(2, len(bits) - 1):
        directory = os.path.join(directory, bits[i])
    try:
        # Guard the isdir/makedirs pair so two workers cannot race.
        get_directory_lock()
        if not os.path.isdir(directory):
            os.makedirs(directory)
    except Exception as e:
        traceback.print_exc(file=sys.stdout)
        print(e)
    finally:
        release_directory_lock()
    return os.path.join(directory, bits[-1]).rstrip()
# Guards appends to interesting_file.txt from concurrent workers.
interesting_file_lock = Lock()

def get_interesting_file_lock():
    """Block until the caller holds the interesting-file lock."""
    interesting_file_lock.acquire()

def release_interesting_file_lock():
    """Release the lock taken by get_interesting_file_lock()."""
    interesting_file_lock.release()
def write_interesting_file(filepath):
    """Append one matched object URL to interesting_file.txt, thread-safely."""
    try:
        get_interesting_file_lock()
        with open('interesting_file.txt', 'ab+') as out:
            out.write(filepath.encode('utf-8'))
            out.write(b'\n')
    finally:
        release_interesting_file_lock()
def downloadFile(filename):
    """Download *filename* to the mirrored local path, honoring arguments.maxsize.

    NOTE(review): this function was commented out in the original source but is
    still called by downloadWorker(), so every download raised NameError; it is
    reinstated here with its original behavior. Files without a Content-Length
    header are skipped, matching the original logic.
    """
    global arguments
    print('Downloading {}'.format(filename) + '...')
    local_path = get_make_directory_return_filename_path(filename)
    local_filename = (filename.split('/')[-1]).rstrip()
    print('local {}'.format(local_path))
    if local_filename == "":
        print("Directory..\n")
    else:
        r = requests.get(filename.rstrip(), stream=True)
        if 'Content-Length' in r.headers:
            if int(r.headers['Content-Length']) > arguments.maxsize:
                print("This file is greater than the specified max size... skipping...\n")
            else:
                with open(local_path, 'wb') as f:
                    shutil.copyfileobj(r.raw, f)
        r.close()
def print_banner():
    """Print the tool description shown when no arguments are supplied."""
    banner = '''\nDescription:
AWSBucketDump is a tool to quickly enumerate AWS S3 buckets to look for loot.
It's similar to a subdomain bruteforcer but is made specifically to S3
buckets and also has some extra features that allow you to grep for
delicous files as well as download interesting files if you're not
afraid to quickly fill up your hard drive.
by Jordan Potti
@ok_bye_now'''
    print(banner)
def cleanUp():
    """Report end-of-run cleanup (no files are actually removed)."""
    print("Cleaning up files...")
def status403(line):
    """Report a bucket URL that returned 403/404 (private or nonexistent)."""
    print("{} is not accessible.".format(line.rstrip()))
def queue_up_download(filepath):
    """Record an interesting object URL and hand it to the download workers."""
    download_q.put(filepath)
    print('Collectable: {}'.format(filepath))
    # Persist the hit even if downloads are disabled.
    write_interesting_file(filepath)
def status200(response, grep_list, line):
    """Parse a 200 bucket listing and queue keys matching the grep wordlist.

    With an empty/None *grep_list* every key is queued; otherwise only keys
    containing at least one grep word are queued (each key at most once).
    """
    print("Pilfering " + line.rstrip() + '...')
    objects = xmltodict.parse(response.text)
    keys = []
    try:
        for child in objects['ListBucketResult']['Contents']:
            keys.append(child['Key'])
    except Exception:
        # An empty bucket listing has no 'Contents' element; treat as no keys.
        # (Fix: was a bare `except:`, which also swallowed KeyboardInterrupt.)
        pass
    # Fix: removed the unused `interest` list and `hit` flag.
    for words in keys:
        words = (str(words)).rstrip()
        collectable = line + '/' + words
        if grep_list is not None and len(grep_list) > 0:
            for grep_line in grep_list:
                grep_line = (str(grep_line)).rstrip()
                if grep_line in words:
                    queue_up_download(collectable)
                    break
        else:
            queue_up_download(collectable)
def main():
    """Parse CLI arguments, start worker threads, and queue every host for enumeration."""
    global arguments
    global grep_list
    parser = ArgumentParser()
    parser.add_argument("-D", dest="download", required=False, action="store_true", default=False, help="Download files. This requires significant disk space.")
    parser.add_argument("-d", dest="savedir", required=False, default='', help="If -D, then -d 1 to create save directories for each bucket with results.")
    parser.add_argument("-l", dest="hostlist", required=True, help="")
    parser.add_argument("-g", dest="grepwords", required=False, help="Provide a wordlist to grep for.")
    parser.add_argument("-m", dest="maxsize", type=int, required=False, default=1024, help="Maximum file size to download.")
    parser.add_argument("-t", dest="threads", type=int, required=False, default=1, help="Number of threads.")
    if len(sys.argv) == 1:
        print_banner()
        # Fix: `parser.print_usage` was a bare attribute access (never called),
        # and parser.error() already raises SystemExit, so the explicit
        # sys.exit() afterwards was dead code.
        parser.print_usage()
        parser.error("No arguments given.")
    # output parsed arguments into a usable object
    arguments = parser.parse_args()
    # Fix: the grep wordlist loading was commented out, so -g was silently
    # ignored and every key was always queued.
    if arguments.grepwords:
        with open(arguments.grepwords, "r") as grep_file:
            grep_list = [g.strip() for g in grep_file.readlines()]
    if arguments.download and arguments.savedir:
        print("Downloads enabled (-D), save directories (-d) for each host will be created/used.")
    elif arguments.download and not arguments.savedir:
        print("Downloads enabled (-D), will be saved to current directory.")
    else:
        print("Downloads were not enabled (-D), not saving results locally.")
    # start up bucket workers
    for i in range(0, arguments.threads):
        print('Starting thread...')
        t = Thread(target=bucket_worker)
        t.daemon = True
        t.start()
    # start download workers
    # Fix: was range(1, threads), which started zero downloaders when
    # threads == 1 and made download_q.join() hang forever with -D.
    for i in range(arguments.threads):
        t = Thread(target=downloadWorker)
        t.daemon = True
        t.start()
    with open(arguments.hostlist) as f:
        for line in f:
            bucket = 'http://' + line.rstrip() + '.s3.amazonaws.com'
            print('Queuing {}'.format(bucket) + '...')
            bucket_q.put(bucket)
    bucket_q.join()
    if arguments.download:
        download_q.join()
    cleanUp()
# Script entry point: run only when executed directly, not on import.
if __name__ == "__main__":
    main()
|
test_parser.py | #!/usr/bin/env python
import re
import random
import string
import threading
from tests import unittest, OrderedDict
from jmespath import parser
from jmespath import visitor
from jmespath import ast
from jmespath import exceptions
class TestParser(unittest.TestCase):
    """Parsing and search behaviour for the core jmespath expression forms."""

    def setUp(self):
        self.parser = parser.Parser()

    def assert_parsed_ast(self, expression, expected_ast):
        # Helper: parse *expression* and compare the raw AST structure.
        parsed = self.parser.parse(expression)
        self.assertEqual(parsed.parsed, expected_ast)

    def test_parse_empty_string_raises_exception(self):
        with self.assertRaises(exceptions.EmptyExpressionError):
            self.parser.parse('')

    def test_field(self):
        self.assert_parsed_ast('foo', ast.field('foo'))

    def test_dot_syntax(self):
        self.assert_parsed_ast('foo.bar',
                               ast.subexpression([ast.field('foo'),
                                                  ast.field('bar')]))

    def test_multiple_dots(self):
        parsed = self.parser.parse('foo.bar.baz')
        self.assertEqual(
            parsed.search({'foo': {'bar': {'baz': 'correct'}}}), 'correct')

    def test_index(self):
        parsed = self.parser.parse('foo[1]')
        self.assertEqual(
            parsed.search({'foo': ['zero', 'one', 'two']}),
            'one')

    def test_quoted_subexpression(self):
        self.assert_parsed_ast('"foo"."bar"',
                               ast.subexpression([
                                   ast.field('foo'),
                                   ast.field('bar')]))

    def test_wildcard(self):
        parsed = self.parser.parse('foo[*]')
        self.assertEqual(
            parsed.search({'foo': ['zero', 'one', 'two']}),
            ['zero', 'one', 'two'])

    def test_wildcard_with_children(self):
        parsed = self.parser.parse('foo[*].bar')
        self.assertEqual(
            parsed.search({'foo': [{'bar': 'one'}, {'bar': 'two'}]}),
            ['one', 'two'])

    def test_or_expression(self):
        # '||' returns the first operand that evaluates to a non-null value.
        parsed = self.parser.parse('foo || bar')
        self.assertEqual(parsed.search({'foo': 'foo'}), 'foo')
        self.assertEqual(parsed.search({'bar': 'bar'}), 'bar')
        self.assertEqual(parsed.search({'foo': 'foo', 'bar': 'bar'}), 'foo')
        self.assertEqual(parsed.search({'bad': 'bad'}), None)

    def test_complex_or_expression(self):
        parsed = self.parser.parse('foo.foo || foo.bar')
        self.assertEqual(parsed.search({'foo': {'foo': 'foo'}}), 'foo')
        self.assertEqual(parsed.search({'foo': {'bar': 'bar'}}), 'bar')
        self.assertEqual(parsed.search({'foo': {'baz': 'baz'}}), None)

    def test_or_repr(self):
        self.assert_parsed_ast('foo || bar', ast.or_expression(ast.field('foo'),
                                                               ast.field('bar')))

    def test_unicode_literals_escaped(self):
        self.assert_parsed_ast(r'`"\u2713"`', ast.literal(u'\u2713'))

    def test_multiselect(self):
        parsed = self.parser.parse('foo.{bar: bar,baz: baz}')
        self.assertEqual(
            parsed.search({'foo': {'bar': 'bar', 'baz': 'baz', 'qux': 'qux'}}),
            {'bar': 'bar', 'baz': 'baz'})

    def test_multiselect_subexpressions(self):
        parsed = self.parser.parse('foo.{"bar.baz": bar.baz, qux: qux}')
        self.assertEqual(
            parsed.search({'foo': {'bar': {'baz': 'CORRECT'}, 'qux': 'qux'}}),
            {'bar.baz': 'CORRECT', 'qux': 'qux'})

    def test_multiselect_with_all_quoted_keys(self):
        parsed = self.parser.parse('foo.{"bar": bar.baz, "qux": qux}')
        result = parsed.search({'foo': {'bar': {'baz': 'CORRECT'}, 'qux': 'qux'}})
        self.assertEqual(result, {"bar": "CORRECT", "qux": "qux"})

    def test_multiselect_with_key_and_star(self):
        # '*' inside a multiselect merges in all remaining keys.
        parsed = self.parser.parse('foo.{"foo1": "foo", "bar1": bar.baz, *}')
        result = parsed.search({'foo': {'foo': 'foo', 'foo1': 'foo1', 'bar': {'baz': 'CORRECT'}, 'qux': 'qux', 'uxp': {'uxp': 'aaa'}}})
        self.assertEqual(result, {'bar': {'baz': 'CORRECT'}, 'bar1': 'CORRECT', 'foo1': 'foo', 'qux': 'qux', 'uxp': {'uxp': 'aaa'}})

    def test_multiselect_with_only_star(self):
        parsed = self.parser.parse('foo.{*}')
        result = parsed.search({'foo': {'bar': {'baz': 'CORRECT'}, 'qux': 'qux'}})
        self.assertEqual(result, {'bar': {'baz': 'CORRECT'}, 'qux': 'qux'})

    def test_function_call_with_and_statement(self):
        self.assert_parsed_ast(
            'f(@ && @)',
            {'children': [{'children': [{'children': [], 'type': 'current'},
                                        {'children': [], 'type': 'current'}],
                           'type': 'and_expression'}],
             'type': 'function_expression',
             'value': 'f'})
class TestErrorMessages(unittest.TestCase):
    """Exact error-message formatting for malformed expressions."""

    def setUp(self):
        self.parser = parser.Parser()

    def assert_error_message(self, expression, error_message,
                             exception=exceptions.ParseError):
        # Helper: the expected exception type must be raised AND carry exactly
        # *error_message*; any other exception (or none) fails the test.
        try:
            self.parser.parse(expression)
        except exception as e:
            self.assertEqual(error_message, str(e))
            return
        except Exception as e:
            self.fail(
                "Unexpected error raised (%s: %s) for bad expression: %s" %
                (e.__class__.__name__, e, expression))
        else:
            self.fail(
                "ParseError not raised for bad expression: %s" % expression)

    def test_bad_parse(self):
        with self.assertRaises(exceptions.ParseError):
            self.parser.parse('foo]baz')

    def test_bad_parse_error_message(self):
        error_message = (
            'Unexpected token: ]: Parse error at column 3, '
            'token "]" (RBRACKET), for expression:\n'
            '"foo]baz"\n'
            '    ^')
        self.assert_error_message('foo]baz', error_message)

    def test_bad_parse_error_message_with_multiselect(self):
        error_message = (
            'Invalid jmespath expression: Incomplete expression:\n'
            '"foo.{bar: baz,bar: bar"\n'
            ' ^')
        self.assert_error_message('foo.{bar: baz,bar: bar', error_message)

    def test_incomplete_expression_with_missing_paren(self):
        error_message = (
            'Invalid jmespath expression: Incomplete expression:\n'
            '"length(@,"\n'
            ' ^')
        self.assert_error_message('length(@,', error_message)

    def test_bad_lexer_values(self):
        error_message = (
            'Bad jmespath expression: '
            'Unclosed " delimiter:\n'
            'foo."bar\n'
            '    ^')
        self.assert_error_message('foo."bar', error_message,
                                  exception=exceptions.LexerError)

    def test_bad_unicode_string(self):
        # This error message is straight from the JSON parser
        # and pypy has a slightly different error message,
        # so we're not using assert_error_message.
        # NOTE(review): assertRaisesRegexp is a deprecated alias in Python 3
        # (removed eventually in favor of assertRaisesRegex); kept here,
        # presumably for Python 2 compatibility — confirm target version.
        error_message = re.compile(
            r'Bad jmespath expression: '
            r'Invalid \\uXXXX escape.*\\uAZ12', re.DOTALL)
        with self.assertRaisesRegexp(exceptions.LexerError, error_message):
            self.parser.parse(r'"\uAZ12"')
class TestParserWildcards(unittest.TestCase):
    """Wildcard projections ([*], *), mixed with indices and multiselects."""

    def setUp(self):
        self.parser = parser.Parser()
        self.data = {
            'foo': [
                {'bar': [{'baz': 'one'}, {'baz': 'two'}]},
                {'bar': [{'baz': 'three'}, {'baz': 'four'}, {'baz': 'five'}]},
            ]
        }

    def test_multiple_index_wildcards(self):
        parsed = self.parser.parse('foo[*].bar[*].baz')
        self.assertEqual(parsed.search(self.data),
                         [['one', 'two'], ['three', 'four', 'five']])

    def test_wildcard_mix_with_indices(self):
        parsed = self.parser.parse('foo[*].bar[0].baz')
        self.assertEqual(parsed.search(self.data),
                         ['one', 'three'])

    def test_wildcard_mix_last(self):
        parsed = self.parser.parse('foo[0].bar[*].baz')
        self.assertEqual(parsed.search(self.data),
                         ['one', 'two'])

    def test_indices_out_of_bounds(self):
        # Out-of-range indices are silently dropped from the projection.
        parsed = self.parser.parse('foo[*].bar[2].baz')
        self.assertEqual(parsed.search(self.data),
                         ['five'])

    def test_root_indices(self):
        parsed = self.parser.parse('[0]')
        self.assertEqual(parsed.search(['one', 'two']), 'one')

    def test_root_wildcard(self):
        parsed = self.parser.parse('*.foo')
        data = {'top1': {'foo': 'bar'}, 'top2': {'foo': 'baz'},
                'top3': {'notfoo': 'notfoo'}}
        # Sorted is being used because the order of the keys are not
        # required to be in any specific order.
        self.assertEqual(sorted(parsed.search(data)), sorted(['bar', 'baz']))
        self.assertEqual(sorted(self.parser.parse('*.notfoo').search(data)),
                         sorted(['notfoo']))

    def test_only_wildcard(self):
        parsed = self.parser.parse('*')
        data = {'foo': 'a', 'bar': 'b', 'baz': 'c'}
        self.assertEqual(sorted(parsed.search(data)), sorted(['a', 'b', 'c']))

    def test_escape_sequences(self):
        # Quoted identifiers honour JSON-style escape sequences.
        self.assertEqual(self.parser.parse(r'"foo\tbar"').search(
            {'foo\tbar': 'baz'}), 'baz')
        self.assertEqual(self.parser.parse(r'"foo\nbar"').search(
            {'foo\nbar': 'baz'}), 'baz')
        self.assertEqual(self.parser.parse(r'"foo\bbar"').search(
            {'foo\bbar': 'baz'}), 'baz')
        self.assertEqual(self.parser.parse(r'"foo\fbar"').search(
            {'foo\fbar': 'baz'}), 'baz')
        self.assertEqual(self.parser.parse(r'"foo\rbar"').search(
            {'foo\rbar': 'baz'}), 'baz')

    def test_consecutive_escape_sequences(self):
        parsed = self.parser.parse(r'"foo\\nbar"')
        self.assertEqual(parsed.search({'foo\\nbar': 'baz'}), 'baz')
        parsed = self.parser.parse(r'"foo\n\t\rbar"')
        self.assertEqual(parsed.search({'foo\n\t\rbar': 'baz'}), 'baz')

    def test_escape_sequence_at_end_of_string_not_allowed(self):
        with self.assertRaises(ValueError):
            self.parser.parse('foobar\\')

    def test_wildcard_with_multiselect(self):
        parsed = self.parser.parse('foo.*.{a: a, b: b}')
        data = {
            'foo': {
                'one': {
                    'a': {'c': 'CORRECT', 'd': 'other'},
                    'b': {'c': 'ALSOCORRECT', 'd': 'other'},
                },
                'two': {
                    'a': {'c': 'CORRECT', 'd': 'other'},
                    'c': {'c': 'WRONG', 'd': 'other'},
                },
            }
        }
        match = parsed.search(data)
        self.assertEqual(len(match), 2)
        self.assertIn('a', match[0])
        self.assertIn('b', match[0])
        self.assertIn('a', match[1])
        self.assertIn('b', match[1])
class TestMergedLists(unittest.TestCase):
    """Flatten operator [] over nested lists."""

    def setUp(self):
        self.parser = parser.Parser()
        # Three levels of nesting; [] flattens exactly one level.
        self.data = {
            "foo": [
                [["one", "two"], ["three", "four"]],
                [["five", "six"], ["seven", "eight"]],
                [["nine"], ["ten"]]
            ]
        }

    def test_merge_with_indices(self):
        parsed = self.parser.parse('foo[][0]')
        match = parsed.search(self.data)
        self.assertEqual(match, ["one", "three", "five", "seven",
                                 "nine", "ten"])

    def test_trailing_merged_operator(self):
        parsed = self.parser.parse('foo[]')
        match = parsed.search(self.data)
        self.assertEqual(
            match,
            [["one", "two"], ["three", "four"],
             ["five", "six"], ["seven", "eight"],
             ["nine"], ["ten"]])
class TestParserCaching(unittest.TestCase):
    """Behaviour of the parser's internal compiled-expression cache."""

    def test_compile_lots_of_expressions(self):
        # We have to be careful here because this is an implementation detail
        # that should be abstracted from the user, but we need to make sure we
        # exercise the code and that it doesn't blow up.
        p = parser.Parser()
        compiled = []
        compiled2 = []
        for i in range(parser.Parser._MAX_SIZE + 1):
            compiled.append(p.parse('foo%s' % i))
        # Rerun the test and half of these entries should be from the
        # cache but they should still be equal to compiled.
        for i in range(parser.Parser._MAX_SIZE + 1):
            compiled2.append(p.parse('foo%s' % i))
        self.assertEqual(len(compiled), len(compiled2))
        self.assertEqual(
            [expr.parsed for expr in compiled],
            [expr.parsed for expr in compiled2])

    def test_cache_purge(self):
        # Purging must not invalidate previously returned parse results.
        p = parser.Parser()
        first = p.parse('foo')
        cached = p.parse('foo')
        p.purge()
        second = p.parse('foo')
        self.assertEqual(first.parsed,
                         second.parsed)
        self.assertEqual(first.parsed,
                         cached.parsed)

    def test_thread_safety_of_cache(self):
        # Hammer the shared cache from 10 threads; any exception is a failure.
        errors = []
        expressions = [
            ''.join(random.choice(string.ascii_letters) for _ in range(3))
            for _ in range(2000)
        ]
        def worker():
            p = parser.Parser()
            for expression in expressions:
                try:
                    p.parse(expression)
                except Exception as e:
                    errors.append(e)
        threads = []
        for i in range(10):
            threads.append(threading.Thread(target=worker))
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
        self.assertEqual(errors, [])
class TestParserAddsExpressionAttribute(unittest.TestCase):
    """The parsed result must expose the original expression text."""

    def test_expression_available_from_parser(self):
        parsed = parser.Parser().parse('foo.bar')
        self.assertEqual(parsed.expression, 'foo.bar')
class TestParsedResultAddsOptions(unittest.TestCase):
    """search() must honour a custom dict class supplied via Options."""

    def test_can_have_ordered_dict(self):
        p = parser.Parser()
        parsed = p.parse('{a: a, b: b, c: c}')
        options = visitor.Options(dict_cls=OrderedDict)
        result = parsed.search(
            {"c": "c", "b": "b", "a": "a"}, options=options)
        # The order should be 'a', 'b' because we're using an
        # OrderedDict
        self.assertEqual(list(result), ['a', 'b', 'c'])
class TestRenderGraphvizFile(unittest.TestCase):
    """Exact Graphviz dot output for the AST debug renderer."""

    def test_dot_file_rendered(self):
        p = parser.Parser()
        result = p.parse('foo')
        dot_contents = result._render_dot_file()
        self.assertEqual(dot_contents,
                         'digraph AST {\nfield1 [label="field(foo)"]\n}')

    def test_dot_file_subexpr(self):
        p = parser.Parser()
        result = p.parse('foo.bar')
        dot_contents = result._render_dot_file()
        self.assertEqual(
            dot_contents,
            'digraph AST {\n'
            'subexpression1 [label="subexpression()"]\n'
            '  subexpression1 -> field2\n'
            'field2 [label="field(foo)"]\n'
            '  subexpression1 -> field3\n'
            'field3 [label="field(bar)"]\n}')
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
Opa.py |
import threading
import os, time
import json
import traceback
from base.urlop import UrlRequestOp
from core import login, course
from handler.struct.user import UserData
from handler import Pool
from handler.Error import LoginBasicException, UrlOpTimeOutError, PullerBasicException
import gui
import llogger
NOTICE_URL = 'http://jxfw.gdut.edu.cn/notice!getNotice.action?_=%d'
def get_cdatetime():
    """Return the current local time as an asctime-style string."""
    # time.asctime() with no argument formats the current local time,
    # identical to time.asctime(time.localtime()).
    return time.asctime()
class UserOp(UrlRequestOp, object):
    """Drives one user's login / course-taking workflow.

    At most one worker thread at a time owns this op; ownership assignment is
    guarded by assign_thread_lock. A threading.Timer periodically polls the
    notice endpoint while the session is ready.

    NOTE(review): Thread.isAlive() was removed in Python 3.9; these calls
    should be migrated to Thread.is_alive() for modern interpreters.
    """

    def __init__(self, parent, account, password, keys):
        UrlRequestOp.__init__(self)
        self.parent = parent
        self.user = UserData(account, password, keys, self)
        self.loginer = login.Login(self.user)
        self.taker = course.Taker(self.user)
        self._thread = None               # thread currently owning this op
        self._get_notice_thread = None    # Timer for periodic notice polling
        self.assign_thread_lock = threading.Lock()

    def login(self):
        """Run the login flow unless another live thread already owns this op."""
        with self.assign_thread_lock:
            if self._thread and self._thread.isAlive() and self._thread != threading.current_thread():
                return
            if self.user.status == Pool.TAKING or self.user.status == Pool.TIMING_TAKE:
                return
            self._thread = threading.current_thread()
        # reInit/run happen outside the lock: login() may be re-entered from
        # takeCourse() on this same thread, and the Lock is not reentrant.
        self.reInit()
        # print(self._thread)
        self.loginer.run()

    def takeCourse(self):
        """Run the course-taking flow, logging in first if not ready."""
        with self.assign_thread_lock:
            if self._thread and self._thread.isAlive() and self._thread != threading.current_thread():
                return
            if self.user.status == Pool.DONE:
                return
            self._thread = threading.current_thread()
        # print(self._thread)
        if self.user.ready:
            self.taker.run()
        else:
            if self.getStatus() == Pool.DONE:
                return
            self.reInit()
            self.login()
            self.join()
            if self.getStatus() != Pool.FAILURE:
                self.takeCourse()

    def saveCookie(self):
        """Persist the login session cookies to cookies/<account>.txt."""
        self.loginer.cookiejar.save('cookies/%s.txt' % self.user.account, ignore_discard=True, ignore_expires=True)

    def loadCookie(self):
        """Load saved cookies if present; returns True on success (unverified)."""
        if os.path.exists('cookies/%s.txt' % self.user.account):
            self.setNewCookieJar()
            self.cookiejar.load('cookies/%s.txt' % self.user.account, ignore_discard=True, ignore_expires=True)
            self.buildOpener(self.cookiejar, self.proxy)
            self.loginer.loadUrlop(self)
            self.getReady()
            llogger.ok(self.user.account, 'ๅ ่ฝฝCookieๆๅใ[ๆช้ช่ฏ]')
            return True
        else:
            return False

    def verify(self):
        """Re-pull selected courses and mark the op done or failed."""
        self.user.status = Pool.VERIFYING
        self.taker.puller.pullSelected()
        targets = self.taker.getTargets()
        if targets:
            # Any remaining target means the take did not fully succeed.
            tar_str = ''
            for i in targets:
                tar_str += '[%s]' % i.__str__()
            llogger.error(self.user.account, 'ไปฅไธ็ฎๆ ๆชๆๅ: {%s}' % tar_str)
            self.fail()
        else:
            self.done()

    def getStatus(self):
        """Return the user's current Pool status constant."""
        return self.user.status

    def onTimingtake(self):
        """Mark the op as waiting for a scheduled (timed) take."""
        self.user.status = Pool.TIMING_TAKE

    def fail(self):
        """Mark the op failed and stop the notice-poll timer."""
        self.user.status = Pool.FAILURE
        self.cancelGetNotice()
        llogger.error(self.user.account, 'ไปปๅกๅคฑ่ดฅ๏ผๅๆญขๅทฅไฝใๆๅ้ๅ[%d]้กน' % self.user.success_counter)

    def done(self):
        """Mark the op completed and stop the notice-poll timer."""
        self.user.status = Pool.DONE
        self.cancelGetNotice()
        llogger.ok(self.user.account, 'ๅฎๆไปปๅก๏ผๅๆญขๅทฅไฝใๆๅ้ๅ[%d]้กน' % self.user.success_counter)

    def getReady(self):
        """Mark the session ready, persist cookies, and start notice polling."""
        self.user.status = Pool.READY
        self.user.ready = True
        self.saveCookie()
        self.loadUrlop(self.loginer)
        self.taker.loadUrlop(self.loginer)
        self.timingGetNotice()

    def reInit(self):
        """Reset to the unready state and stop notice polling."""
        self.user.status = Pool.UNREADY
        self.user.ready = False
        self.cancelGetNotice()

    def join(self, exec_foo=None, args=()):
        """Wait for the owning thread to finish; optionally run *exec_foo* after.

        With *exec_foo*, the wait happens on a new background thread.
        """
        if exec_foo:
            threading.Thread(target=self.__join__, args=(exec_foo, args)).start()
        else:
            self.__join__(exec_foo, args)

    def __join__(self, exec_foo=None, args=()):
        # Poll until no other live thread owns this op.
        while True:
            with self.assign_thread_lock:
                if not self._thread or not self._thread.isAlive() or self._thread == threading.current_thread():
                    break
            time.sleep(0.01)
        if exec_foo:
            exec_foo(*args)

    def timingGetNotice(self):
        """Schedule the next notice poll if one is not already pending."""
        if not self._get_notice_thread or not self._get_notice_thread.isAlive():
            llogger.normal(self.user.account, '[%ds]ๅๆๅ้็ฅใ' % self.user.timer_refresh)
            self._get_notice_thread = threading.Timer(self.user.timer_refresh, self.getNotice, args=(False,))
            self._get_notice_thread.start()

    def cancelGetNotice(self):
        """Cancel the pending notice Timer and wait until it is no longer live."""
        while True:
            if self._get_notice_thread:
                self._get_notice_thread.cancel()
                if not self._get_notice_thread or not self._get_notice_thread.isAlive() \
                        or self._get_notice_thread == threading.current_thread():
                    break
            else:
                break
            time.sleep(0.01)

    def getNotice(self, once=True):
        """Fetch notices once; when *once* is False, reschedule the next poll."""
        try:
            res = self.__getNotice__()
            llogger.ok(self.user.account, 'ๆๅ้็ฅๆๅใ', res)
            self.user.status = Pool.READY
        except LoginBasicException as e:
            traceback.print_exc()
        except PullerBasicException as e:
            traceback.print_exc()
        except UrlOpTimeOutError as e:
            traceback.print_exc()
            if not once:
                # self.cancelGetNotice()
                self._get_notice_thread = None
                self.timingGetNotice()
        else:
            if not once:
                # self.cancelGetNotice()
                self._get_notice_thread = None
                self.timingGetNotice()

    def __getNotice__(self):
        """Request the notice endpoint and return the decoded JSON payload."""
        raw, res = self.request(branch_num=0, method='GET',
                                url=NOTICE_URL % (time.time() * 1000), max_retry=3)
        text = bytes.decode(raw) if isinstance(raw, bytes) else raw
        try:
            res_json = json.loads(text)
        except json.JSONDecodeError as e:
            llogger.warning(self.user.account, 'ๆๅ้็ฅๅคฑ่ดฅใ', text)
            raise PullerBasicException(self.user, text, e)
        return res_json

    def set_MemberView_Item(self):
        """Refresh this user's row (status, readiness, timestamp) in the GUI list."""
        cur_status = self.getStatus()
        index = self.parent.getUserIndex(self.user)
        gui.frame_main.listctrl_member.SetItem(index, 2, Pool.status2str(cur_status))
        gui.frame_main.listctrl_member.SetItem(index, 3, 'โ' if cur_status == Pool.READY else 'ร')
        gui.frame_main.listctrl_member.SetItem(index, 6, get_cdatetime())
test_set_jy.py | import unittest
from test import test_support, test_set
import pickle
import threading
from java.io import (ByteArrayInputStream, ByteArrayOutputStream,
ObjectInputStream, ObjectOutputStream)
from java.util import Random, HashSet, LinkedHashSet
from javatests import PySetInJavaTest
class SetTestCase(unittest.TestCase):
    """Jython-specific behaviours of the built-in set type (Python 2 syntax)."""

    def test_binops(self):
        # set's binary operators must defer to the right operand's reflected
        # methods when the right operand is not a set.
        class Foo(object):
            __rsub__ = lambda self, other: 'rsub'
            __ror__ = lambda self, other: 'ror'
            __rand__ = lambda self, other: 'rand'
            __rxor__ = lambda self, other: 'rxor'
        foo = Foo()
        s = set()
        self.assertEqual(s - foo, 'rsub')
        self.assertEqual(s | foo, 'ror')
        self.assertEqual(s & foo, 'rand')
        self.assertEqual(s ^ foo, 'rxor')

    def test_pop_race(self):
        # issue 1854
        nthreads = 200
        # the race might not happen the first time so we try a few just in case
        for i in xrange(4):
            s = set(range(200))
            threads = [threading.Thread(target=s.pop) for i in range(nthreads)]
            for t in threads: t.start()
            for t in threads: t.join()
            self.assertEqual(len(s), 0)

    def test_big_set(self):
        """Verify that fairly large collection literals of primitives can be constructed."""
        # use \n to separate to avoid parser problems
        s = eval("{" + ",\n".join((str(x) for x in xrange(64000))) +"}")
        self.assertEqual(len(s), 64000)
        self.assertEqual(sum(s), 2047968000)
class SetInJavaTestCase(unittest.TestCase):
    """Tests for derived dict behaviour"""

    def test_using_PySet_as_Java_Set(self):
        PySetInJavaTest.testPySetAsJavaSet()

    def test_accessing_items_added_in_java(self):
        s = PySetInJavaTest.createPySetContainingJavaObjects()
        for v in s:
            self.assert_(v in s)
            if isinstance(v, unicode):
                self.assertEquals("value", v)
            else:
                # Should be a java.util.Random; ensure we can call it
                v.nextInt()

    def test_java_accessing_items_added_in_python(self):
        # Test a type that should be coerced into a Java type, a Java
        # instance that should be wrapped, and a Python instance that
        # should pass through as itself with str, Random and tuple
        # respectively.
        s = set(["value", Random(), ("tuple", "of", "stuff")])
        PySetInJavaTest.accessAndRemovePySetItems(s)
        # Check that the Java removal affected the underlying set
        self.assertEquals(0, len(s))

    def test_serialization(self):
        # A Python set must round-trip through Java object serialization.
        s = set(range(5, 10))
        output = ByteArrayOutputStream()
        serializer = ObjectOutputStream(output)
        serializer.writeObject(s)
        serializer.close()
        input = ByteArrayInputStream(output.toByteArray())
        unserializer = ObjectInputStream(input)
        self.assertEqual(s, unserializer.readObject())
class TestJavaSet(test_set.TestSet):
    """Run the stdlib set test suite against a Java HashSet-backed type."""
    thetype = HashSet

    def test_init(self):
        # Instances of Java types cannot be re-initialized
        pass

    def test_cyclical_repr(self):
        # Not applicable to Java-backed sets.
        pass

    def test_cyclical_print(self):
        # Not applicable to Java-backed sets.
        pass

    def test_pickling(self):
        # Pickle round-trip at every supported protocol version.
        for i in range(pickle.HIGHEST_PROTOCOL + 1):
            p = pickle.dumps(self.s, i)
            dup = pickle.loads(p)
            self.assertEqual(self.s, dup, "%s != %s" % (self.s, dup))
# Same suite, explicitly pinned to java.util.HashSet.
class TestJavaHashSet(TestJavaSet):
    thetype = HashSet
# Same suite against java.util.LinkedHashSet (insertion-ordered variant).
class TestJavaLinkedHashSet(TestJavaSet):
    thetype = LinkedHashSet
def test_main():
    """Run every test case defined in this module via the regrtest helper."""
    test_support.run_unittest(
        SetTestCase,
        SetInJavaTestCase,
        TestJavaHashSet,
        TestJavaLinkedHashSet,
    )
# Allow running this test module directly.
if __name__ == '__main__':
    test_main()
|
server_runner.py | import logging
import threading
import time
import array
import mlperf_loadgen as lg
import numpy as np
from ..constants import QUERY_COUNT, NANO_SEC, MILLI_SEC
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
class ServerRunner():
    """MLPerf loadgen Server-scenario harness around an ONNX Runtime session.

    Queries arriving from loadgen are appended to a shared queue; a pool of
    worker threads drains the queue in batches of at most
    dynamic_batching_size and runs inference, reporting completions back to
    loadgen. Queue access is coordinated with a single Condition variable.
    """

    def __init__(self, session, ds, optimization_config, onnx_output_names):
        self.session = session
        self.threads = optimization_config.threads_num
        self.max_batchsize = optimization_config.dynamic_batching_size
        self.ds = ds
        self.onnx_output_names = onnx_output_names
        self.guess = None                    # measured secs/query from warmup()
        self.cv = threading.Condition()      # guards q_idx/q_query_id/done
        self.done = False
        self.q_idx = []                      # pending sample indices
        self.q_query_id = []                 # loadgen ids parallel to q_idx
        self.workers = []
        self.settings = lg.TestSettings()
        self.settings.scenario = lg.TestScenario.Server
        self.settings.mode = lg.TestMode.FindPeakPerformance
        log_output_settings = lg.LogOutputSettings()
        log_output_settings.outdir = optimization_config.result_path
        log_output_settings.copy_summary_to_stdout = False
        self.log_settings = lg.LogSettings()
        self.log_settings.enable_trace = False
        self.log_settings.log_output = log_output_settings
        self.sut = lg.ConstructSUT(self.issue_queries, self.flush_queries, self.process_latencies)
        self.qsl = lg.ConstructQSL(QUERY_COUNT, QUERY_COUNT, ds.load_query_samples, ds.unload_query_samples)
        self.settings.server_coalesce_queries = True
        self.settings.server_target_latency_ns = int(optimization_config.max_latency_ms * NANO_SEC / MILLI_SEC)
        self.settings.server_target_latency_percentile = optimization_config.max_latency_percentile
        self.settings.min_duration_ms = optimization_config.min_duration_sec * MILLI_SEC
        # start all threads
        for _ in range(self.threads):
            worker = threading.Thread(target=self.handle_tasks, args=(self.cv,))
            worker.daemon = True
            self.workers.append(worker)
            worker.start()
        time.sleep(1)

    def issue_queries(self, query_samples):
        # loadgen callback: enqueue incoming samples for the workers.
        self.enqueue(query_samples)

    def flush_queries(self):
        # loadgen callback: nothing buffered outside the worker queue.
        pass

    def process_latencies(self, latencies_ms):
        # loadgen callback: latencies are summarized by loadgen itself.
        pass

    def handle_tasks(self, cv):
        """Worker thread."""
        max_batchsize = self.max_batchsize
        # NOTE(review): `stats` counts batch-size occurrences but is never
        # read or reported anywhere.
        stats = [0] * (max_batchsize + 1)
        while True:
            with cv:
                # wait for something to do
                while len(self.q_idx) == 0 and not self.done:
                    cv.wait()
                idx = self.q_idx
                query_id = self.q_query_id
                if len(idx) > max_batchsize:
                    # only take max_batchsize
                    self.q_idx = idx[max_batchsize:]
                    self.q_query_id = query_id[max_batchsize:]
                    idx = idx[:max_batchsize]
                    query_id = query_id[:max_batchsize]
                    # wake up somebody to take care of it
                    cv.notify()
                else:
                    # swap the entire queue
                    self.q_idx = []
                    self.q_query_id = []
            if self.done:
                # parent wants us to exit
                break
            # run inference, lock is released
            feed = self.ds.make_batch(idx)
            self.run_one_item((query_id, idx, feed))
            # count stats
            stats[len(idx)] += 1

    def run_one_item(self, qitem):
        # run the prediction
        processed_results = []
        query_id, content_id, feed = qitem
        results = self.session.run(self.onnx_output_names, feed)
        # NOTE(review): `results` is discarded — `processed_results` is
        # rebound to empty lists, so each loadgen response carries an empty
        # payload. This measures throughput only, not output correctness.
        processed_results = [[]] * len(query_id)
        # Keep the arrays alive until QuerySamplesComplete returns, since
        # loadgen reads the buffers by address.
        response_array_refs = []
        response = []
        for idx, qid in enumerate(query_id):
            response_array = array.array("B", np.array(processed_results[idx], np.float32).tobytes())
            response_array_refs.append(response_array)
            bi = response_array.buffer_info()
            response.append(lg.QuerySampleResponse(qid, bi[0], bi[1]))
        lg.QuerySamplesComplete(response)

    def enqueue(self, query_samples):
        """Append loadgen samples to the shared queue and wake one worker."""
        idx = [q.index for q in query_samples]
        query_id = [q.id for q in query_samples]
        with self.cv:
            scheduled = len(self.q_idx)
            # add new items to the queue
            self.q_idx.extend(idx)
            self.q_query_id.extend(query_id)
            # notify only if queue was empty
            if scheduled == 0:
                self.cv.notify()

    def finish(self):
        # exit all threads
        self.done = True
        for worker in self.workers:
            with self.cv:
                self.cv.notify()
        for worker in self.workers:
            worker.join()

    def start_run(self):
        """Kick off the loadgen test with the configured SUT/QSL/settings."""
        lg.StartTestWithLogSettings(self.sut, self.qsl, self.settings, self.log_settings)

    def warmup(self, warmup_num):
        """Time *warmup_num* single-sample inferences and derive a target QPS."""
        self.ds.load_query_samples([0])
        start = time.time()
        for _ in range(warmup_num):
            feed = self.ds.make_batch([0])
            _ = self.session.run(self.onnx_output_names, feed)
        self.guess = (time.time() - start) / warmup_num
        # Aim for 1/3 of the measured single-stream rate as the server target.
        self.settings.server_target_qps = int(1 / self.guess / 3)
        self.ds.unload_query_samples(None)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.