gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
from __future__ import print_function, absolute_import
import ctypes
from ctypes import *
from ctypes.util import find_library
import gc
import subprocess
import sys
import unittest
import platform
import locale
from llvmlite import six
from llvmlite import binding as llvm
from llvmlite.binding import ffi
from . import TestCase
def no_de_locale():
    """Return True if the 'de_DE' locale cannot be set on this system.

    Used as a skip condition for locale-sensitive tests.  The current
    locale is always restored before returning.
    """
    saved = locale.setlocale(locale.LC_ALL)
    try:
        try:
            locale.setlocale(locale.LC_ALL, 'de_DE')
        except locale.Error:
            return True
        return False
    finally:
        locale.setlocale(locale.LC_ALL, saved)
# LLVM IR snippets used as test fixtures.  Each one is a str.format()
# template: "{triple}" is substituted with the host's default triple and
# doubled braces escape literal braces in the IR.

# Module with four globals (i32, i8, float, struct) and a two-step @sum.
asm_sum = r"""
; ModuleID = '<string>'
target triple = "{triple}"
@glob = global i32 0
@glob_b = global i8 0
@glob_f = global float 1.5
@glob_struct = global {{ i64, [2 x i64]}} {{i64 0, [2 x i64] [i64 0, i64 0]}}
define i32 @sum(i32 %.1, i32 %.2) {{
%.3 = add i32 %.1, %.2
%.4 = add i32 0, %.3
ret i32 %.4
}}
"""

# Minimal @sum, used to provoke duplicate-symbol link errors against asm_sum.
asm_sum2 = r"""
; ModuleID = '<string>'
target triple = "{triple}"
define i32 @sum(i32 %.1, i32 %.2) {{
%.3 = add i32 %.1, %.2
ret i32 %.3
}}
"""

# A distinct function so modules can be linked without symbol clashes.
asm_mul = r"""
; ModuleID = '<string>'
target triple = "{triple}"
define i32 @mul(i32 %.1, i32 %.2) {{
%.3 = mul i32 %.1, %.2
ret i32 %.3
}}
"""

# `fadd` used on integer inputs: rejected by the IR parser.
asm_parse_error = r"""
; ModuleID = '<string>'
target triple = "{triple}"
define i32 @sum(i32 %.1, i32 %.2) {{
%.3 = fadd i32 %.1, %.2
ret i32 %.3
}}
"""

# "%.bug" definition references itself: parses, but fails verification.
asm_verification_fail = r"""
; ModuleID = '<string>'
target triple = "{triple}"
define void @sum() {{
%.bug = add i32 1, %.bug
ret void
}}
"""

# A declaration only (no body), for is_declaration checks.
asm_sum_declare = r"""
; ModuleID = '<string>'
target triple = "{triple}"
declare i32 @sum(i32 %.1, i32 %.2)
"""

# Contains a double constant whose textual form is locale-sensitive
# (llvmlite issue #80).
asm_double_locale = r"""
; ModuleID = '<string>'
target triple = "{triple}"
define void @foo() {{
%const = fadd double 0.0, 3.14
ret void
}}
"""
class BaseTest(TestCase):
    """Common setup/teardown and fixture helpers for llvmlite binding tests."""

    def setUp(self):
        # LLVM initialization is idempotent, so doing it per-test is safe.
        llvm.initialize()
        llvm.initialize_native_target()
        llvm.initialize_native_asmprinter()
        # Snapshot gc.garbage so tearDown can detect newly uncollectable
        # objects created by this test only.
        gc.collect()
        self.old_garbage = gc.garbage[:]
        gc.garbage[:] = []

    def tearDown(self):
        # Test that no uncollectable objects were created
        # (llvmlite objects have a __del__ so a reference cycle could
        # create some).
        gc.collect()
        self.assertEqual(gc.garbage, [])
        # This will probably put any existing garbage in gc.garbage again
        del self.old_garbage

    def module(self, asm=asm_sum):
        """Parse one of the IR fixture templates into a fresh ModuleRef."""
        asm = asm.format(triple=llvm.get_default_triple())
        mod = llvm.parse_assembly(asm)
        return mod

    def glob(self, name='glob', mod=None):
        """Return a global variable (default "glob") from `mod` or a new module."""
        if mod is None:
            mod = self.module()
        return mod.get_global_variable(name)

    def target_machine(self):
        """Create a TargetMachine for the host's default triple."""
        target = llvm.Target.from_default_triple()
        return target.create_target_machine()
class TestMisc(BaseTest):
    """
    Test miscellaneous functions in llvm.binding.
    """

    def test_parse_assembly(self):
        # Valid IR parses without raising.
        self.module(asm_sum)

    def test_parse_assembly_error(self):
        # Invalid IR raises RuntimeError with a descriptive message.
        with self.assertRaises(RuntimeError) as cm:
            self.module(asm_parse_error)
        s = str(cm.exception)
        self.assertIn("parsing error", s)
        self.assertIn("invalid operand type", s)

    def test_dylib_symbols(self):
        # Re-adding a symbol overwrites its address; unknown symbols
        # resolve to None.
        llvm.add_symbol("__xyzzy", 1234)
        llvm.add_symbol("__xyzzy", 5678)
        addr = llvm.address_of_symbol("__xyzzy")
        self.assertEqual(addr, 5678)
        addr = llvm.address_of_symbol("__foobar")
        self.assertIs(addr, None)

    def test_get_default_triple(self):
        triple = llvm.get_default_triple()
        self.assertIsInstance(triple, str)
        self.assertTrue(triple)

    def test_get_host_cpu_name(self):
        cpu = llvm.get_host_cpu_name()
        self.assertIsInstance(cpu, str)
        self.assertTrue(cpu)

    def test_initfini(self):
        # llvm.shutdown() tears down global state, so exercise it in a
        # subprocess to avoid poisoning this process.
        code = """if 1:
            from llvmlite import binding as llvm
            llvm.initialize()
            llvm.initialize_native_target()
            llvm.initialize_native_asmprinter()
            llvm.shutdown()
            """
        subprocess.check_call([sys.executable, "-c", code])

    def test_set_option(self):
        # We cannot set an option multiple times (LLVM would exit() the
        # process), so run the code in a subprocess.
        code = """if 1:
            from llvmlite import binding as llvm
            llvm.set_option("progname", "-debug-pass=Disabled")
            """
        subprocess.check_call([sys.executable, "-c", code])

    def test_version(self):
        # NOTE(review): pins the supported LLVM versions; update when the
        # bindings move to a newer LLVM.
        major, minor, patch = llvm.llvm_version_info
        self.assertIn((major, minor), [(3, 5), (3, 6)])
        self.assertIn(patch, range(10))

    def test_check_jit_execution(self):
        llvm.check_jit_execution()

    @unittest.skipIf(no_de_locale(), "Locale not available")
    def test_print_double_locale(self):
        m = self.module(asm_double_locale)
        expect = str(m)
        # Change the locale so that comma is used as decimal-point
        # to trigger the LLVM bug (llvmlite issue #80)
        locale.setlocale(locale.LC_ALL, 'de_DE')
        # The LLVM bug is trigged by print the module with double constant
        got = str(m)
        # Changing the locale should not affect the LLVM IR
        self.assertEqual(expect, got)
class TestModuleRef(BaseTest):
    """Tests for ModuleRef: lifetime, properties, linking and bitcode."""

    def test_str(self):
        mod = self.module()
        s = str(mod).strip()
        self.assertTrue(s.startswith('; ModuleID ='), s)

    def test_close(self):
        # Using a closed module raises; close() itself is idempotent.
        mod = self.module()
        str(mod)
        mod.close()
        with self.assertRaises(ctypes.ArgumentError):
            str(mod)
        mod.close()

    def test_with(self):
        # The context manager closes the module; re-entering raises.
        mod = self.module()
        str(mod)
        with mod:
            str(mod)
        with self.assertRaises(ctypes.ArgumentError):
            str(mod)
        with self.assertRaises(RuntimeError):
            with mod:
                pass

    def test_name(self):
        mod = self.module()
        mod.name = "foo"
        self.assertEqual(mod.name, "foo")
        mod.name = "bar"
        self.assertEqual(mod.name, "bar")

    def test_data_layout(self):
        # data_layout round-trips through its string form.
        mod = self.module()
        s = mod.data_layout
        self.assertIsInstance(s, str)
        mod.data_layout = s
        self.assertEqual(s, mod.data_layout)

    def test_triple(self):
        mod = self.module()
        s = mod.triple
        self.assertEqual(s, llvm.get_default_triple())
        mod.triple = ''
        self.assertEqual(mod.triple, '')

    def test_verify(self):
        # Verify successful
        mod = self.module()
        self.assertIs(mod.verify(), None)
        # Verify failed
        mod = self.module(asm_verification_fail)
        with self.assertRaises(RuntimeError) as cm:
            mod.verify()
        s = str(cm.exception)
        self.assertIn("%.bug = add i32 1, %.bug", s)

    def test_get_function(self):
        mod = self.module()
        fn = mod.get_function("sum")
        self.assertIsInstance(fn, llvm.ValueRef)
        self.assertEqual(fn.name, "sum")
        with self.assertRaises(NameError):
            mod.get_function("foo")
        # Check that fn keeps the module instance alive
        del mod
        str(fn.module)

    def test_get_global_variable(self):
        mod = self.module()
        gv = mod.get_global_variable("glob")
        self.assertIsInstance(gv, llvm.ValueRef)
        self.assertEqual(gv.name, "glob")
        with self.assertRaises(NameError):
            mod.get_global_variable("bar")
        # Check that gv keeps the module instance alive
        del mod
        str(gv.module)

    def test_global_variables(self):
        # The iterator must stay valid after the module reference is dropped.
        mod = self.module()
        it = mod.global_variables
        del mod
        globs = sorted(it, key=lambda value: value.name)
        self.assertEqual(len(globs), 4)
        self.assertEqual([g.name for g in globs],
                         ["glob", "glob_b", "glob_f", "glob_struct"])

    def test_functions(self):
        mod = self.module()
        it = mod.functions
        del mod
        funcs = list(it)
        self.assertEqual(len(funcs), 1)
        self.assertEqual(funcs[0].name, "sum")

    def test_link_in(self):
        # By default the source module is consumed by link_in().
        dest = self.module()
        src = self.module(asm_mul)
        dest.link_in(src)
        self.assertEqual(sorted(f.name for f in dest.functions), ["mul", "sum"])
        dest.get_function("mul")
        dest.close()
        with self.assertRaises(ctypes.ArgumentError):
            src.get_function("mul")

    def test_link_in_preserve(self):
        # With preserve=True the source module survives the link.
        dest = self.module()
        src2 = self.module(asm_mul)
        dest.link_in(src2, preserve=True)
        self.assertEqual(sorted(f.name for f in dest.functions), ["mul", "sum"])
        dest.close()
        self.assertEqual(sorted(f.name for f in src2.functions), ["mul"])
        src2.get_function("mul")

    def test_link_in_error(self):
        # Raise an error by trying to link two modules with the same global
        # definition "sum".
        dest = self.module()
        src = self.module(asm_sum2)
        with self.assertRaises(RuntimeError) as cm:
            dest.link_in(src)
        self.assertIn("symbol multiply defined", str(cm.exception))

    def test_as_bitcode(self):
        mod = self.module()
        bc = mod.as_bitcode()
        # Refer to http://llvm.org/docs/doxygen/html/ReaderWriter_8h_source.html#l00064
        # and http://llvm.org/docs/doxygen/html/ReaderWriter_8h_source.html#l00092
        bitcode_wrapper_magic = b'\xde\xc0\x17\x0b'
        bitcode_magic = b'BC'
        self.assertTrue(bc.startswith(bitcode_magic) or
                        bc.startswith(bitcode_wrapper_magic))

    def test_parse_bitcode_error(self):
        with self.assertRaises(RuntimeError) as cm:
            llvm.parse_bitcode(b"")
        self.assertIn("LLVM bitcode parsing error", str(cm.exception))
        self.assertIn("Invalid bitcode signature", str(cm.exception))

    def test_bitcode_roundtrip(self):
        bc = self.module().as_bitcode()
        mod = llvm.parse_bitcode(bc)
        self.assertEqual(mod.as_bitcode(), bc)
        mod.get_function("sum")
        mod.get_global_variable("glob")

    def test_cloning(self):
        m = self.module()
        cloned = m.clone()
        self.assertIsNot(cloned, m)
        self.assertEqual(cloned.as_bitcode(), m.as_bitcode())
class JITTestMixin(object):
    """
    Mixin for ExecutionEngine tests.

    Concrete subclasses must provide a jit(module[, target_machine])
    factory returning an execution engine.
    """

    def get_sum(self, ee, func_name="sum"):
        """Finalize `ee` and return a ctypes callable for `func_name`."""
        ee.finalize_object()
        cfptr = ee.get_function_address(func_name)
        self.assertTrue(cfptr)
        return CFUNCTYPE(c_int, c_int, c_int)(cfptr)

    def test_run_code(self):
        mod = self.module()
        with self.jit(mod) as ee:
            cfunc = self.get_sum(ee)
            res = cfunc(2, -5)
            self.assertEqual(-3, res)

    def test_close(self):
        # close() is idempotent; using a closed engine raises.
        ee = self.jit(self.module())
        ee.close()
        ee.close()
        with self.assertRaises(ctypes.ArgumentError):
            ee.finalize_object()

    def test_with(self):
        # Context manager closes the engine; re-entering raises.
        ee = self.jit(self.module())
        with ee:
            pass
        with self.assertRaises(RuntimeError):
            with ee:
                pass
        with self.assertRaises(ctypes.ArgumentError):
            ee.finalize_object()

    def test_module_lifetime(self):
        # Engine and module can be closed in either order.
        mod = self.module()
        ee = self.jit(mod)
        ee.close()
        mod.close()

    def test_module_lifetime2(self):
        mod = self.module()
        ee = self.jit(mod)
        mod.close()
        ee.close()

    def test_add_module(self):
        # A module cannot be added twice; closing the engine closes
        # the modules it owns.
        ee = self.jit(self.module())
        mod = self.module(asm_mul)
        ee.add_module(mod)
        with self.assertRaises(KeyError):
            ee.add_module(mod)
        self.assertFalse(mod.closed)
        ee.close()
        self.assertTrue(mod.closed)

    def test_add_module_lifetime(self):
        ee = self.jit(self.module())
        mod = self.module(asm_mul)
        ee.add_module(mod)
        mod.close()
        ee.close()

    def test_add_module_lifetime2(self):
        ee = self.jit(self.module())
        mod = self.module(asm_mul)
        ee.add_module(mod)
        ee.close()
        mod.close()

    def test_remove_module(self):
        # After removal the engine no longer owns the module, so closing
        # the engine leaves it open.
        ee = self.jit(self.module())
        mod = self.module(asm_mul)
        ee.add_module(mod)
        ee.remove_module(mod)
        with self.assertRaises(KeyError):
            ee.remove_module(mod)
        self.assertFalse(mod.closed)
        ee.close()
        self.assertFalse(mod.closed)

    def test_target_data(self):
        mod = self.module()
        ee = self.jit(mod)
        td = ee.target_data
        # A singleton is returned
        self.assertIs(ee.target_data, td)
        str(td)
        # The TargetData must remain usable after its owners are gone.
        del mod, ee
        str(td)

    def test_target_data_abi_enquiries(self):
        mod = self.module()
        ee = self.jit(mod)
        td = ee.target_data
        gv_i32 = mod.get_global_variable("glob")
        gv_i8 = mod.get_global_variable("glob_b")
        gv_struct = mod.get_global_variable("glob_struct")
        # A global is a pointer, it has the ABI size of a pointer
        pointer_size = 4 if sys.maxsize < 2 ** 32 else 8
        for g in (gv_i32, gv_i8, gv_struct):
            self.assertEqual(td.get_abi_size(g.type), pointer_size)
        self.assertEqual(td.get_pointee_abi_size(gv_i32.type), 4)
        self.assertEqual(td.get_pointee_abi_alignment(gv_i32.type), 4)
        self.assertEqual(td.get_pointee_abi_size(gv_i8.type), 1)
        self.assertIn(td.get_pointee_abi_alignment(gv_i8.type), (1, 2, 4))
        self.assertEqual(td.get_pointee_abi_size(gv_struct.type), 24)
        self.assertIn(td.get_pointee_abi_alignment(gv_struct.type), (4, 8))

    def test_object_cache_notify(self):
        # The notify callback fires once per compiled module, with the
        # module object and its object-code bytes.
        notifies = []

        def notify(mod, buf):
            notifies.append((mod, buf))

        mod = self.module()
        ee = self.jit(mod)
        ee.set_object_cache(notify)
        self.assertEqual(len(notifies), 0)
        cfunc = self.get_sum(ee)
        cfunc(2, -5)
        self.assertEqual(len(notifies), 1)
        # The right module object was found
        self.assertIs(notifies[0][0], mod)
        self.assertIsInstance(notifies[0][1], bytes)
        notifies[:] = []
        mod2 = self.module(asm_mul)
        ee.add_module(mod2)
        cfunc = self.get_sum(ee, "mul")
        self.assertEqual(len(notifies), 1)
        # The right module object was found
        self.assertIs(notifies[0][0], mod2)
        self.assertIsInstance(notifies[0][1], bytes)

    def test_object_cache_getbuffer(self):
        # getbuffer() may serve a cached object; returning None falls
        # back to normal compilation (which then triggers notify()).
        notifies = []
        getbuffers = []

        def notify(mod, buf):
            notifies.append((mod, buf))

        def getbuffer(mod):
            getbuffers.append(mod)

        mod = self.module()
        ee = self.jit(mod)
        ee.set_object_cache(notify, getbuffer)
        # First return None from getbuffer(): the object is compiled normally
        self.assertEqual(len(notifies), 0)
        self.assertEqual(len(getbuffers), 0)
        cfunc = self.get_sum(ee)
        self.assertEqual(len(notifies), 1)
        self.assertEqual(len(getbuffers), 1)
        self.assertIs(getbuffers[0], mod)
        sum_buffer = notifies[0][1]

        # Recreate a new EE, and use getbuffer() to return the previously
        # compiled object.
        def getbuffer_successful(mod):
            getbuffers.append(mod)
            return sum_buffer

        notifies[:] = []
        getbuffers[:] = []
        # Use another source module to make sure it is ignored
        mod = self.module(asm_mul)
        ee = self.jit(mod)
        ee.set_object_cache(notify, getbuffer_successful)
        self.assertEqual(len(notifies), 0)
        self.assertEqual(len(getbuffers), 0)
        cfunc = self.get_sum(ee)
        self.assertEqual(cfunc(2, -5), -3)
        self.assertEqual(len(notifies), 0)
        self.assertEqual(len(getbuffers), 1)
class JITWithTMTestMixin(JITTestMixin):
    """JIT tests that additionally exercise the TargetMachine code emitters."""

    def test_emit_assembly(self):
        """Test TargetMachineRef.emit_assembly()"""
        target_machine = self.target_machine()
        mod = self.module()
        ee = self.jit(mod, target_machine)
        raw_asm = target_machine.emit_assembly(mod)
        self.assertIn("sum", raw_asm)

    def test_emit_object(self):
        """Test TargetMachineRef.emit_object()"""
        target_machine = self.target_machine()
        mod = self.module()
        ee = self.jit(mod, target_machine)
        code_object = target_machine.emit_object(mod)
        self.assertIsInstance(code_object, six.binary_type)
        if sys.platform.startswith('linux'):
            # Sanity check
            self.assertIn(b"ELF", code_object[:10])
class TestMCJit(BaseTest, JITWithTMTestMixin):
    """
    Test JIT engines created with create_mcjit_compiler().
    """

    def jit(self, mod, target_machine=None):
        # Factory consumed by the JITTestMixin tests.
        if target_machine is None:
            target_machine = self.target_machine()
        return llvm.create_mcjit_compiler(mod, target_machine)
class TestValueRef(BaseTest):
    """Tests for ValueRef properties (name, linkage, visibility, etc.)."""

    def test_str(self):
        mod = self.module()
        glob = mod.get_global_variable("glob")
        self.assertEqual(str(glob), "@glob = global i32 0")

    def test_name(self):
        mod = self.module()
        glob = mod.get_global_variable("glob")
        self.assertEqual(glob.name, "glob")
        glob.name = "foobar"
        self.assertEqual(glob.name, "foobar")

    def test_linkage(self):
        # linkage accepts both Linkage enum members and their string names.
        mod = self.module()
        glob = mod.get_global_variable("glob")
        linkage = glob.linkage
        self.assertIsInstance(glob.linkage, llvm.Linkage)
        glob.linkage = linkage
        self.assertEqual(glob.linkage, linkage)
        for linkage in ("internal", "external"):
            glob.linkage = linkage
            self.assertIsInstance(glob.linkage, llvm.Linkage)
            self.assertEqual(glob.linkage.name, linkage)

    def test_visibility(self):
        # visibility accepts both Visibility members and string names.
        mod = self.module()
        glob = mod.get_global_variable("glob")
        visibility = glob.visibility
        self.assertIsInstance(glob.visibility, llvm.Visibility)
        glob.visibility = visibility
        self.assertEqual(glob.visibility, visibility)
        for visibility in ("hidden", "protected", "default"):
            glob.visibility = visibility
            self.assertIsInstance(glob.visibility, llvm.Visibility)
            self.assertEqual(glob.visibility.name, visibility)

    def test_storage_class(self):
        # storage_class accepts both StorageClass members and string names.
        mod = self.module()
        glob = mod.get_global_variable("glob")
        storage_class = glob.storage_class
        self.assertIsInstance(glob.storage_class, llvm.StorageClass)
        glob.storage_class = storage_class
        self.assertEqual(glob.storage_class, storage_class)
        for storage_class in ("dllimport", "dllexport", "default"):
            glob.storage_class = storage_class
            self.assertIsInstance(glob.storage_class, llvm.StorageClass)
            self.assertEqual(glob.storage_class.name, storage_class)

    def test_add_function_attribute(self):
        mod = self.module()
        fn = mod.get_function("sum")
        fn.add_function_attribute("zext")

    def test_module(self):
        mod = self.module()
        glob = mod.get_global_variable("glob")
        self.assertIs(glob.module, mod)

    def test_type(self):
        mod = self.module()
        glob = mod.get_global_variable("glob")
        tp = glob.type
        self.assertIsInstance(tp, ffi.LLVMTypeRef)

    def test_close(self):
        # close() on a ValueRef is idempotent.
        glob = self.glob()
        glob.close()
        glob.close()

    def test_is_declaration(self):
        defined = self.module().get_function('sum')
        declared = self.module(asm_sum_declare).get_function('sum')
        self.assertFalse(defined.is_declaration)
        self.assertTrue(declared.is_declaration)
class TestTarget(BaseTest):
    """Tests for the Target factory functions and properties."""

    def test_from_triple(self):
        f = llvm.Target.from_triple
        # An unknown triple must raise a descriptive error.
        with self.assertRaises(RuntimeError) as cm:
            f("foobar")
        self.assertIn("No available targets are compatible with this triple",
                      str(cm.exception))
        triple = llvm.get_default_triple()
        target = f(triple)
        self.assertEqual(target.triple, triple)
        target.close()

    def test_create_target_machine(self):
        target = llvm.Target.from_triple(llvm.get_default_triple())
        # With the default settings
        target.create_target_machine('', '', 1, 'default', 'default')
        # With the host's CPU
        cpu = llvm.get_host_cpu_name()
        target.create_target_machine(cpu, '', 1, 'default', 'default')

    def test_name(self):
        # Both construction paths must agree on the target identity.
        t = llvm.Target.from_triple(llvm.get_default_triple())
        u = llvm.Target.from_default_triple()
        self.assertIsInstance(t.name, str)
        self.assertEqual(t.name, u.name)

    def test_description(self):
        t = llvm.Target.from_triple(llvm.get_default_triple())
        u = llvm.Target.from_default_triple()
        self.assertIsInstance(t.description, str)
        self.assertEqual(t.description, u.description)

    def test_str(self):
        target = llvm.Target.from_triple(llvm.get_default_triple())
        s = str(target)
        self.assertIn(target.name, s)
        self.assertIn(target.description, s)
class TestTargetData(BaseTest):
    """Tests for TargetData created from an explicit data-layout string."""

    def target_data(self):
        # Fixed 64-bit layout so ABI sizes are deterministic in the tests.
        return llvm.create_target_data("e-m:e-i64:64-f80:128-n8:16:32:64-S128")

    def test_get_abi_size(self):
        td = self.target_data()
        glob = self.glob()
        # A global is a pointer: 8 bytes under the layout above.
        self.assertEqual(td.get_abi_size(glob.type), 8)

    def test_add_pass(self):
        td = self.target_data()
        pm = llvm.create_module_pass_manager()
        td.add_pass(pm)
class TestTargetMachine(BaseTest):
    """Tests for TargetMachine analysis passes and target data."""

    def test_add_analysis_passes(self):
        tm = self.target_machine()
        pm = llvm.create_module_pass_manager()
        tm.add_analysis_passes(pm)

    def test_target_data_from_tm(self):
        tm = self.target_machine()
        td = tm.target_data
        mod = self.module()
        gv_i32 = mod.get_global_variable("glob")
        # A global is a pointer, it has the ABI size of a pointer
        pointer_size = 4 if sys.maxsize < 2 ** 32 else 8
        self.assertEqual(td.get_abi_size(gv_i32.type), pointer_size)
class TestTargetLibraryInfo(BaseTest):
    """Tests for the TargetLibraryInfo interface."""

    def tli(self):
        return llvm.create_target_library_info(llvm.get_default_triple())

    def test_create_target_library_info(self):
        # Both context-manager use and explicit close() are supported.
        tli = llvm.create_target_library_info(llvm.get_default_triple())
        with tli:
            pass
        tli.close()

    def test_get_libfunc(self):
        tli = self.tli()
        # Unknown library functions raise NameError.
        with self.assertRaises(NameError):
            tli.get_libfunc("xyzzy")
        fmin = tli.get_libfunc("fmin")
        self.assertEqual(fmin.name, "fmin")
        self.assertIsInstance(fmin.identity, int)
        fmax = tli.get_libfunc("fmax")
        # Distinct libfuncs must have distinct identities.
        self.assertNotEqual(fmax.identity, fmin.identity)

    def test_set_unavailable(self):
        tli = self.tli()
        fmin = tli.get_libfunc("fmin")
        tli.set_unavailable(fmin)

    def test_disable_all(self):
        tli = self.tli()
        tli.disable_all()

    def test_add_pass(self):
        tli = self.tli()
        pm = llvm.create_module_pass_manager()
        tli.add_pass(pm)
class TestPassManagerBuilder(BaseTest):
    """Tests for PassManagerBuilder properties and populate()."""

    def pmb(self):
        return llvm.PassManagerBuilder()

    def test_old_api(self):
        # Test the create_pass_manager_builder() factory function
        pmb = llvm.create_pass_manager_builder()
        pmb.inlining_threshold = 2
        pmb.opt_level = 3

    def test_close(self):
        # close() is idempotent.
        pmb = self.pmb()
        pmb.close()
        pmb.close()

    def test_opt_level(self):
        pmb = self.pmb()
        self.assertIsInstance(pmb.opt_level, six.integer_types)
        for i in range(4):
            pmb.opt_level = i
            self.assertEqual(pmb.opt_level, i)

    def test_size_level(self):
        pmb = self.pmb()
        self.assertIsInstance(pmb.size_level, six.integer_types)
        for i in range(4):
            pmb.size_level = i
            self.assertEqual(pmb.size_level, i)

    def test_inlining_threshold(self):
        pmb = self.pmb()
        # The threshold is write-only: reading it is not implemented.
        with self.assertRaises(NotImplementedError):
            pmb.inlining_threshold
        for i in (25, 80, 350):
            pmb.inlining_threshold = i

    def test_disable_unit_at_a_time(self):
        pmb = self.pmb()
        self.assertIsInstance(pmb.disable_unit_at_a_time, bool)
        for b in (True, False):
            pmb.disable_unit_at_a_time = b
            self.assertEqual(pmb.disable_unit_at_a_time, b)

    def test_disable_unroll_loops(self):
        pmb = self.pmb()
        self.assertIsInstance(pmb.disable_unroll_loops, bool)
        for b in (True, False):
            pmb.disable_unroll_loops = b
            self.assertEqual(pmb.disable_unroll_loops, b)

    def test_populate_module_pass_manager(self):
        pmb = self.pmb()
        pm = llvm.create_module_pass_manager()
        pmb.populate(pm)
        # The pass manager stays usable after the builder is closed.
        pmb.close()
        pm.close()

    def test_populate_function_pass_manager(self):
        mod = self.module()
        pmb = self.pmb()
        pm = llvm.create_function_pass_manager(mod)
        pmb.populate(pm)
        pmb.close()
        pm.close()
class PassManagerTestMixin(object):
    """Shared helpers for pass-manager tests; subclasses provide pm()."""

    def pmb(self):
        pmb = llvm.create_pass_manager_builder()
        pmb.opt_level = 2
        return pmb

    def test_close(self):
        # close() is idempotent.
        pm = self.pm()
        pm.close()
        pm.close()
class TestModulePassManager(BaseTest, PassManagerTestMixin):
    """Tests for module-level pass managers."""

    def pm(self):
        return llvm.create_module_pass_manager()

    def test_run(self):
        pm = self.pm()
        self.pmb().populate(pm)
        mod = self.module()
        orig_asm = str(mod)
        pm.run(mod)
        opt_asm = str(mod)
        # Quick check that optimizations were run: the redundant "%.3"
        # temporary from the fixture should have been folded away.
        self.assertIn("%.3", orig_asm)
        self.assertNotIn("%.3", opt_asm)
class TestFunctionPassManager(BaseTest, PassManagerTestMixin):
    """Tests for function-level pass managers."""

    def pm(self, mod=None):
        mod = mod or self.module()
        return llvm.create_function_pass_manager(mod)

    def test_initfini(self):
        pm = self.pm()
        pm.initialize()
        pm.finalize()

    def test_run(self):
        mod = self.module()
        fn = mod.get_function("sum")
        pm = self.pm(mod)
        self.pmb().populate(pm)
        # The function keeps the module alive even after mod.close().
        mod.close()
        orig_asm = str(fn)
        pm.initialize()
        pm.run(fn)
        pm.finalize()
        opt_asm = str(fn)
        # Quick check that optimizations were run: the "add 0" temporary
        # "%.4" from the fixture should have been folded away.
        self.assertIn("%.4", orig_asm)
        self.assertNotIn("%.4", opt_asm)
class TestDylib(BaseTest):
    """Tests for loading shared libraries into the LLVM process."""

    def test_bad_library(self):
        # A nonexistent library must raise instead of failing silently.
        with self.assertRaises(RuntimeError):
            llvm.load_library_permanently("zzzasdkf;jasd;l")

    @unittest.skipUnless(platform.system() in ["Linux", "Darwin"],
                         "test only works on Linux and Darwin")
    def test_libm(self):
        system = platform.system()
        if system == "Linux":
            libm = find_library("m")
        elif system == "Darwin":
            libm = find_library("libm")
        # Fix: find_library() returns None when the library cannot be
        # located; previously None was passed straight to LLVM, producing
        # a confusing downstream error instead of a clear test failure.
        self.assertIsNotNone(libm, "libm not found on this system")
        llvm.load_library_permanently(libm)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| |
# coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class DocumentHtmlDisplayAnchor(object):
    """Swagger model for a DocuSign HTML display anchor.

    NOTE: originally generated by the swagger code generator program;
    behavior is kept identical to the generated code.

    Attributes:
        swagger_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """
    swagger_types = {
        'case_sensitive': 'bool',
        'display_settings': 'DocumentHtmlDisplaySettings',
        'end_anchor': 'str',
        'remove_end_anchor': 'bool',
        'remove_start_anchor': 'bool',
        'start_anchor': 'str'
    }
    attribute_map = {
        'case_sensitive': 'caseSensitive',
        'display_settings': 'displaySettings',
        'end_anchor': 'endAnchor',
        'remove_end_anchor': 'removeEndAnchor',
        'remove_start_anchor': 'removeStartAnchor',
        'start_anchor': 'startAnchor'
    }

    def __init__(self, case_sensitive=None, display_settings=None, end_anchor=None, remove_end_anchor=None, remove_start_anchor=None, start_anchor=None):  # noqa: E501
        """DocumentHtmlDisplayAnchor - a model defined in Swagger"""  # noqa: E501
        self._case_sensitive = None
        self._display_settings = None
        self._end_anchor = None
        self._remove_end_anchor = None
        self._remove_start_anchor = None
        self._start_anchor = None
        self.discriminator = None
        # Route each provided (non-None) argument through its property
        # setter, matching the generated code's assignment order.
        provided = {
            'case_sensitive': case_sensitive,
            'display_settings': display_settings,
            'end_anchor': end_anchor,
            'remove_end_anchor': remove_end_anchor,
            'remove_start_anchor': remove_start_anchor,
            'start_anchor': start_anchor,
        }
        for attr_name, value in provided.items():
            if value is not None:
                setattr(self, attr_name, value)

    @property
    def case_sensitive(self):
        """Whether anchor matching is case sensitive.

        :rtype: bool
        """
        return self._case_sensitive

    @case_sensitive.setter
    def case_sensitive(self, case_sensitive):
        """Set case_sensitive.

        :type: bool
        """
        self._case_sensitive = case_sensitive

    @property
    def display_settings(self):
        """Display settings applied to the anchored region.

        :rtype: DocumentHtmlDisplaySettings
        """
        return self._display_settings

    @display_settings.setter
    def display_settings(self, display_settings):
        """Set display_settings.

        :type: DocumentHtmlDisplaySettings
        """
        self._display_settings = display_settings

    @property
    def end_anchor(self):
        """Text marking the end of the anchored region.

        :rtype: str
        """
        return self._end_anchor

    @end_anchor.setter
    def end_anchor(self, end_anchor):
        """Set end_anchor.

        :type: str
        """
        self._end_anchor = end_anchor

    @property
    def remove_end_anchor(self):
        """Whether the end anchor text is removed from the display.

        :rtype: bool
        """
        return self._remove_end_anchor

    @remove_end_anchor.setter
    def remove_end_anchor(self, remove_end_anchor):
        """Set remove_end_anchor.

        :type: bool
        """
        self._remove_end_anchor = remove_end_anchor

    @property
    def remove_start_anchor(self):
        """Whether the start anchor text is removed from the display.

        :rtype: bool
        """
        return self._remove_start_anchor

    @remove_start_anchor.setter
    def remove_start_anchor(self, remove_start_anchor):
        """Set remove_start_anchor.

        :type: bool
        """
        self._remove_start_anchor = remove_start_anchor

    @property
    def start_anchor(self):
        """Text marking the start of the anchored region.

        :rtype: str
        """
        return self._start_anchor

    @start_anchor.setter
    def start_anchor(self, start_anchor):
        """Set start_anchor.

        :type: str
        """
        self._start_anchor = start_anchor

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        # Preserved from the generated code: merge dict items when the
        # model itself subclasses dict (it does not, so this is a no-op).
        if issubclass(DocumentHtmlDisplayAnchor, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, DocumentHtmlDisplayAnchor):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| |
from share.transform.chain import * # noqa
from share.transform.chain.utils import format_address
def format_mendeley_address(ctx):
    """Build a formatted address string from a Mendeley location-like dict.

    `ctx` must provide 'name', 'city', 'state' and 'country' keys; the
    institution/location name is used as the first address line.
    """
    return format_address(
        address1=ctx['name'],
        city=ctx['city'],
        state_or_province=ctx['state'],
        country=ctx['country']
    )
# Maps a Mendeley `rel` value to a SHARE relation schema name.  The
# direction (subject vs. related work) is resolved separately via
# INVERSE_RELATIONS / RELATIONS below.
RELATION_MAP = {
    'related_to': 'WorkRelation',
    'derived_from': 'IsDerivedFrom',
    'source_of': 'IsDerivedFrom',
    'compiles': 'Compiles',
    'compiled_by': 'Compiles',
    'cites': 'Cites',
    'cited_by': 'Cites',
}
# `rel` values whose relation points *at* the current work.
INVERSE_RELATIONS = {
    'cited_by',
    'compiled_by',
    'derived_from'
}
# `rel` values whose relation points *from* the current work.
RELATIONS = {
    'cites',
    'compiles',
    'source_of',
    'related_to',
}


def get_related_works(options, inverse):
    """Return the entries of `options` whose 'rel' matches the direction.

    With inverse=True only INVERSE_RELATIONS members are kept, otherwise
    only RELATIONS members; unknown `rel` values are dropped either way.
    """
    wanted = INVERSE_RELATIONS if inverse else RELATIONS
    return [option for option in options if option['rel'] in wanted]


def get_relation_type(relation_type):
    """Translate a Mendeley `rel` value into a SHARE relation schema name."""
    return RELATION_MAP.get(relation_type, 'WorkRelation')


def get_related_work_type(work_type):
    """Map Mendeley's 'other' work type onto SHARE's generic creative work."""
    return 'creativework' if work_type == 'other' else work_type
class WorkIdentifier(Parser):
    """Parses a bare URI into a SHARE work identifier."""
    uri = ctx
class Tag(Parser):
    """Parses a Mendeley category object into a SHARE tag."""
    name = ctx.label

    class Extra:
        # Preserve the original Mendeley category id.
        id = ctx.id
class ThroughTags(Parser):
    """Through-table linking a work to a Tag."""
    tag = Delegate(Tag, ctx)
class Subject(Parser):
    """Parses a bare subject name string."""
    name = ctx
class ThroughSubjects(Parser):
    """Through-table linking a work to a Subject."""
    subject = Delegate(Subject, ctx)
class RelatedWork(Parser):
    """Parses a Mendeley related_links entry into a SHARE work."""
    # Mendeley's 'other' type maps to the generic 'creativework' schema.
    schema = RunPython(get_related_work_type, ctx.type)
    # The link's href becomes a work identifier; invalid IRIs are dropped.
    identifiers = Map(
        Delegate(WorkIdentifier),
        Try(
            IRI(ctx.href),
            exceptions=(InvalidIRI,)
        )
    )
class WorkRelation(Parser):
    """Outgoing relation: the parsed link is the *related* work."""
    schema = RunPython(get_relation_type, ctx.rel)
    related = Delegate(RelatedWork, ctx)
class InverseWorkRelation(Parser):
    """Incoming relation: the parsed link is the *subject* work."""
    schema = RunPython(get_relation_type, ctx.rel)
    subject = Delegate(RelatedWork, ctx)
class RelatedArticle(Parser):
    """Parses a Mendeley `articles` entry into a SHARE Article."""
    schema = 'Article'
    title = Try(ctx.title)
    # The article DOI becomes a work identifier; invalid IRIs are dropped.
    identifiers = Map(
        Delegate(WorkIdentifier),
        Try(
            IRI(ctx.doi),
            exceptions=(InvalidIRI,)
        )
    )

    class Extra:
        # Keep the raw journal/title/doi/id for provenance.
        journal = Try(ctx.journal)
        title = Try(ctx.title)
        doi = Try(ctx.doi)
        article_id = Try(ctx.id)
class UsesDataFrom(Parser):
    """Relation stating an article uses data from the current dataset."""
    subject = Delegate(RelatedArticle, ctx)
class AgentIdentifier(Parser):
    """Parses a bare URI into a SHARE agent identifier."""
    uri = ctx
class AgentInstitution(Parser):
    """Parses a Mendeley institution object into a SHARE agent."""
    # Guess a more specific agent type from the name, defaulting to
    # 'organization'.
    schema = GuessAgentType(ctx.name, default='organization')
    name = Try(ctx.name)
    location = Try(RunPython(format_mendeley_address, ctx))
    # Collect identifiers from both the institution's urls list and its
    # profile_url; invalid IRIs are dropped.
    identifiers = Map(
        Delegate(AgentIdentifier),
        Concat(
            Try(
                IRI(ctx.urls),
                exceptions=(InvalidIRI,)
            ),
            Try(
                IRI(ctx.profile_url),
                exceptions=(InvalidIRI,)
            )
        )
    )

    class Extra:
        name = Try(ctx.name)
        scival_id = Try(ctx.scival_id)
        # NOTE(review): 'instituion_id' is misspelled, but it is an emitted
        # data key — renaming it would change stored extra data; left as-is.
        instituion_id = Try(ctx.id)
        city = Try(ctx.city)
        state = Try(ctx.state)
        country = Try(ctx.country)
        parent_id = Try(ctx.parent_id)
        urls = Try(ctx.urls)
        profile_url = Try(ctx.profile_url)
        alt_names = Try(ctx.alt_names)
class AgentWorkRelation(Parser):
    """Relation linking a work to an institutional agent."""
    agent = Delegate(AgentInstitution, ctx)
class IsAffiliatedWith(Parser):
    """Relation stating a person is affiliated with an institution."""
    related = Delegate(AgentInstitution, ctx)
class Person(Parser):
    """Parses a Mendeley contributor/profile object into a SHARE person.

    Expected input shape (Mendeley profile):
    {
        "id": "",
        "first_name": "",
        "last_name": "",
        "display_name": "",
        "link": "",
        "folder": "",
        "institution": "",
        "institution_details": {
            "scival_id": 0,
            "id": "",
            "name": "",
            "city": "",
            "state": "",
            "country": "",
            "parent_id": "",
            "urls": [
                ""
            ],
            "profile_url": "",
            "alt_names": [
                {
                    "name": ""
                }
            ]
        },
        "location": {
            "id": "",
            "latitude": 0,
            "longitude": 0,
            "name": "",
            "city": "",
            "state": "",
            "country": ""
        },
        "created": "",
        "title": "",
        "web_user_id": 0,
        "scopus_author_ids": [
            ""
        ],
        "orcid_id": "",
    }
    """
    given_name = ctx.first_name
    family_name = ctx.last_name
    # Location comes from the expanded profile when available.
    location = RunPython(format_mendeley_address, Try(ctx.full_profile.location))
    # ORCID and profile link become agent identifiers; invalid IRIs dropped.
    identifiers = Map(
        Delegate(AgentIdentifier),
        Concat(
            Try(
                IRI(ctx.full_profile.orcid_id),
                exceptions=(InvalidIRI,)
            ),
            Try(
                IRI(ctx.full_profile.link),
                exceptions=(InvalidIRI,)
            )
        )
    )
    # Affiliations may come from either the expanded profile or the raw
    # contributor record.
    related_agents = Concat(
        Map(Delegate(IsAffiliatedWith), Try(ctx.full_profile.institution_details)),
        Map(Delegate(IsAffiliatedWith), Try(ctx.institution)),
    )

    class Extra:
        profile_id = Try(ctx.profile_id)
        first_name = ctx.first_name
        last_name = ctx.last_name
        contribution = Try(ctx.contribution)
        full_profile = Try(ctx.full_profile)
class Contributor(Parser):
    """Agent-work relation linking a Person to the parsed work."""
    agent = Delegate(Person, ctx)
class Creator(Contributor):
    """Contributor cited as a creator, with citation order and display name."""
    order_cited = ctx('index')
    cited_as = RunPython('full_name', ctx)

    def full_name(self, ctx):
        # Combine first and last name into the cited display name.
        return '{} {}'.format(ctx['first_name'], ctx['last_name'])
class DataSet(Parser):
    """
    Root parser mapping a Mendeley dataset record onto the DataSet schema.

    Example payload:
    {
        "id": "",
        "doi": {
            "id": "",
            "status": ""
        },
        "name": "",
        "description": "",
        "contributors": [
            {
                "contribution": "",
                "institution": {
                    "scival_id": 0,
                    "id": "",
                    "name": "",
                    "city": "",
                    "state": "",
                    "country": "",
                    "parent_id": "",
                    "urls": [""],
                    "profile_url": "",
                    "alt_names": [{"name": ""}]
                },
                "profile_id": "",
                "first_name": "",
                "last_name": ""
            }
        ],
        "articles": [
            {
                "journal": {
                    "url": "",
                    "issn": "",
                    "name": ""
                },
                "title": "",
                "doi": "",
                "id": ""
            }
        ],
        "institutions": [
            {
                "scival_id": 0,
                "id": "",
                "name": "",
                "city": "",
                "state": "",
                "country": "",
                "parent_id": "",
                "urls": [],
                "profile_url": "",
                "alt_names": [{"name": ""}]
            }
        ],
        "related_links": [
            {
                "type": "",
                "rel": "",
                "href": ""
            }
        ],
        "publish_date": "",
        "data_licence": {
            "description": "",
            "url": "",
            "full_name": "",
            "short_name": "",
            "id": ""
        },
        "embargo_date": ""
    }
    """
    schema = 'DataSet'
    title = Try(ctx.name)
    description = Try(ctx.description)
    # publish_date "reflects the published date of the most recent version of the dataset"
    date_published = ParseDate(Try(ctx.publish_date))
    date_updated = ParseDate(Try(ctx.publish_date))
    tags = Map(
        Delegate(ThroughTags),
        Try(ctx.categories)
    )
    subjects = Map(
        Delegate(ThroughSubjects),
        Subjects(Try(ctx.categories.label))
    )
    rights = Try(ctx.data_licence.description)
    free_to_read_type = Try(ctx.data_licence.url)
    free_to_read_date = ParseDate(Try(ctx.embargo_date))
    # Contributors are split into creators and plain contributors by
    # filter_contributors (below); institutions become plain agent relations.
    related_agents = Concat(
        Map(
            Delegate(Creator), RunPython('filter_contributors', Try(ctx.contributors), 'creator')
        ),
        Map(
            Delegate(Contributor), RunPython('filter_contributors', Try(ctx.contributors), 'contributor')
        ),
        Map(
            Delegate(AgentWorkRelation), Try(ctx.institutions)
        )
    )
    related_works = Concat(
        Map(
            Delegate(UsesDataFrom),
            Try(ctx.articles)  # Journal articles associated with the dataset
        ),
        Map(
            Delegate(WorkRelation),
            RunPython(
                get_related_works,
                Try(ctx.related_links),
                False
            )
        ),
        Map(
            Delegate(InverseWorkRelation),
            RunPython(
                get_related_works,
                Try(ctx.related_links),
                True
            )
        )
    )
    # Identifiers: the canonical Mendeley landing page URL (when an id is
    # present) plus the DOI when it parses as an IRI.
    identifiers = Map(
        Delegate(WorkIdentifier),
        Concat(
            RunPython(lambda mendeley_id: 'https://data.mendeley.com/datasets/{}'.format(mendeley_id) if mendeley_id else None, Try(ctx.id)),
            Try(
                IRI(ctx.doi.id),
                exceptions=(InvalidIRI,)
            )
        )
    )

    def filter_contributors(self, contributor_list, contributor_type):
        """Split *contributor_list* into creators or contributors.

        A contributor whose 'contribution' value is falsy or missing is
        treated as a creator; one with a truthy 'contribution' is a plain
        contributor. Returns the entries matching *contributor_type*.
        """
        filtered = []
        for contributor in contributor_list:
            try:
                if not contributor['contribution'] and contributor_type == 'creator':
                    filtered.append(contributor)
                elif contributor['contribution'] and contributor_type == 'contributor':
                    filtered.append(contributor)
            except KeyError:
                # No 'contribution' key at all: default to creator.
                if contributor_type == 'creator':
                    filtered.append(contributor)
        return filtered

    class Extra:
        """ Documentation:
            http://dev.mendeley.com/methods/#datasets
            http://dev.mendeley.com/methods/#profile-attributes
        """
        mendeley_id = Try(ctx.id)
        doi = Try(ctx.doi)
        name = Try(ctx.name)
        description = Try(ctx.description)
        version = Try(ctx.version)
        contributors = Try(ctx.contributors)
        versions = Try(ctx.versions)
        files = Try(ctx.files)
        articles = Try(ctx.articles)
        categories = Try(ctx.categories)
        institutions = Try(ctx.institutions)
        metrics = Try(ctx.metrics)
        available = Try(ctx.available)
        method = Try(ctx.method)
        related_links = Try(ctx.related_links)
        publish_date = ctx.publish_date
        data_licence = Try(ctx.data_licence)
        owner_id = Try(ctx.owner_id)
        embargo_date = Try(ctx.embargo_date)
class MendeleyTransformer(ChainTransformer):
    """Chain-transformer entry point; DataSet is the root of the parse tree."""
    VERSION = 1
    root_parser = DataSet
| |
# -*- coding: utf-8 -*-
from __future__ import with_statement
from django.utils.numberformat import format
from django.db import models
from cms import constants
from cms.api import add_plugin, create_page, create_title
from cms.exceptions import DuplicatePlaceholderWarning
from cms.models.fields import PlaceholderField
from cms.models.placeholdermodel import Placeholder
from cms.plugin_pool import plugin_pool
from cms.plugin_rendering import render_placeholder
from cms.plugins.link.cms_plugins import LinkPlugin
from cms.utils.compat.tests import UnittestCompatMixin
from djangocms_text_ckeditor.cms_plugins import TextPlugin
from djangocms_text_ckeditor.models import Text
from cms.test_utils.fixtures.fakemlng import FakemlngFixtures
from cms.test_utils.project.fakemlng.models import Translations
from cms.test_utils.project.placeholderapp.models import (
Example1,
TwoPlaceholderExample,
DynamicPlaceholderSlotExample,
MultilingualExample1
)
from cms.test_utils.testcases import CMSTestCase
from cms.test_utils.util.context_managers import (SettingsOverride, UserLoginContext)
from cms.test_utils.util.mock import AttributeObject
from cms.utils.compat.dj import force_unicode
from cms.utils.placeholder import PlaceholderNoAction, MLNGPlaceholderActions
from cms.utils.plugins import get_placeholders
from django.conf import settings
from django.contrib import admin
from django.contrib.auth.models import User, Permission
from cms.test_utils.project.objectpermissionsapp.models import UserObjectPermission
from django.contrib.messages.storage import default_storage
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.db.models import Model
from django.http import HttpResponseForbidden, HttpResponse
from django.template import TemplateSyntaxError, Template
from django.template.context import Context, RequestContext
from django.test import TestCase
import itertools
class PlaceholderTestCase(CMSTestCase, UnittestCompatMixin):
    """Tests for placeholder template scanning, rendering, plugin moves and
    language fallback behaviour.

    Fix over the previous revision: the deprecated ``assertEquals`` alias is
    replaced with ``assertEqual`` throughout (behaviour identical).
    """

    def setUp(self):
        # Run every test as a logged-in superuser.
        u = User(username="test", is_staff=True, is_active=True, is_superuser=True)
        u.set_password("test")
        u.save()
        self._login_context = self.login_user_context(u)
        self._login_context.__enter__()

    def tearDown(self):
        self._login_context.__exit__(None, None, None)

    def test_placeholder_scanning_extend(self):
        placeholders = get_placeholders('placeholder_tests/test_one.html')
        self.assertEqual(sorted(placeholders), sorted([u'new_one', u'two', u'three']))

    def test_placeholder_scanning_sekizai_extend(self):
        placeholders = get_placeholders('placeholder_tests/test_one_sekizai.html')
        self.assertEqual(sorted(placeholders), sorted([u'new_one', u'two', u'three']))

    def test_placeholder_scanning_include(self):
        placeholders = get_placeholders('placeholder_tests/test_two.html')
        self.assertEqual(sorted(placeholders), sorted([u'child', u'three']))

    def test_placeholder_scanning_double_extend(self):
        placeholders = get_placeholders('placeholder_tests/test_three.html')
        self.assertEqual(sorted(placeholders), sorted([u'new_one', u'two', u'new_three']))

    def test_placeholder_scanning_sekizai_double_extend(self):
        placeholders = get_placeholders('placeholder_tests/test_three_sekizai.html')
        self.assertEqual(sorted(placeholders), sorted([u'new_one', u'two', u'new_three']))

    def test_placeholder_scanning_complex(self):
        placeholders = get_placeholders('placeholder_tests/test_four.html')
        self.assertEqual(sorted(placeholders), sorted([u'new_one', u'child', u'four']))

    def test_placeholder_scanning_super(self):
        placeholders = get_placeholders('placeholder_tests/test_five.html')
        self.assertEqual(sorted(placeholders), sorted([u'one', u'extra_one', u'two', u'three']))

    def test_placeholder_scanning_nested(self):
        placeholders = get_placeholders('placeholder_tests/test_six.html')
        self.assertEqual(sorted(placeholders), sorted([u'new_one', u'new_two', u'new_three']))

    def test_placeholder_scanning_duplicate(self):
        # A duplicated slot name must emit DuplicatePlaceholderWarning but
        # still be returned exactly once.
        placeholders = self.assertWarns(DuplicatePlaceholderWarning,
            'Duplicate {% placeholder "one" %} in template placeholder_tests/test_seven.html.',
            get_placeholders, 'placeholder_tests/test_seven.html')
        self.assertEqual(sorted(placeholders), sorted([u'one']))

    def test_placeholder_scanning_extend_outside_block(self):
        placeholders = get_placeholders('placeholder_tests/outside.html')
        self.assertEqual(sorted(placeholders), sorted([u'new_one', u'two', u'base_outside']))

    def test_placeholder_scanning_sekizai_extend_outside_block(self):
        placeholders = get_placeholders('placeholder_tests/outside_sekizai.html')
        self.assertEqual(sorted(placeholders), sorted([u'new_one', u'two', u'base_outside']))

    def test_placeholder_scanning_extend_outside_block_nested(self):
        placeholders = get_placeholders('placeholder_tests/outside_nested.html')
        self.assertEqual(sorted(placeholders), sorted([u'new_one', u'two', u'base_outside']))

    def test_placeholder_scanning_sekizai_extend_outside_block_nested(self):
        placeholders = get_placeholders('placeholder_tests/outside_nested_sekizai.html')
        self.assertEqual(sorted(placeholders), sorted([u'new_one', u'two', u'base_outside']))

    def test_fieldsets_requests(self):
        response = self.client.get(reverse('admin:placeholderapp_example1_add'))
        self.assertEqual(response.status_code, 200)
        response = self.client.get(reverse('admin:placeholderapp_twoplaceholderexample_add'))
        self.assertEqual(response.status_code, 200)

    def test_page_only_plugins(self):
        ex = Example1(
            char_1='one',
            char_2='two',
            char_3='tree',
            char_4='four'
        )
        ex.save()
        response = self.client.get(reverse('admin:placeholderapp_example1_change', args=(ex.pk,)))
        self.assertEqual(response.status_code, 200)
        # Page-only plugins must not be offered on a non-page placeholder.
        self.assertNotContains(response, 'InheritPagePlaceholderPlugin')

    def test_inter_placeholder_plugin_move(self):
        ex = TwoPlaceholderExample(
            char_1='one',
            char_2='two',
            char_3='tree',
            char_4='four'
        )
        ex.save()
        ph1 = ex.placeholder_1
        ph2 = ex.placeholder_2
        ph1_pl1 = add_plugin(ph1, TextPlugin, 'en', body='ph1 plugin1').cmsplugin_ptr
        ph1_pl2 = add_plugin(ph1, TextPlugin, 'en', body='ph1 plugin2').cmsplugin_ptr
        ph1_pl3 = add_plugin(ph1, TextPlugin, 'en', body='ph1 plugin3').cmsplugin_ptr
        ph2_pl1 = add_plugin(ph2, TextPlugin, 'en', body='ph2 plugin1').cmsplugin_ptr
        ph2_pl2 = add_plugin(ph2, TextPlugin, 'en', body='ph2 plugin2').cmsplugin_ptr
        ph2_pl3 = add_plugin(ph2, TextPlugin, 'en', body='ph2 plugin3').cmsplugin_ptr
        # Move ph1_pl2 into placeholder 2, placing it last.
        response = self.client.post(reverse('admin:placeholderapp_twoplaceholderexample_move_plugin'), {
            'placeholder_id': str(ph2.pk),
            'plugin_id': str(ph1_pl2.pk),
            'plugin_order[]': [str(p.pk) for p in [ph2_pl3, ph2_pl1, ph2_pl2, ph1_pl2]]
        })
        self.assertEqual(response.status_code, 200)
        self.assertEqual([ph1_pl1, ph1_pl3], list(ph1.cmsplugin_set.order_by('position')))
        self.assertEqual([ph2_pl3, ph2_pl1, ph2_pl2, ph1_pl2, ], list(ph2.cmsplugin_set.order_by('position')))

    def test_nested_plugin_escapejs(self):
        """
        Checks #1366 error condition.
        When adding/editing a plugin whose icon_src() method returns a URL
        containing an hyphen, the hyphen is escaped by django escapejs resulting
        in a incorrect URL
        """
        with SettingsOverride(CMS_PERMISSION=False):
            ex = Example1(
                char_1='one',
                char_2='two',
                char_3='tree',
                char_4='four'
            )
            ex.save()
            ph1 = ex.placeholder
            ###
            # add the test plugin
            ###
            test_plugin = add_plugin(ph1, u"EmptyPlugin", u"en")
            test_plugin.save()
            pl_url = "%sedit-plugin/%s/" % (
                reverse('admin:placeholderapp_example1_change', args=(ex.pk,)),
                test_plugin.pk)
            response = self.client.post(pl_url, {})
            self.assertContains(response, "CMS.API.Helpers.reloadBrowser")

    def test_nested_plugin_escapejs_page(self):
        """
        Sibling test of the above, on a page.
        #1366 does not apply to placeholder defined in a page
        """
        with SettingsOverride(CMS_PERMISSION=False):
            page = create_page('page', 'col_two.html', 'en')
            ph1 = page.placeholders.get(slot='col_left')
            ###
            # add the test plugin
            ###
            test_plugin = add_plugin(ph1, u"EmptyPlugin", u"en")
            test_plugin.save()
            pl_url = "%sedit-plugin/%s/" % (
                reverse('admin:cms_page_change', args=(page.pk,)),
                test_plugin.pk)
            response = self.client.post(pl_url, {})
            self.assertContains(response, "CMS.API.Helpers.reloadBrowser")

    def test_placeholder_scanning_fail(self):
        self.assertRaises(TemplateSyntaxError, get_placeholders, 'placeholder_tests/test_eleven.html')

    def test_placeholder_tag(self):
        template = Template("{% load placeholder_tags %}{% render_placeholder placeholder %}")
        # Without a request or a placeholder the tag renders nothing.
        ctx = Context()
        self.assertEqual(template.render(ctx), "")
        request = self.get_request('/')
        rctx = RequestContext(request)
        self.assertEqual(template.render(rctx), "")
        placeholder = Placeholder.objects.create(slot="test")
        rctx['placeholder'] = placeholder
        self.assertEqual(template.render(rctx), "")
        self.assertEqual(placeholder.cmsplugin_set.count(), 0)
        add_plugin(placeholder, "TextPlugin", settings.LANGUAGES[0][0], body="test")
        self.assertEqual(placeholder.cmsplugin_set.count(), 1)
        rctx = RequestContext(request)
        placeholder = self.reload(placeholder)
        rctx['placeholder'] = placeholder
        self.assertEqual(template.render(rctx).strip(), "test")

    def test_placeholder_tag_language(self):
        template = Template("{% load placeholder_tags %}{% render_placeholder placeholder language language %}")
        placeholder = Placeholder.objects.create(slot="test")
        add_plugin(placeholder, "TextPlugin", 'en', body="English")
        add_plugin(placeholder, "TextPlugin", 'de', body="Deutsch")
        request = self.get_request('/')
        rctx = RequestContext(request)
        rctx['placeholder'] = placeholder
        rctx['language'] = 'en'
        self.assertEqual(template.render(rctx).strip(), "English")
        rctx['language'] = 'de'
        self.assertEqual(template.render(rctx).strip(), "Deutsch")

    def test_placeholder_context_leaking(self):
        TEST_CONF = {'test': {'extra_context': {'width': 10}}}
        ph = Placeholder.objects.create(slot='test')

        # Context whose push/pop are no-ops, so extra_context stays visible
        # after rendering and leakage can be asserted.
        class NoPushPopContext(Context):
            def push(self):
                pass

            pop = push

        context = NoPushPopContext()
        context['request'] = self.get_request()
        with SettingsOverride(CMS_PLACEHOLDER_CONF=TEST_CONF):
            render_placeholder(ph, context)
            self.assertTrue('width' in context)
            self.assertEqual(context['width'], 10)
            ph.render(context, None)
            self.assertTrue('width' in context)
            self.assertEqual(context['width'], 10)

    def test_placeholder_scanning_nested_super(self):
        placeholders = get_placeholders('placeholder_tests/nested_super_level1.html')
        self.assertEqual(sorted(placeholders), sorted([u'level1', u'level2', u'level3', u'level4']))

    def test_placeholder_field_no_related_name(self):
        self.assertRaises(ValueError, PlaceholderField, 'placeholder', related_name='+')

    def test_placeholder_field_valid_slotname(self):
        self.assertRaises(ImproperlyConfigured, PlaceholderField, 10)

    def test_placeholder_field_dynamic_slot_generation(self):
        instance = DynamicPlaceholderSlotExample.objects.create(char_1='slot1', char_2='slot2')
        self.assertEqual(instance.char_1, instance.placeholder_1.slot)
        self.assertEqual(instance.char_2, instance.placeholder_2.slot)

    def test_placeholder_field_dynamic_slot_update(self):
        instance = DynamicPlaceholderSlotExample.objects.create(char_1='slot1', char_2='slot2')
        # Plugin counts
        old_placeholder_1_plugin_count = len(instance.placeholder_1.get_plugins())
        old_placeholder_2_plugin_count = len(instance.placeholder_2.get_plugins())
        # Switch around the slot names
        instance.char_1, instance.char_2 = instance.char_2, instance.char_1
        # Store the ids before save, to test that a new placeholder is NOT created.
        placeholder_1_id = instance.placeholder_1.pk
        placeholder_2_id = instance.placeholder_2.pk
        # Save instance
        instance.save()
        current_placeholder_1_plugin_count = len(instance.placeholder_1.get_plugins())
        current_placeholder_2_plugin_count = len(instance.placeholder_2.get_plugins())
        # Now test that the placeholder slots have changed
        self.assertEqual(instance.char_2, 'slot1')
        self.assertEqual(instance.char_1, 'slot2')
        # Test that a new placeholder was never created
        self.assertEqual(instance.placeholder_1.pk, placeholder_1_id)
        self.assertEqual(instance.placeholder_2.pk, placeholder_2_id)
        # And test the plugin counts remain the same
        self.assertEqual(old_placeholder_1_plugin_count, current_placeholder_1_plugin_count)
        self.assertEqual(old_placeholder_2_plugin_count, current_placeholder_2_plugin_count)

    def test_plugins_language_fallback(self):
        """ Tests language_fallback placeholder configuration """
        page_en = create_page('page_en', 'col_two.html', 'en')
        title_de = create_title("de", "page_de", page_en)
        placeholder_en = page_en.placeholders.get(slot='col_left')
        placeholder_de = title_de.page.placeholders.get(slot='col_left')
        add_plugin(placeholder_en, TextPlugin, 'en', body='en body')

        class NoPushPopContext(Context):
            def push(self):
                pass

            pop = push

        context_en = NoPushPopContext()
        context_en['request'] = self.get_request(language="en", page=page_en)
        context_de = NoPushPopContext()
        context_de['request'] = self.get_request(language="de", page=page_en)
        # First test the default (non-fallback) behavior)
        ## English page should have the text plugin
        content_en = render_placeholder(placeholder_en, context_en)
        self.assertRegexpMatches(content_en, "^en body$")
        ## Deutsch page should have no text
        content_de = render_placeholder(placeholder_de, context_de)
        self.assertNotRegex(content_de, "^en body$")
        self.assertEqual(len(content_de), 0)
        conf = {
            'col_left': {
                'language_fallback': True,
            },
        }
        with SettingsOverride(CMS_PLACEHOLDER_CONF=conf):
            ## Deutsch page should have no text
            content_de = render_placeholder(placeholder_de, context_de)
            self.assertRegexpMatches(content_de, "^en body$")
            # remove the cached plugins instances
            del(placeholder_de._de_plugins_cache)
            # Then we add a plugin to check for proper rendering
            add_plugin(placeholder_de, TextPlugin, 'de', body='de body')
            content_de = render_placeholder(placeholder_de, context_de)
            self.assertRegexpMatches(content_de, "^de body$")

    def test_plugins_non_default_language_fallback(self):
        """ Tests language_fallback placeholder configuration """
        page_en = create_page('page_en', 'col_two.html', 'en')
        title_de = create_title("de", "page_de", page_en)
        placeholder_en = page_en.placeholders.get(slot='col_left')
        placeholder_de = title_de.page.placeholders.get(slot='col_left')
        add_plugin(placeholder_de, TextPlugin, 'de', body='de body')

        class NoPushPopContext(Context):
            def push(self):
                pass

            pop = push

        context_en = NoPushPopContext()
        context_en['request'] = self.get_request(language="en", page=page_en)
        context_de = NoPushPopContext()
        context_de['request'] = self.get_request(language="de", page=page_en)
        # First test the default (non-fallback) behavior)
        ## Deutsch page should have the text plugin
        content_de = render_placeholder(placeholder_en, context_de)
        self.assertRegexpMatches(content_de, "^de body$")
        ## English page should have no text
        content_en = render_placeholder(placeholder_en, context_en)
        self.assertNotRegex(content_en, "^de body$")
        self.assertEqual(len(content_en), 0)
        conf = {
            'col_left': {
                'language_fallback': True,
            },
        }
        with SettingsOverride(CMS_PLACEHOLDER_CONF=conf):
            ## English page should have deutsch text
            content_en = render_placeholder(placeholder_en, context_en)
            self.assertRegexpMatches(content_en, "^de body$")
            # remove the cached plugins instances
            del(placeholder_en._en_plugins_cache)
            # Then we add a plugin to check for proper rendering
            add_plugin(placeholder_en, TextPlugin, 'en', body='en body')
            content_en = render_placeholder(placeholder_en, context_en)
            self.assertRegexpMatches(content_en, "^en body$")

    def test_placeholder_pk_thousands_format(self):
        page = create_page("page", "nav_playground.html", "en", published=True)
        # Bump every placeholder pk past 1000 so thousand separators would
        # show up if pks were localized.
        for placeholder in page.placeholders.all():
            page.placeholders.remove(placeholder)
            placeholder.pk += 1000
            placeholder.save()
            page.placeholders.add(placeholder)
        page.reload()
        for placeholder in page.placeholders.all():
            plugin = add_plugin(placeholder, "TextPlugin", "en", body="body",
                                id=placeholder.pk)
        with SettingsOverride(USE_THOUSAND_SEPARATOR=True, USE_L10N=True):
            # Superuser
            user = self.get_superuser()
            self.client.login(username=user.username, password=user.username)
            response = self.client.get("/en/?edit")
            for placeholder in page.placeholders.all():
                # Pks embedded in the toolbar JS must never be rendered with
                # a thousand separator.
                self.assertContains(
                    response, "'placeholder_id': '%s'" % placeholder.pk)
                self.assertNotContains(
                    response, "'placeholder_id': '%s'" % format(
                        placeholder.pk, ".", grouping=3, thousand_sep=","))
                self.assertNotContains(
                    response, "'plugin_id': '%s'" % format(
                        placeholder.pk, ".", grouping=3, thousand_sep=","))
                self.assertNotContains(
                    response, "'clipboard': '%s'" % format(
                        response.context['request'].toolbar.clipboard.pk, ".",
                        grouping=3, thousand_sep=","))
class PlaceholderActionTests(FakemlngFixtures, CMSTestCase):
    """Tests PlaceholderNoAction and MLNGPlaceholderActions copy behaviour."""

    def test_placeholder_no_action(self):
        # The no-op action set offers no copy languages and refuses to copy.
        actions = PlaceholderNoAction()
        self.assertEqual(actions.get_copy_languages(), [])
        self.assertFalse(actions.copy())

    def test_mlng_placeholder_actions_get_copy_languages(self):
        actions = MLNGPlaceholderActions()
        fr = Translations.objects.get(language_code='fr')
        de = Translations.objects.get(language_code='de')
        en = Translations.objects.get(language_code='en')
        fieldname = 'placeholder'
        fr_copy_languages = actions.get_copy_languages(
            fr.placeholder, Translations, fieldname
        )
        de_copy_languages = actions.get_copy_languages(
            de.placeholder, Translations, fieldname
        )
        en_copy_languages = actions.get_copy_languages(
            en.placeholder, Translations, fieldname
        )
        EN = ('en', 'English')
        FR = ('fr', 'French')
        # Fixture data: en and fr placeholders hold content, de is empty.
        self.assertEqual(set(fr_copy_languages), set([EN]))
        self.assertEqual(set(de_copy_languages), set([EN, FR]))
        self.assertEqual(set(en_copy_languages), set([FR]))

    def test_mlng_placeholder_actions_copy(self):
        actions = MLNGPlaceholderActions()
        fr = Translations.objects.get(language_code='fr')
        de = Translations.objects.get(language_code='de')
        self.assertEqual(fr.placeholder.cmsplugin_set.count(), 1)
        self.assertEqual(de.placeholder.cmsplugin_set.count(), 0)
        # Copy the single fr plugin into the empty de placeholder.
        new_plugins = actions.copy(de.placeholder, 'fr', 'placeholder', Translations, 'de')
        self.assertEqual(len(new_plugins), 1)
        de = self.reload(de)
        fr = self.reload(fr)
        self.assertEqual(fr.placeholder.cmsplugin_set.count(), 1)
        self.assertEqual(de.placeholder.cmsplugin_set.count(), 1)

    def test_mlng_placeholder_actions_empty_copy(self):
        actions = MLNGPlaceholderActions()
        fr = Translations.objects.get(language_code='fr')
        de = Translations.objects.get(language_code='de')
        self.assertEqual(fr.placeholder.cmsplugin_set.count(), 1)
        self.assertEqual(de.placeholder.cmsplugin_set.count(), 0)
        # Copying from an empty source creates nothing.
        new_plugins = actions.copy(fr.placeholder, 'de', 'placeholder', Translations, 'fr')
        self.assertEqual(len(new_plugins), 0)
        de = self.reload(de)
        fr = self.reload(fr)
        self.assertEqual(fr.placeholder.cmsplugin_set.count(), 1)
        self.assertEqual(de.placeholder.cmsplugin_set.count(), 0)

    def test_mlng_placeholder_actions_no_placeholder(self):
        actions = MLNGPlaceholderActions()
        Translations.objects.filter(language_code='nl').update(placeholder=None)
        de = Translations.objects.get(language_code='de')
        nl = Translations.objects.get(language_code='nl')
        self.assertEqual(nl.placeholder, None)
        self.assertEqual(de.placeholder.cmsplugin_set.count(), 0)
        # Copy must fail (return False) when the target has no placeholder.
        okay = actions.copy(de.placeholder, 'nl', 'placeholder', Translations, 'de')
        self.assertEqual(okay, False)
        # NOTE(review): the reloads/re-fetches below are never asserted
        # against — possibly leftovers from a removed assertion; confirm
        # before cleaning up.
        de = self.reload(de)
        nl = self.reload(nl)
        nl = Translations.objects.get(language_code='nl')
        de = Translations.objects.get(language_code='de')
class PlaceholderModelTests(CMSTestCase):
    """Tests Placeholder model permissions and attached model/field lookup."""

    def get_mock_user(self, superuser):
        # User stub whose has_perm always denies, so only the superuser flag
        # can grant access.
        return AttributeObject(
            is_superuser=superuser,
            has_perm=lambda string: False,
        )

    def get_mock_request(self, superuser=True):
        return AttributeObject(
            superuser=superuser,
            user=self.get_mock_user(superuser)
        )

    def test_check_placeholder_permissions_ok_for_superuser(self):
        ph = Placeholder.objects.create(slot='test', default_width=300)
        result = ph.has_change_permission(self.get_mock_request(True))
        self.assertTrue(result)

    def test_check_placeholder_permissions_nok_for_user(self):
        ph = Placeholder.objects.create(slot='test', default_width=300)
        result = ph.has_change_permission(self.get_mock_request(False))
        self.assertFalse(result)

    def test_check_unicode_rendering(self):
        ph = Placeholder.objects.create(slot='test', default_width=300)
        result = force_unicode(ph)
        self.assertEqual(result, u'test')

    # NOTE(review): "excercise" below is a typo for "exercise"; kept because
    # renaming test methods changes the test ids reported by runners.
    def test_excercise_get_attached_model(self):
        ph = Placeholder.objects.create(slot='test', default_width=300)
        result = ph._get_attached_model()
        self.assertEqual(result, None)  # Simple PH - no model

    def test_excercise_get_attached_field_name(self):
        ph = Placeholder.objects.create(slot='test', default_width=300)
        result = ph._get_attached_field_name()
        self.assertEqual(result, None)  # Simple PH - no field name

    def test_excercise_get_attached_models_notplugins(self):
        ex = Example1(
            char_1='one',
            char_2='two',
            char_3='tree',
            char_4='four'
        )
        ex.save()
        ph = ex.placeholder
        result = list(ph._get_attached_models())
        self.assertEqual(result, [Example1])  # Simple PH - Example1 model
        add_plugin(ph, TextPlugin, 'en', body='en body')
        result = list(ph._get_attached_models())
        self.assertEqual(result, [Example1])  # Simple PH still one Example1 model

    def test_excercise_get_attached_fields_notplugins(self):
        ex = Example1(
            char_1='one',
            char_2='two',
            char_3='tree',
            char_4='four',
        )
        ex.save()
        ph = ex.placeholder
        result = [f.name for f in list(ph._get_attached_fields())]
        self.assertEqual(result, ['placeholder'])  # Simple PH - placeholder field name
        add_plugin(ph, TextPlugin, 'en', body='en body')
        result = [f.name for f in list(ph._get_attached_fields())]
        self.assertEqual(result, ['placeholder'])  # Simple PH - still one placeholder field name
class PlaceholderAdminTestBase(CMSTestCase):
    """Shared helpers for the placeholder admin test cases below."""

    def get_placeholder(self):
        """Create and return a fresh placeholder in the 'test' slot."""
        placeholder = Placeholder.objects.create(slot='test')
        return placeholder

    def get_admin(self):
        """Return the registered ModelAdmin instance for Example1."""
        admin.autodiscover()
        registry = admin.site._registry
        return registry[Example1]

    def get_post_request(self, data):
        """Build a POST request carrying *data* as its payload."""
        return self.get_request(post_data=data)
class PlaceholderAdminTest(PlaceholderAdminTestBase):
    """Tests plugin-count limits (global and per-type) on add and move, plus
    the edit/cancel round trip.

    Fix over the previous revision: the deprecated ``assertEquals`` alias is
    replaced with ``assertEqual`` (behaviour identical).
    """

    # The 'test' slot allows at most 2 plugins overall and 1 TextPlugin.
    placeholderconf = {'test': {
        'limits': {
            'global': 2,
            'TextPlugin': 1,
        }
    }
    }

    def test_global_limit(self):
        placeholder = self.get_placeholder()
        admin = self.get_admin()
        data = {
            'plugin_type': 'LinkPlugin',
            'placeholder_id': placeholder.pk,
            'plugin_language': 'en',
        }
        superuser = self.get_superuser()
        with UserLoginContext(self, superuser):
            with SettingsOverride(CMS_PLACEHOLDER_CONF=self.placeholderconf):
                request = self.get_post_request(data)
                response = admin.add_plugin(request)  # first
                self.assertEqual(response.status_code, 200)
                response = admin.add_plugin(request)  # second
                self.assertEqual(response.status_code, 200)
                # Third add exceeds the global limit of 2.
                response = admin.add_plugin(request)  # third
                self.assertEqual(response.status_code, 400)
                self.assertEqual(response.content, b"This placeholder already has the maximum number of plugins (2).")

    def test_type_limit(self):
        placeholder = self.get_placeholder()
        admin = self.get_admin()
        data = {
            'plugin_type': 'TextPlugin',
            'placeholder_id': placeholder.pk,
            'plugin_language': 'en',
        }
        superuser = self.get_superuser()
        with UserLoginContext(self, superuser):
            with SettingsOverride(CMS_PLACEHOLDER_CONF=self.placeholderconf):
                request = self.get_post_request(data)
                response = admin.add_plugin(request)  # first
                self.assertEqual(response.status_code, 200)
                # Second TextPlugin exceeds the per-type limit of 1.
                response = admin.add_plugin(request)  # second
                self.assertEqual(response.status_code, 400)
                self.assertEqual(response.content,
                                 b"This placeholder already has the maximum number (1) of allowed Text plugins.")

    def test_global_limit_on_plugin_move(self):
        admin = self.get_admin()
        superuser = self.get_superuser()
        source_placeholder = Placeholder.objects.create(slot='source')
        target_placeholder = self.get_placeholder()
        data = {
            'placeholder': source_placeholder,
            'plugin_type': 'LinkPlugin',
            'language': 'en',
        }
        plugin_1 = add_plugin(**data)
        plugin_2 = add_plugin(**data)
        plugin_3 = add_plugin(**data)
        with UserLoginContext(self, superuser):
            with SettingsOverride(CMS_PLACEHOLDER_CONF=self.placeholderconf):
                request = self.get_post_request({'placeholder_id': target_placeholder.pk, 'plugin_id': plugin_1.pk})
                response = admin.move_plugin(request)  # first
                self.assertEqual(response.status_code, 200)
                request = self.get_post_request({'placeholder_id': target_placeholder.pk, 'plugin_id': plugin_2.pk})
                response = admin.move_plugin(request)  # second
                self.assertEqual(response.status_code, 200)
                # Moving a third plugin in exceeds the global limit of 2.
                request = self.get_post_request({'placeholder_id': target_placeholder.pk, 'plugin_id': plugin_3.pk})
                response = admin.move_plugin(request)  # third
                self.assertEqual(response.status_code, 400)
                self.assertEqual(response.content, b"This placeholder already has the maximum number of plugins (2).")

    def test_type_limit_on_plugin_move(self):
        admin = self.get_admin()
        superuser = self.get_superuser()
        source_placeholder = Placeholder.objects.create(slot='source')
        target_placeholder = self.get_placeholder()
        data = {
            'placeholder': source_placeholder,
            'plugin_type': 'TextPlugin',
            'language': 'en',
        }
        plugin_1 = add_plugin(**data)
        plugin_2 = add_plugin(**data)
        with UserLoginContext(self, superuser):
            with SettingsOverride(CMS_PLACEHOLDER_CONF=self.placeholderconf):
                request = self.get_post_request({'placeholder_id': target_placeholder.pk, 'plugin_id': plugin_1.pk})
                response = admin.move_plugin(request)  # first
                self.assertEqual(response.status_code, 200)
                # Moving a second TextPlugin in exceeds the per-type limit.
                request = self.get_post_request({'placeholder_id': target_placeholder.pk, 'plugin_id': plugin_2.pk})
                response = admin.move_plugin(request)  # second
                self.assertEqual(response.status_code, 400)
                self.assertEqual(response.content,
                                 b"This placeholder already has the maximum number (1) of allowed Text plugins.")

    def test_edit_plugin_and_cancel(self):
        placeholder = self.get_placeholder()
        admin = self.get_admin()
        data = {
            'plugin_type': 'TextPlugin',
            'placeholder_id': placeholder.pk,
            'plugin_language': 'en',
        }
        superuser = self.get_superuser()
        with UserLoginContext(self, superuser):
            with SettingsOverride(CMS_PLACEHOLDER_CONF=self.placeholderconf):
                request = self.get_post_request(data)
                response = admin.add_plugin(request)
                self.assertEqual(response.status_code, 200)
                # Extract the new plugin pk from the edit-plugin URL embedded
                # in the response body.
                plugin_id = int(str(response.content).split('edit-plugin/')[1].split("/")[0])
                data = {
                    'body': 'Hello World',
                }
                request = self.get_post_request(data)
                response = admin.edit_plugin(request, plugin_id)
                self.assertEqual(response.status_code, 200)
                text_plugin = Text.objects.get(pk=plugin_id)
                self.assertEqual('Hello World', text_plugin.body)
                # edit again, but this time press cancel
                data = {
                    'body': 'Hello World!!',
                    '_cancel': True,
                }
                request = self.get_post_request(data)
                response = admin.edit_plugin(request, plugin_id)
                self.assertEqual(response.status_code, 200)
                # Cancel must leave the previously saved body untouched.
                text_plugin = Text.objects.get(pk=plugin_id)
                self.assertEqual('Hello World', text_plugin.body)
class PlaceholderPluginPermissionTests(PlaceholderAdminTestBase):
    """Checks that adding/editing plugins in an app placeholder enforces the
    plugin-model permission combined with app- or object-level permissions."""

    def _testuser(self):
        # A staff but non-superuser account, so permission checks apply.
        u = User(username="test", is_staff=True, is_active=True, is_superuser=False)
        u.set_password("test")
        u.save()
        return u

    def _create_example(self):
        ex = Example1(
            char_1='one',
            char_2='two',
            char_3='tree',
            char_4='four'
        )
        ex.save()
        self._placeholder = ex.placeholder
        self.example_object = ex

    def _create_plugin(self):
        self._plugin = add_plugin(self._placeholder, 'TextPlugin', 'en')

    # `save` is accepted for call symmetry in the four helpers below but is
    # not used by their bodies.
    def _give_permission(self, user, model, permission_type, save=True):
        codename = '%s_%s' % (permission_type, model._meta.object_name.lower())
        user.user_permissions.add(Permission.objects.get(codename=codename))

    def _delete_permission(self, user, model, permission_type, save=True):
        codename = '%s_%s' % (permission_type, model._meta.object_name.lower())
        user.user_permissions.remove(Permission.objects.get(codename=codename))

    def _give_object_permission(self, user, object, permission_type, save=True):
        codename = '%s_%s' % (permission_type, object.__class__._meta.object_name.lower())
        UserObjectPermission.objects.assign_perm(codename, user=user, obj=object)

    def _delete_object_permission(self, user, object, permission_type, save=True):
        codename = '%s_%s' % (permission_type, object.__class__._meta.object_name.lower())
        UserObjectPermission.objects.remove_perm(codename, user=user, obj=object)

    def _post_request(self, user):
        data = {
            'plugin_type': 'TextPlugin',
            'placeholder_id': self._placeholder.pk,
            'plugin_language': 'en',
        }
        request = self.get_post_request(data)
        # Reload to pick up permissions granted after the user was fetched.
        request.user = self.reload(user)
        request._messages = default_storage(request)
        return request

    def test_plugin_add_requires_permissions(self):
        """User wants to add a plugin to the example app placeholder but has no permissions"""
        self._test_plugin_action_requires_permissions('add')

    def test_plugin_edit_requires_permissions(self):
        """User wants to edit a plugin to the example app placeholder but has no permissions"""
        self._test_plugin_action_requires_permissions('change')

    def _test_plugin_action_requires_permissions(self, key):
        # Exercises all 8 combinations of (plugin-model, app-model,
        # object-level) permission for the given action key.
        self._create_example()
        if key == 'change':
            self._create_plugin()
        normal_guy = self._testuser()
        admin = self.get_admin()
        # check all combinations of plugin, app and object permission
        for perms in itertools.product(*[[False, True]]*3):
            self._set_perms(normal_guy, [Text, Example1, self.example_object], perms, key)
            request = self._post_request(normal_guy)
            if key == 'add':
                response = admin.add_plugin(request)
            elif key == 'change':
                response = admin.edit_plugin(request, self._plugin.id)
            # Access requires the plugin-model permission plus either the
            # app-level or the object-level permission.
            should_pass = perms[0] and (perms[1] or perms[2])
            expected_status_code = HttpResponse.status_code if should_pass else HttpResponseForbidden.status_code
            self.assertEqual(response.status_code, expected_status_code)
        # cleanup
        self._set_perms(normal_guy, [Text, Example1, self.example_object], (False,)*3, key)

    def _set_perms(self, user, objects, perms, key):
        # Dispatch to _give/_delete[_object]_permission per (obj, flag) pair:
        # model instances get object-level perms, model classes get model perms.
        for obj, perm in zip(objects, perms):
            action = 'give' if perm else 'delete'
            object = '_object' if isinstance(obj, models.Model) else ''  # NOTE(review): shadows builtin `object`
            method_name = '_%s%s_permission' % (action, object)
            getattr(self, method_name)(user, obj, key)
class PlaceholderConfTests(TestCase):
    """CMS_PLACEHOLDER_CONF resolution: the template-qualified slot entry must
    take precedence over the bare slot entry."""

    def _assert_only_link_plugin(self, page):
        """With both a slot and a template-qualified conf entry present, the
        col_left placeholder of *page* must expose only LinkPlugin."""
        placeholder = page.placeholders.get(slot='col_left')
        conf = {
            'col_two': {
                'plugins': ['TextPlugin', 'LinkPlugin'],
            },
            'col_two.html col_left': {
                'plugins': ['LinkPlugin'],
            },
        }
        with SettingsOverride(CMS_PLACEHOLDER_CONF=conf):
            plugins = plugin_pool.get_all_plugins(placeholder, page)
            self.assertEqual(len(plugins), 1, plugins)
            self.assertEqual(plugins[0], LinkPlugin)

    def test_get_all_plugins_single_page(self):
        """Direct template assignment."""
        page = create_page('page', 'col_two.html', 'en')
        self._assert_only_link_plugin(page)

    def test_get_all_plugins_inherit(self):
        """Template inherited from the parent page."""
        parent = create_page('parent', 'col_two.html', 'en')
        page = create_page('page', constants.TEMPLATE_INHERITANCE_MAGIC, 'en', parent=parent)
        self._assert_only_link_plugin(page)
class PlaceholderI18NTest(CMSTestCase):
    """Rendering of the admin language tabs for translated vs. plain models."""

    def _testuser(self):
        """Create and return a staff superuser account."""
        account = User(username="test", is_staff=True, is_active=True, is_superuser=True)
        account.set_password("test")
        account.save()
        return account

    def test_hvad_tabs(self):
        """A hvad-translated model renders the hidden selected-language button."""
        obj = MultilingualExample1(
            char_1='one',
            char_2='two',
        )
        obj.save()
        self._testuser()
        self.client.login(username='test', password='test')
        response = self.client.get('/de/admin/placeholderapp/multilingualexample1/%d/' % obj.pk)
        self.assertContains(response, '<input type="hidden" class="language_button selected" name="de" />')

    def test_no_tabs(self):
        """A plain model must not render the selected-language button."""
        obj = Example1(
            char_1='one',
            char_2='two',
            char_3='one',
            char_4='two',
        )
        obj.save()
        self._testuser()
        self.client.login(username='test', password='test')
        response = self.client.get('/de/admin/placeholderapp/example1/%d/' % obj.pk)
        self.assertNotContains(response, '<input type="hidden" class="language_button selected" name="de" />')

    def test_placeholder_tabs(self):
        """A two-placeholder model must not render the visible language switcher."""
        obj = TwoPlaceholderExample(
            char_1='one',
            char_2='two',
            char_3='one',
            char_4='two',
        )
        obj.save()
        self._testuser()
        self.client.login(username='test', password='test')
        response = self.client.get('/de/admin/placeholderapp/twoplaceholderexample/%d/' % obj.pk)
        self.assertNotContains(
            response,
            """<input type="button" onclick="trigger_lang_button(this,'./?language=en');" class="language_button selected" id="debutton" name="en" value="English">""")
| |
import nltk
import datetime
import time
from configparser import ConfigParser
from modules import faq_generator as faq_generator_module
def intersect(titles):
    """Return the elements common to every sequence in *titles*.

    Args:
        titles: iterable of iterables (e.g. token lists of submission titles).

    Returns:
        list: the intersection, in arbitrary (set) order. Empty when *titles*
        itself is empty — the original indexed ``titles[0]`` and raised
        IndexError in that case.
    """
    sets = [set(title) for title in titles]
    if not sets:
        return []
    # set.intersection folds all sets at C speed; equivalent to the original
    # manual '&' loop (which also redundantly intersected titles[0] with itself).
    return list(set.intersection(*sets))
def handle_repost(r, submission, search_query=None, flair_and_comment=False):
    """Mark *submission* as a repost: set the 'Repost' flair and post a
    distinguished sticky comment; when a search query is supplied, also report
    the submission and link to a prebuilt subreddit search for duplicates.

    Args:
        r: authenticated praw session.
        submission: the praw submission being acted on.
        search_query: space-separated keywords used to find duplicates; when
            None the comment omits the "similar questions" search link.
        flair_and_comment: when False this function does nothing.
    """
    if flair_and_comment and search_query is not None:
        # Build .../search?q=title%3A%28word1+word2%29&... from the keywords.
        search_url = 'https://www.reddit.com/r/explainlikeimfive/search?q=title%3A%28'
        for word in search_query.split():
            search_url += word + '+'
        # Drop the trailing '+' left by the loop above.
        search_url = search_url[:-1]
        search_url += '%29&restrict_sr=on&sort=relevance&t=all'
        submission.report("Potential repost")
        r.set_flair('explainlikeimfive', submission, flair_text='Repost', flair_css_class='Repost')
        s1 = submission.author
        s2 = search_url
        s3 = 'https://www.reddit.com/r/explainlikeimfive/wiki/reposts#wiki_why_we_allow_reposts'
        s4 = 'https://www.reddit.com/r/explainlikeimfive/wiki/reposts#wiki_how_to_filter_reposts'
        s5 = 'https://www.reddit.com/message/compose/?to=/r/explainlikeimfive'
        comment = ("""Hi /u/%s,
I've ran a search for your question and detected it is a commonly asked question, so I've
marked this question as repost. It will still be visible in the subreddit nonetheless.
**You can see previous similar questions [here](%s).**
*[Why we allow reposts](%s) | [How to filter out reposts permanently](%s)*
---
**This search was performed automatically using keywords from your submission**.
*Please [contact the moderators of this subreddit](%s) if you believe this is a false positive.*
""") % (s1, s2, s3, s4, s5)
        comment_obj = submission.add_comment(comment)
        # Sticky + distinguish so the notice stays at the top of the thread.
        comment_obj.distinguish(sticky=True)
    elif flair_and_comment and search_query is None:
        # No query available: flair and comment only (note: no report() here,
        # unlike the branch above).
        r.set_flair('explainlikeimfive', submission, flair_text='Repost', flair_css_class='Repost')
        s1 = submission.author
        s2 = 'https://www.reddit.com/r/explainlikeimfive/wiki/reposts#wiki_why_we_allow_reposts'
        s3 = 'https://www.reddit.com/r/explainlikeimfive/wiki/reposts#wiki_how_to_filter_reposts'
        s4 = 'https://www.reddit.com/message/compose/?to=/r/explainlikeimfive'
        comment = ("""Hi /u/%s,
This question has been marked as a repost as it is a commonly asked question.
It will still be visible in the subreddit nonetheless.
*[Why we allow reposts](%s) | [How to filter out reposts permanently](%s)*
---
**This search was performed automatically using keywords from your submission**.
*Please [contact the moderators of this subreddit](%s) if you believe this is a false positive.*
""") % (s1, s2, s3, s4)
        comment_obj = submission.add_comment(comment)
        comment_obj.distinguish(sticky=True)
class Filters:
    """A set of submission filters plus the shared praw/Slack/db plumbing.

    Every public method (other than run_filters) is auto-registered as a
    filter; each filter returns False to block a submission, True to pass it,
    or None when the submission was already processed (which counts as a pass
    in run_filters, since only an explicit False blocks).
    """

    def __init__(self, r, s, db, subreddit):
        """:param r: praw session; :param s: Slack client; :param db: rule/FAQ
        store; :param subreddit: subreddit name searched for duplicates."""
        # Parse config
        config = ConfigParser()
        config.read('modules_config.ini')
        # POS tags (comma-separated in the config) whose words count as keywords.
        self.tags = config.get('filters', 'tags').split(',')
        self.verbose = config.getboolean('filters', 'verbose')
        self.r = r
        self.s = s
        self.db = db
        # Submission ids already handled, to avoid double-processing.
        self.already_done_reposts = []
        self.already_checked_cur_rules = []
        self.filters = []
        self.subreddit = subreddit
        # Auto-register every public method as a filter (run_filters excluded).
        for name, f in Filters.__dict__.items():
            if callable(f) and name[0] != "_" and name != "run_filters":
                self.filters.append(name)

    # -------------- DEFINE INTERNAL METHODS NEEDED BY THE FILTERS HERE --------------
    def _create_c_events_rule(self, search_results):
        """Derive a current-event keyword rule from a burst of similar titles.

        Intersects the first four result titles, keeps only words whose POS tag
        is in self.tags, strips boilerplate tokens, stores the keywords and
        announces the new rule on Slack.

        NOTE(review): this inserts under 'recent_event' while
        _get_broken_cur_rule reads 'current_events' — confirm both key names
        are intentional.
        """
        list_of_tokenized_titles = []
        final_words_list = []
        for index, submission in enumerate(search_results):
            if index <= 3:
                tokenized_title = submission.title.lower().split()
                list_of_tokenized_titles.append(tokenized_title)
                # submission.remove()
        title_words_list = intersect(list_of_tokenized_titles)
        tokens = nltk.word_tokenize(' '.join(title_words_list))
        tagged = nltk.pos_tag(tokens)
        for word, tag in tagged:
            if tag in self.tags:
                final_words_list.append(word)
        # Drop boilerplate tokens that survive the intersection.
        try:
            final_words_list.remove('eli5')
        except ValueError:
            pass
        try:
            final_words_list.remove(':')
        except ValueError:
            pass
        try:
            final_words_list.remove(';')
        except ValueError:
            pass
        self.db.insert_entry('recent_event', event_keywords=final_words_list)
        self.s.send_msg("*Created current event rule, posts containing:* '%s'" % ' '.join(final_words_list),
                        channel_name="eli5bot-dev",
                        confirm=False)

    def _get_broken_cur_rule(self, title_words_list):
        """Return the first matching current-event rule joined as a string, or None.

        A rule matches when each of its keywords occurs in the space-joined
        title (note: 'in' is a substring test, so keywords can match inside
        longer words).
        """
        broken_rule = None
        submission_title = ' '.join(title_words_list)
        current_rules = self.db.retrieve_entries('current_events')
        for rule in current_rules:
            if all(x in submission_title for x in rule):
                broken_rule = rule
                break
        if broken_rule is not None:
            ret = ' '.join(broken_rule)
        else:
            ret = None
        return ret

    def run_filters(self, submission):
        """Run *submission* through every registered filter.

        Returns True unless some filter returned an explicit False (None
        results do not block, since ``False in [...]`` uses equality).
        """
        passed_list = []
        for filter_method in self.filters:
            passed = getattr(self, filter_method)(submission)
            passed_list.append(passed)
        if False in passed_list:
            return False
        else:
            return True

    # -------------- DEFINE FILTERS HERE --------------
    def check_current_rule(self, submission):
        """Report submissions whose title matches a stored current-event rule.

        Returns False when a rule matched, True when none did, and None when
        the submission was already checked.
        """
        if submission.id not in self.already_checked_cur_rules:
            title_words_list = nltk.word_tokenize(submission.title.lower())
            broken_rule = self._get_broken_cur_rule(title_words_list)
            self.already_checked_cur_rules.append(submission.id)
            if broken_rule is not None:
                # submission.remove()
                submission.report("Current rule: %s" % broken_rule)
                return False
            else:
                return True

    def search_reposts(self, submission):
        """Detect reposts and question floods for a new submission.

        Builds a keyword query from the POS-filtered title and searches the
        subreddit: >=5 hits in the last year -> FAQ entry plus repost handling;
        >=3 of those within 3 hours -> Slack alert and filter failure; >=3 hits
        in the last month -> report as an extremely common repost. Returns
        False on failure, True on pass, None for already-seen submissions.

        NOTE(review): search_url is assembled below but never used afterwards
        (and, unlike handle_repost's version, keeps its trailing '+') — looks
        like dead code; confirm before removing.
        """
        nltk.data.path.append('./nltk_data/')
        faq_generator = faq_generator_module.FaqGenerator(self.r, self.subreddit)
        if submission.id not in self.already_done_reposts:
            words_list = []
            search_results_in_last_threehours = []
            search_result_list = []
            total_in_threehours = 0
            title = submission.title.lower()
            search_url = 'https://www.reddit.com/r/explainlikeimfive/search?q=title%3A%28'
            self.already_done_reposts.append(submission.id)
            tokens = nltk.word_tokenize(title)
            # Strip boilerplate tokens before POS tagging.
            try:
                tokens.remove('eli5')
            except ValueError:
                pass
            try:
                tokens.remove(':')
            except ValueError:
                pass
            try:
                tokens.remove(';')
            except ValueError:
                pass
            tagged = nltk.pos_tag(tokens)
            for word, tag in tagged:
                if tag in self.tags:
                    words_list.append(word)
                    search_url += word + '+'
            search_url += '&restrict_sr=on&sort=relevance&t=all'
            search_query = ' '.join(words_list)
            full_search_query = "title:(" + search_query + ")"
            # praw occasionally raises AssertionError on search; retry until it works.
            while True:
                try:
                    search_result = self.r.search(full_search_query, subreddit=self.subreddit,
                                                  period='year', sort='new')
                    search_result_list = list(search_result)
                    break
                except AssertionError:
                    time.sleep(1)
                    continue
            if search_result_list:
                # Count how many of the matches were posted in the last 3 hours.
                for item in search_result_list:
                    comment_time = datetime.datetime.fromtimestamp(item.created_utc)
                    d = datetime.datetime.now() - comment_time
                    delta_time = d.total_seconds()
                    if int(delta_time / 60) < 180:
                        total_in_threehours += 1
                        search_results_in_last_threehours.append(item)
                if len(search_result_list) >= 5:
                    # Common question: remember it for the FAQ and flag as repost.
                    faq_generator.add_entry(submission, search_query)
                    if self.verbose:
                        msg_string = "---\n*Potential repost detected*\n" + \
                                     title + '\n' + "*POS tagger output:* " + str(tagged) + '\n' + \
                                     '*Link:* ' + submission.permalink + '\n' + "*Search query:* " +\
                                     full_search_query + '\n' + '*Search results:*\n'
                        for item in search_result_list:
                            msg_string += str(item) + '\n'
                        msg = self.s.send_msg(msg_string, channel_name="eli5bot-dev", confirm=False)
                    handle_repost(self.r, submission, search_query, flair_and_comment=True)
                    # return False
                if total_in_threehours >= 3:
                    # Burst of near-identical questions: alert and block.
                    msg_string = "---\n*Potential large influx of question*\n" + \
                                 title + '\n' + "*Search query:* " + full_search_query + '\n' + '*Link:* ' + \
                                 submission.permalink
                    msg = self.s.send_msg(msg_string, channel_name="eli5bot-dev", confirm=False)
                    return False
            # Second pass: same query restricted to the last month.
            while True:
                try:
                    search_result = self.r.search(full_search_query, subreddit=self.subreddit,
                                                  period='month', sort='new')
                    search_result_list = list(search_result)
                    break
                except AssertionError:
                    time.sleep(1)
                    continue
            if search_result_list:
                if len(search_result_list) >= 3:
                    submission.report("Potential extremely common repost (asked more than once a month)")
                    return False
            return True
| |
from __future__ import absolute_import, print_function
import numpy as np
from numpy.testing import TestCase, assert_array_equal, run_module_suite
from scipy.weave import size_check
from scipy.weave.ast_tools import harvest_variables
empty = np.array(())
class TestMakeSameLength(TestCase):
    """size_check.make_same_length: pad the shorter shape with leading 1s."""

    def generic_check(self, x, y, desired):
        """Assert make_same_length(x, y) produces the *desired* shape pair."""
        result = size_check.make_same_length(x, y)
        assert_array_equal(result, desired)

    def test_scalar(self):
        self.generic_check((), (), (empty, empty))

    def test_x_scalar(self):
        expected = (np.array((1, 1)), np.array((1, 2)))
        self.generic_check((), (1, 2), expected)

    def test_y_scalar(self):
        expected = (np.array((1, 2)), np.array((1, 1)))
        self.generic_check((1, 2), (), expected)

    def test_x_short(self):
        expected = (np.array((1, 1, 2)), np.array((1, 2, 3)))
        self.generic_check((1, 2), (1, 2, 3), expected)

    def test_y_short(self):
        expected = (np.array((1, 2, 3)), np.array((1, 1, 2)))
        self.generic_check((1, 2, 3), (1, 2), expected)
class TestBinaryOpSize(TestCase):
    """Broadcast-shape computation of size_check.binary_op_size.

    TestDummyArray subclasses this, overriding generic_check and desired_type
    to re-run the same case matrix through dummy_array operators.
    """

    def generic_check(self, x, y, desired):
        """Assert that combining shapes *x* and *y* broadcasts to *desired*."""
        # Dropped the original's no-op `desired = desired` statement.
        actual = size_check.binary_op_size(x, y)
        assert_array_equal(actual, desired)

    def generic_error_check(self, x, y):
        """Shapes *x* and *y* must be rejected as incompatible."""
        self.assertRaises(ValueError, size_check.binary_op_size, x, y)

    def desired_type(self, val):
        """Wrap an expected shape; overridden by subclasses."""
        return np.array(val)

    def test_scalar(self):
        self.generic_check((), (), self.desired_type(()))

    def test_x1(self):
        self.generic_check((1,), (), self.desired_type((1,)))

    def test_y1(self):
        self.generic_check((), (1,), self.desired_type((1,)))

    def test_x_y(self):
        self.generic_check((5,), (5,), self.desired_type((5,)))

    def test_x_y2(self):
        self.generic_check((5, 10), (5, 10), self.desired_type((5, 10)))

    def test_x_y3(self):
        self.generic_check((5, 10), (1, 10), self.desired_type((5, 10)))

    def test_x_y4(self):
        self.generic_check((1, 10), (5, 10), self.desired_type((5, 10)))

    def test_x_y5(self):
        self.generic_check((5, 1), (1, 10), self.desired_type((5, 10)))

    def test_x_y6(self):
        self.generic_check((1, 10), (5, 1), self.desired_type((5, 10)))

    def test_x_y7(self):
        self.generic_check((5, 4, 3, 2, 1), (3, 2, 1), self.desired_type((5, 4, 3, 2, 1)))

    def test_error1(self):
        self.generic_error_check((5,), (4,))

    def test_error2(self):
        self.generic_error_check((5, 5), (4, 5))
class TestDummyArray(TestBinaryOpSize):
    """Re-run the broadcast matrix through dummy_array arithmetic operators."""

    def generic_check(self, x, y, desired):
        """Wrap tuple shapes in ones-arrays and check that every binary
        operator on the dummy_arrays reports the *desired* result."""
        if isinstance(x, tuple):
            x = np.ones(x)
        if isinstance(y, tuple):
            y = np.ones(y)
        xx = size_check.dummy_array(x)
        yy = size_check.dummy_array(y)
        # dummy_array only tracks shapes, so shift operators on floats are fine.
        # (Dropped the original's no-op `desired = desired` statement.)
        for op in ['+', '-', '/', '*', '<<', '>>']:
            actual = eval('xx' + op + 'yy')
            assert_array_equal(actual, desired)

    def desired_type(self, val):
        """Expected values are dummy_arrays themselves (pure shape holders)."""
        return size_check.dummy_array(np.array(val), 1)
class TestDummyArrayIndexing(TestCase):
    """dummy_array slicing must report the same shapes numpy itself produces.

    Each generic_* helper evaluates an indexing expression over a local name
    ``a``, once on a real ndarray and once on a dummy_array, and compares the
    resulting shapes.
    """

    def generic_check(self, ary, expr, desired):
        """Evaluate *expr* with ``a`` bound to dummy_array(ary); compare shapes."""
        a = size_check.dummy_array(ary)
        actual = eval(expr).shape
        assert_array_equal(actual, desired, expr)

    def generic_wrap(self, a, expr):
        """Derive the expected shape from numpy, then defer to generic_check.

        An IndexError from dummy_array is tolerated only when numpy's result
        contains a zero-length axis (zero-length slices are unsupported).
        """
        desired = np.array(eval(expr).shape)
        try:
            self.generic_check(a, expr, desired)
        except IndexError:
            if 0 not in desired:
                msg = '%s raised IndexError in dummy_array, but forms\n' \
                      'valid array shape -> %s' % (expr, str(desired))
                raise AttributeError(msg)

    def generic_1d(self, expr):
        a = np.arange(10)
        self.generic_wrap(a, expr)

    def generic_2d(self, expr):
        a = np.ones((10, 20))
        self.generic_wrap(a, expr)

    def generic_3d(self, expr):
        a = np.ones((10, 20, 1))
        self.generic_wrap(a, expr)

    def generic_1d_index(self, expr):
        """Scalar indexing must produce an empty (scalar) shape."""
        a = np.arange(10)
        desired = np.array(())
        self.generic_check(a, expr, desired)

    def test_1d_index_0(self):
        self.generic_1d_index('a[0]')

    def test_1d_index_1(self):
        self.generic_1d_index('a[4]')

    def test_1d_index_2(self):
        self.generic_1d_index('a[-4]')

    def test_1d_index_3(self):
        # Out-of-range scalar index is expected to raise.
        try:
            self.generic_1d('a[12]')
        except IndexError:
            pass

    def test_1d_index_calculated(self):
        self.generic_1d_index('a[0+1]')

    def test_1d_0(self):
        self.generic_1d('a[:]')

    def test_1d_1(self):
        self.generic_1d('a[1:]')

    def test_1d_2(self):
        self.generic_1d('a[-1:]')

    def test_1d_3(self):
        self.generic_1d('a[-11:]')

    def test_1d_4(self):
        self.generic_1d('a[:1]')

    def test_1d_5(self):
        self.generic_1d('a[:-1]')

    def test_1d_6(self):
        self.generic_1d('a[:-11]')

    def test_1d_7(self):
        self.generic_1d('a[1:5]')

    def test_1d_8(self):
        self.generic_1d('a[1:-5]')

    def test_1d_9(self):
        # don't support zero length slicing at the moment.
        try:
            self.generic_1d('a[-1:-5]')
        except IndexError:
            pass

    def test_1d_10(self):
        self.generic_1d('a[-5:-1]')

    def test_1d_stride_0(self):
        self.generic_1d('a[::1]')

    def test_1d_stride_1(self):
        self.generic_1d('a[::-1]')

    def test_1d_stride_2(self):
        self.generic_1d('a[1::1]')

    def test_1d_stride_3(self):
        self.generic_1d('a[1::-1]')

    def test_1d_stride_4(self):
        # don't support zero length slicing at the moment.
        try:
            self.generic_1d('a[1:5:-1]')
        except IndexError:
            pass

    def test_1d_stride_5(self):
        self.generic_1d('a[5:1:-1]')

    def test_1d_stride_6(self):
        self.generic_1d('a[:4:1]')

    def test_1d_stride_7(self):
        self.generic_1d('a[:4:-1]')

    def test_1d_stride_8(self):
        self.generic_1d('a[:-4:1]')

    def test_1d_stride_9(self):
        self.generic_1d('a[:-4:-1]')

    def test_1d_stride_10(self):
        self.generic_1d('a[:-3:2]')

    def test_1d_stride_11(self):
        self.generic_1d('a[:-3:-2]')

    def test_1d_stride_12(self):
        self.generic_1d('a[:-3:-7]')

    def test_1d_random(self):
        """Throw a bunch of different indexes at it for good measure."""
        import random
        # list(...) wrappers keep this valid on Python 3, where map() and
        # range() return lazy iterators that cannot be concatenated with '+'
        # (the original Python-2 form); the resulting list is identical on 2.
        choices = list(map(repr, range(50))) + list(range(50)) + [''] * 50
        for i in range(100):
            try:
                beg = random.choice(choices)
                end = random.choice(choices)
                step = random.choice(choices)
                if step in ['0', 0]:
                    step = 'None'
                self.generic_1d('a[%s:%s:%s]' % (beg, end, step))
            except IndexError:
                pass

    def test_2d_0(self):
        self.generic_2d('a[:]')

    def test_2d_1(self):
        self.generic_2d('a[:2]')

    def test_2d_2(self):
        self.generic_2d('a[:,:]')

    def test_2d_random(self):
        """Throw a bunch of different indexes at it for good measure."""
        import random
        # See test_1d_random for the list(...) rationale.
        choices = list(map(repr, range(50))) + list(range(50)) + [''] * 50
        for i in range(100):
            try:
                beg = random.choice(choices)
                end = random.choice(choices)
                step = random.choice(choices)
                beg2 = random.choice(choices)
                end2 = random.choice(choices)
                step2 = random.choice(choices)
                if step in ['0', 0]:
                    step = 'None'
                if step2 in ['0', 0]:
                    step2 = 'None'
                expr = 'a[%s:%s:%s,%s:%s:%s]' % (beg, end, step, beg2, end2, step2)
                self.generic_2d(expr)
            except IndexError:
                pass

    def test_3d_random(self):
        """Throw a bunch of different indexes at it for good measure."""
        import random
        # See test_1d_random for the list(...) rationale.
        choices = list(map(repr, range(50))) + list(range(50)) + [''] * 50
        for i in range(100):
            try:
                idx = []
                for i in range(9):
                    val = random.choice(choices)
                    # Every third slot is a stride; zero strides are invalid.
                    if (i + 1) % 3 == 0 and val in ['0', 0]:
                        val = 'None'
                    idx.append(val)
                expr = 'a[%s:%s:%s,%s:%s:%s,%s:%s:%s]' % tuple(idx)
                self.generic_3d(expr)
            except IndexError:
                pass
class TestReduction(TestCase):
    """Shape propagation of size_check.reduction along an axis."""

    def test_1d_0(self):
        a = np.ones((5,))
        actual = size_check.reduction(a, 0)
        desired = size_check.dummy_array((), 1)
        assert_array_equal(actual.shape, desired.shape)

    def test_2d_0(self):
        a = np.ones((5, 10))
        actual = size_check.reduction(a, 0)
        desired = size_check.dummy_array((10,), 1)
        assert_array_equal(actual.shape, desired.shape)

    def test_2d_1(self):
        a = np.ones((5, 10))
        actual = size_check.reduction(a, 1)
        desired = size_check.dummy_array((5,), 1)
        assert_array_equal(actual.shape, desired.shape)

    def test_3d_0(self):
        a = np.ones((5, 6, 7))
        actual = size_check.reduction(a, 1)
        desired = size_check.dummy_array((5, 7), 1)
        assert_array_equal(actual.shape, desired.shape)

    def test_error0(self):
        """Axis below -rank must raise ValueError.

        The original wrapped the call in try/except ValueError: pass, which
        silently passed even when no exception was raised; assertRaises
        actually enforces the failure.
        """
        a = np.ones((5,))
        self.assertRaises(ValueError, size_check.reduction, a, -2)

    def test_error1(self):
        """Axis >= rank must raise ValueError (see test_error0)."""
        a = np.ones((5,))
        self.assertRaises(ValueError, size_check.reduction, a, 1)
class TestExpressions(TestCase):
    """Drive size_check.check_expr / dummy_array through whole expressions.

    NOTE(review): several constructs here are Python-2 era and fragile — see
    the inline notes; confirm intent before modernizing.
    """

    def generic_check(self, expr, desired, **kw):
        """Evaluate *expr* with each harvested variable replaced by a
        dummy_array and compare the resulting shape to *desired* ('failed'
        means evaluation is expected to raise)."""
        import parser
        ast_list = parser.expr(expr).tolist()
        args = harvest_variables(ast_list)
        # NOTE(review): dict.update() returns None, so loc is always None here
        # and exec(s, None) runs against the current globals — the kw bindings
        # never reach the exec'd statements. Looks broken; confirm.
        loc = locals().update(kw)
        for var in args:
            s='%s = size_check.dummy_array(%s)'% (var,var)
            exec(s,loc)
        try:
            actual = eval(expr,locals()).shape
        except:
            actual = 'failed'
        # NOTE(review): 'is' compares object identity, relying on small-string
        # interning of 'failed'; should presumably be '=='.
        if actual is 'failed' and desired is 'failed':
            return
        try:
            assert_array_equal(actual,desired, expr)
        except:
            # Mismatches are only printed, not raised — this test cannot fail
            # a run; it just reports.
            print('EXPR:',expr)
            print('ACTUAL:',actual)
            print('DESIRED:',desired)

    def generic_wrap(self, expr, **kw):
        """Compute the expected shape (or 'failed') with numpy, then defer to
        generic_check."""
        try:
            x = np.array(eval(expr,kw))
            try:
                desired = x.shape
            except:
                desired = np.zeros(())
        except:
            desired = 'failed'
        self.generic_check(expr,desired,**kw)

    def test_generic_1d(self):
        a = np.arange(10)
        expr = 'a[:]'
        self.generic_wrap(expr,a=a)
        expr = 'a[:] + a'
        self.generic_wrap(expr,a=a)
        bad_expr = 'a[4:] + a'
        self.generic_wrap(bad_expr,a=a)
        a = np.arange(10)
        b = np.ones((1,10))
        expr = 'a + b'
        self.generic_wrap(expr,a=a,b=b)
        bad_expr = 'a[:5] + b'
        self.generic_wrap(bad_expr,a=a,b=b)

    def test_single_index(self):
        a = np.arange(10)
        expr = 'a[5] + a[3]'
        self.generic_wrap(expr,a=a)

    def test_calculated_index(self):
        a = np.arange(10)
        nx = 0
        expr = 'a[5] + a[nx+3]'
        size_check.check_expr(expr,locals())

    def test_calculated_index2(self):
        a = np.arange(10)
        nx = 0
        expr = 'a[1:5] + a[nx+1:5+nx]'
        size_check.check_expr(expr,locals())

    # NOTE(review): these two pass (a, expr) positionally into
    # generic_wrap(self, expr, **kw), i.e. the array lands in the expr slot.
    # They appear unused; confirm the intended call was generic_wrap(expr, a=a).
    def generic_2d(self,expr):
        a = np.ones((10,20))
        self.generic_wrap(a,expr)

    def generic_3d(self,expr):
        a = np.ones((10,20,1))
        self.generic_wrap(a,expr)
# Allow running this test module directly via numpy's nose-based runner.
if __name__ == "__main__":
    run_module_suite()
| |
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Observe Thermos tasks on a system
This module provides a number of classes for exposing information about running (active) and
finished Thermos tasks on a system. The primary entry point is the TaskObserver, a thread which
polls a designated Thermos checkpoint root and collates information about all tasks it discovers.
"""
import os
import threading
import time
from operator import attrgetter
from twitter.common import log
from twitter.common.exceptions import ExceptionalThread
from twitter.common.lang import Lockable
from twitter.common.quantity import Amount, Time
from apache.thermos.common.path import TaskPath
from apache.thermos.monitoring.disk import DiskCollectorSettings
from apache.thermos.monitoring.monitor import TaskMonitor
from apache.thermos.monitoring.process import ProcessSample
from apache.thermos.monitoring.resource import DiskCollectorProvider, TaskResourceMonitor
from .detector import ObserverTaskDetector
from .observed_task import ActiveObservedTask, FinishedObservedTask
from gen.apache.thermos.ttypes import ProcessState, TaskState
class TaskObserver(ExceptionalThread, Lockable):
"""
The TaskObserver monitors the thermos checkpoint root for active/finished
tasks. It is used to be the oracle of the state of all thermos tasks on
a machine.
It currently returns JSON, but really should just return objects. We should
then build an object->json translator.
"""
class UnexpectedError(Exception): pass
class UnexpectedState(Exception): pass
POLLING_INTERVAL = Amount(5, Time.SECONDS)
  def __init__(
      self,
      path_detector,
      interval=POLLING_INTERVAL,
      task_process_collection_interval=TaskResourceMonitor.PROCESS_COLLECTION_INTERVAL,
      enable_mesos_disk_collector=False,
      disk_collector_settings=DiskCollectorSettings()):
    """Create the observer thread; call start() to begin polling.

    :param path_detector: source of checkpoint roots for ObserverTaskDetector.
    :param interval: Amount(Time) polling period between checkpoint refreshes.
    :param task_process_collection_interval: process-sampling period handed to
        each task's TaskResourceMonitor.
    :param enable_mesos_disk_collector: forwarded to DiskCollectorProvider.
    :param disk_collector_settings: disk sampling configuration.

    NOTE(review): the DiskCollectorSettings() default is evaluated once at
    class-definition time and shared by every instance — confirm it is
    treated as immutable.
    """
    # The detector drives the name-mangled __on_* callbacks below.
    self._detector = ObserverTaskDetector(
        path_detector,
        self.__on_active,
        self.__on_finished,
        self.__on_removed)
    self._interval = interval
    self._task_process_collection_interval = task_process_collection_interval
    self._enable_mesos_disk_collector = enable_mesos_disk_collector
    self._disk_collector_settings = disk_collector_settings
    self._active_tasks = {}  # task_id => ActiveObservedTask
    self._finished_tasks = {}  # task_id => FinishedObservedTask
    self._stop_event = threading.Event()  # set by stop(); checked in run()
    ExceptionalThread.__init__(self)
    Lockable.__init__(self)
    self.daemon = True
  @property
  def active_tasks(self):
    """Return the dictionary of active Tasks (task_id => ActiveObservedTask)."""
    return self._active_tasks
  @property
  def finished_tasks(self):
    """Return the dictionary of finished Tasks (task_id => FinishedObservedTask)."""
    return self._finished_tasks
@property
def all_tasks(self):
"""Return a dictionary of all Tasks known by the TaskObserver"""
return dict(self.active_tasks.items() + self.finished_tasks.items())
  def stop(self):
    """Signal the polling loop in run() to exit at its next wakeup."""
    self._stop_event.set()
  def start(self):
    """Start the observer thread (delegates to ExceptionalThread.start)."""
    ExceptionalThread.start(self)
  def __on_active(self, root, task_id):
    """Detector callback: begin observing a newly-discovered active task.

    Builds a TaskMonitor plus a TaskResourceMonitor (started immediately) and
    registers the task in self._active_tasks. A task_id already present in the
    finished set is logged and ignored.
    """
    log.debug('on_active(%r, %r)', root, task_id)
    if task_id in self.finished_tasks:
      log.error('Found an active task (%s) in finished tasks?', task_id)
      return
    task_monitor = TaskMonitor(root, task_id)
    disk_collector_provider = DiskCollectorProvider(
        self._enable_mesos_disk_collector,
        self._disk_collector_settings)
    resource_monitor = TaskResourceMonitor(
        task_id,
        task_monitor,
        disk_collector_provider=disk_collector_provider,
        process_collection_interval=self._task_process_collection_interval,
        disk_collection_interval=self._disk_collector_settings.disk_collection_interval)
    # Begin sampling right away so the first UI query has data.
    resource_monitor.start()
    self._active_tasks[task_id] = ActiveObservedTask(
        root,
        task_id,
        task_monitor,
        resource_monitor)
def __on_finished(self, root, task_id):
log.debug('on_finished(%r, %r)', root, task_id)
active_task = self._active_tasks.pop(task_id, None)
if active_task:
active_task.resource_monitor.kill()
self._finished_tasks[task_id] = FinishedObservedTask(root, task_id)
def __on_removed(self, root, task_id):
log.debug('on_removed(%r, %r)', root, task_id)
active_task = self._active_tasks.pop(task_id, None)
if active_task:
active_task.resource_monitor.kill()
self._finished_tasks.pop(task_id, None)
  def run(self):
    """
    The internal thread for the observer. This periodically polls the
    checkpoint root for new tasks, or transitions of tasks from active to
    finished state.
    """
    while not self._stop_event.is_set():
      # wait() doubles as the poll interval and as a prompt wakeup on stop().
      self._stop_event.wait(self._interval.as_(Time.SECONDS))
      with self.lock:
        start = time.time()
        self._detector.refresh()
        log.debug("TaskObserver: finished checkpoint refresh in %.2fs", time.time() - start)
@Lockable.sync
def process_from_name(self, task_id, process_id):
if task_id in self.all_tasks:
task = self.all_tasks[task_id].task
if task:
for process in task.processes():
if process.name().get() == process_id:
return process
  @Lockable.sync
  def task_count(self):
    """
    Return the count of tasks that could be read properly from disk,
    keyed as active/finished/all.
    This may be <= self.task_id_count()
    """
    return dict(
        active=len(self.active_tasks),
        finished=len(self.finished_tasks),
        all=len(self.all_tasks),
    )
@Lockable.sync
def task_id_count(self):
"""Return the raw count of active and finished task_ids."""
num_active = len(self._detector.active_tasks)
num_finished = len(self._detector.finished_tasks)
return dict(active=num_active, finished=num_finished, all=num_active + num_finished)
def _get_tasks_of_type(self, type):
"""Convenience function to return all tasks of a given type"""
tasks = {
'active': self.active_tasks,
'finished': self.finished_tasks,
'all': self.all_tasks,
}.get(type, None)
if tasks is None:
log.error('Unknown task type %s', type)
return {}
return tasks
@Lockable.sync
def state(self, task_id):
"""Return a dict containing mapped information about a task's state"""
real_state = self.raw_state(task_id)
if real_state is None or real_state.header is None:
return {}
else:
return dict(
task_id=real_state.header.task_id,
launch_time=real_state.header.launch_time_ms / 1000.0,
sandbox=real_state.header.sandbox,
hostname=real_state.header.hostname,
user=real_state.header.user
)
@Lockable.sync
def raw_state(self, task_id):
"""
Return the current runner state (thrift blob: gen.apache.thermos.ttypes.RunnerState)
of a given task id
"""
if task_id not in self.all_tasks:
return None
return self.all_tasks[task_id].state
@Lockable.sync
def _task_processes(self, task_id):
"""
Return the processes of a task given its task_id.
Returns a map from state to processes in that state, where possible
states are: waiting, running, success, failed.
"""
if task_id not in self.all_tasks:
return {}
state = self.raw_state(task_id)
if state is None or state.header is None:
return {}
waiting, running, success, failed, killed = [], [], [], [], []
for process, runs in state.processes.items():
# No runs ==> nothing started.
if len(runs) == 0:
waiting.append(process)
else:
if runs[-1].state in (None, ProcessState.WAITING, ProcessState.LOST):
waiting.append(process)
elif runs[-1].state in (ProcessState.FORKED, ProcessState.RUNNING):
running.append(process)
elif runs[-1].state == ProcessState.SUCCESS:
success.append(process)
elif runs[-1].state == ProcessState.FAILED:
failed.append(process)
elif runs[-1].state == ProcessState.KILLED:
killed.append(process)
else:
# TODO(wickman) Consider log.error instead of raising.
raise self.UnexpectedState(
"Unexpected ProcessHistoryState: %s" % state.processes[process].state)
return dict(waiting=waiting, running=running, success=success, failed=failed, killed=killed)
@Lockable.sync
def main(self, type=None, offset=None, num=None):
"""Return a set of information about tasks, optionally filtered
Args:
type = (all|active|finished|None) [default: all]
offset = offset into the list of task_ids [default: 0]
num = number of results to return [default: 20]
Tasks are sorted by interest:
- active tasks are sorted by start time
- finished tasks are sorted by completion time
Returns:
{
tasks: [task_id_1, ..., task_id_N],
type: query type,
offset: next offset,
num: next num
}
"""
type = type or 'all'
offset = offset or 0
num = num or 20
# Get a list of all ObservedTasks of requested type
tasks = sorted((task for task in self._get_tasks_of_type(type).values()),
key=attrgetter('mtime'), reverse=True)
# Filter by requested offset + number of results
end = num
if offset < 0:
offset = offset % len(tasks) if len(tasks) > abs(offset) else 0
end += offset
tasks = tasks[offset:end]
def task_row(observed_task):
"""Generate an output row for a Task"""
task = self._task(observed_task.task_id)
# tasks include those which could not be found properly and are hence empty {}
if task:
return dict(
task_id=observed_task.task_id,
name=task['name'],
role=task['user'],
launch_timestamp=task['launch_timestamp'],
state=task['state'],
state_timestamp=task['state_timestamp'],
ports=task['ports'],
**task['resource_consumption'])
return dict(
tasks=filter(None, map(task_row, tasks)),
type=type,
offset=offset,
num=num,
task_count=self.task_count()[type],
)
def _sample(self, task_id):
if task_id not in self.active_tasks:
sample = ProcessSample.empty().to_dict()
sample['disk'] = 0
else:
resource_sample = self.active_tasks[task_id].resource_monitor.sample()[1]
sample = resource_sample.process_sample.to_dict()
sample['disk'] = resource_sample.disk_usage
log.debug("Got sample for task %s: %s", task_id, sample)
return sample
@Lockable.sync
def task_statuses(self, task_id):
"""
Return the sequence of task states.
[(task_state [string], timestamp), ...]
"""
# Unknown task_id.
if task_id not in self.all_tasks:
return []
task = self.all_tasks[task_id]
if task is None:
return []
state = self.raw_state(task_id)
if state is None or state.header is None:
return []
# Get the timestamp of the transition into the current state.
return [
(TaskState._VALUES_TO_NAMES.get(st.state, 'UNKNOWN'), st.timestamp_ms / 1000)
for st in state.statuses]
@Lockable.sync
def tasks(self, task_ids):
"""
Return information about an iterable of tasks [task_id1, task_id2, ...]
in the following form.
{
task_id1 : self._task(task_id1),
task_id2 : self._task(task_id2),
...
}
"""
res = {}
for task_id in task_ids:
d = self._task(task_id)
task_struct = d.pop('task_struct')
d['task'] = task_struct.get()
res[task_id] = d
return res
  @Lockable.sync
  def _task(self, task_id):
    """
    Return composite information about a particular task task_id, given the below
    schema.

    {
      task_id: string,
      name: string,
      user: string,
      launch_timestamp: seconds,
      state: string [ACTIVE, SUCCESS, FAILED]
      ports: { name1: 'url', name2: 'url2' }
      resource_consumption: { cpu:, ram:, disk: }
      processes: { -> names only
        waiting: [],
        running: [],
        success: [],
        failed: []
      }
    }

    Returns {} when the task, its struct, or its state cannot be read.
    """
    # Unknown task_id.
    if task_id not in self.all_tasks:
      return {}
    task = self.all_tasks[task_id].task
    if task is None:
      # TODO(wickman) Can this happen?
      log.error('Could not find task: %s', task_id)
      return {}
    state = self.raw_state(task_id)
    if state is None or state.header is None:
      # TODO(wickman) Can this happen?
      return {}
    # Get the timestamp of the transition into the current state.
    # NOTE(review): last_state is seeded with a status *object* while
    # current_state is a state enum value, so the first comparison is always
    # "different" — appears intentional as a loop seed, but confirm.
    current_state = state.statuses[-1].state
    last_state = state.statuses[0]
    state_timestamp = 0
    for status in state.statuses:
      if status.state == current_state and last_state != current_state:
        state_timestamp = status.timestamp_ms / 1000
      last_state = status.state
    return dict(
        task_id=task_id,
        name=task.name().get(),
        launch_timestamp=state.statuses[0].timestamp_ms / 1000,
        state=TaskState._VALUES_TO_NAMES[state.statuses[-1].state],
        state_timestamp=state_timestamp,
        user=state.header.user,
        resource_consumption=self._sample(task_id),
        ports=state.header.ports,
        processes=self._task_processes(task_id),
        task_struct=task,
    )
@Lockable.sync
def _get_process_resource_consumption(self, task_id, process_name):
if task_id not in self.active_tasks:
return ProcessSample.empty().to_dict()
sample = self.active_tasks[task_id].resource_monitor.sample_by_process(process_name).to_dict()
log.debug('Resource consumption (%s, %s) => %s', task_id, process_name, sample)
return sample
@Lockable.sync
def _get_process_tuple(self, history, run):
"""
Return the basic description of a process run if it exists, otherwise
an empty dictionary.
{
process_name: string
process_run: int
(optional) return_code: int
state: string [WAITING, FORKED, RUNNING, SUCCESS, KILLED, FAILED, LOST]
(optional) start_time: seconds from epoch
(optional) stop_time: seconds from epoch
}
"""
if len(history) == 0:
return {}
if run >= len(history):
return {}
else:
process_run = history[run]
run = run % len(history)
d = dict(
process_name=process_run.process,
process_run=run,
state=ProcessState._VALUES_TO_NAMES[process_run.state],
)
if process_run.start_time:
d.update(start_time=process_run.start_time)
if process_run.stop_time:
d.update(stop_time=process_run.stop_time)
if process_run.return_code:
d.update(return_code=process_run.return_code)
return d
@Lockable.sync
def process(self, task_id, process, run=None):
"""
Returns a process run, where the schema is given below:
{
process_name: string
process_run: int
used: { cpu: float, ram: int bytes, disk: int bytes }
start_time: (time since epoch in millis (utc))
stop_time: (time since epoch in millis (utc))
state: string [WAITING, FORKED, RUNNING, SUCCESS, KILLED, FAILED, LOST]
}
If run is None, return the latest run.
"""
state = self.raw_state(task_id)
if state is None or state.header is None:
return {}
if process not in state.processes:
return {}
history = state.processes[process]
run = int(run) if run is not None else -1
tup = self._get_process_tuple(history, run)
if not tup:
return {}
if tup.get('state') == 'RUNNING':
tup.update(used=self._get_process_resource_consumption(task_id, process))
return tup
@Lockable.sync
def _processes(self, task_id):
"""
Return
{
process1: { ... }
process2: { ... }
...
processN: { ... }
}
where processK is the latest run of processK and in the schema as
defined by process().
"""
if task_id not in self.all_tasks:
return {}
state = self.raw_state(task_id)
if state is None or state.header is None:
return {}
processes = self._task_processes(task_id)
d = dict()
for process_type in processes:
for process_name in processes[process_type]:
d[process_name] = self.process(task_id, process_name)
return d
@Lockable.sync
def processes(self, task_ids):
"""
Given a list of task_ids, returns a map of task_id => processes, where processes
is defined by the schema in _processes.
"""
if not isinstance(task_ids, (list, tuple)):
return {}
return dict((task_id, self._processes(task_id)) for task_id in task_ids)
@Lockable.sync
def get_run_number(self, runner_state, process, run=None):
if runner_state is not None and runner_state.processes is not None:
run = run if run is not None else -1
if run < len(runner_state.processes[process]):
if len(runner_state.processes[process]) > 0:
return run % len(runner_state.processes[process])
@Lockable.sync
def logs(self, task_id, process, run=None):
"""
Given a task_id and a process and (optional) run number, return a dict:
{
stderr: [dir, filename]
stdout: [dir, filename]
}
If the run number is unspecified, uses the latest run.
TODO(wickman) Just return the filenames directly?
"""
runner_state = self.raw_state(task_id)
if runner_state is None or runner_state.header is None:
return {}
run = self.get_run_number(runner_state, process, run)
if run is None:
return {}
observed_task = self.all_tasks.get(task_id, None)
if not observed_task:
return {}
log_path = TaskPath(
root=observed_task.root,
task_id=task_id,
process=process,
run=run,
log_dir=runner_state.header.log_dir,
).getpath('process_logdir')
return dict(
stdout=[log_path, 'stdout'],
stderr=[log_path, 'stderr']
)
@staticmethod
def _sanitize_path(base_path, relpath):
"""
Attempts to sanitize a path through path normalization, also making sure
that the relative path is contained inside of base_path.
"""
if relpath is None:
relpath = "."
normalized_base = os.path.realpath(base_path)
normalized = os.path.realpath(os.path.join(base_path, relpath))
if normalized.startswith(normalized_base):
return (normalized_base, os.path.relpath(normalized, normalized_base))
return (None, None)
@Lockable.sync
def valid_file(self, task_id, path):
"""
Like valid_path, but also verify the given path is a file
"""
chroot, path = self.valid_path(task_id, path)
if chroot and path and os.path.isfile(os.path.join(chroot, path)):
return chroot, path
return None, None
@Lockable.sync
def valid_path(self, task_id, path):
"""
Given a task_id and a path within that task_id's sandbox, verify:
(1) it's actually in the sandbox and not outside
(2) it's a valid, existing path
Returns chroot and the pathname relative to that chroot.
"""
runner_state = self.raw_state(task_id)
if runner_state is None or runner_state.header is None:
return None, None
try:
chroot = runner_state.header.sandbox
except AttributeError:
return None, None
chroot, path = self._sanitize_path(chroot, path)
if chroot and path and os.path.exists(os.path.join(chroot, path)):
return chroot, path
return None, None
@Lockable.sync
def files(self, task_id, path=None):
"""
Returns dictionary
{
task_id: task_id
chroot: absolute directory on machine
path: sanitized relative path w.r.t. chroot
dirs: list of directories
files: list of files
}
"""
# TODO(jon): DEPRECATED: most of the necessary logic is handled directly in the templates.
# Also, global s/chroot/sandbox/?
empty = dict(task_id=task_id, chroot=None, path=None, dirs=None, files=None)
path = path if path is not None else '.'
runner_state = self.raw_state(task_id)
if runner_state is None:
return empty
try:
chroot = runner_state.header.sandbox
except AttributeError:
return empty
if chroot is None: # chroot-less job
return empty
chroot, path = self._sanitize_path(chroot, path)
if (chroot is None or path is None
or not os.path.isdir(os.path.join(chroot, path))):
return empty
names = os.listdir(os.path.join(chroot, path))
dirs, files = [], []
for name in names:
if os.path.isdir(os.path.join(chroot, path, name)):
dirs.append(name)
else:
files.append(name)
return dict(
task_id=task_id,
chroot=chroot,
path=path,
dirs=dirs,
files=files
)
| |
"""Support for Magic Home select."""
from __future__ import annotations
import asyncio
from flux_led.aio import AIOWifiLedBulb
from flux_led.base_device import DeviceType
from flux_led.const import (
DEFAULT_WHITE_CHANNEL_TYPE,
STATE_CHANGE_LATENCY,
WhiteChannelType,
)
from flux_led.protocol import PowerRestoreState, RemoteConfig
from homeassistant import config_entries
from homeassistant.components.select import SelectEntity
from homeassistant.const import CONF_NAME
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import CONF_WHITE_CHANNEL_TYPE, DOMAIN, FLUX_COLOR_MODE_RGBW
from .coordinator import FluxLedUpdateCoordinator
from .entity import FluxBaseEntity, FluxEntity
from .util import _human_readable_option
# Map each PowerRestoreState member's human-readable label back to the member
# itself, for reverse lookup when the user picks an option in the UI.
NAME_TO_POWER_RESTORE_STATE = {
    _human_readable_option(option.name): option for option in PowerRestoreState
}
async def _async_delayed_reload(
    hass: HomeAssistant, entry: config_entries.ConfigEntry
) -> None:
    """Reload after making a change that will affect the operation of the device."""
    # Give the device time to apply the change before reloading, so the fresh
    # config-entry setup observes the updated device configuration.
    await asyncio.sleep(STATE_CHANGE_LATENCY)
    hass.async_create_task(hass.config_entries.async_reload(entry.entry_id))
async def async_setup_entry(
    hass: HomeAssistant,
    entry: config_entries.ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up the Flux selects."""
    coordinator: FluxLedUpdateCoordinator = hass.data[DOMAIN][entry.entry_id]
    device = coordinator.device
    name = entry.data.get(CONF_NAME, entry.title)
    base_unique_id = entry.unique_id or entry.entry_id
    entities: list[
        FluxPowerStateSelect
        | FluxOperatingModesSelect
        | FluxWiringsSelect
        | FluxICTypeSelect
        | FluxRemoteConfigSelect
        | FluxWhiteChannelSelect
    ] = []
    # Only switch-type devices expose a power-restore behavior select.
    if device.device_type == DeviceType.Switch:
        entities.append(FluxPowerStateSelect(coordinator.device, entry))
    # Coordinator-backed selects share a constructor signature, so the
    # capability checks can be table-driven (order determines entity order).
    capability_selects = (
        (device.operating_modes, FluxOperatingModesSelect, "Operating Mode", "operating_mode"),
        (device.wirings, FluxWiringsSelect, "Wiring", "wiring"),
        (device.ic_types, FluxICTypeSelect, "IC Type", "ic_type"),
        (device.remote_config, FluxRemoteConfigSelect, "Remote Config", "remote_config"),
    )
    for supported, select_cls, label, key in capability_selects:
        if supported:
            entities.append(
                select_cls(coordinator, base_unique_id, f"{name} {label}", key)
            )
    # RGBW devices can reassign what their white channel means.
    if FLUX_COLOR_MODE_RGBW in device.color_modes:
        entities.append(FluxWhiteChannelSelect(coordinator.device, entry))
    if entities:
        async_add_entities(entities)
class FluxConfigAtStartSelect(FluxBaseEntity, SelectEntity):
    """Representation of a flux config entity that only updates at start or change."""
    # Configuration entities are grouped under the device's config section.
    _attr_entity_category = EntityCategory.CONFIG
class FluxConfigSelect(FluxEntity, SelectEntity):
    """Representation of a flux config entity that updates."""
    # Configuration entities are grouped under the device's config section.
    _attr_entity_category = EntityCategory.CONFIG
class FluxPowerStateSelect(FluxConfigAtStartSelect, SelectEntity):
    """Representation of a Flux power restore state option."""
    _attr_icon = "mdi:transmission-tower-off"
    # One option per PowerRestoreState member, in human-readable form.
    _attr_options = list(NAME_TO_POWER_RESTORE_STATE)
    def __init__(
        self,
        device: AIOWifiLedBulb,
        entry: config_entries.ConfigEntry,
    ) -> None:
        """Initialize the power state select."""
        super().__init__(device, entry)
        self._attr_name = f"{entry.data.get(CONF_NAME, entry.title)} Power Restored"
        base_unique_id = entry.unique_id or entry.entry_id
        self._attr_unique_id = f"{base_unique_id}_power_restored"
        self._async_set_current_option_from_device()
    @callback
    def _async_set_current_option_from_device(self) -> None:
        """Set the option from the current power state."""
        # Only channel 1 is surfaced; the asserts document the invariant that
        # a switch-type device always reports its restore states.
        restore_states = self._device.power_restore_states
        assert restore_states is not None
        assert restore_states.channel1 is not None
        self._attr_current_option = _human_readable_option(restore_states.channel1.name)
    async def async_select_option(self, option: str) -> None:
        """Change the power state."""
        await self._device.async_set_power_restore(
            channel1=NAME_TO_POWER_RESTORE_STATE[option]
        )
        # Reflect the device's new state immediately rather than waiting for
        # the next poll.
        self._async_set_current_option_from_device()
        self.async_write_ha_state()
class FluxICTypeSelect(FluxConfigSelect):
    """Representation of Flux ic type."""
    _attr_icon = "mdi:chip"
    @property
    def options(self) -> list[str]:
        """Return the available ic types."""
        assert self._device.ic_types is not None
        return self._device.ic_types
    @property
    def current_option(self) -> str | None:
        """Return the current ic type."""
        return self._device.ic_type
    async def async_select_option(self, option: str) -> None:
        """Change the ic type."""
        await self._device.async_set_device_config(ic_type=option)
        # Changing the IC type changes the device's capabilities, so reload
        # the config entry after the device has settled.
        await _async_delayed_reload(self.hass, self.coordinator.entry)
class FluxWiringsSelect(FluxConfigSelect):
    """Representation of Flux wirings."""
    _attr_icon = "mdi:led-strip-variant"
    @property
    def options(self) -> list[str]:
        """Return the available wiring options based on the strip protocol."""
        assert self._device.wirings is not None
        return self._device.wirings
    @property
    def current_option(self) -> str | None:
        """Return the current wiring."""
        return self._device.wiring
    async def async_select_option(self, option: str) -> None:
        """Change the wiring."""
        await self._device.async_set_device_config(wiring=option)
class FluxOperatingModesSelect(FluxConfigSelect):
    """Representation of Flux operating modes."""
    @property
    def options(self) -> list[str]:
        """Return the available operating modes."""
        assert self._device.operating_modes is not None
        return self._device.operating_modes
    @property
    def current_option(self) -> str | None:
        """Return the current operating mode."""
        return self._device.operating_mode
    async def async_select_option(self, option: str) -> None:
        """Change the operating mode."""
        await self._device.async_set_device_config(operating_mode=option)
        # Changing the operating mode changes how the device behaves, so
        # reload the config entry after the device has settled.
        await _async_delayed_reload(self.hass, self.coordinator.entry)
class FluxRemoteConfigSelect(FluxConfigSelect):
    """Representation of Flux remote config type."""
    def __init__(
        self,
        coordinator: FluxLedUpdateCoordinator,
        base_unique_id: str,
        name: str,
        key: str,
    ) -> None:
        """Initialize the remote config type select."""
        super().__init__(coordinator, base_unique_id, name, key)
        assert self._device.remote_config is not None
        # Reverse map from human-readable label to RemoteConfig member.
        self._name_to_state = {
            _human_readable_option(option.name): option for option in RemoteConfig
        }
        self._attr_options = list(self._name_to_state)
    @property
    def current_option(self) -> str | None:
        """Return the current remote config."""
        assert self._device.remote_config is not None
        return _human_readable_option(self._device.remote_config.name)
    async def async_select_option(self, option: str) -> None:
        """Change the remote config setting."""
        remote_config: RemoteConfig = self._name_to_state[option]
        await self._device.async_config_remotes(remote_config)
class FluxWhiteChannelSelect(FluxConfigAtStartSelect):
    """Representation of Flux white channel."""
    _attr_options = [_human_readable_option(option.name) for option in WhiteChannelType]
    def __init__(
        self,
        device: AIOWifiLedBulb,
        entry: config_entries.ConfigEntry,
    ) -> None:
        """Initialize the white channel select."""
        super().__init__(device, entry)
        self._attr_name = f"{entry.data.get(CONF_NAME, entry.title)} White Channel"
        base_unique_id = entry.unique_id or entry.entry_id
        self._attr_unique_id = f"{base_unique_id}_white_channel"
    @property
    def current_option(self) -> str | None:
        """Return the current white channel type."""
        # The white channel type lives in the config entry data, not on the
        # device itself.
        return _human_readable_option(
            self.entry.data.get(
                CONF_WHITE_CHANNEL_TYPE, DEFAULT_WHITE_CHANNEL_TYPE.name
            )
        )
    async def async_select_option(self, option: str) -> None:
        """Change the white channel type."""
        self.hass.config_entries.async_update_entry(
            self.entry,
            data={**self.entry.data, CONF_WHITE_CHANNEL_TYPE: option.lower()},
        )
        # The new channel meaning only takes effect after reloading the entry.
        await _async_delayed_reload(self.hass, self.entry)
| |
"""
Test case for testing the gdbremote protocol.
Tests run against debugserver and lldb-server (llgs).
lldb-server tests run where the lldb-server exe is
available.
This class will be broken into smaller test case classes by
gdb remote packet functional areas. For now it contains
the initial set of tests implemented.
"""
from __future__ import print_function
import unittest2
import gdbremote_testcase
import lldbgdbserverutils
import platform
import signal
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test.lldbdwarf import *
from lldbsuite.test import lldbutil
class LldbGdbServerTestCase(gdbremote_testcase.GdbRemoteTestCaseBase, DwarfOpcodeParser):
mydir = TestBase.compute_mydir(__file__)
    @debugserver_test
    @skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    def test_exe_starts_debugserver(self):
        """Smoke test: debugserver starts and accepts a connection."""
        self.init_debugserver_test()
        server = self.connect_to_debug_monitor()
    @llgs_test
    def test_exe_starts_llgs(self):
        """Smoke test: lldb-server (llgs) starts and accepts a connection."""
        self.init_llgs_test()
        server = self.connect_to_debug_monitor()
def start_no_ack_mode(self):
server = self.connect_to_debug_monitor()
self.assertIsNotNone(server)
self.add_no_ack_remote_stream()
self.expect_gdbremote_sequence()
    @debugserver_test
    @skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    def test_start_no_ack_mode_debugserver(self):
        """Verify no-ack mode negotiation against debugserver."""
        self.init_debugserver_test()
        self.start_no_ack_mode()
    @llgs_test
    def test_start_no_ack_mode_llgs(self):
        """Verify no-ack mode negotiation against lldb-server (llgs)."""
        self.init_llgs_test()
        self.start_no_ack_mode()
    def thread_suffix_supported(self):
        """Verify the stub replies OK to the QThreadSuffixSupported packet."""
        server = self.connect_to_debug_monitor()
        self.assertIsNotNone(server)
        self.add_no_ack_remote_stream()
        self.test_sequence.add_log_lines(
            ["lldb-server < 26> read packet: $QThreadSuffixSupported#e4",
             "lldb-server < 6> send packet: $OK#9a"],
            True)
        self.expect_gdbremote_sequence()
    @debugserver_test
    @skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    def test_thread_suffix_supported_debugserver(self):
        """Verify QThreadSuffixSupported against debugserver."""
        self.init_debugserver_test()
        self.thread_suffix_supported()
    @llgs_test
    def test_thread_suffix_supported_llgs(self):
        """Verify QThreadSuffixSupported against lldb-server (llgs)."""
        self.init_llgs_test()
        self.thread_suffix_supported()
    def list_threads_in_stop_reply_supported(self):
        """Verify the stub replies OK to the QListThreadsInStopReply packet."""
        server = self.connect_to_debug_monitor()
        self.assertIsNotNone(server)
        self.add_no_ack_remote_stream()
        self.test_sequence.add_log_lines(
            ["lldb-server < 27> read packet: $QListThreadsInStopReply#21",
             "lldb-server < 6> send packet: $OK#9a"],
            True)
        self.expect_gdbremote_sequence()
    @debugserver_test
    @skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    def test_list_threads_in_stop_reply_supported_debugserver(self):
        """Verify QListThreadsInStopReply against debugserver."""
        self.init_debugserver_test()
        self.list_threads_in_stop_reply_supported()
    @llgs_test
    def test_list_threads_in_stop_reply_supported_llgs(self):
        """Verify QListThreadsInStopReply against lldb-server (llgs)."""
        self.init_llgs_test()
        self.list_threads_in_stop_reply_supported()
    def c_packet_works(self):
        """Launch an inferior, continue it with $c and expect a clean exit ($W00)."""
        launch_args = self.install_and_create_launch_args()
        server = self.connect_to_debug_monitor()
        self.assertIsNotNone(server)
        self.add_no_ack_remote_stream()
        self.add_verified_launch_packets(launch_args)
        self.test_sequence.add_log_lines(
            ["read packet: $c#63",
             "send packet: $W00#00"],
            True)
        self.expect_gdbremote_sequence()
    @debugserver_test
    @skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    def test_c_packet_works_debugserver(self):
        """Verify the $c continue packet against debugserver."""
        self.init_debugserver_test()
        self.build()
        self.c_packet_works()
    @llgs_test
    def test_c_packet_works_llgs(self):
        """Verify the $c continue packet against lldb-server (llgs)."""
        self.init_llgs_test()
        self.build()
        self.c_packet_works()
def inferior_print_exit(self):
launch_args = self.install_and_create_launch_args()
server = self.connect_to_debug_monitor()
self.assertIsNotNone(server)
# build launch args
launch_args += ["hello, world"]
self.add_no_ack_remote_stream()
self.add_verified_launch_packets(launch_args)
self.test_sequence.add_log_lines(
["read packet: $vCont;c#a8",
{"type": "output_match", "regex": self.maybe_strict_output_regex(r"hello, world\r\n")},
"send packet: $W00#00"],
True)
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
    @debugserver_test
    @skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    def test_inferior_print_exit_debugserver(self):
        """Verify inferior output and exit handling against debugserver."""
        self.init_debugserver_test()
        self.build()
        self.inferior_print_exit()
    @llgs_test
    @expectedFlakeyLinux("llvm.org/pr25652")
    def test_inferior_print_exit_llgs(self):
        """Verify inferior output and exit handling against lldb-server (llgs)."""
        self.init_llgs_test()
        self.build()
        self.inferior_print_exit()
    def first_launch_stop_reply_thread_matches_first_qC(self):
        """Check the thread id from the first $? stop reply matches the first $qC reply."""
        launch_args = self.install_and_create_launch_args()
        server = self.connect_to_debug_monitor()
        self.assertIsNotNone(server)
        # build launch args
        launch_args += ["hello, world"]
        self.add_no_ack_remote_stream()
        self.add_verified_launch_packets(launch_args)
        # Capture the thread id from $qC, then require the $? stop reply to
        # report the same id via the expect_captures matcher.
        self.test_sequence.add_log_lines(["read packet: $qC#00",
                                          {"direction": "send",
                                           "regex": r"^\$QC([0-9a-fA-F]+)#",
                                           "capture": {1: "thread_id"}},
                                          "read packet: $?#00",
                                          {"direction": "send",
                                           "regex": r"^\$T[0-9a-fA-F]{2}thread:([0-9a-fA-F]+)",
                                           "expect_captures": {1: "thread_id"}}],
                                         True)
        self.expect_gdbremote_sequence()
    @debugserver_test
    @skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    def test_first_launch_stop_reply_thread_matches_first_qC_debugserver(self):
        """Verify qC/stop-reply thread id agreement against debugserver."""
        self.init_debugserver_test()
        self.build()
        self.first_launch_stop_reply_thread_matches_first_qC()
    @llgs_test
    def test_first_launch_stop_reply_thread_matches_first_qC_llgs(self):
        """Verify qC/stop-reply thread id agreement against lldb-server (llgs)."""
        self.init_llgs_test()
        self.build()
        self.first_launch_stop_reply_thread_matches_first_qC()
    def attach_commandline_continue_app_exits(self):
        """Attach to a running inferior, continue it, and verify it exits and dies."""
        procs = self.prep_debug_monitor_and_inferior()
        self.test_sequence.add_log_lines(
            ["read packet: $vCont;c#a8",
             "send packet: $W00#00"],
            True)
        self.expect_gdbremote_sequence()
        # Wait a moment for completed and now-detached inferior process to
        # clear.
        time.sleep(1)
        if not lldb.remote_platform:
            # Process should be dead now. Reap results.
            poll_result = procs["inferior"].poll()
            self.assertIsNotNone(poll_result)
        # Where possible, verify at the system level that the process is not
        # running.
        self.assertFalse(
            lldbgdbserverutils.process_is_running(
                procs["inferior"].pid, False))
    @debugserver_test
    @skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    def test_attach_commandline_continue_app_exits_debugserver(self):
        """Verify attach + continue + exit against debugserver."""
        self.init_debugserver_test()
        self.build()
        self.set_inferior_startup_attach()
        self.attach_commandline_continue_app_exits()
    @llgs_test
    def test_attach_commandline_continue_app_exits_llgs(self):
        """Verify attach + continue + exit against lldb-server (llgs)."""
        self.init_llgs_test()
        self.build()
        self.set_inferior_startup_attach()
        self.attach_commandline_continue_app_exits()
def qRegisterInfo_returns_one_valid_result(self):
launch_args = self.install_and_create_launch_args()
server = self.connect_to_debug_monitor()
self.assertIsNotNone(server)
# Build the expected protocol stream
self.add_no_ack_remote_stream()
self.add_verified_launch_packets(launch_args)
self.test_sequence.add_log_lines(
["read packet: $qRegisterInfo0#00",
{"direction": "send", "regex": r"^\$(.+);#[0-9A-Fa-f]{2}", "capture": {1: "reginfo_0"}}],
True)
# Run the stream
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
reg_info_packet = context.get("reginfo_0")
self.assertIsNotNone(reg_info_packet)
self.assert_valid_reg_info(
lldbgdbserverutils.parse_reg_info_response(reg_info_packet))
    @debugserver_test
    @skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    def test_qRegisterInfo_returns_one_valid_result_debugserver(self):
        """Verify a single qRegisterInfo reply against debugserver."""
        self.init_debugserver_test()
        self.build()
        self.qRegisterInfo_returns_one_valid_result()
    @llgs_test
    def test_qRegisterInfo_returns_one_valid_result_llgs(self):
        """Verify a single qRegisterInfo reply against lldb-server (llgs)."""
        self.init_llgs_test()
        self.build()
        self.qRegisterInfo_returns_one_valid_result()
def qRegisterInfo_returns_all_valid_results(self):
launch_args = self.install_and_create_launch_args()
server = self.connect_to_debug_monitor()
self.assertIsNotNone(server)
# Build the expected protocol stream.
self.add_no_ack_remote_stream()
self.add_verified_launch_packets(launch_args)
self.add_register_info_collection_packets()
# Run the stream.
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Validate that each register info returned validates.
for reg_info in self.parse_register_info_packets(context):
self.assert_valid_reg_info(reg_info)
    @debugserver_test
    @skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    def test_qRegisterInfo_returns_all_valid_results_debugserver(self):
        """Verify all qRegisterInfo replies against debugserver."""
        self.init_debugserver_test()
        self.build()
        self.qRegisterInfo_returns_all_valid_results()
    @llgs_test
    def test_qRegisterInfo_returns_all_valid_results_llgs(self):
        """Verify all qRegisterInfo replies against lldb-server (llgs)."""
        self.init_llgs_test()
        self.build()
        self.qRegisterInfo_returns_all_valid_results()
def qRegisterInfo_contains_required_generics(self):
launch_args = self.install_and_create_launch_args()
server = self.connect_to_debug_monitor()
self.assertIsNotNone(server)
# Build the expected protocol stream
self.add_no_ack_remote_stream()
self.add_verified_launch_packets(launch_args)
self.add_register_info_collection_packets()
# Run the packet stream.
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Gather register info entries.
reg_infos = self.parse_register_info_packets(context)
# Collect all generic registers found.
generic_regs = {
reg_info['generic']: 1 for reg_info in reg_infos if 'generic' in reg_info}
# Ensure we have a program counter register.
self.assertTrue('pc' in generic_regs)
# Ensure we have a frame pointer register. PPC64le's FP is the same as SP
if self.getArchitecture() != 'powerpc64le':
self.assertTrue('fp' in generic_regs)
# Ensure we have a stack pointer register.
self.assertTrue('sp' in generic_regs)
# Ensure we have a flags register.
self.assertTrue('flags' in generic_regs)
    @debugserver_test
    @skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    def test_qRegisterInfo_contains_required_generics_debugserver(self):
        """Verify required generic registers against debugserver."""
        self.init_debugserver_test()
        self.build()
        self.qRegisterInfo_contains_required_generics()
    @llgs_test
    def test_qRegisterInfo_contains_required_generics_llgs(self):
        """Verify required generic registers against lldb-server (llgs)."""
        self.init_llgs_test()
        self.build()
        self.qRegisterInfo_contains_required_generics()
def qRegisterInfo_contains_at_least_one_register_set(self):
launch_args = self.install_and_create_launch_args()
server = self.connect_to_debug_monitor()
self.assertIsNotNone(server)
# Build the expected protocol stream
self.add_no_ack_remote_stream()
self.add_verified_launch_packets(launch_args)
self.add_register_info_collection_packets()
# Run the packet stream.
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Gather register info entries.
reg_infos = self.parse_register_info_packets(context)
# Collect all register sets found.
register_sets = {
reg_info['set']: 1 for reg_info in reg_infos if 'set' in reg_info}
self.assertTrue(len(register_sets) >= 1)
    @debugserver_test
    @skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    def test_qRegisterInfo_contains_at_least_one_register_set_debugserver(
            self):
        """Verify at least one register set against debugserver."""
        self.init_debugserver_test()
        self.build()
        self.qRegisterInfo_contains_at_least_one_register_set()
    @llgs_test
    def test_qRegisterInfo_contains_at_least_one_register_set_llgs(self):
        """Verify at least one register set against lldb-server (llgs)."""
        self.init_llgs_test()
        self.build()
        self.qRegisterInfo_contains_at_least_one_register_set()
def targetHasAVX(self):
triple = self.dbg.GetSelectedPlatform().GetTriple()
# TODO other platforms, please implement this function
if not re.match(".*-.*-linux", triple):
return True
# Need to do something different for non-Linux/Android targets
if lldb.remote_platform:
self.runCmd('platform get-file "/proc/cpuinfo" "cpuinfo"')
cpuinfo_path = "cpuinfo"
self.addTearDownHook(lambda: os.unlink("cpuinfo"))
else:
cpuinfo_path = "/proc/cpuinfo"
f = open(cpuinfo_path, 'r')
cpuinfo = f.read()
f.close()
return " avx " in cpuinfo
    def qRegisterInfo_contains_avx_registers(self):
        """Verify the AVX register set is reported exactly when the target has AVX."""
        launch_args = self.install_and_create_launch_args()
        server = self.connect_to_debug_monitor()
        self.assertIsNotNone(server)
        # Build the expected protocol stream
        self.add_no_ack_remote_stream()
        self.add_verified_launch_packets(launch_args)
        self.add_register_info_collection_packets()
        # Run the packet stream.
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)
        # Gather register info entries.
        reg_infos = self.parse_register_info_packets(context)
        # Collect all register set names found.
        register_sets = {
            reg_info['set']: 1 for reg_info in reg_infos if 'set' in reg_info}
        # The AVX set must be present if and only if the CPU supports AVX.
        self.assertEqual(
            self.targetHasAVX(),
            "Advanced Vector Extensions" in register_sets)
    @llgs_test
    def test_qRegisterInfo_contains_avx_registers_llgs(self):
        """Verify AVX register-set reporting against lldb-server (llgs)."""
        self.init_llgs_test()
        self.build()
        self.qRegisterInfo_contains_avx_registers()
def qThreadInfo_contains_thread(self):
procs = self.prep_debug_monitor_and_inferior()
self.add_threadinfo_collection_packets()
# Run the packet stream.
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Gather threadinfo entries.
threads = self.parse_threadinfo_packets(context)
self.assertIsNotNone(threads)
# We should have exactly one thread.
self.assertEqual(len(threads), 1)
    @debugserver_test
    @skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    def test_qThreadInfo_contains_thread_launch_debugserver(self):
        """Verify threadinfo for a launched inferior against debugserver."""
        self.init_debugserver_test()
        self.build()
        self.set_inferior_startup_launch()
        self.qThreadInfo_contains_thread()
    @llgs_test
    def test_qThreadInfo_contains_thread_launch_llgs(self):
        """Verify threadinfo for a launched inferior against lldb-server (llgs)."""
        self.init_llgs_test()
        self.build()
        self.set_inferior_startup_launch()
        self.qThreadInfo_contains_thread()
    @debugserver_test
    @skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    def test_qThreadInfo_contains_thread_attach_debugserver(self):
        """Verify threadinfo for an attached inferior against debugserver."""
        self.init_debugserver_test()
        self.build()
        self.set_inferior_startup_attach()
        self.qThreadInfo_contains_thread()
    @llgs_test
    def test_qThreadInfo_contains_thread_attach_llgs(self):
        """Verify threadinfo for an attached inferior against lldb-server (llgs)."""
        self.init_llgs_test()
        self.build()
        self.set_inferior_startup_attach()
        self.qThreadInfo_contains_thread()
    def qThreadInfo_matches_qC(self):
        """Verify the single thread from threadinfo matches the $qC thread id."""
        procs = self.prep_debug_monitor_and_inferior()
        self.add_threadinfo_collection_packets()
        self.test_sequence.add_log_lines(
            ["read packet: $qC#00",
             {"direction": "send", "regex": r"^\$QC([0-9a-fA-F]+)#", "capture": {1: "thread_id"}}
             ], True)
        # Run the packet stream.
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)
        # Gather threadinfo entries.
        threads = self.parse_threadinfo_packets(context)
        self.assertIsNotNone(threads)
        # We should have exactly one thread from threadinfo.
        self.assertEqual(len(threads), 1)
        # We should have a valid thread_id from $QC.
        QC_thread_id_hex = context.get("thread_id")
        self.assertIsNotNone(QC_thread_id_hex)
        QC_thread_id = int(QC_thread_id_hex, 16)
        # Those two should be the same.
        self.assertEqual(threads[0], QC_thread_id)
    @debugserver_test
    @skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    def test_qThreadInfo_matches_qC_launch_debugserver(self):
        """Verify threadinfo/qC agreement for a launched inferior (debugserver)."""
        self.init_debugserver_test()
        self.build()
        self.set_inferior_startup_launch()
        self.qThreadInfo_matches_qC()
    @llgs_test
    def test_qThreadInfo_matches_qC_launch_llgs(self):
        """Verify threadinfo/qC agreement for a launched inferior (llgs)."""
        self.init_llgs_test()
        self.build()
        self.set_inferior_startup_launch()
        self.qThreadInfo_matches_qC()
    @debugserver_test
    @skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    def test_qThreadInfo_matches_qC_attach_debugserver(self):
        """Verify threadinfo/qC agreement for an attached inferior (debugserver)."""
        self.init_debugserver_test()
        self.build()
        self.set_inferior_startup_attach()
        self.qThreadInfo_matches_qC()
    @llgs_test
    def test_qThreadInfo_matches_qC_attach_llgs(self):
        """Verify threadinfo/qC agreement for an attached inferior (llgs)."""
        self.init_llgs_test()
        self.build()
        self.set_inferior_startup_attach()
        self.qThreadInfo_matches_qC()
def p_returns_correct_data_size_for_each_qRegisterInfo(self):
    """Verify each $p register read returns bitsize/8 bytes of hex data.

    For every register advertised by qRegisterInfo that belongs to a
    register set, issue a $p read and check that the response length is
    two hex characters per byte of the register's declared bitsize.
    """
    procs = self.prep_debug_monitor_and_inferior()
    self.add_register_info_collection_packets()

    # Run the packet stream.
    context = self.expect_gdbremote_sequence()
    self.assertIsNotNone(context)

    # Gather register info entries.
    reg_infos = self.parse_register_info_packets(context)
    self.assertIsNotNone(reg_infos)
    self.assertTrue(len(reg_infos) > 0)

    inferior_exe_path = self.getBuildArtifact("a.out")
    Target = self.dbg.CreateTarget(inferior_exe_path)
    byte_order = Target.GetByteOrder()

    # Read value for each register.  Use enumerate so reg_index always
    # matches the register's position in the qRegisterInfo list, even when
    # a register is skipped below (a manual counter that is not bumped on
    # `continue` would desynchronize the $p register number from the
    # reg_info it is validated against).
    for reg_index, reg_info in enumerate(reg_infos):
        # Skip registers that don't have a register set. For x86, these are
        # the DRx registers, which have no LLDB-kind register number and thus
        # cannot be read via normal
        # NativeRegisterContext::ReadRegister(reg_info,...) calls.
        if "set" not in reg_info:
            continue

        # Clear existing packet expectations.
        self.reset_test_sequence()

        # Run the register query.
        self.test_sequence.add_log_lines(
            ["read packet: $p{0:x}#00".format(reg_index),
             {"direction": "send", "regex": r"^\$([0-9a-fA-F]+)#", "capture": {1: "p_response"}}],
            True)
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Verify the response length.
        p_response = context.get("p_response")
        self.assertIsNotNone(p_response)

        if "dynamic_size_dwarf_expr_bytes" in reg_info:
            self.updateRegInfoBitsize(reg_info, byte_order)
        # Use floor division: under Python 3 "/" yields a float, turning
        # this into an int-vs-float comparison.
        self.assertEqual(len(p_response), 2 * int(reg_info["bitsize"]) // 8)
@debugserver_test
@skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_p_returns_correct_data_size_for_each_qRegisterInfo_launch_debugserver(
        self):
    """Launch flavor of the $p data-size check, against debugserver."""
    self.init_debugserver_test()
    self.build()
    self.set_inferior_startup_launch()
    self.p_returns_correct_data_size_for_each_qRegisterInfo()

@llgs_test
def test_p_returns_correct_data_size_for_each_qRegisterInfo_launch_llgs(
        self):
    """Launch flavor of the $p data-size check, against lldb-server."""
    self.init_llgs_test()
    self.build()
    self.set_inferior_startup_launch()
    self.p_returns_correct_data_size_for_each_qRegisterInfo()

@debugserver_test
@skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_p_returns_correct_data_size_for_each_qRegisterInfo_attach_debugserver(
        self):
    """Attach flavor of the $p data-size check, against debugserver."""
    self.init_debugserver_test()
    self.build()
    self.set_inferior_startup_attach()
    self.p_returns_correct_data_size_for_each_qRegisterInfo()

@llgs_test
def test_p_returns_correct_data_size_for_each_qRegisterInfo_attach_llgs(
        self):
    """Attach flavor of the $p data-size check, against lldb-server."""
    self.init_llgs_test()
    self.build()
    self.set_inferior_startup_attach()
    self.p_returns_correct_data_size_for_each_qRegisterInfo()
def Hg_switches_to_3_threads(self):
    """Verify $Hg can select each of three threads and $qC reflects it.

    Starts the inferior with two extra threads (main + 2), waits for all
    three to appear, then for each thread sets it current with $Hg and
    checks the subsequent $qC reply names that same thread.
    """
    # Startup the inferior with three threads (main + 2 new ones).
    procs = self.prep_debug_monitor_and_inferior(
        inferior_args=["thread:new", "thread:new"])

    # Let the inferior process have a few moments to start up the thread
    # when launched.  (The launch scenario has no time to run, so threads
    # won't be there yet.)
    self.run_process_then_stop(run_seconds=1)

    # Wait at most x seconds for 3 threads to be present.
    threads = self.wait_for_thread_count(3, timeout_seconds=5)
    self.assertEqual(len(threads), 3)

    # Verify we can $Hg to each thread, and $qC matches the thread we set.
    for thread in threads:
        # Change to each thread, verify current thread id.
        self.reset_test_sequence()
        self.test_sequence.add_log_lines(
            ["read packet: $Hg{0:x}#00".format(thread),  # Set current thread.
             "send packet: $OK#00",
             "read packet: $qC#00",
             {"direction": "send", "regex": r"^\$QC([0-9a-fA-F]+)#", "capture": {1: "thread_id"}}],
            True)
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Verify the thread id.
        self.assertIsNotNone(context.get("thread_id"))
        self.assertEqual(int(context.get("thread_id"), 16), thread)
@debugserver_test
@skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_Hg_switches_to_3_threads_launch_debugserver(self):
    """Launch flavor of Hg_switches_to_3_threads, against debugserver."""
    self.init_debugserver_test()
    self.build()
    self.set_inferior_startup_launch()
    self.Hg_switches_to_3_threads()

@llgs_test
def test_Hg_switches_to_3_threads_launch_llgs(self):
    """Launch flavor of Hg_switches_to_3_threads, against lldb-server."""
    self.init_llgs_test()
    self.build()
    self.set_inferior_startup_launch()
    self.Hg_switches_to_3_threads()

@debugserver_test
@skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_Hg_switches_to_3_threads_attach_debugserver(self):
    """Attach flavor of Hg_switches_to_3_threads, against debugserver."""
    self.init_debugserver_test()
    self.build()
    self.set_inferior_startup_attach()
    self.Hg_switches_to_3_threads()

@llgs_test
def test_Hg_switches_to_3_threads_attach_llgs(self):
    """Attach flavor of Hg_switches_to_3_threads, against lldb-server."""
    self.init_llgs_test()
    self.build()
    self.set_inferior_startup_attach()
    self.Hg_switches_to_3_threads()
def Hc_then_Csignal_signals_correct_thread(self, segfault_signo):
    """Verify $Hc + $C delivers a signal to the selected thread.

    Launches an inferior whose worker threads each raise SIGSEGV, then for
    each reported segfault selects the faulting thread with $Hc and
    continues with $C SIGUSR1, checking (via inferior stdout) that the
    signal handler ran and that each thread is seen exactly once.

    segfault_signo: the stop-signal number the stub is expected to report
    for the segfault (differs between debugserver and lldb-server).
    """
    # NOTE only run this one in inferior-launched mode: we can't grab inferior stdout when running attached,
    # and the test requires getting stdout from the exe.

    NUM_THREADS = 3

    # Startup the inferior with three threads (main + NUM_THREADS-1 worker threads).
    # inferior_args=["thread:print-ids"]
    inferior_args = ["thread:segfault"]
    for i in range(NUM_THREADS - 1):
        # if i > 0:
        # Give time between thread creation/segfaulting for the handler to work.
        # inferior_args.append("sleep:1")
        inferior_args.append("thread:new")
    inferior_args.append("sleep:10")

    # Launch/attach.  (In our case, this should only ever be launched since
    # we need inferior stdout/stderr).
    procs = self.prep_debug_monitor_and_inferior(
        inferior_args=inferior_args)
    self.test_sequence.add_log_lines(["read packet: $c#63"], True)
    context = self.expect_gdbremote_sequence()

    # Let the inferior process have a few moments to start up the thread when launched.
    # context = self.run_process_then_stop(run_seconds=1)

    # Wait at most x seconds for all threads to be present.
    # threads = self.wait_for_thread_count(NUM_THREADS, timeout_seconds=5)
    # self.assertEquals(len(threads), NUM_THREADS)

    signaled_tids = {}
    print_thread_ids = {}

    # Switch to each thread, deliver a signal, and verify signal delivery
    for i in range(NUM_THREADS - 1):
        # Run until SIGSEGV comes in.
        self.reset_test_sequence()
        self.test_sequence.add_log_lines([{"direction": "send",
                                           "regex": r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);",
                                           "capture": {1: "signo",
                                                       2: "thread_id"}}],
                                         True)

        context = self.expect_gdbremote_sequence(timeout_seconds=10)
        self.assertIsNotNone(context)
        signo = context.get("signo")
        self.assertEqual(int(signo, 16), segfault_signo)

        # Ensure we haven't seen this tid yet.
        thread_id = int(context.get("thread_id"), 16)
        self.assertFalse(thread_id in signaled_tids)
        signaled_tids[thread_id] = 1

        # Send SIGUSR1 to the thread that signaled the SIGSEGV.
        self.reset_test_sequence()
        self.test_sequence.add_log_lines(
            [
                # Set the continue thread.
                # Set current thread.
                "read packet: $Hc{0:x}#00".format(thread_id),
                "send packet: $OK#00",

                # Continue sending the signal number to the continue thread.
                # The commented out packet is a way to do this same operation without using
                # a $Hc (but this test is testing $Hc, so we'll stick with the former).
                "read packet: $C{0:x}#00".format(lldbutil.get_signal_number('SIGUSR1')),
                # "read packet: $vCont;C{0:x}:{1:x};c#00".format(lldbutil.get_signal_number('SIGUSR1'), thread_id),

                # FIXME: Linux does not report the thread stop on the delivered signal (SIGUSR1 here). MacOSX debugserver does.
                # But MacOSX debugserver isn't guaranteeing the thread the signal handler runs on, so currently its an XFAIL.
                # Need to rectify behavior here. The linux behavior is more intuitive to me since we're essentially swapping out
                # an about-to-be-delivered signal (for which we already sent a stop packet) to a different signal.
                # {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} },
                # "read packet: $c#63",
                {"type": "output_match", "regex": r"^received SIGUSR1 on thread id: ([0-9a-fA-F]+)\r\nthread ([0-9a-fA-F]+): past SIGSEGV\r\n", "capture": {1: "print_thread_id", 2: "post_handle_thread_id"}},
            ],
            True)

        # Run the sequence.
        context = self.expect_gdbremote_sequence(timeout_seconds=10)
        self.assertIsNotNone(context)

        # Ensure the stop signal is the signal we delivered.
        # stop_signo = context.get("stop_signo")
        # self.assertIsNotNone(stop_signo)
        # self.assertEquals(int(stop_signo,16), lldbutil.get_signal_number('SIGUSR1'))

        # Ensure the stop thread is the thread to which we delivered the signal.
        # stop_thread_id = context.get("stop_thread_id")
        # self.assertIsNotNone(stop_thread_id)
        # self.assertEquals(int(stop_thread_id,16), thread_id)

        # Ensure we haven't seen this thread id yet.  The inferior's
        # self-obtained thread ids are not guaranteed to match the stub
        # tids (at least on MacOSX).
        print_thread_id = context.get("print_thread_id")
        self.assertIsNotNone(print_thread_id)
        print_thread_id = int(print_thread_id, 16)
        self.assertFalse(print_thread_id in print_thread_ids)

        # Now remember this print (i.e. inferior-reflected) thread id and
        # ensure we don't hit it again.
        print_thread_ids[print_thread_id] = 1

        # Ensure post signal-handle thread id matches the thread that
        # initially raised the SIGSEGV.
        post_handle_thread_id = context.get("post_handle_thread_id")
        self.assertIsNotNone(post_handle_thread_id)
        post_handle_thread_id = int(post_handle_thread_id, 16)
        self.assertEqual(post_handle_thread_id, print_thread_id)
@unittest2.expectedFailure()
@debugserver_test
@skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_Hc_then_Csignal_signals_correct_thread_launch_debugserver(self):
    """Launch flavor of Hc_then_Csignal, against debugserver (XFAIL)."""
    self.init_debugserver_test()
    self.build()
    self.set_inferior_startup_launch()
    # Darwin debugserver translates some signals like SIGSEGV into some gdb
    # expectations about fixed signal numbers.
    self.Hc_then_Csignal_signals_correct_thread(self.TARGET_EXC_BAD_ACCESS)

@llgs_test
def test_Hc_then_Csignal_signals_correct_thread_launch_llgs(self):
    """Launch flavor of Hc_then_Csignal, against lldb-server."""
    self.init_llgs_test()
    self.build()
    self.set_inferior_startup_launch()
    self.Hc_then_Csignal_signals_correct_thread(
        lldbutil.get_signal_number('SIGSEGV'))
def m_packet_reads_memory(self):
    """Verify the $m packet reads back memory written into the inferior.

    Launch-only: scrapes the inferior's stdout for the address of its
    message buffer, interrupts it, reads the buffer via $m, and compares
    the hex-decoded contents against the known message.
    """
    import binascii  # local import: py2/py3-portable hex decoding

    # This is the memory we will write into the inferior and then ensure we
    # can read back with $m.
    MEMORY_CONTENTS = "Test contents 0123456789 ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz"

    # Start up the inferior.
    procs = self.prep_debug_monitor_and_inferior(
        inferior_args=[
            "set-message:%s" %
            MEMORY_CONTENTS,
            "get-data-address-hex:g_message",
            "sleep:5"])

    # Run the process
    self.test_sequence.add_log_lines(
        [
            # Start running after initial stop.
            "read packet: $c#63",
            # Match output line that prints the memory address of the message buffer within the inferior.
            # Note we require launch-only testing so we can get inferior output.
            {"type": "output_match", "regex": self.maybe_strict_output_regex(r"data address: 0x([0-9a-fA-F]+)\r\n"),
             "capture": {1: "message_address"}},
            # Now stop the inferior.
            "read packet: {}".format(chr(3)),
            # And wait for the stop notification.
            {"direction": "send", "regex": r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture": {1: "stop_signo", 2: "stop_thread_id"}}],
        True)

    # Run the packet stream.
    context = self.expect_gdbremote_sequence()
    self.assertIsNotNone(context)

    # Grab the message address.
    self.assertIsNotNone(context.get("message_address"))
    message_address = int(context.get("message_address"), 16)

    # Grab contents from the inferior.
    self.reset_test_sequence()
    self.test_sequence.add_log_lines(
        ["read packet: $m{0:x},{1:x}#00".format(message_address, len(MEMORY_CONTENTS)),
         {"direction": "send", "regex": r"^\$(.+)#[0-9a-fA-F]{2}$", "capture": {1: "read_contents"}}],
        True)

    # Run the packet stream.
    context = self.expect_gdbremote_sequence()
    self.assertIsNotNone(context)

    # Ensure what we read from inferior memory is what we wrote.
    self.assertIsNotNone(context.get("read_contents"))
    # str.decode("hex") only exists on Python 2; binascii.unhexlify works
    # on both Python 2 and Python 3.
    read_contents = binascii.unhexlify(
        context.get("read_contents")).decode("utf-8")
    self.assertEqual(read_contents, MEMORY_CONTENTS)
@debugserver_test
@skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_m_packet_reads_memory_debugserver(self):
    """Run m_packet_reads_memory against debugserver (launch-only)."""
    self.init_debugserver_test()
    self.build()
    self.set_inferior_startup_launch()
    self.m_packet_reads_memory()

@llgs_test
def test_m_packet_reads_memory_llgs(self):
    """Run m_packet_reads_memory against lldb-server (launch-only)."""
    self.init_llgs_test()
    self.build()
    self.set_inferior_startup_launch()
    self.m_packet_reads_memory()
def qMemoryRegionInfo_is_supported(self):
    """Verify the stub answers a bare $qMemoryRegionInfo probe with $OK."""
    # Start up the inferior.
    procs = self.prep_debug_monitor_and_inferior()

    # Probe for $qMemoryRegionInfo support (no address argument).
    probe = [
        "read packet: $qMemoryRegionInfo#00",
        "send packet: $OK#00"
    ]
    self.test_sequence.add_log_lines(probe, True)

    self.expect_gdbremote_sequence()
@debugserver_test
@skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_qMemoryRegionInfo_is_supported_debugserver(self):
    """Run qMemoryRegionInfo_is_supported against debugserver."""
    self.init_debugserver_test()
    self.build()
    self.set_inferior_startup_launch()
    self.qMemoryRegionInfo_is_supported()

@llgs_test
def test_qMemoryRegionInfo_is_supported_llgs(self):
    """Run qMemoryRegionInfo_is_supported against lldb-server."""
    self.init_llgs_test()
    self.build()
    self.set_inferior_startup_launch()
    self.qMemoryRegionInfo_is_supported()
def qMemoryRegionInfo_reports_code_address_as_executable(self):
    """Verify qMemoryRegionInfo marks a code address readable+executable.

    Launch-only: scrapes the inferior's stdout for a function address,
    interrupts it, queries the memory region containing that address, and
    checks the region's permissions and bounds.
    """
    # Start up the inferior.
    procs = self.prep_debug_monitor_and_inferior(
        inferior_args=["get-code-address-hex:hello", "sleep:5"])

    # Run the process
    self.test_sequence.add_log_lines(
        [
            # Start running after initial stop.
            "read packet: $c#63",
            # Match output line that prints the memory address of the message buffer within the inferior.
            # Note we require launch-only testing so we can get inferior output.
            {"type": "output_match", "regex": self.maybe_strict_output_regex(r"code address: 0x([0-9a-fA-F]+)\r\n"),
             "capture": {1: "code_address"}},
            # Now stop the inferior.
            "read packet: {}".format(chr(3)),
            # And wait for the stop notification.
            {"direction": "send", "regex": r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture": {1: "stop_signo", 2: "stop_thread_id"}}],
        True)

    # Run the packet stream.
    context = self.expect_gdbremote_sequence()
    self.assertIsNotNone(context)

    # Grab the code address.
    self.assertIsNotNone(context.get("code_address"))
    code_address = int(context.get("code_address"), 16)

    # Grab memory region info from the inferior.
    self.reset_test_sequence()
    self.add_query_memory_region_packets(code_address)

    # Run the packet stream.
    context = self.expect_gdbremote_sequence()
    self.assertIsNotNone(context)
    mem_region_dict = self.parse_memory_region_packet(context)

    # Ensure there are no errors reported.
    self.assertFalse("error" in mem_region_dict)

    # Ensure code address is readable and executable.
    self.assertTrue("permissions" in mem_region_dict)
    self.assertTrue("r" in mem_region_dict["permissions"])
    self.assertTrue("x" in mem_region_dict["permissions"])

    # Ensure the start address and size encompass the address we queried.
    self.assert_address_within_memory_region(code_address, mem_region_dict)
@debugserver_test
@skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_qMemoryRegionInfo_reports_code_address_as_executable_debugserver(
        self):
    """Run the code-address region check against debugserver."""
    self.init_debugserver_test()
    self.build()
    self.set_inferior_startup_launch()
    self.qMemoryRegionInfo_reports_code_address_as_executable()

@llgs_test
def test_qMemoryRegionInfo_reports_code_address_as_executable_llgs(self):
    """Run the code-address region check against lldb-server."""
    self.init_llgs_test()
    self.build()
    self.set_inferior_startup_launch()
    self.qMemoryRegionInfo_reports_code_address_as_executable()
def qMemoryRegionInfo_reports_stack_address_as_readable_writeable(self):
    """Verify qMemoryRegionInfo marks a stack address readable+writeable.

    Launch-only: scrapes the inferior's stdout for a stack address,
    interrupts it, queries the containing memory region, and checks the
    region's permissions and bounds.
    """
    # Start up the inferior.
    procs = self.prep_debug_monitor_and_inferior(
        inferior_args=["get-stack-address-hex:", "sleep:5"])

    # Run the process
    self.test_sequence.add_log_lines(
        [
            # Start running after initial stop.
            "read packet: $c#63",
            # Match output line that prints the memory address of the message buffer within the inferior.
            # Note we require launch-only testing so we can get inferior output.
            {"type": "output_match", "regex": self.maybe_strict_output_regex(r"stack address: 0x([0-9a-fA-F]+)\r\n"),
             "capture": {1: "stack_address"}},
            # Now stop the inferior.
            "read packet: {}".format(chr(3)),
            # And wait for the stop notification.
            {"direction": "send", "regex": r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture": {1: "stop_signo", 2: "stop_thread_id"}}],
        True)

    # Run the packet stream.
    context = self.expect_gdbremote_sequence()
    self.assertIsNotNone(context)

    # Grab the address.
    self.assertIsNotNone(context.get("stack_address"))
    stack_address = int(context.get("stack_address"), 16)

    # Grab memory region info from the inferior.
    self.reset_test_sequence()
    self.add_query_memory_region_packets(stack_address)

    # Run the packet stream.
    context = self.expect_gdbremote_sequence()
    self.assertIsNotNone(context)
    mem_region_dict = self.parse_memory_region_packet(context)

    # Ensure there are no errors reported.
    self.assertFalse("error" in mem_region_dict)

    # Ensure address is readable and writeable.
    self.assertTrue("permissions" in mem_region_dict)
    self.assertTrue("r" in mem_region_dict["permissions"])
    self.assertTrue("w" in mem_region_dict["permissions"])

    # Ensure the start address and size encompass the address we queried.
    self.assert_address_within_memory_region(
        stack_address, mem_region_dict)
@debugserver_test
@skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_qMemoryRegionInfo_reports_stack_address_as_readable_writeable_debugserver(
        self):
    """Run the stack-address region check against debugserver."""
    self.init_debugserver_test()
    self.build()
    self.set_inferior_startup_launch()
    self.qMemoryRegionInfo_reports_stack_address_as_readable_writeable()

@llgs_test
def test_qMemoryRegionInfo_reports_stack_address_as_readable_writeable_llgs(
        self):
    """Run the stack-address region check against lldb-server."""
    self.init_llgs_test()
    self.build()
    self.set_inferior_startup_launch()
    self.qMemoryRegionInfo_reports_stack_address_as_readable_writeable()
def qMemoryRegionInfo_reports_heap_address_as_readable_writeable(self):
    """Verify qMemoryRegionInfo marks a heap address readable+writeable.

    Launch-only: scrapes the inferior's stdout for a heap address,
    interrupts it, queries the containing memory region, and checks the
    region's permissions and bounds.
    """
    # Start up the inferior.
    procs = self.prep_debug_monitor_and_inferior(
        inferior_args=["get-heap-address-hex:", "sleep:5"])

    # Run the process
    self.test_sequence.add_log_lines(
        [
            # Start running after initial stop.
            "read packet: $c#63",
            # Match output line that prints the memory address of the message buffer within the inferior.
            # Note we require launch-only testing so we can get inferior output.
            {"type": "output_match", "regex": self.maybe_strict_output_regex(r"heap address: 0x([0-9a-fA-F]+)\r\n"),
             "capture": {1: "heap_address"}},
            # Now stop the inferior.
            "read packet: {}".format(chr(3)),
            # And wait for the stop notification.
            {"direction": "send", "regex": r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture": {1: "stop_signo", 2: "stop_thread_id"}}],
        True)

    # Run the packet stream.
    context = self.expect_gdbremote_sequence()
    self.assertIsNotNone(context)

    # Grab the address.
    self.assertIsNotNone(context.get("heap_address"))
    heap_address = int(context.get("heap_address"), 16)

    # Grab memory region info from the inferior.
    self.reset_test_sequence()
    self.add_query_memory_region_packets(heap_address)

    # Run the packet stream.
    context = self.expect_gdbremote_sequence()
    self.assertIsNotNone(context)
    mem_region_dict = self.parse_memory_region_packet(context)

    # Ensure there are no errors reported.
    self.assertFalse("error" in mem_region_dict)

    # Ensure address is readable and writeable.
    self.assertTrue("permissions" in mem_region_dict)
    self.assertTrue("r" in mem_region_dict["permissions"])
    self.assertTrue("w" in mem_region_dict["permissions"])

    # Ensure the start address and size encompass the address we queried.
    self.assert_address_within_memory_region(heap_address, mem_region_dict)
@debugserver_test
@skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_qMemoryRegionInfo_reports_heap_address_as_readable_writeable_debugserver(
        self):
    """Run the heap-address region check against debugserver."""
    self.init_debugserver_test()
    self.build()
    self.set_inferior_startup_launch()
    self.qMemoryRegionInfo_reports_heap_address_as_readable_writeable()

@llgs_test
def test_qMemoryRegionInfo_reports_heap_address_as_readable_writeable_llgs(
        self):
    """Run the heap-address region check against lldb-server."""
    self.init_llgs_test()
    self.build()
    self.set_inferior_startup_launch()
    self.qMemoryRegionInfo_reports_heap_address_as_readable_writeable()
def breakpoint_set_and_remove_work(self, want_hardware=False):
    """Verify Z0/Z1 breakpoint set, hit, PC check, and removal.

    Launch-only: scrapes the inferior's stdout for a function address,
    sets a software (Z0) or hardware (Z1) breakpoint there, continues
    until it is hit, confirms the stop signal is SIGTRAP and the PC equals
    the breakpoint address, then removes the breakpoint and verifies the
    inferior runs to completion producing its normal output.

    want_hardware: when True use a Z1 (hardware) breakpoint instead of Z0.
    """
    # Start up the inferior.
    procs = self.prep_debug_monitor_and_inferior(
        inferior_args=[
            "get-code-address-hex:hello",
            "sleep:1",
            "call-function:hello"])

    # Run the process
    self.add_register_info_collection_packets()
    self.add_process_info_collection_packets()
    self.test_sequence.add_log_lines(
        [  # Start running after initial stop.
            "read packet: $c#63",
            # Match output line that prints the memory address of the function call entry point.
            # Note we require launch-only testing so we can get inferior output.
            {"type": "output_match", "regex": self.maybe_strict_output_regex(r"code address: 0x([0-9a-fA-F]+)\r\n"),
             "capture": {1: "function_address"}},
            # Now stop the inferior.
            "read packet: {}".format(chr(3)),
            # And wait for the stop notification.
            {"direction": "send", "regex": r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture": {1: "stop_signo", 2: "stop_thread_id"}}],
        True)

    # Run the packet stream.
    context = self.expect_gdbremote_sequence()
    self.assertIsNotNone(context)

    # Gather process info - we need endian of target to handle register
    # value conversions.
    process_info = self.parse_process_info_response(context)
    endian = process_info.get("endian")
    self.assertIsNotNone(endian)

    # Gather register info entries.
    reg_infos = self.parse_register_info_packets(context)
    (pc_lldb_reg_index, pc_reg_info) = self.find_pc_reg_info(reg_infos)
    self.assertIsNotNone(pc_lldb_reg_index)
    self.assertIsNotNone(pc_reg_info)

    # Grab the function address.
    self.assertIsNotNone(context.get("function_address"))
    function_address = int(context.get("function_address"), 16)

    # Get current target architecture
    target_arch = self.getArchitecture()

    # Set the breakpoint.
    if (target_arch == "arm") or (target_arch == "aarch64"):
        # TODO: Handle case when setting breakpoint in thumb code
        BREAKPOINT_KIND = 4
    else:
        BREAKPOINT_KIND = 1

    # Set default packet type to Z0 (software breakpoint)
    z_packet_type = 0

    # If hardware breakpoint is requested set packet type to Z1
    if want_hardware == True:
        z_packet_type = 1

    self.reset_test_sequence()
    self.add_set_breakpoint_packets(
        function_address,
        z_packet_type,
        do_continue=True,
        breakpoint_kind=BREAKPOINT_KIND)

    # Run the packet stream.
    context = self.expect_gdbremote_sequence()
    self.assertIsNotNone(context)

    # Verify the stop signal reported was the breakpoint signal number.
    stop_signo = context.get("stop_signo")
    self.assertIsNotNone(stop_signo)
    self.assertEqual(int(stop_signo, 16),
                     lldbutil.get_signal_number('SIGTRAP'))

    # Ensure we did not receive any output.  If the breakpoint was not set, we would
    # see output (from a launched process with captured stdio) printing a hello, world message.
    # That would indicate the breakpoint didn't take.
    self.assertEqual(len(context["O_content"]), 0)

    # Verify that the PC for the main thread is where we expect it - right at the breakpoint address.
    # This acts as a another validation on the register reading code.
    self.reset_test_sequence()
    self.test_sequence.add_log_lines(
        [
            # Print the PC.  This should match the breakpoint address.
            "read packet: $p{0:x}#00".format(pc_lldb_reg_index),
            # Capture $p results.
            {"direction": "send",
             "regex": r"^\$([0-9a-fA-F]+)#",
             "capture": {1: "p_response"}},
        ], True)
    context = self.expect_gdbremote_sequence()
    self.assertIsNotNone(context)

    # Verify the PC is where we expect.  Note response is in endianness of
    # the inferior.
    p_response = context.get("p_response")
    self.assertIsNotNone(p_response)

    # Convert from target endian to int.
    returned_pc = lldbgdbserverutils.unpack_register_hex_unsigned(
        endian, p_response)
    self.assertEqual(returned_pc, function_address)

    # Verify that a breakpoint remove and continue gets us the expected
    # output.
    self.reset_test_sequence()

    # Add breakpoint remove packets
    self.add_remove_breakpoint_packets(
        function_address,
        z_packet_type,
        breakpoint_kind=BREAKPOINT_KIND)
    self.test_sequence.add_log_lines(
        [
            # Continue running.
            "read packet: $c#63",
            # We should now receive the output from the call.
            {"type": "output_match", "regex": r"^hello, world\r\n$"},
            # And wait for program completion.
            {"direction": "send", "regex": r"^\$W00(.*)#[0-9a-fA-F]{2}$"},
        ], True)
    context = self.expect_gdbremote_sequence()
    self.assertIsNotNone(context)
@debugserver_test
@skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_software_breakpoint_set_and_remove_work_debugserver(self):
    """Software (Z0) breakpoint flavor, against debugserver."""
    self.init_debugserver_test()
    if self.getArchitecture() == "arm":
        # TODO: Handle case when setting breakpoint in thumb code
        self.build(dictionary={'CFLAGS_EXTRAS': '-marm'})
    else:
        self.build()
    self.set_inferior_startup_launch()
    self.breakpoint_set_and_remove_work(want_hardware=False)

@llgs_test
@expectedFlakeyLinux("llvm.org/pr25652")
def test_software_breakpoint_set_and_remove_work_llgs(self):
    """Software (Z0) breakpoint flavor, against lldb-server."""
    self.init_llgs_test()
    if self.getArchitecture() == "arm":
        # TODO: Handle case when setting breakpoint in thumb code
        self.build(dictionary={'CFLAGS_EXTRAS': '-marm'})
    else:
        self.build()
    self.set_inferior_startup_launch()
    self.breakpoint_set_and_remove_work(want_hardware=False)

@debugserver_test
@skipUnlessPlatform(oslist=['linux'])
@expectedFailureAndroid
@skipIf(archs=no_match(['arm', 'aarch64']))
@skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_hardware_breakpoint_set_and_remove_work_debugserver(self):
    """Hardware (Z1) breakpoint flavor, against debugserver (ARM Linux only)."""
    self.init_debugserver_test()
    if self.getArchitecture() == "arm":
        # TODO: Handle case when setting breakpoint in thumb code
        self.build(dictionary={'CFLAGS_EXTRAS': '-marm'})
    else:
        self.build()
    self.set_inferior_startup_launch()
    self.breakpoint_set_and_remove_work(want_hardware=True)

@llgs_test
@skipUnlessPlatform(oslist=['linux'])
@expectedFailureAndroid
@skipIf(archs=no_match(['arm', 'aarch64']))
def test_hardware_breakpoint_set_and_remove_work_llgs(self):
    """Hardware (Z1) breakpoint flavor, against lldb-server (ARM Linux only)."""
    self.init_llgs_test()
    if self.getArchitecture() == "arm":
        # TODO: Handle case when setting breakpoint in thumb code
        self.build(dictionary={'CFLAGS_EXTRAS': '-marm'})
    else:
        self.build()
    self.set_inferior_startup_launch()
    self.breakpoint_set_and_remove_work(want_hardware=True)
def qSupported_returns_known_stub_features(self):
    """Verify $qSupported yields a non-empty feature dictionary."""
    # Start up the stub and start/prep the inferior.
    procs = self.prep_debug_monitor_and_inferior()
    self.add_qSupported_packets()

    # Drive the packet stream.
    context = self.expect_gdbremote_sequence()
    self.assertIsNotNone(context)

    # Parse the qSupported reply; the stub must advertise at least one
    # feature.
    features = self.parse_qSupported_response(context)
    self.assertIsNotNone(features)
    self.assertTrue(len(features) > 0)
@debugserver_test
@skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_qSupported_returns_known_stub_features_debugserver(self):
    """Run qSupported_returns_known_stub_features against debugserver."""
    self.init_debugserver_test()
    self.build()
    self.set_inferior_startup_launch()
    self.qSupported_returns_known_stub_features()

@llgs_test
def test_qSupported_returns_known_stub_features_llgs(self):
    """Run qSupported_returns_known_stub_features against lldb-server."""
    self.init_llgs_test()
    self.build()
    self.set_inferior_startup_launch()
    self.qSupported_returns_known_stub_features()
def written_M_content_reads_back_correctly(self):
    """Verify memory written with $M reads back via $m (hex) and $x (binary).

    Launch-only: scrapes the inferior's stdout for its buffer address,
    overwrites the buffer with $M, reads it back with both $m and $x, then
    lets the inferior print the message and checks the printed value.
    """
    import binascii  # local import: py2/py3-portable hex encoding

    TEST_MESSAGE = "Hello, memory"

    # Start up the stub and start/prep the inferior.
    procs = self.prep_debug_monitor_and_inferior(
        inferior_args=[
            "set-message:xxxxxxxxxxxxxX",
            "get-data-address-hex:g_message",
            "sleep:1",
            "print-message:"])
    self.test_sequence.add_log_lines(
        [
            # Start running after initial stop.
            "read packet: $c#63",
            # Match output line that prints the memory address of the message buffer within the inferior.
            # Note we require launch-only testing so we can get inferior output.
            {"type": "output_match", "regex": self.maybe_strict_output_regex(r"data address: 0x([0-9a-fA-F]+)\r\n"),
             "capture": {1: "message_address"}},
            # Now stop the inferior.
            "read packet: {}".format(chr(3)),
            # And wait for the stop notification.
            {"direction": "send", "regex": r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture": {1: "stop_signo", 2: "stop_thread_id"}}],
        True)
    context = self.expect_gdbremote_sequence()
    self.assertIsNotNone(context)

    # Grab the message address.
    self.assertIsNotNone(context.get("message_address"))
    message_address = int(context.get("message_address"), 16)

    # Hex-encode the test message.  (Note: no NUL terminator is appended;
    # the write is exactly len(TEST_MESSAGE) bytes, leaving the inferior's
    # trailing "X" in place.)  str.encode("hex") only exists on Python 2;
    # binascii.hexlify works on both Python 2 and Python 3.
    hex_encoded_message = binascii.hexlify(
        TEST_MESSAGE.encode("utf-8")).decode("ascii")

    # Write the message to the inferior. Verify that we can read it with the hex-encoded (m)
    # and binary (x) memory read packets.
    self.reset_test_sequence()
    self.test_sequence.add_log_lines(
        ["read packet: $M{0:x},{1:x}:{2}#00".format(message_address, len(TEST_MESSAGE), hex_encoded_message),
         "send packet: $OK#00",
         "read packet: $m{0:x},{1:x}#00".format(message_address, len(TEST_MESSAGE)),
         "send packet: ${0}#00".format(hex_encoded_message),
         "read packet: $x{0:x},{1:x}#00".format(message_address, len(TEST_MESSAGE)),
         "send packet: ${0}#00".format(TEST_MESSAGE),
         "read packet: $m{0:x},4#00".format(message_address),
         "send packet: ${0}#00".format(hex_encoded_message[0:8]),
         "read packet: $x{0:x},4#00".format(message_address),
         "send packet: ${0}#00".format(TEST_MESSAGE[0:4]),
         "read packet: $c#63",
         {"type": "output_match", "regex": r"^message: (.+)\r\n$", "capture": {1: "printed_message"}},
         "send packet: $W00#00",
         ], True)
    context = self.expect_gdbremote_sequence()
    self.assertIsNotNone(context)

    # Ensure what we read from inferior memory is what we wrote.
    printed_message = context.get("printed_message")
    self.assertIsNotNone(printed_message)
    self.assertEqual(printed_message, TEST_MESSAGE + "X")
@debugserver_test
@skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_written_M_content_reads_back_correctly_debugserver(self):
    """Run written_M_content_reads_back_correctly against debugserver."""
    self.init_debugserver_test()
    self.build()
    self.set_inferior_startup_launch()
    self.written_M_content_reads_back_correctly()

@llgs_test
@expectedFlakeyLinux("llvm.org/pr25652")
def test_written_M_content_reads_back_correctly_llgs(self):
    """Run written_M_content_reads_back_correctly against lldb-server."""
    self.init_llgs_test()
    self.build()
    self.set_inferior_startup_launch()
    self.written_M_content_reads_back_correctly()
def P_writes_all_gpr_registers(self):
    """Verify the $P packet can write (bit-flip) general-purpose registers.

    Collects register info, flips all bits of each bit-flippable GPR via
    $P, and requires that at least some of the writes succeed (not all —
    see the note at the bottom).
    """
    # Start inferior debug session, grab all register info.
    procs = self.prep_debug_monitor_and_inferior(inferior_args=["sleep:2"])
    self.add_register_info_collection_packets()
    self.add_process_info_collection_packets()

    context = self.expect_gdbremote_sequence()
    self.assertIsNotNone(context)

    # Process register infos.
    reg_infos = self.parse_register_info_packets(context)
    self.assertIsNotNone(reg_infos)
    self.add_lldb_register_index(reg_infos)

    # Process endian.
    process_info = self.parse_process_info_response(context)
    endian = process_info.get("endian")
    self.assertIsNotNone(endian)

    # Pull out the register infos that we think we can bit flip
    # successfully.
    gpr_reg_infos = [
        reg_info for reg_info in reg_infos if self.is_bit_flippable_register(reg_info)]
    self.assertTrue(len(gpr_reg_infos) > 0)

    # Write flipped bit pattern of existing value to each register.
    (successful_writes, failed_writes) = self.flip_all_bits_in_each_register_value(
        gpr_reg_infos, endian)
    # print("successful writes: {}, failed writes: {}".format(successful_writes, failed_writes))
    self.assertTrue(successful_writes > 0)

    # Note: as of this moment, a hefty number of the GPR writes are failing with E32 (everything except rax-rdx, rdi, rsi, rbp).
    # Come back to this. I have the test rigged to verify that at least some
    # of the bit-flip writes work.
@debugserver_test
@skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_P_writes_all_gpr_registers_debugserver(self):
    """Run P_writes_all_gpr_registers against debugserver."""
    self.init_debugserver_test()
    self.build()
    self.set_inferior_startup_launch()
    self.P_writes_all_gpr_registers()

@llgs_test
def test_P_writes_all_gpr_registers_llgs(self):
    """Run P_writes_all_gpr_registers against lldb-server."""
    self.init_llgs_test()
    self.build()
    self.set_inferior_startup_launch()
    self.P_writes_all_gpr_registers()
def P_and_p_thread_suffix_work(self):
    """Write a distinct value to the same register in each of three
    threads using P with the ``;thread:<tid>`` suffix, then read each
    back with p and verify the per-thread values round-trip.
    """
    # Startup the inferior with three threads.
    procs = self.prep_debug_monitor_and_inferior(
        inferior_args=["thread:new", "thread:new"])
    self.add_thread_suffix_request_packets()
    self.add_register_info_collection_packets()
    self.add_process_info_collection_packets()

    context = self.expect_gdbremote_sequence()
    self.assertIsNotNone(context)

    process_info = self.parse_process_info_response(context)
    self.assertIsNotNone(process_info)
    endian = process_info.get("endian")
    self.assertIsNotNone(endian)

    reg_infos = self.parse_register_info_packets(context)
    self.assertIsNotNone(reg_infos)
    self.add_lldb_register_index(reg_infos)

    reg_index = self.select_modifiable_register(reg_infos)
    self.assertIsNotNone(reg_index)
    # BUG FIX: use floor division.  Under Python 3, "/ 8" produces a
    # float, which breaks pack_register_hex(byte_size=...) below, which
    # expects an integral byte count.
    reg_byte_size = int(reg_infos[reg_index]["bitsize"]) // 8
    self.assertTrue(reg_byte_size > 0)

    # Run the process a bit so threads can start up, and collect register
    # info.
    context = self.run_process_then_stop(run_seconds=1)
    self.assertIsNotNone(context)

    # Wait for 3 threads to be present.
    threads = self.wait_for_thread_count(3, timeout_seconds=5)
    self.assertEqual(len(threads), 3)

    expected_reg_values = []
    register_increment = 1
    next_value = None

    # Set the same register in each of 3 threads to a different value.
    # Verify each one has the unique value.
    for thread in threads:
        # If we don't have a next value yet, start it with the initial read
        # value + 1
        if not next_value:
            # Read pre-existing register value.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines(
                ["read packet: $p{0:x};thread:{1:x}#00".format(reg_index, thread),
                 {"direction": "send", "regex": r"^\$([0-9a-fA-F]+)#", "capture": {1: "p_response"}},
                 ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            # Set the next value to use for writing as the increment plus
            # current value.
            p_response = context.get("p_response")
            self.assertIsNotNone(p_response)
            next_value = lldbgdbserverutils.unpack_register_hex_unsigned(
                endian, p_response)

        # Set new value using P and thread suffix.
        self.reset_test_sequence()
        self.test_sequence.add_log_lines(
            [
                "read packet: $P{0:x}={1};thread:{2:x}#00".format(
                    reg_index,
                    lldbgdbserverutils.pack_register_hex(
                        endian,
                        next_value,
                        byte_size=reg_byte_size),
                    thread),
                "send packet: $OK#00",
            ],
            True)
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Save the value we set.
        expected_reg_values.append(next_value)

        # Increment value for next thread to use (we want them all
        # different so we can verify they wrote to each thread correctly
        # next.)
        next_value += register_increment

    # Revisit each thread and verify they have the expected value set for
    # the register we wrote.
    thread_index = 0
    for thread in threads:
        # Read back the register value for this thread.
        self.reset_test_sequence()
        self.test_sequence.add_log_lines(
            ["read packet: $p{0:x};thread:{1:x}#00".format(reg_index, thread),
             {"direction": "send", "regex": r"^\$([0-9a-fA-F]+)#", "capture": {1: "p_response"}},
             ], True)
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Get the register value.
        p_response = context.get("p_response")
        self.assertIsNotNone(p_response)
        read_value = lldbgdbserverutils.unpack_register_hex_unsigned(
            endian, p_response)

        # Make sure we read back what we wrote.
        self.assertEqual(read_value, expected_reg_values[thread_index])
        thread_index += 1

# Note: as of this moment, a hefty number of the GPR writes are failing
# with E32 (everything except rax-rdx, rdi, rsi, rbp).
@debugserver_test
@skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def test_P_and_p_thread_suffix_work_debugserver(self):
    # Per-thread P/p register round-trip, run against debugserver.
    self.init_debugserver_test()
    self.build()
    self.set_inferior_startup_launch()
    self.P_and_p_thread_suffix_work()
@llgs_test
def test_P_and_p_thread_suffix_work_llgs(self):
    # Per-thread P/p register round-trip, run against lldb-server (llgs).
    self.init_llgs_test()
    self.build()
    self.set_inferior_startup_launch()
    self.P_and_p_thread_suffix_work()
| |
"""Test the service primitives."""
import logging
import pytest
from pydicom.uid import UID
from pynetdicom import _config
from pynetdicom.pdu import A_ASSOCIATE_RQ, A_ABORT_RQ, P_DATA_TF
from pynetdicom.pdu_primitives import (
SOPClassExtendedNegotiation,
SOPClassCommonExtendedNegotiation,
MaximumLengthNotification,
ImplementationClassUIDNotification,
ImplementationVersionNameNotification,
P_DATA,
A_RELEASE,
A_ASSOCIATE,
A_P_ABORT,
A_ABORT,
SCP_SCU_RoleSelectionNegotiation,
AsynchronousOperationsWindowNegotiation,
UserIdentityNegotiation,
)
from pynetdicom.presentation import PresentationContext
from pynetdicom.utils import pretty_bytes
LOGGER = logging.getLogger("pynetdicom")
LOGGER.setLevel(logging.CRITICAL)
def print_nice_bytes(bytestream):
    """Print *bytestream* as readable hex, one chunk per line."""
    lines = pretty_bytes(
        bytestream, prefix="b'\\x", delimiter="\\x", items_per_line=10
    )
    # Emit each formatted line exactly as pretty_bytes produced it.
    print("\n".join(lines))
class TestPrimitive_MaximumLengthNotification:
    """Tests for pdu_primitives.MaximumLengthNotification."""

    def test_assignment_and_exceptions(self):
        """Test incorrect setter for maximum_length_received raises"""
        primitive = MaximumLengthNotification()

        # Default value
        assert primitive.maximum_length_received == 16382

        # Valid reassignment
        primitive.maximum_length_received = 45
        assert primitive.maximum_length_received == 45

        # Invalid values raise the appropriate exception type
        for bad, exc in ((45.2, TypeError), (-1, ValueError), ("abc", TypeError)):
            with pytest.raises(exc):
                primitive.maximum_length_received = bad

    def test_conversion(self):
        """Check converting to PDU item works correctly"""
        primitive = MaximumLengthNotification()

        # Default of 16382 encodes as \x3F\xFE
        assert primitive.from_primitive().encode() == (
            b"\x51\x00\x00\x04\x00\x00\x3f\xfe"
        )

        # 0 (unlimited) encodes as \x00\x00
        primitive.maximum_length_received = 0
        assert primitive.from_primitive().encode() == (
            b"\x51\x00\x00\x04\x00\x00\x00\x00"
        )

    def test_string(self):
        """Check the string output."""
        assert "16382 bytes" in str(MaximumLengthNotification())
class TestPrimitive_ImplementationClassUIDNotification:
    """Tests for pdu_primitives.ImplementationClassUIDNotification."""

    # FIX: renamed nose-style ``setup``/``teardown`` to pytest's
    # ``setup_method``/``teardown_method``.  pytest 8 removed nose
    # support, so the original hooks silently stopped running and the
    # global conformance flag was never saved/restored.
    def setup_method(self):
        # Remember the global flag so each test may toggle it freely.
        self.default_conformance = _config.ENFORCE_UID_CONFORMANCE

    def teardown_method(self):
        # Restore the global flag mutated by the conformance tests.
        _config.ENFORCE_UID_CONFORMANCE = self.default_conformance

    def test_uid_conformance_false(self):
        """Test UID conformance with ENFORCE_UID_CONFORMANCE = False"""
        _config.ENFORCE_UID_CONFORMANCE = False

        primitive = ImplementationClassUIDNotification()
        primitive.implementation_class_uid = "abc"
        assert primitive.implementation_class_uid == "abc"

        # The 64-char UID length limit still applies with conformance off
        with pytest.raises(ValueError):
            primitive.implementation_class_uid = "abc" * 22

    def test_uid_conformance_true(self):
        """Test UID conformance with ENFORCE_UID_CONFORMANCE = True"""
        _config.ENFORCE_UID_CONFORMANCE = True

        primitive = ImplementationClassUIDNotification()
        with pytest.raises(ValueError):
            primitive.implementation_class_uid = "abc"

    def test_assignment_and_exceptions(self):
        """Check incorrect setter for implementation_class_uid raises"""
        primitive = ImplementationClassUIDNotification()

        ## Check assignment
        reference_uid = UID("1.2.826.0.1.3680043.9.3811.0.9.0")

        # bytes
        primitive.implementation_class_uid = b"1.2.826.0.1.3680043.9.3811.0.9.0"
        assert primitive.implementation_class_uid == reference_uid
        # str
        primitive.implementation_class_uid = "1.2.826.0.1.3680043.9.3811.0.9.0"
        assert primitive.implementation_class_uid == reference_uid
        # UID
        primitive.implementation_class_uid = UID("1.2.826.0.1.3680043.9.3811.0.9.0")
        assert primitive.implementation_class_uid == reference_uid

        ## Check exceptions
        primitive = ImplementationClassUIDNotification()

        # No value set
        with pytest.raises(ValueError):
            item = primitive.from_primitive()

        # Non UID, bytes or str
        with pytest.raises(TypeError):
            primitive.implementation_class_uid = 45.2
        with pytest.raises(TypeError):
            primitive.implementation_class_uid = 100

    def test_conversion(self):
        """Check converting to PDU item works correctly"""
        primitive = ImplementationClassUIDNotification()
        primitive.implementation_class_uid = UID("1.2.826.0.1.3680043.9.3811.0.9.0")
        item = primitive.from_primitive()

        assert item.encode() == (
            b"\x52\x00\x00\x20\x31\x2e\x32\x2e\x38\x32\x36\x2e\x30\x2e\x31"
            b"\x2e\x33\x36\x38\x30\x30\x34\x33\x2e\x39\x2e\x33\x38\x31\x31\x2e"
            b"\x30\x2e\x39\x2e\x30"
        )

    def test_string(self):
        """Check the string output."""
        primitive = ImplementationClassUIDNotification()
        primitive.implementation_class_uid = UID("1.2.826.0.1.3680043.9.3811.0.9.0")
        assert "1.2.826.0.1.3680043.9.3811.0.9.0" in primitive.__str__()
class TestPrimitive_ImplementationVersionNameNotification:
    """Tests for pdu_primitives.ImplementationVersionNameNotification."""

    def test_assignment_and_exceptions(self):
        """Check incorrect setting for implementation_version_name raises"""
        primitive = ImplementationVersionNameNotification()

        ## Check assignment
        reference_name = "PYNETDICOM_090"

        ## Check maximum length allowable (16 characters)
        primitive.implementation_version_name = "1234567890ABCDEF"
        assert primitive.implementation_version_name == "1234567890ABCDEF"

        # str assignment
        # NOTE(review): the original "# bytes" label here was stale —
        # both of the following assignments use str values.
        primitive.implementation_version_name = "PYNETDICOM_090"
        assert primitive.implementation_version_name == reference_name
        # str
        primitive.implementation_version_name = "PYNETDICOM_090"
        assert primitive.implementation_version_name == reference_name

        # Single character is allowed
        primitive.implementation_version_name = "P"
        assert primitive.implementation_version_name == "P"

        ## Check exceptions
        primitive = ImplementationVersionNameNotification()

        # No value set
        with pytest.raises(ValueError):
            item = primitive.from_primitive()

        # Non UID, bytes or str
        with pytest.raises(TypeError):
            primitive.implementation_version_name = 45.2
        with pytest.raises(TypeError):
            primitive.implementation_version_name = 100

        # 17 characters exceeds the 16-character limit
        bad = "ABCD1234ABCD12345"
        msg = (
            f"Invalid 'Implementation Version Name' value '{bad}' - must not "
            "exceed 16 characters"
        )
        with pytest.raises(ValueError, match=msg):
            primitive.implementation_version_name = bad

        # Empty string and None are accepted
        primitive.implementation_version_name = ""
        assert primitive.implementation_version_name == ""
        primitive.implementation_version_name = None
        assert primitive.implementation_version_name is None

    def test_conversion(self):
        """Check converting to PDU item works correctly"""
        primitive = ImplementationVersionNameNotification()
        primitive.implementation_version_name = "PYNETDICOM_090"
        item = primitive.from_primitive()

        assert item.encode() == (
            b"\x55\x00\x00\x0e\x50\x59\x4e\x45\x54\x44\x49\x43\x4f\x4d\x5f"
            b"\x30\x39\x30"
        )

    def test_string(self):
        """Check the string output."""
        primitive = ImplementationVersionNameNotification()
        primitive.implementation_version_name = "PYNETDICOM3_090"
        assert "PYNETDICOM3_090" in primitive.__str__()
class TestPrimitive_AsynchronousOperationsWindowNegotiation:
    """Tests for pdu_primitives.AsynchronousOperationsWindowNegotiation."""

    def test_assignment_and_exceptions(self):
        """Check incorrect types/values for properties raise exceptions"""
        primitive = AsynchronousOperationsWindowNegotiation()

        # Both window sizes default to 1
        assert primitive.maximum_number_operations_invoked == 1
        assert primitive.maximum_number_operations_performed == 1

        # Valid assignment
        primitive.maximum_number_operations_invoked = 10
        assert primitive.maximum_number_operations_invoked == 10
        primitive.maximum_number_operations_performed = 11
        assert primitive.maximum_number_operations_performed == 11

        # Invalid assignment raises (same rules for both properties)
        bad_values = (
            (45.2, TypeError),
            (-1, ValueError),
            ("ABCD1234ABCD12345", TypeError),
        )
        for value, exc in bad_values:
            with pytest.raises(exc):
                primitive.maximum_number_operations_invoked = value
        for value, exc in bad_values:
            with pytest.raises(exc):
                primitive.maximum_number_operations_performed = value

    def test_conversion(self):
        """Check converting to PDU item works correctly"""
        primitive = AsynchronousOperationsWindowNegotiation()
        primitive.maximum_number_operations_invoked = 10
        primitive.maximum_number_operations_performed = 0

        assert primitive.from_primitive().encode() == (
            b"\x53\x00\x00\x04\x00\x0a\x00\x00"
        )

    def test_string(self):
        """Check the string output."""
        primitive = AsynchronousOperationsWindowNegotiation()
        primitive.maximum_number_operations_invoked = 10
        primitive.maximum_number_operations_performed = 0
        text = str(primitive)
        assert "invoked: 10" in text
        assert "performed: 0" in text
class TestPrimitive_SCP_SCU_RoleSelectionNegotiation:
    """Tests for pdu_primitives.SCP_SCU_RoleSelectionNegotiation."""

    # FIX: renamed nose-style ``setup``/``teardown`` to pytest's
    # ``setup_method``/``teardown_method``.  pytest 8 removed nose
    # support, so the original hooks silently stopped running and the
    # global conformance flag was never saved/restored.
    def setup_method(self):
        # Remember the global flag so each test may toggle it freely.
        self.default_conformance = _config.ENFORCE_UID_CONFORMANCE

    def teardown_method(self):
        # Restore the global flag mutated by the conformance tests.
        _config.ENFORCE_UID_CONFORMANCE = self.default_conformance

    def test_uid_conformance_false(self):
        """Test UID conformance with ENFORCE_UID_CONFORMANCE = False"""
        _config.ENFORCE_UID_CONFORMANCE = False

        primitive = SCP_SCU_RoleSelectionNegotiation()
        primitive.sop_class_uid = "abc"
        assert primitive.sop_class_uid == "abc"

    def test_uid_conformance_true(self):
        """Test UID conformance with ENFORCE_UID_CONFORMANCE = True"""
        _config.ENFORCE_UID_CONFORMANCE = True

        primitive = SCP_SCU_RoleSelectionNegotiation()
        with pytest.raises(ValueError):
            primitive.sop_class_uid = "abc"

    def test_assignment_and_exceptions(self):
        """Check incorrect types/values for properties raise exceptions"""
        primitive = SCP_SCU_RoleSelectionNegotiation()

        ## Check assignment
        # SOP Class UID accepts None, bytes, str and UID
        reference_uid = UID("1.2.840.10008.5.1.4.1.1.2")

        primitive.sop_class_uid = None
        assert primitive.sop_class_uid is None
        primitive.sop_class_uid = b"1.2.840.10008.5.1.4.1.1.2"
        assert primitive.sop_class_uid == reference_uid
        primitive.sop_class_uid = "1.2.840.10008.5.1.4.1.1.2"
        assert primitive.sop_class_uid == reference_uid
        primitive.sop_class_uid = UID("1.2.840.10008.5.1.4.1.1.2")
        assert primitive.sop_class_uid == reference_uid

        # SCP Role: None or bool
        primitive.scp_role = None
        assert primitive.scp_role is None
        primitive.scp_role = False
        assert primitive.scp_role is False

        # SCU Role: None or bool
        primitive.scu_role = None
        assert primitive.scu_role is None
        primitive.scu_role = True
        assert primitive.scu_role is True

        ## Check exceptions
        with pytest.raises(TypeError):
            primitive.sop_class_uid = 10
        with pytest.raises(TypeError):
            primitive.sop_class_uid = 45.2
        with pytest.raises(TypeError):
            primitive.scp_role = 1
        with pytest.raises(TypeError):
            primitive.scp_role = "abc"
        with pytest.raises(TypeError):
            primitive.scu_role = 1
        with pytest.raises(TypeError):
            primitive.scu_role = "abc"

        # No value set: from_primitive() requires sop_class_uid plus at
        # least one role
        primitive = SCP_SCU_RoleSelectionNegotiation()
        with pytest.raises(ValueError):
            item = primitive.from_primitive()

        primitive.sop_class_uid = b"1.2.840.10008.5.1.4.1.1.2"
        with pytest.raises(ValueError):
            item = primitive.from_primitive()

        primitive.scp_role = False
        with pytest.raises(ValueError):
            item = primitive.from_primitive()

        # scu_role alone is sufficient; missing scp_role defaults to False
        primitive = SCP_SCU_RoleSelectionNegotiation()
        primitive.sop_class_uid = b"1.2.840.10008.5.1.4.1.1.2"
        primitive.scu_role = True
        item = primitive.from_primitive()
        assert item.scu_role
        assert not item.scp_role

        # Both roles set but no sop_class_uid still raises
        primitive = SCP_SCU_RoleSelectionNegotiation()
        primitive.scp_role = True
        primitive.scu_role = True
        with pytest.raises(ValueError):
            item = primitive.from_primitive()

    def test_conversion(self):
        """Check converting to PDU item works correctly"""
        primitive = SCP_SCU_RoleSelectionNegotiation()
        primitive.sop_class_uid = b"1.2.840.10008.5.1.4.1.1.2"
        primitive.scp_role = True
        primitive.scu_role = False
        item = primitive.from_primitive()

        assert item.encode() == (
            b"\x54\x00\x00\x1d\x00\x19\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30"
            b"\x30\x30\x38\x2e\x35\x2e\x31\x2e\x34\x2e\x31\x2e\x31\x2e\x32"
            b"\x00\x01"
        )

        # SCU and SCP roles both False is invalid
        primitive = SCP_SCU_RoleSelectionNegotiation()
        primitive.sop_class_uid = b"1.2.840.10008.5.1.4.1.1.2"
        primitive.scp_role = False
        primitive.scu_role = False
        with pytest.raises(ValueError):
            primitive.from_primitive()
class TestPrimitive_SOPClassExtendedNegotiation:
    """Tests for pdu_primitives.SOPClassExtendedNegotiation."""

    # FIX: renamed nose-style ``setup``/``teardown`` to pytest's
    # ``setup_method``/``teardown_method``.  pytest 8 removed nose
    # support, so the original hooks silently stopped running and the
    # global conformance flag was never saved/restored.
    def setup_method(self):
        # Remember the global flag so each test may toggle it freely.
        self.default_conformance = _config.ENFORCE_UID_CONFORMANCE

    def teardown_method(self):
        # Restore the global flag mutated by the conformance tests.
        _config.ENFORCE_UID_CONFORMANCE = self.default_conformance

    def test_uid_conformance_false(self):
        """Test UID conformance with ENFORCE_UID_CONFORMANCE = False"""
        _config.ENFORCE_UID_CONFORMANCE = False

        primitive = SOPClassExtendedNegotiation()
        primitive.sop_class_uid = "abc"
        assert primitive.sop_class_uid == "abc"

    def test_uid_conformance_true(self):
        """Test UID conformance with ENFORCE_UID_CONFORMANCE = True"""
        _config.ENFORCE_UID_CONFORMANCE = True

        primitive = SOPClassExtendedNegotiation()
        with pytest.raises(ValueError):
            primitive.sop_class_uid = "abc"

    def test_assignment_and_exceptions(self):
        """Check incorrect types/values for properties raise exceptions"""
        primitive = SOPClassExtendedNegotiation()

        ## Check assignment
        # SOP Class UID accepts None, bytes, str and UID
        reference_uid = UID("1.2.840.10008.5.1.4.1.1.2")

        primitive.sop_class_uid = None
        assert primitive.sop_class_uid is None
        primitive.sop_class_uid = b"1.2.840.10008.5.1.4.1.1.2"
        assert primitive.sop_class_uid == reference_uid
        primitive.sop_class_uid = "1.2.840.10008.5.1.4.1.1.2"
        assert primitive.sop_class_uid == reference_uid
        primitive.sop_class_uid = UID("1.2.840.10008.5.1.4.1.1.2")
        assert primitive.sop_class_uid == reference_uid

        # Service Class Application Information: None or bytes
        primitive.service_class_application_information = None
        assert primitive.service_class_application_information is None
        primitive.service_class_application_information = b"\x02\x00\x03\x00\x01\x00"
        assert primitive.service_class_application_information == (
            b"\x02\x00\x03\x00\x01\x00"
        )

        ## Check exceptions
        # SOP Class UID
        with pytest.raises(TypeError):
            primitive.sop_class_uid = 10
        with pytest.raises(TypeError):
            primitive.sop_class_uid = 45.2

        # Service Class Application Information
        with pytest.raises(TypeError):
            primitive.service_class_application_information = 10
        with pytest.raises(TypeError):
            primitive.service_class_application_information = 45.2

        # from_primitive() requires both values to be set
        primitive = SOPClassExtendedNegotiation()
        with pytest.raises(ValueError):
            item = primitive.from_primitive()

        primitive.sop_class_uid = b"1.2.840.10008.5.1.4.1.1.2"
        with pytest.raises(ValueError):
            item = primitive.from_primitive()

        primitive = SOPClassExtendedNegotiation()
        primitive.service_class_application_information = b"\x02\x00\x03\x00\x01\x00"
        with pytest.raises(ValueError):
            item = primitive.from_primitive()

    def test_conversion(self):
        """Check converting to PDU item works correctly"""
        primitive = SOPClassExtendedNegotiation()
        primitive.sop_class_uid = b"1.2.840.10008.5.1.4.1.1.2"
        primitive.service_class_application_information = b"\x02\x00\x03\x00\x01\x00"
        item = primitive.from_primitive()

        assert item.encode() == (
            b"\x56\x00\x00\x21\x00\x19\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30"
            b"\x30\x30\x38\x2e\x35\x2e\x31\x2e\x34\x2e\x31\x2e\x31\x2e\x32"
            b"\x02\x00\x03\x00\x01\x00"
        )
class TestPrimitive_SOPClassCommonExtendedNegotiation:
    """Tests for pdu_primitives.SOPClassCommonExtendedNegotiation."""

    # FIX: renamed nose-style ``setup``/``teardown`` to pytest's
    # ``setup_method``/``teardown_method``.  pytest 8 removed nose
    # support, so the original hooks silently stopped running and the
    # global conformance flag was never saved/restored.
    def setup_method(self):
        # Remember the global flag so each test may toggle it freely.
        self.default_conformance = _config.ENFORCE_UID_CONFORMANCE

    def teardown_method(self):
        # Restore the global flag mutated by the conformance tests.
        _config.ENFORCE_UID_CONFORMANCE = self.default_conformance

    def test_uid_conformance_false(self):
        """Test UID conformance with ENFORCE_UID_CONFORMANCE = False"""
        _config.ENFORCE_UID_CONFORMANCE = False

        primitive = SOPClassCommonExtendedNegotiation()
        primitive.sop_class_uid = None
        assert primitive.sop_class_uid is None
        primitive.sop_class_uid = "abc"
        assert primitive.sop_class_uid == "abc"
        primitive.service_class_uid = None
        assert primitive.service_class_uid is None
        primitive.service_class_uid = "abc"
        assert primitive.service_class_uid == "abc"
        primitive.related_general_sop_class_identification = ["abc"]
        assert primitive.related_general_sop_class_identification == ["abc"]

        # The 64-char UID length limit still applies with conformance off
        with pytest.raises(ValueError):
            primitive.sop_class_uid = "abc" * 22
        with pytest.raises(ValueError):
            primitive.service_class_uid = "abc" * 22
        with pytest.raises(ValueError):
            primitive.related_general_sop_class_identification = ["abc" * 22]

    def test_uid_conformance_true(self):
        """Test UID conformance with ENFORCE_UID_CONFORMANCE = True"""
        _config.ENFORCE_UID_CONFORMANCE = True

        primitive = SOPClassCommonExtendedNegotiation()
        with pytest.raises(ValueError):
            primitive.sop_class_uid = "abc"
        with pytest.raises(ValueError):
            primitive.service_class_uid = "abc"
        with pytest.raises(ValueError):
            primitive.related_general_sop_class_identification = ["abc"]

    def test_assignment_and_exceptions(self):
        """Check incorrect types/values for properties raise exceptions"""
        primitive = SOPClassCommonExtendedNegotiation()

        ## Check assignment
        # SOP Class UID accepts bytes, str and UID
        reference_uid = UID("1.2.840.10008.5.1.4.1.1.2")

        primitive.sop_class_uid = b"1.2.840.10008.5.1.4.1.1.2"
        assert primitive.sop_class_uid == reference_uid
        primitive.sop_class_uid = "abc"
        assert primitive.sop_class_uid == "abc"
        primitive.sop_class_uid = "1.2.840.10008.5.1.4.1.1.2"
        assert primitive.sop_class_uid == reference_uid
        primitive.sop_class_uid = UID("1.2.840.10008.5.1.4.1.1.2")
        assert primitive.sop_class_uid == reference_uid

        # Service Class UID accepts bytes, str and UID
        primitive.service_class_uid = b"1.2.840.10008.5.1.4.1.1.2"
        assert primitive.service_class_uid == reference_uid
        primitive.service_class_uid = "abc"
        assert primitive.service_class_uid == "abc"
        primitive.service_class_uid = "1.2.840.10008.5.1.4.1.1.2"
        assert primitive.service_class_uid == reference_uid
        primitive.service_class_uid = UID("1.2.840.10008.5.1.4.1.1.2")
        assert primitive.service_class_uid == reference_uid

        # Related General SOP Class Identification: list of bytes/str/UID,
        # all converted to UID
        ref_list = [
            UID("1.2.840.10008.5.1.4.1.1.2"),
            UID("1.2.840.10008.5.1.4.1.1.3"),
            UID("1.2.840.10008.5.1.4.1.1.4"),
        ]

        uid_list = []
        uid_list.append(b"1.2.840.10008.5.1.4.1.1.2")
        uid_list.append("1.2.840.10008.5.1.4.1.1.3")
        uid_list.append(UID("1.2.840.10008.5.1.4.1.1.4"))
        primitive.related_general_sop_class_identification = uid_list
        assert primitive.related_general_sop_class_identification == ref_list

        primitive.related_general_sop_class_identification = ["abc"]
        assert primitive.related_general_sop_class_identification == ["abc"]

        # A bare str is rejected - must be a list
        with pytest.raises(TypeError):
            primitive.related_general_sop_class_identification = "test"

        ## Check exceptions
        # SOP Class UID
        with pytest.raises(TypeError):
            primitive.sop_class_uid = 10
        with pytest.raises(TypeError):
            primitive.sop_class_uid = 45.2

        # Service Class UID
        with pytest.raises(TypeError):
            primitive.service_class_uid = 10
        with pytest.raises(TypeError):
            primitive.service_class_uid = 45.2

        # Related General SOP Class Identification
        with pytest.raises(TypeError):
            primitive.related_general_sop_class_identification = [10]
        with pytest.raises(TypeError):
            primitive.related_general_sop_class_identification = [45.2]

        # from_primitive() requires both sop_class_uid and service_class_uid
        primitive = SOPClassCommonExtendedNegotiation()
        with pytest.raises(ValueError):
            item = primitive.from_primitive()

        primitive.sop_class_uid = b"1.2.840.10008.5.1.4.1.1.2"
        with pytest.raises(ValueError):
            item = primitive.from_primitive()

        primitive = SOPClassCommonExtendedNegotiation()
        primitive.service_class_uid = b"1.2.840.10008.5.1.4.1.1.2"
        with pytest.raises(ValueError):
            item = primitive.from_primitive()

    def test_conversion(self):
        """Check converting to PDU item works correctly"""
        primitive = SOPClassCommonExtendedNegotiation()
        primitive.sop_class_uid = b"1.2.840.10008.5.1.4.1.1.4"
        primitive.service_class_uid = b"1.2.840.10008.4.2"
        primitive.related_general_sop_class_identification = [
            "1.2.840.10008.5.1.4.1.1.88.22"
        ]
        item = primitive.from_primitive()

        assert item.encode() == (
            b"\x57\x00\x00\x4f\x00\x19\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30"
            b"\x30\x30\x38\x2e\x35\x2e\x31\x2e\x34\x2e\x31\x2e\x31\x2e\x34\x00"
            b"\x11\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x34"
            b"\x2e\x32\x00\x1f\x00\x1d\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30"
            b"\x30\x30\x38\x2e\x35\x2e\x31\x2e\x34\x2e\x31\x2e\x31\x2e\x38\x38"
            b"\x2e\x32\x32"
        )
class TestPrimitive_UserIdentityNegotiation:
    """Tests for pdu_primitives.UserIdentityNegotiation."""

    def test_assignment_and_exceptions(self):
        """Check incorrect types/values for properties raise exceptions"""
        primitive = UserIdentityNegotiation()

        # user_identity_type accepts only 1-5
        for type_no in [1, 2, 3, 4, 5]:
            primitive.user_identity_type = type_no
            assert primitive.user_identity_type == type_no

        with pytest.raises(ValueError):
            primitive.user_identity_type = 6

        with pytest.raises(TypeError):
            primitive.user_identity_type = "a"

        # positive_response_requested must be bool
        primitive.positive_response_requested = True
        assert primitive.positive_response_requested

        with pytest.raises(TypeError):
            primitive.positive_response_requested = "test"

        # primary_field: None or bytes
        primitive.primary_field = None
        assert primitive.primary_field is None
        primitive.primary_field = b"\x00\x01"
        assert primitive.primary_field == b"\x00\x01"

        with pytest.raises(TypeError):
            primitive.primary_field = ["test"]

        # secondary_field: None or bytes
        primitive.secondary_field = b"\x00\x21"
        assert primitive.secondary_field == b"\x00\x21"
        primitive.secondary_field = None
        assert primitive.secondary_field is None

        with pytest.raises(TypeError):
            primitive.secondary_field = ["test"]

        # server_response: None or bytes
        primitive.server_response = None
        assert primitive.server_response is None
        primitive.server_response = b"\x00\x31"
        assert primitive.server_response == b"\x00\x31"

        with pytest.raises(TypeError):
            primitive.server_response = ["test"]

        # from_primitive() with nothing set raises
        primitive = UserIdentityNegotiation()
        with pytest.raises(ValueError):
            primitive.from_primitive()

        primitive.user_identity_type = None
        assert primitive.user_identity_type is None

        # Type 2 requires a secondary field, so this still raises
        primitive.user_identity_type = 2
        with pytest.raises(ValueError):
            primitive.from_primitive()

    def test_string(self):
        """Check string output."""
        primitive = UserIdentityNegotiation()
        primitive.user_identity_type = 1
        primitive.positive_response_requested = True
        primitive.primary_field = b"\x00\x01"
        primitive.secondary_field = b"\x00\x21"
        assert "requested: True" in primitive.__str__()
        assert "type: 1" in primitive.__str__()
        assert "Primary" in primitive.__str__()
        assert "Secondary" in primitive.__str__()

        # Server response only appears once set
        primitive.server_response = b"\x00\x31"
        assert "Server response" in primitive.__str__()

    def test_conversion(self):
        """Check converting to PDU item works correctly"""
        primitive = UserIdentityNegotiation()

        # -RQ: type 1 with a primary field converts fine
        primitive.user_identity_type = 1
        primitive.primary_field = b"test"
        item = primitive.from_primitive()

        # Type 2 with an empty secondary field is invalid
        primitive.user_identity_type = 2
        primitive.secondary_field = b""
        with pytest.raises(ValueError):
            item = primitive.from_primitive()

        # -AC: only server_response is needed
        primitive = UserIdentityNegotiation()
        primitive.server_response = b"Test"
        item = primitive.from_primitive()

        assert item.encode() == b"\x59\x00\x00\x06\x00\x04\x54\x65\x73\x74"
class TestPrimitive_A_ASSOCIATE:
def setup(self):
    # Save the global UID-conformance flag so teardown can restore it.
    # NOTE(review): nose-style ``setup`` is no longer invoked by
    # pytest >= 8; consider renaming to ``setup_method`` — confirm the
    # project's pytest floor before relying on this hook.
    self.default_conformance = _config.ENFORCE_UID_CONFORMANCE
def teardown(self):
    # Restore the global flag mutated by the conformance tests.
    # NOTE(review): nose-style ``teardown`` is no longer invoked by
    # pytest >= 8; consider renaming to ``teardown_method``.
    _config.ENFORCE_UID_CONFORMANCE = self.default_conformance
def test_uid_conformance_false(self):
    """Test UID conformance with ENFORCE_UID_CONFORMANCE = False"""
    _config.ENFORCE_UID_CONFORMANCE = False

    # A non-conformant UID value is accepted when enforcement is off
    assoc = A_ASSOCIATE()
    assoc.application_context_name = "abc"
    assert assoc.application_context_name == "abc"
def test_uid_conformance_true(self):
    """Test UID conformance with ENFORCE_UID_CONFORMANCE = True"""
    _config.ENFORCE_UID_CONFORMANCE = True

    # The same non-conformant UID value is rejected when enforcement is on
    assoc = A_ASSOCIATE()
    with pytest.raises(ValueError):
        assoc.application_context_name = "abc"
def test_assignment(self):
    """Check assignment works correctly"""
    assoc = A_ASSOCIATE()

    # mode, presentation_requirements and session_requirements are
    # fixed-value, read-only properties
    with pytest.raises(AttributeError):
        assoc.mode = "test value"
    with pytest.raises(AttributeError):
        assoc.presentation_requirements = "test value2"
    with pytest.raises(AttributeError):
        assoc.session_requirements = "test value3"

    # application_context_name accepts None, str, bytes and UID
    assoc.application_context_name = None
    assert assoc.application_context_name is None
    assoc.application_context_name = "1.2.840.10008.3.1.1.1"
    assert assoc.application_context_name == UID("1.2.840.10008.3.1.1.1")
    assoc.application_context_name = b"1.2.840.10008.3.1.1.1"
    assert assoc.application_context_name == UID("1.2.840.10008.3.1.1.1")
    assoc.application_context_name = UID("1.2.840.10008.3.1.1.1")
    assert assoc.application_context_name == UID("1.2.840.10008.3.1.1.1")

    # AE titles must be str, not None
    msg = "'Calling AE Title' must be str, not 'NoneType'"
    with pytest.raises(TypeError, match=msg):
        assoc.calling_ae_title = None
    assoc.calling_ae_title = "ABCDEF1234567890"
    assert assoc.calling_ae_title == "ABCDEF1234567890"

    msg = "'Called AE Title' must be str, not 'NoneType'"
    with pytest.raises(TypeError, match=msg):
        assoc.called_ae_title = None
    # responding_ae_title always mirrors called_ae_title
    assert assoc.responding_ae_title == assoc.called_ae_title
    assoc.called_ae_title = "1234567890ABCDEF"
    assert assoc.called_ae_title == "1234567890ABCDEF"
    assert assoc.responding_ae_title == assoc.called_ae_title

    # user_information keeps only recognised negotiation item types
    max_length = MaximumLengthNotification()
    max_length.maximum_length_received = 31222
    assoc.user_information.append(max_length)
    assert assoc.user_information[0].maximum_length_received == 31222
    assoc.user_information = ["a", max_length]
    assert assoc.user_information == [max_length]

    # result: None or 0, 1, 2
    assoc.result = None
    assert assoc.result is None
    assoc.result = 0
    assoc.result = 1
    assert assoc.result == 1
    assoc.result = 2
    assert assoc.result == 2

    # result_source: None or 1, 2, 3
    assoc.result_source = None
    assert assoc.result_source is None
    assoc.result_source = 1
    assert assoc.result_source == 1
    assoc.result_source = 2
    assert assoc.result_source == 2
    assoc.result_source = 3
    assert assoc.result_source == 3

    # diagnostic: None or 1, 2, 3, 7
    assoc.diagnostic = None
    assert assoc.diagnostic is None
    assoc.diagnostic = 1
    assert assoc.diagnostic == 1
    assoc.diagnostic = 2
    assert assoc.diagnostic == 2
    assoc.diagnostic = 3
    assert assoc.diagnostic == 3
    assoc.diagnostic = 7
    assert assoc.diagnostic == 7

    # Presentation addresses are (host, port) tuples or None
    assoc.calling_presentation_address = None
    assert assoc.calling_presentation_address is None
    assoc.calling_presentation_address = ("10.40.94.43", 105)
    assert assoc.calling_presentation_address == ("10.40.94.43", 105)

    assoc.called_presentation_address = None
    assert assoc.called_presentation_address is None
    assoc.called_presentation_address = ("10.40.94.44", 106)
    assert assoc.called_presentation_address == ("10.40.94.44", 106)

    # Presentation context lists keep only PresentationContext instances
    pc = PresentationContext()
    pc.context_id = 1
    assoc.presentation_context_definition_list = [pc]
    assert assoc.presentation_context_definition_list == [pc]
    assoc.presentation_context_definition_list = ["a", pc]
    assert assoc.presentation_context_definition_list == [pc]

    assoc.presentation_context_definition_results_list = [pc]
    assert assoc.presentation_context_definition_results_list == [pc]
    assoc.presentation_context_definition_results_list = ["a", pc]
    assert assoc.presentation_context_definition_results_list == [pc]

    assoc = A_ASSOCIATE()
    # No maximum_length_received set
    assert assoc.maximum_length_received is None

    # No MaximumLengthNotification present: setter creates the item
    assoc.maximum_length_received = 31223
    assert assoc.user_information[0].maximum_length_received == 31223
    assert assoc.maximum_length_received == 31223

    # MaximumLengthNotification already present: setter updates in place
    assoc.maximum_length_received = 31224
    assert assoc.maximum_length_received == 31224

    # No ImplementationClassUIDNotification present: setter creates it
    assoc.implementation_class_uid = "1.1.2.3.4"
    assert assoc.user_information[1].implementation_class_uid == UID("1.1.2.3.4")
    assert assoc.implementation_class_uid == UID("1.1.2.3.4")

    # ImplementationClassUIDNotification already present
    assoc.implementation_class_uid = "1.1.2.3.4"
    assert assoc.implementation_class_uid == UID("1.1.2.3.4")
def test_exceptions(self):
    """Check incorrect types/values for properties raise exceptions"""
    assoc = A_ASSOCIATE()

    # application_context_name: non UID/str/bytes rejected
    with pytest.raises(TypeError):
        assoc.application_context_name = 10
    with pytest.raises(TypeError):
        assoc.application_context_name = 45.2

    # calling_ae_title: must be a non-empty, non-blank str
    with pytest.raises(TypeError):
        assoc.calling_ae_title = 45.2
    with pytest.raises(TypeError):
        assoc.calling_ae_title = 100
    with pytest.raises(ValueError):
        assoc.calling_ae_title = ""
    with pytest.raises(ValueError):
        assoc.calling_ae_title = " "

    # called_ae_title: same rules
    with pytest.raises(TypeError):
        assoc.called_ae_title = 45.2
    with pytest.raises(TypeError):
        assoc.called_ae_title = 100
    with pytest.raises(ValueError):
        assoc.called_ae_title = ""
    with pytest.raises(ValueError):
        assoc.called_ae_title = " "

    # user_information: must be a list
    with pytest.raises(TypeError):
        assoc.user_information = 45.2

    # result: only 0, 1, 2 valid
    with pytest.raises(ValueError):
        assoc.result = -1
    with pytest.raises(ValueError):
        assoc.result = 3

    # result_source: only 1, 2, 3 valid
    with pytest.raises(ValueError):
        assoc.result_source = 0
    with pytest.raises(ValueError):
        assoc.result_source = 4

    # diagnostic: only 1, 2, 3, 7 valid
    with pytest.raises(ValueError):
        assoc.diagnostic = 0
    with pytest.raises(ValueError):
        assoc.diagnostic = 4
    with pytest.raises(ValueError):
        assoc.diagnostic = 5
    with pytest.raises(ValueError):
        assoc.diagnostic = 6
    with pytest.raises(ValueError):
        assoc.diagnostic = 8

    # calling_presentation_address: must be a (str, int) tuple
    with pytest.raises(TypeError):
        assoc.calling_presentation_address = ["10.40.94.43", 105]
    with pytest.raises(TypeError):
        assoc.calling_presentation_address = (105, "10.40.94.43")

    # called_presentation_address: same rules
    with pytest.raises(TypeError):
        assoc.called_presentation_address = ["10.40.94.43", 105]
    with pytest.raises(TypeError):
        assoc.called_presentation_address = (105, "10.40.94.43")

    # presentation_context_definition_list: must be a list
    with pytest.raises(TypeError):
        assoc.presentation_context_definition_list = 45.2

    # presentation_context_definition_results_list: must be a list
    with pytest.raises(TypeError):
        assoc.presentation_context_definition_results_list = 45.2

    # implementation_class_uid: getter raises when no value available,
    # even if an (empty) notification item is present
    with pytest.raises(ValueError):
        x = assoc.implementation_class_uid

    imp_uid = ImplementationClassUIDNotification()
    assoc.user_information.append(imp_uid)
    with pytest.raises(ValueError):
        x = assoc.implementation_class_uid
def test_conversion(self):
    """Check conversion to a PDU produces the correct output"""
    # Build a fully-populated A-ASSOCIATE primitive.
    primitive = A_ASSOCIATE()
    primitive.application_context_name = "1.2.840.10008.3.1.1.1"
    primitive.calling_ae_title = "ECHOSCU"
    primitive.called_ae_title = "ANY-SCP"
    primitive.maximum_length_received = 16382
    primitive.implementation_class_uid = "1.2.826.0.1.3680043.9.3811.0.9.0"

    version_name = ImplementationVersionNameNotification()
    version_name.implementation_version_name = "PYNETDICOM_090"
    primitive.user_information.append(version_name)

    context = PresentationContext()
    context.context_id = 1
    context.abstract_syntax = "1.2.840.10008.1.1"
    context.transfer_syntax = ["1.2.840.10008.1.2"]
    primitive.presentation_context_definition_list = [context]

    # Convert to an A-ASSOCIATE-RQ PDU and compare against the known
    # reference encoding.
    request = A_ASSOCIATE_RQ()
    request.from_primitive(primitive)
    encoded = request.encode()

    reference = (
        b"\x01\x00\x00\x00\x00\xd1\x00\x01\x00\x00\x41\x4e\x59\x2d\x53\x43"
        b"\x50\x20\x20\x20\x20\x20\x20\x20\x20\x20\x45\x43\x48\x4f\x53\x43"
        b"\x55\x20\x20\x20\x20\x20\x20\x20\x20\x20\x00\x00\x00\x00\x00\x00"
        b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
        b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x15\x31\x2e"
        b"\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x33\x2e\x31\x2e"
        b"\x31\x2e\x31\x20\x00\x00\x2e\x01\x00\x00\x00\x30\x00\x00\x11\x31"
        b"\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x31\x2e\x31"
        b"\x40\x00\x00\x11\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30"
        b"\x38\x2e\x31\x2e\x32\x50\x00\x00\x3e\x51\x00\x00\x04\x00\x00\x3f"
        b"\xfe\x52\x00\x00\x20\x31\x2e\x32\x2e\x38\x32\x36\x2e\x30\x2e\x31"
        b"\x2e\x33\x36\x38\x30\x30\x34\x33\x2e\x39\x2e\x33\x38\x31\x31\x2e"
        b"\x30\x2e\x39\x2e\x30\x55\x00\x00\x0e\x50\x59\x4e\x45\x54\x44\x49"
        b"\x43\x4f\x4d\x5f\x30\x39\x30"
    )
    assert encoded == reference
def test_invalid_result_str(self, caplog):
    """Test an invalid result value gets logged and doesn't raise."""
    primitive = A_ASSOCIATE()
    primitive.result = None
    # The accessor should fall back to a placeholder and emit a warning.
    with caplog.at_level(logging.WARNING, logger="pynetdicom"):
        assert primitive.result_str == "(no value available)"
        assert "Invalid A-ASSOCIATE 'Result' None" in caplog.text
def test_invalid_source_str(self, caplog):
    """Test an invalid source value gets logged and doesn't raise."""
    primitive = A_ASSOCIATE()
    primitive.result_source = None
    # The accessor should fall back to a placeholder and emit a warning.
    with caplog.at_level(logging.WARNING, logger="pynetdicom"):
        assert primitive.source_str == "(no value available)"
        assert "Invalid A-ASSOCIATE 'Result Source' None" in caplog.text
def test_invalid_reason_str(self, caplog):
    """Test an invalid diagnostic value gets logged and doesn't raise."""
    primitive = A_ASSOCIATE()
    primitive.result = 1
    primitive.result_source = 2
    # 7 is not a valid diagnostic for result source 2.
    primitive.diagnostic = 7
    with caplog.at_level(logging.WARNING, logger="pynetdicom"):
        assert primitive.reason_str == "(no value available)"
        expected = (
            "Invalid A-ASSOCIATE 'Result Source' 2 and/or 'Diagnostic' 7 values"
        )
        assert expected in caplog.text
class TestPrimitive_A_RELEASE:
    """Tests for the A-RELEASE service primitive."""

    def test_assignment(self):
        """Check assignment works correctly"""
        primitive = A_RELEASE()
        # `reason` is fixed to "normal"; only `result` is assignable.
        assert primitive.reason == "normal"
        primitive.result = "affirmative"
        assert primitive.result == "affirmative"

    def test_exceptions(self):
        """Check incorrect types/values for properties raise exceptions"""
        primitive = A_RELEASE()
        with pytest.raises(AttributeError):
            primitive.reason = "something"
        with pytest.raises(ValueError):
            primitive.result = "accepted"
class TestPrimitive_A_ABORT:
    """Tests for the A-ABORT service primitive."""

    def test_assignment(self):
        """Check assignment works correctly"""
        primitive = A_ABORT()
        # 0, 1 and 2 are the only valid abort sources.
        for source in (0, 1, 2):
            primitive.abort_source = source
            assert primitive.abort_source == source

    def test_exceptions(self):
        """Check incorrect types/values for properties raise exceptions"""
        primitive = A_ABORT()
        with pytest.raises(ValueError):
            primitive.abort_source = 3
        # Reading an unset abort source also raises.
        with pytest.raises(ValueError):
            primitive.abort_source

    def test_conversion(self):
        """Check conversion to a PDU produces the correct output"""
        primitive = A_ABORT()
        primitive.abort_source = 0
        pdu = A_ABORT_RQ()
        pdu.from_primitive(primitive)
        assert pdu.encode() == b"\x07\x00\x00\x00\x00\x04\x00\x00\x00\x00"
class TestPrimitive_A_P_ABORT:
    """Tests for the A-P-ABORT service primitive."""

    def test_assignment(self):
        """Check assignment works correctly"""
        primitive = A_P_ABORT()
        # Every valid provider reason (3 is the one invalid value in 0-6).
        for reason in (0, 1, 2, 4, 5, 6):
            primitive.provider_reason = reason
            assert primitive.provider_reason == reason

    def test_exceptions(self):
        """Check incorrect types/values for properties raise exceptions"""
        primitive = A_P_ABORT()
        with pytest.raises(ValueError):
            primitive.provider_reason = 3
        # Reading an unset provider reason also raises.
        with pytest.raises(ValueError):
            primitive.provider_reason

    def test_conversion(self):
        """Check conversion to a PDU produces the correct output"""
        primitive = A_P_ABORT()
        primitive.provider_reason = 4
        pdu = A_ABORT_RQ()
        pdu.from_primitive(primitive)
        assert pdu.encode() == b"\x07\x00\x00\x00\x00\x04\x00\x00\x02\x04"
class TestPrimitive_P_DATA:
    """Tests for the P-DATA service primitive."""

    def test_assignment(self):
        """Check assignment works correctly"""
        primitive = P_DATA()
        primitive.presentation_data_value_list = [[1, b"\x00"]]
        assert primitive.presentation_data_value_list == [[1, b"\x00"]]

    def test_exceptions(self):
        """Check incorrect types/values for properties raise exceptions"""
        primitive = P_DATA()
        # Each value is malformed: not a list of [int, bytes] pairs.
        # NOTE(review): the first entry is checked twice in the original
        # test; both checks are kept to preserve behaviour exactly.
        bad_values = (
            [1, b"\x00"],
            [1, b"\x00"],
            [[b"\x00", 1]],
            "test",
        )
        for value in bad_values:
            with pytest.raises(TypeError):
                primitive.presentation_data_value_list = value

    def test_conversion(self):
        """Check conversion to a PDU produces the correct output"""
        primitive = P_DATA()
        pdv = (
            b"\x03\x00\x00\x00\x00"
            b"\x04\x00\x00\x00\x42\x00\x00\x00\x00\x00\x02\x00\x12\x00\x00"
            b"\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e"
            b"\x31\x2e\x31\x00\x00\x00\x00\x01\x02\x00\x00\x00\x30\x80\x00"
            b"\x00\x20\x01\x02\x00\x00\x00\x01\x00\x00\x00\x00\x08\x02\x00"
            b"\x00\x00\x01\x01\x00\x00\x00\x09\x02\x00\x00\x00\x00\x00"
        )
        primitive.presentation_data_value_list = [[1, pdv]]
        pdu = P_DATA_TF()
        pdu.from_primitive(primitive)
        assert pdu.encode() == b"\x04\x00\x00\x00\x00\x54\x00\x00\x00\x50\x01" + pdv

    def test_string(self):
        """Check the string output."""
        primitive = P_DATA()
        # The first PDV byte is rendered as a binary string in __str__.
        expected = (
            (b"\x00\x00", "Byte: 00000000"),
            (b"\x01\x00", "Byte: 00000001"),
            (b"\x02\x00", "Byte: 00000010"),
            (b"\x03\x00", "Byte: 00000011"),
        )
        for header, snippet in expected:
            primitive.presentation_data_value_list = [[0, header]]
            assert snippet in primitive.__str__()
class TestServiceParameter:
    """Tests for the ServiceParameter base class behaviour."""

    def test_equality(self):
        """Test equality of ServiceParameter subclasses."""
        first = MaximumLengthNotification()
        second = MaximumLengthNotification()
        # Identical fresh instances compare equal (and not unequal).
        assert first == second
        assert not first == "test"
        assert not first != second

        # Diverging attribute values break equality.
        second.maximum_length_received = 12
        assert not first == second
        assert first != second
| |
#!/usr/bin/env python
# Copyright (C) 2016, 2018 IBM Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
_cloud_foundry_tests_
Unit tests for the CloudFoundryService class.
"""
import json
import unittest
from cloudant._common_util import CloudFoundryService
from cloudant.error import CloudantException
class CloudFoundryServiceTests(unittest.TestCase):
    """Tests for extracting Cloudant service credentials from VCAP_SERVICES.

    ``__init__`` builds several JSON fixtures emulating the VCAP_SERVICES
    environment variable that Cloud Foundry injects; the tests then exercise
    CloudFoundryService's parsing, validation and error reporting.
    """

    def __init__(self, *args, **kwargs):
        """Build the VCAP_SERVICES JSON fixtures shared by the tests."""
        super(CloudFoundryServiceTests, self).__init__(*args, **kwargs)
        # Single service entry with both an IAM API key and legacy
        # username/password credentials.
        self._test_vcap_services_single_legacy_credentials_enabled = json.dumps({'cloudantNoSQLDB': [{
            'name': 'Cloudant NoSQL DB 1',  # valid service with legacy creds enabled
            'credentials': {
                'apikey': '1234api',
                'username': 'user-bluemix',
                'password': 'password',
                'port': 443,
                'host': 'user-bluemix.cloudant.com'
            }
        }
        ]})
        # Single service entry with an IAM API key but no password.
        self._test_vcap_services_single = json.dumps({'cloudantNoSQLDB': [{
            'name': 'Cloudant NoSQL DB 1',  # valid service
            'credentials': {
                'apikey': '1234api',
                'username': 'user-bluemix',
                'port': 443,
                'host': 'user-bluemix.cloudant.com'
            }
        }
        ]})
        # Multiple entries covering valid services plus every kind of
        # malformed credentials block the parser must reject.
        self._test_legacy_vcap_services_multiple = json.dumps({'cloudantNoSQLDB': [
            {
                'name': 'Cloudant NoSQL DB 1',  # valid legacy service
                'credentials': {
                    'host': 'example.cloudant.com',
                    'password': 'pa$$w0rd01',
                    'port': 1234,
                    'username': 'example'
                }
            },
            {
                'name': 'Cloudant NoSQL DB 2',  # valid service, default port
                'credentials': {
                    'host': 'example.cloudant.com',
                    'password': 'pa$$w0rd01',
                    'username': 'example'
                }
            },
            {
                'name': 'Cloudant NoSQL DB 3',  # missing host
                'credentials': {
                    'password': 'pa$$w0rd01',
                    'port': 1234,
                    'username': 'example'
                }
            },
            {
                'name': 'Cloudant NoSQL DB 4',  # missing password
                'credentials': {
                    'host': 'example.cloudant.com',
                    'port': 1234,
                    'username': 'example'
                }
            },
            {
                'name': 'Cloudant NoSQL DB 5',  # missing username
                'credentials': {
                    'host': 'example.cloudant.com',
                    'password': 'pa$$w0rd01',
                    'port': 1234,
                }
            },
            {
                'name': 'Cloudant NoSQL DB 6',  # invalid credentials type
                'credentials': [
                    'example.cloudant.com',
                    'pa$$w0rd01',
                    'example'
                ]
            },
            {
                'name': 'Cloudant NoSQL DB 7',  # missing iam api key and creds
                'credentials': {
                    'host': 'example.cloudant.com',
                    'port': 1234,
                    'username': 'example'
                }
            },
            {
                'name': 'Cloudant NoSQL DB 8',  # valid service with IAM api
                'credentials': {
                    'apikey': '1234api',
                    'username': 'example',
                    'host': 'example.cloudant.com',
                    'port': 1234
                }
            },
        ]})
        # Same shape as a valid entry but registered under a dedicated
        # (non-default) service name.
        self._test_vcap_services_dedicated = json.dumps({
            'cloudantNoSQLDB Dedicated': [  # dedicated service name
                {
                    'name': 'Cloudant NoSQL DB 1',  # valid service
                    'credentials': {
                        'host': 'example.cloudant.com',
                        'password': 'pa$$w0rd01',
                        'port': 1234,
                        'username': 'example'
                    }
                }
            ]
        })

    def test_get_vcap_service_legacy_creds_success(self):
        """A single entry with legacy credentials parses successfully."""
        service = CloudFoundryService(
            self._test_vcap_services_single_legacy_credentials_enabled,
            service_name='cloudantNoSQLDB'
        )
        self.assertEqual('Cloudant NoSQL DB 1', service.name)

    def test_get_vcap_service_iam_api_no_creds_success(self):
        """An IAM-only entry parses; password access raises AttributeError."""
        service = CloudFoundryService(
            self._test_vcap_services_single,
            service_name='cloudantNoSQLDB'
        )
        self.assertEqual('Cloudant NoSQL DB 1', service.name)
        self.assertEqual('1234api', service.iam_api_key)
        # No password was supplied, so the attribute is never set.
        with self.assertRaises(AttributeError) as cm:
            service.password
        self.assertEqual("'CloudFoundryService' object has no attribute '_password'", str(cm.exception))

    def test_get_vcap_service_default_success_as_dict(self):
        """VCAP_SERVICES may be passed as an already-decoded dict."""
        service = CloudFoundryService(
            json.loads(self._test_vcap_services_single_legacy_credentials_enabled),
            service_name='cloudantNoSQLDB'
        )
        self.assertEqual('Cloudant NoSQL DB 1', service.name)

    def test_get_vcap_service_default_failure_multiple_services(self):
        """With multiple entries an instance name is required."""
        with self.assertRaises(CloudantException) as cm:
            CloudFoundryService(
                self._test_legacy_vcap_services_multiple,
                service_name='cloudantNoSQLDB'
            )
        self.assertEqual('Missing service in VCAP_SERVICES', str(cm.exception))

    def test_get_vcap_service_instance_host(self):
        """Host is read from the selected instance's credentials."""
        service = CloudFoundryService(
            self._test_legacy_vcap_services_multiple,
            instance_name='Cloudant NoSQL DB 1',
            service_name='cloudantNoSQLDB'
        )
        self.assertEqual('example.cloudant.com', service.host)

    def test_get_vcap_service_instance_password(self):
        """Password is read from the selected instance's credentials."""
        service = CloudFoundryService(
            self._test_legacy_vcap_services_multiple,
            instance_name='Cloudant NoSQL DB 1',
            service_name='cloudantNoSQLDB'
        )
        self.assertEqual('pa$$w0rd01', service.password)

    def test_get_vcap_service_instance_port(self):
        """Port is exposed as a string."""
        service = CloudFoundryService(
            self._test_legacy_vcap_services_multiple,
            instance_name='Cloudant NoSQL DB 1',
            service_name='cloudantNoSQLDB'
        )
        self.assertEqual('1234', service.port)

    def test_get_vcap_service_instance_port_default(self):
        """A missing port falls back to 443."""
        service = CloudFoundryService(
            self._test_legacy_vcap_services_multiple,
            instance_name='Cloudant NoSQL DB 2',
            service_name='cloudantNoSQLDB'
        )
        self.assertEqual('443', service.port)

    def test_get_vcap_service_instance_url(self):
        """URL is assembled as https://host:port."""
        service = CloudFoundryService(
            self._test_legacy_vcap_services_multiple,
            instance_name='Cloudant NoSQL DB 1',
            service_name='cloudantNoSQLDB'
        )
        self.assertEqual('https://example.cloudant.com:1234', service.url)

    def test_get_vcap_service_instance_username(self):
        """Username is read from the selected instance's credentials."""
        service = CloudFoundryService(
            self._test_legacy_vcap_services_multiple,
            instance_name='Cloudant NoSQL DB 1',
            service_name='cloudantNoSQLDB'
        )
        self.assertEqual('example', service.username)

    def test_get_vcap_service_instance_iam_api_key(self):
        """IAM API key is read when present in the credentials."""
        service = CloudFoundryService(
            self._test_legacy_vcap_services_multiple,
            instance_name='Cloudant NoSQL DB 8',
            service_name='cloudantNoSQLDB'
        )
        self.assertEqual('1234api', service.iam_api_key)

    def test_raise_error_for_missing_host(self):
        """A credentials block without a host is rejected."""
        with self.assertRaises(CloudantException):
            CloudFoundryService(
                self._test_legacy_vcap_services_multiple,
                instance_name='Cloudant NoSQL DB 3',
                service_name='cloudantNoSQLDB'
            )

    def test_raise_error_for_missing_password(self):
        """Missing password (with no IAM key) is rejected with a clear message."""
        with self.assertRaises(CloudantException) as cm:
            CloudFoundryService(
                self._test_legacy_vcap_services_multiple,
                instance_name='Cloudant NoSQL DB 4',
                service_name='cloudantNoSQLDB'
            )
        self.assertEqual(
            'Invalid service: IAM API key or username/password credentials are required.',
            str(cm.exception)
        )

    def test_raise_error_for_missing_username(self):
        """Missing username is rejected with a clear message."""
        with self.assertRaises(CloudantException) as cm:
            CloudFoundryService(
                self._test_legacy_vcap_services_multiple,
                instance_name='Cloudant NoSQL DB 5',
                service_name='cloudantNoSQLDB'
            )
        self.assertEqual(
            "Invalid service: 'username' missing",
            str(cm.exception)
        )

    def test_raise_error_for_invalid_credentials_type(self):
        """A non-dict credentials block is rejected."""
        with self.assertRaises(CloudantException) as cm:
            CloudFoundryService(
                self._test_legacy_vcap_services_multiple,
                instance_name='Cloudant NoSQL DB 6',
                service_name='cloudantNoSQLDB'
            )
        self.assertEqual(
            'Failed to decode VCAP_SERVICES service credentials',
            str(cm.exception)
        )

    def test_raise_error_for_missing_iam_api_key_and_credentials(self):
        """An entry with neither IAM key nor password is rejected."""
        with self.assertRaises(CloudantException) as cm:
            CloudFoundryService(
                self._test_legacy_vcap_services_multiple,
                instance_name='Cloudant NoSQL DB 7',
                service_name='cloudantNoSQLDB'
            )
        self.assertEqual(
            'Invalid service: IAM API key or username/password credentials are required.',
            str(cm.exception)
        )

    def test_raise_error_for_missing_service(self):
        """Asking for an instance name that does not exist fails."""
        with self.assertRaises(CloudantException) as cm:
            CloudFoundryService(
                self._test_legacy_vcap_services_multiple,
                instance_name='Cloudant NoSQL DB 9',
                service_name='cloudantNoSQLDB'
            )
        self.assertEqual('Missing service in VCAP_SERVICES', str(cm.exception))

    def test_raise_error_for_invalid_vcap(self):
        """Malformed VCAP_SERVICES JSON fails with a decode error."""
        with self.assertRaises(CloudantException) as cm:
            CloudFoundryService('{', 'Cloudant NoSQL DB 1')  # invalid JSON
        self.assertEqual('Failed to decode VCAP_SERVICES JSON', str(cm.exception))

    def test_get_vcap_service_with_dedicated_service_name_success(self):
        """A non-default (dedicated) service name is honoured."""
        service = CloudFoundryService(
            self._test_vcap_services_dedicated,
            service_name='cloudantNoSQLDB Dedicated'
        )
        self.assertEqual('Cloudant NoSQL DB 1', service.name)
| |
############
### Author: Kenneth Miller
### Email Address: ken@erdosmiller.com
### Date: Wed Sep 9 21:02:54 2009
### Function: Basecamp API Wrapper!
############
import pdb
import base64
import urllib2
import datetime
from elementtree.ElementTree import fromstring, tostring
import elementtree.ElementTree as ET
from config import *
# To-do list filter states accepted by the Basecamp API.
ALL = 'all'
PENDING = 'pending'
FINISHED = 'finished'
# Valid values for the `filter` query parameter of get_all_lists().
tdls_status = [ALL, PENDING, FINISHED]
# NOTE(review): this was presumably meant as a two-way mapping between
# symbolic constants and API strings, but since ALL == 'all' (etc.) the
# forward and reverse keys collide and the dict collapses to a 3-entry
# identity map — confirm whether it is still needed.
tdl_status_lookup = {'all': ALL,
                     'pending': PENDING,
                     'finished': FINISHED,
                     ALL: 'all',
                     PENDING: 'pending',
                     FINISHED: 'finished',
                     }
# This is where we define the entire (read) API, in a dict!
# Each key becomes a dynamically-generated method on Basecamp (see
# Basecamp.__getattr__); the value is the URL template, with %d slots
# filled from the call's positional arguments.
url_mapping = {'get_projects': '/projects.xml',  # projects
               'get_project': '/projects/%d.xml',
               'who_am_i': '/me.xml',  # people
               'all_people': '/people.xml',
               'people_in_project': '/projects/%d/people.xml',
               'people_in_company': '/companies/%d/people.xml',
               'get_person': '/people/%d.xml',
               'companies': '/companies.xml',  # companies
               'get_companies_in_project': '/projects/%d/companies.xml',
               'get_company': '/companies/%d.xml',
               'get_categories': '/projects/%d/categories.xml',  # categories #this is not complete
               'get_category': '/categories/%d.xml',
               'get_messages': '/projects/%d/posts.xml',  # messages
               'get_message': '/posts/%d.xml',
               'get_message_by_category': '/projects/%d/cat/%d/posts.xml',
               'get_archived_messages': '/projects/%d/posts/archive.xml',
               'get_archived_messages_by_category': '/projects/%d/cat/%d/posts/archive.xml',
               'new_message': '/projects/%d/posts/new.xml',
               'edit_message': '/posts/%d/edit.xml',
               #'get_project_time':'/projects/%d/time_entries.xml', #required special implementation
               # TIME ENTRIES
               'get_all_todo_entries': '/todo_items/%d/time_entries.xml',
               # TODO Lists
               'get_entry': '/time_entries/%d.xml',
               'get_all_lists': '/projects/%d/todo_lists.xml?filter=%s',
               'get_list': '/todo_lists/%d.xml',
               # ToDo List Items
               'get_all_items': '/todo_lists/%d/todo_items.xml',
               'new_item': '/todo_lists/%d/todo_items/new.xml',
               }
class pythonic_objectify(object):
    """Recursively wrap an ElementTree element as a Python object.

    Each child element's (typed) text value is set as an attribute on its
    parent wrapper, with ``-`` in tag names replaced by ``_``.  The wrapper
    also behaves as a sequence of its child wrappers.

    :param tree: an XML string or an ElementTree Element
    :param parent: the enclosing wrapper (None for the document root)
    """

    def __init__(self, tree, parent=None):
        self._parent = parent
        if isinstance(tree, str):
            self._tree = fromstring(tree)
        else:
            self._tree = tree
        # Recursively wrap the children; each child constructor sets an
        # attribute on `self` as a side effect.
        self._children = [pythonic_objectify(child, self) for child in self._tree]
        # Assigning attributes to the parent
        if parent is not None:
            # Make the tag name a valid attribute name - don't hate me!
            tag = self._tree.tag
            tag = tag.replace('-', '_')
            # The element's text content is the attribute value
            value = self._tree.text
            # Known type conversion, driven by the `type` XML attribute
            if 'type' in self._tree.attrib and value is not None:
                kind = self._tree.attrib['type']
                if kind == 'integer':
                    value = int(value)
                elif kind == 'float':
                    value = float(value)
                elif kind == 'boolean':
                    if value == 'false':
                        value = False
                    elif value == 'true':
                        value = True
                    else:
                        raise ValueError("I don't know how to handle this!")
                elif kind == 'date':
                    year, month, day = value.split('-')
                    value = datetime.datetime(int(year), int(month), int(day))
            # Apply it to its parent
            setattr(self._parent, tag, value)

    def __repr__(self):
        return '<%s>' % self._tree.tag

    def tostring(self):
        """Serialize the wrapped element back to XML."""
        return tostring(self._tree)

    def __len__(self):
        return len(self._children)

    def __iter__(self):
        return self._children.__iter__()

    def __getitem__(self, index):
        # Integer indices address the child wrappers; any other index
        # (e.g. a tag-name string) falls through to attribute lookup.
        # Fix: list.__getitem__ raises TypeError (not AttributeError) for
        # non-integer indices, so the fallback never fired before.
        try:
            return self._children[index]
        except (TypeError, AttributeError):
            return getattr(self, index)

    def get_children(self):
        """Return the list of child wrappers."""
        return self._children

    # Note: a second, identical __iter__ definition was removed; it merely
    # shadowed the one above.
    children = property(get_children)
    data = property(get_children)
class Basecamp(object):
    """Thin wrapper around the Basecamp Classic XML API.

    Read endpoints are generated dynamically from ``url_mapping`` via
    ``__getattr__``; write operations have explicit methods.  All requests
    use HTTP Basic auth.
    """

    def __init__(self, baseURL, username, password):
        """Basic setup."""
        self.baseURL = baseURL
        # Normalise away a trailing slash so path joins don't double up.
        if self.baseURL[-1] == '/':
            self.baseURL = self.baseURL[:-1]
        # NOTE(review): `logger` is not defined in this module; presumably
        # it comes from `from config import *` — confirm.
        logger.debug('Base URL: %s' % self.baseURL)
        self.opener = urllib2.build_opener()
        self.auth_string = '%s:%s' % (username, password)
        # NOTE(review): base64.encodestring is the Python 2 spelling
        # (removed in Python 3.9); this module targets Python 2 (urllib2).
        self.encoded_auth_string = base64.encodestring(self.auth_string)
        self.encoded_auth_string = self.encoded_auth_string.replace('\n', '')
        self.headers = [
            ('Content-Type', 'application/xml'),
            ('Accept', 'application/xml'),
            ('Authorization', 'Basic %s' % self.encoded_auth_string), ]
        self.opener.addheaders = self.headers

    def _request(self, path, data=None):
        """Make an http request."""
        # Serialise an Element body to a string if one was passed.
        if hasattr(data, 'findall'):
            data = ET.tostring(data)
        logger.debug('Requesting URL: %s' % self.baseURL + path)
        # Headers are re-added per request even though the opener already
        # carries them via addheaders.
        req = urllib2.Request(url=self.baseURL + path, data=data)
        req.add_header('Content-Type', 'application/xml')
        req.add_header('Accept', 'application/xml')
        req.add_header('Authorization', 'Basic %s' % self.encoded_auth_string)
        response = self.opener.open(req)
        return response

    def __getattr__(self, index):
        """Resolve read-API method names from url_mapping on the fly.

        Returns a function that formats the URL template with its
        positional args and objectifies the XML response.
        """
        if index in url_mapping.keys():
            def temp_func(*args):
                #print self._request(url_mapping[index] % args)
                return pythonic_objectify(self._request(url_mapping[index] % args).read())
            return temp_func
        else:
            return self.__dict__[index]

    def people_id_map(self, company_id=None):
        """Return a dictionary for everyone in BaseCamp."""
        keys = {}
        if company_id is None:
            people = self.all_people()
        else:
            people = self.people_in_company(company_id)
        for person in people:
            keys[person.id] = person.first_name + ' ' + person.last_name
        return keys

    def project_id_map(self):
        """Return a dictionary for all the projects in BaseCamp."""
        keys = {}
        for project in self.get_projects():
            keys[project.id] = project.name
        return keys

    def old_create_todo_item(self, list_id, content, party_id=None, notify=False):
        """
        This call lets you add an item to an existing list. The item is added
        to the bottom of the list. If a person is responsible for the item,
        give their id as the party_id value. If a company is responsible,
        prefix their company id with a 'c' and use that as the party_id value.
        If the item has a person as the responsible party, you can use the
        notify key to indicate whether an email should be sent to that person
        to tell them about the assignment.
        """
        path = '/todos/create_item/%d' % list_id
        req = ET.Element('request')
        ET.SubElement(req, 'content').text = str(content)
        if party_id is not None:
            ET.SubElement(req, 'responsible-party').text = str(party_id)
            ET.SubElement(req, 'notify').text = str(bool(notify)).lower()
        return self._request(path, req)

    def create_todo_item(self, list_id, content, party_id=None, notify=False):
        """Create a to-do item via the newer XML endpoint.

        Returns the new item's id on success (HTTP 201), False otherwise.
        """
        path = '/todo_lists/%d/todo_items.xml' % list_id
        req = ET.Element('todo-item')
        ET.SubElement(req, 'content').text = str(content)
        # Explicit nil due date
        due = ET.SubElement(req, 'due-at')
        due.set('nil', str(True).lower())
        due.set('type', 'datetime')
        notify_elem = ET.SubElement(req, 'notify')
        notify_elem.text = str(notify).lower()
        notify_elem.set('type', 'boolean')
        # NOTE(review): this empty 'responsible_party' element (underscore)
        # looks unintentional next to the 'responsible-party' element added
        # below — confirm against the API.
        party = ET.SubElement(req, 'responsible_party')
        if party_id is not None:
            ET.SubElement(req, 'responsible-party').text = str(party_id)
            ET.SubElement(req, 'notify').text = str(bool(notify)).lower()
        #print self._request(path,req)
        #pdb.set_trace()
        response = self._request(path, req)
        if response.code == 201:
            # The Location header ends with the new item's id.
            return int(response.headers['location'].split('/')[-1])
        else:
            return False
        #return self.old_create_todo_item(list_id,content,party_id,notify)

    def get_project_time(self, project_id, page=1, return_all=True):
        """This method will return all time entries, if you'd like it to return the last 50 set return_all to false and select the page."""
        #print "Retrieving Page: %d" % page
        time_entries = []
        path = '/projects/%d/time_entries.xml?page=%d' % (project_id, page)
        req = urllib2.Request(url=self.baseURL + path, data=None)
        response = self.opener.open(req)
        data = response.read()
        objects = pythonic_objectify(data)
        # Pagination info comes back in custom response headers.
        pages = int(response.headers['x-pages'])
        page = int(response.headers['x-page'])
        time_entries.extend(objects.data)
        # Recurse to fetch the remaining pages.
        if page < pages:
            time_entries.extend(self.get_project_time(project_id, page + 1, return_all))
        return time_entries
# Smoke tests run against a live Basecamp account; connection settings and
# fixture ids (bc_url, bc_user, bc_pwd, bc_primary_*) come from
# test_settings.
if __name__ == '__main__':
    import unittest
    from test_settings import *

    class APITests(unittest.TestCase):
        def setUp(self):
            # Fresh authenticated connection per test.
            self.conn = Basecamp(bc_url, bc_user, bc_pwd)

        def tearDown(self):
            pass

        def testGetCompany(self):
            company = self.conn.get_company(bc_primary_company_id)
            assert company.id == bc_primary_company_id

        def testGetProjects(self):
            projects = self.conn.get_projects()
            assert projects[0].id == bc_primary_project_id

        def testGetTDLS(self):
            tdls = self.conn.get_all_lists(bc_primary_project_id, ALL)
            assert bc_primary_tdl_id in [tdl.id for tdl in tdls]

        def testCreateToDoItem(self):
            new_id = self.conn.create_todo_item(bc_primary_tdl_id, 'Test From python!')
            assert new_id > 0

        def testGetNewToDoListItem(self):
            # NOTE(review): no assertion here — this only checks the call
            # does not raise; confirm whether a check was intended.
            t = self.conn.new_item(bc_primary_tdl_id)

    unittest.main()
| |
#!/usr/bin/env python3
import os
import os.path
from subprocess import Popen, PIPE
import sys
from threading import Thread
from urllib.parse import urlparse
from urllib.request import urlretrieve
import venv
# for openrc setup:
import shutil
from glob import glob
# run pip from the module
from pip.commands import commands_dict
from pip import parseopts
from pip import check_isolated, deprecation, locale
OPENRC = "openrc" # default filename
class ExtendedEnvBuilder(venv.EnvBuilder):
    """
    This builder installs setuptools and pip so that you can pip or
    easy_install other packages into the created virtual environment.

    :param nodist: If True, setuptools and pip are not installed into the
                   created virtual environment.
    :param nopip: If True, pip is not installed into the created
                  virtual environment.
    :param progress: If setuptools or pip are installed, the progress of the
                     installation can be monitored by passing a progress
                     callable. If specified, it is called with two
                     arguments: a string indicating some progress, and a
                     context indicating where the string is coming from.
                     The context argument can have one of three values:
                     'main', indicating that it is called from virtualize()
                     itself, and 'stdout' and 'stderr', which are obtained
                     by reading lines from the output streams of a subprocess
                     which is used to install the app.

                     If a callable is not specified, default progress
                     information is output to sys.stderr.
    """

    def __init__(self, *args, **kwargs):
        # Pop our extra options before delegating to venv.EnvBuilder.
        self.nodist = kwargs.pop('nodist', False)
        self.nopip = kwargs.pop('nopip', False)
        self.progress = kwargs.pop('progress', None)
        self.verbose = kwargs.pop('verbose', False)
        # osic specific:
        self.requirements = kwargs.pop('requirements', None)
        self.openrc = kwargs.pop('openrc', None)
        if self.openrc:
            # check (early) that it is accessible:
            if not os.access(self.openrc, os.R_OK):
                raise Warning('Couldn\'t find "{}" '.format(self.openrc) +
                              'Either have a default "openrc.sh" file '
                              'in "~/.config/openstack" or specify a path '
                              'for one with the --openrc option')
        super().__init__(*args, **kwargs)

    def post_setup(self, context):
        """
        Set up any packages which need to be pre-installed into the
        virtual environment being created.

        :param context: The information for the virtual environment
                        creation request being processed.
        """
        os.environ['VIRTUAL_ENV'] = context.env_dir
        if not self.nodist:
            self.install_setuptools(context)
        # Can't install pip without setuptools
        if not self.nopip and not self.nodist:
            self.install_pip(context)
        ## add openrc to activation;
        if self.openrc:  # options set to guarentee this is set
            # copy self.openrc to bin directory
            openrc_dest = os.path.join(context.env_dir, 'bin', OPENRC)
            shutil.copyfile(self.openrc, openrc_dest)
            # append "source openrc" to activate scripts
            for fn in glob(os.path.join(context.env_dir, 'bin', 'activate*')):
                # fish uses '.' rather than 'source'
                cmd = '.' if fn[-4:] == 'fish' else 'source'
                print("updating {}: {} {} ...".format(fn, cmd, OPENRC), file=sys.stderr)
                with open(fn, 'a') as f:
                    f.write('{} {}\n'.format(cmd, OPENRC))
        ## add pip-installation of openstack (or update, in case it's there)
        # setup pip for use, as pip.main() (mostly) does:
        # NOTE(review): deprecation/locale here are pip-internal modules
        # (pre-pip-10 layout); this breaks on modern pip — confirm pinned
        # pip version.
        deprecation.install_warning_logger()
        locale.setlocale(locale.LC_ALL, '')
        # --prefix has to be told to ignore existing libraries in path;
        self.pip("install -I --prefix {} python-openstackclient"
                 .format(context.env_dir))
        '''
        # -t option doesn't work on 2.7 or 3.5 - but does on 3.6;
        self.pip("install -t {} -U python-openstackclient"
                 .format(os.path.join(context.env_dir, "lib",
                                      context.python_exe, "site_packages")))
        '''
        ## add any requirements options installations too;
        if self.requirements:
            self.pip("install -I --prefix {} -r {}"
                     .format(context.env_dir, self.requirements))

    def pip(self, args):
        """Run a pip command in-process (pip-internal API, pre-pip-10)."""
        cmd_name, cmd_args = parseopts(args.split())
        command = commands_dict[cmd_name](isolated=check_isolated(cmd_args))
        # NOTE(review): the return status is captured but never checked or
        # returned — failures are silently ignored; confirm intent.
        rtn = command.main(cmd_args)

    def reader(self, stream, context):
        """
        Read lines from a subprocess' output stream and either pass to a progress
        callable (if specified) or write progress information to sys.stderr.
        """
        progress = self.progress
        while True:
            s = stream.readline()
            if not s:
                break
            if progress is not None:
                progress(s, context)
            else:
                # Without a progress callable, print dots (or the raw
                # output when verbose).
                if not self.verbose:
                    sys.stderr.write('.')
                else:
                    sys.stderr.write(s.decode('utf-8'))
                sys.stderr.flush()
        stream.close()

    def install_script(self, context, name, url):
        """Download a bootstrap script into the env and run it there.

        stdout/stderr are pumped on separate threads to avoid pipe
        deadlock while the child runs.
        """
        _, _, path, _, _, _ = urlparse(url)
        fn = os.path.split(path)[-1]
        binpath = context.bin_path
        distpath = os.path.join(binpath, fn)
        # Download script into the virtual environment's binaries folder
        urlretrieve(url, distpath)
        progress = self.progress
        if self.verbose:
            term = '\n'
        else:
            term = ''
        if progress is not None:
            progress('Installing %s ...%s' % (name, term), 'main')
        else:
            sys.stderr.write('Installing %s ...%s' % (name, term))
            sys.stderr.flush()
        # Install in the virtual environment
        args = [context.env_exe, fn]
        p = Popen(args, stdout=PIPE, stderr=PIPE, cwd=binpath)
        t1 = Thread(target=self.reader, args=(p.stdout, 'stdout'))
        t1.start()
        t2 = Thread(target=self.reader, args=(p.stderr, 'stderr'))
        t2.start()
        p.wait()
        t1.join()
        t2.join()
        if progress is not None:
            progress('done.', 'main')
        else:
            sys.stderr.write('done.\n')
        # Clean up - no longer needed
        os.unlink(distpath)

    def install_setuptools(self, context):
        """
        Install setuptools in the virtual environment.

        :param context: The information for the virtual environment
                        creation request being processed.
        """
        url = 'https://bootstrap.pypa.io/ez_setup.py'
        self.install_script(context, 'setuptools', url)
        # clear up the setuptools archive which gets downloaded
        pred = lambda o: o.startswith('setuptools-') and o.endswith('.tar.gz')
        files = filter(pred, os.listdir(context.bin_path))
        for f in files:
            f = os.path.join(context.bin_path, f)
            os.unlink(f)

    def install_pip(self, context):
        """
        Install pip in the virtual environment.

        :param context: The information for the virtual environment
                        creation request being processed.
        """
        url = "https://bootstrap.pypa.io/get-pip.py"
        self.install_script(context, 'pip', url)
def main(args=None):
    """Parse command-line options and build the requested environment(s).

    :param args: argument list to parse (defaults to sys.argv[1:]).
    :raises ValueError: on an unsupported Python, or when --upgrade and
                        --clear are combined.
    """
    # Guard clause: venv needs Python 3.3+ (sys.base_prefix appeared then).
    is_supported = sys.version_info >= (3, 3) and hasattr(sys, 'base_prefix')
    if not is_supported:
        raise ValueError('This script is only for use with '
                         'Python 3.3 or later')

    import argparse

    arg_parser = argparse.ArgumentParser(
        prog=__name__,
        description='Creates virtual Python environments in one or '
                    'more target directories.')
    arg_parser.add_argument(
        'dirs', metavar='ENV_DIR', nargs='+',
        help='A directory in which to create the virtual environment.')
    arg_parser.add_argument(
        '--no-setuptools', default=False, action='store_true', dest='nodist',
        help="Don't install setuptools or pip in the virtual environment.")
    arg_parser.add_argument(
        '--no-pip', default=False, action='store_true', dest='nopip',
        help="Don't install pip in the virtual environment.")
    arg_parser.add_argument(
        '--system-site-packages', default=False, action='store_true',
        dest='system_site',
        help='Give the virtual environment access to the '
             'system site-packages dir.')
    ## osic-venv:
    # if option not specified,
    #    try ~/.config/openstack/openrc.sh
    # if option specified w/o filename,
    #    try ./openrc
    arg_parser.add_argument(
        '-O', '--openrc', nargs='?',
        const=os.path.join('.', OPENRC),
        default=os.path.expanduser('~/.config/openstack/openrc.sh'),
        help='path to OpenStack openrc file, ("./' + OPENRC + '" by default); '
             '"~/.config/openstack/openrc.sh" '
             'if option not specified')
    arg_parser.add_argument(
        '-r', '--requirements', nargs='?',  # type=argparse.FileType('r'),
        const='requirements.txt',
        help='pip requirements file for installation '
             '(default: "requirements.txt")')
    # Symlinks are the default everywhere except Windows.
    default_symlinks = os.name != 'nt'
    arg_parser.add_argument(
        '--symlinks', default=default_symlinks, action='store_true',
        dest='symlinks',
        help='Try to use symlinks rather than copies, '
             'when symlinks are not the default for '
             'the platform.')
    arg_parser.add_argument(
        '--clear', default=False, action='store_true', dest='clear',
        help='Delete the contents of the virtual environment '
             'directory if it already exists, before virtual '
             'environment creation.')
    arg_parser.add_argument(
        '--upgrade', default=False, action='store_true', dest='upgrade',
        help='Upgrade the virtual environment directory to '
             'use this version of Python, assuming Python '
             'has been upgraded in-place.')
    arg_parser.add_argument(
        '--verbose', default=False, action='store_true', dest='verbose',
        help='Display the output from the scripts which '
             'install setuptools and pip.')

    opts = arg_parser.parse_args(args)
    if opts.upgrade and opts.clear:
        raise ValueError('you cannot supply --upgrade and --clear together.')

    env_builder = ExtendedEnvBuilder(
        openrc=opts.openrc,
        requirements=opts.requirements,
        system_site_packages=opts.system_site,
        clear=opts.clear,
        symlinks=opts.symlinks,
        upgrade=opts.upgrade,
        nodist=opts.nodist,
        nopip=opts.nopip,
        verbose=opts.verbose)
    for env_dir in opts.dirs:
        env_builder.create(env_dir)
if __name__ == '__main__':
    # Exit 0 only if main() completes without raising.
    exit_code = 1
    try:
        main()
        exit_code = 0
    except Exception as exc:
        print('Error: %s' % exc, file=sys.stderr)
    sys.exit(exit_code)
| |
import datetime
import logging
import os
import pickle
import time
import pytz
from urllib.request import urlopen
from urllib.parse import urlencode
from bs4 import BeautifulSoup
from enum import Enum
import twitter
from twitter.error import TwitterError
class Line:
    """A metro line: its human-readable name and the emoji used for it."""

    def __init__(self, name, emoji):
        # Both values are stored verbatim; no validation is performed.
        self.name = name
        self.emoji = emoji
class MLStatus:
    """Snapshot of one line's state: a status message plus an ok flag."""

    def __init__(self, message, ok):
        # `ok` is truthy when the line has no reported disturbance.
        self.message = message
        self.ok = ok
class MLBot:
    """Bot that watches Metro de Lisboa line status and announces changes
    on Twitter and Telegram.

    The last seen status per line is pickled to ``state_file`` between runs
    so only transitions are published.
    """

    # Timezone used when rendering the [HH:MM] tweet timestamp.
    TIMEZONE = 'Europe/Lisbon'
    STATUS_URL = 'http://app.metrolisboa.pt/status/estado_Linhas.php'
    # Monitored lines; each carries the emoji associated with it.
    LINES = [
        Line('Amarela', '\U0001F34B'),
        Line('Vermelha', '\U000FE051'),
        Line('Azul', '\U0001F433'),
        Line('Verde', '\U0001F34F')
    ]
    STRINGS = {
        "LINE": "Linha",
        "UP_EMOJI": "\U0001F535",
        "DOWN_EMOJI": "\U0001F534"
    }

    def __init__(self, state_file, twitter_config, telegram_config, pretend=False):
        """
        Initializes a bot instance
        :param state_file: File where state will be stored
        :param twitter_config: Twitter configuration
        :param telegram_config: Telegram configuration
        :param pretend: If True, skip posting to Twitter (dry run)
        """
        self.state_file = state_file
        self.pretend = pretend
        if not self.pretend:
            self.twitter = twitter.Api(**twitter_config)
            self.twitter.VerifyCredentials()
        self.telegram_config = telegram_config
        self.tz = pytz.timezone(self.TIMEZONE)
        self.log = logging.getLogger('mlbot')
        self.log.debug('Loading state from file')
        try:
            with open(self.state_file, 'rb') as f:
                self.status = pickle.load(f)
        except FileNotFoundError:
            self.log.warning('File not found, first run?')
            self.status = {}

    def check(self):
        """
        Check the status for changes, publish one message per line whose
        state changed, then persist the new state.
        """
        status = self.get_status()
        # check for changes
        for line, current in status.items():
            last = self.status.get(line)
            # BUG FIX: was `last == None`; use identity comparison.
            if last is None:
                # First time this line is seen: always announce.
                self.state_change(line, current)
            elif ((current.ok != last.ok)
                  or (not current.ok and current.message != last.message)):
                # Up/down flip, or the outage message itself changed.
                self.state_change(line, current)
        # save state
        self.log.debug('Saving state to file')
        self.status = status
        with open(self.state_file, 'wb') as f:
            pickle.dump(self.status, f)

    def get_status(self):
        """
        Downloads and parses status from the Metro website
        :return: dict mapping line name -> MLStatus
        """
        self.log.debug('Downloading HTML')
        html = urlopen(self.STATUS_URL)
        self.log.debug('Parsing HTML')
        soup = BeautifulSoup(html, 'html.parser')
        status = {}
        for line in self.LINES:
            # NOTE(review): assumes the page always contains a
            # td.linha_<name> li element per line; an IndexError here means
            # the site layout changed.
            el = soup.select('td.linha_%s li' % line.name.lower())[0]
            message = el.text
            ok = 'semperturbacao' in el.parent.get('class', [])
            status[line.name] = MLStatus(
                message=message,
                ok=ok)
        return status

    def state_change(self, line, status):
        """
        A state change ocurred.
        Builds a message and publishes it to Twitter and Telegram.
        :param line: Metro line
        :param status: MLStatus object
        """
        self.log.debug('State for line %s changed: %s', line, status.message)
        emoji = '\u2705' if status.ok else '\u26A0\uFE0F'
        # Capitalize the first letter without touching the rest.
        message = status.message[0].upper() + status.message[1:]
        # add full stop if there's none
        if not message.endswith('.'):
            message = message + '.'
        self.publish("%s %s %s: %s" % (
            emoji,
            self.STRINGS["LINE"],
            line,
            message))

    def publish(self, message):
        """Publish a message to every configured channel."""
        self.log.info('Publishing: %s', message)
        self.publish_twitter(message)
        self.publish_telegram(message)

    def publish_telegram(self, message):
        """
        Publishes a message to telegram.
        :param message: Message to publish
        """
        key = self.telegram_config['api_key']
        for dst in self.telegram_config['destination']:
            data = urlencode(dict(
                chat_id=dst,
                parse_mode='HTML',
                text=message
            )).encode('utf-8')
            # BUG FIX: close the HTTP response instead of binding it to an
            # unused variable and leaking the socket until GC.
            with urlopen(f'https://api.telegram.org/bot{key}/sendMessage',
                         data=data):
                pass

    def publish_twitter(self, message):
        """
        Publishes a message to twitter, splitting it into multiple tweets
        when needed.
        :param message: Message to publish
        """
        # add a timestamp to avoid duplicates
        # BUG FIX: datetime.now() returns host-local time; labelling it as
        # UTC via pytz.utc.localize() produced wrong timestamps on hosts not
        # running in UTC. Start from an explicit UTC timestamp instead.
        now = datetime.datetime.utcnow()
        now_tz = pytz.utc.localize(now).astimezone(self.tz)
        timestamp = now_tz.strftime("[%H:%M]")
        # split into tweets, each prefixed with the timestamp
        parts = []
        words = message.split(" ")
        while len(words) > 0:
            part = timestamp
            while len(words) > 0:
                joined = part + " " + words[0]
                if len(joined) < 270:
                    part = joined
                    words.pop(0)
                elif part == timestamp:
                    # BUG FIX: a single word longer than the limit was never
                    # consumed, looping forever; take it anyway so the loop
                    # always makes progress.
                    part = joined
                    words.pop(0)
                    break
                else:
                    break
            parts.append(part)
        for part in parts:
            try:
                if not self.pretend:
                    self.twitter.PostUpdate(part)
            except TwitterError as e:
                # e.message is a list of error dicts (python-twitter API).
                error = e.message
                if (len(error) > 0 and
                        error[0].get('code') == 187):
                    # Duplicate status? Retry with an extra dot appended.
                    self.publish_twitter(message + '.')
                else:
                    raise e
if __name__ == '__main__':
    # set up logger
    logging.basicConfig(
        format="%(asctime)-15s %(levelname)-9s %(message)s")
    debug = os.environ.get('BOT_DEBUG', '0') == '1'
    pretend = os.environ.get('BOT_PRETEND', '0') == '1'
    log = logging.getLogger('mlbot')
    log.setLevel(logging.DEBUG if debug else logging.INFO)
    try:
        state_file = os.environ['BOT_STATE_FILE']
        bot = MLBot(
            state_file,
            twitter_config=dict(
                consumer_key=os.environ['TWITTER_CONSUMER_KEY'],
                consumer_secret=os.environ['TWITTER_CONSUMER_SECRET'],
                access_token_key=os.environ['TWITTER_ACCESS_TOKEN_KEY'],
                access_token_secret=os.environ['TWITTER_ACCESS_TOKEN_SECRET']
            ) if not pretend else {},
            telegram_config=dict(
                api_key=os.environ['TELEGRAM_KEY'],
                destination=os.environ['TELEGRAM_DESTINATION'].split(',')
            ),
            pretend=pretend)
    except KeyError as e:
        log.critical('Environment variable %s not found.' % e)
        # BUG FIX: execution previously fell through to bot.check() with
        # `bot` undefined, raising NameError; abort with a non-zero exit.
        raise SystemExit(1)
    bot.check()
| |
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from google.cloud import bigquery
from google.api_core.exceptions import NotFound, BadRequest
class BigQueryTableResizer(object):
    """
    BigQueryTableResizer is a class for copying a BigQuery table into itself
    until it reaches the desired size. This is useful when trying to quickly
    populate large tables in BigQuery. This class assumes that source and
    destination tables are in the same project.
    Attributes:
        location (str): The location of the BigQuery tables ['US' or 'EU'].
        project (str): The GCP project id for these tables.
        client (bigquery.client.Client): A client for making BigQuery API
            requests.
        source_table (bigquery.table.Table): The source data to be copied. This
            is constructred from the source_dataset and source_table args.
        dest_table_ref (bigquery.table.TableReference): This is the table that
            you want to be target_gb or target_rows. It is a TableReference
            not a Table because it may not exist prior to this call.
        target_rows (int): The desired number of rows in the destination table.
        target_gb (int): The desired number of GB for the destination table.
            Note that this will be cast to a number of rows and will only be
            used if that number is greater than target_rows.
    Methods:
        resize(): Performs the duplication according to this DataResizer's
            attributes.
    """
    def __init__(self,
                 project=None,
                 source_dataset=None,
                 destination_dataset=None,
                 source_table=None,
                 destination_table=None,
                 target_rows=1000000,
                 target_gb=None,
                 location='US'):
        """
        Constructor for DataDuplicator object.
        Args:
            location (str): The location of the BigQuery tables ['US' or 'EU'].
            project (str): The GCP project id for these tables.
            source_dataset (str): The BigQuery dataset ID containing the source
                data table to be copied.
            source_table (str): The BigQuery table ID containing the source
                data table to be copied.
            target_rows (int): The desired number of rows in the destination
                table. Either target_rows or target_gb is required.
            target_gb (int): The desired number of GB for the destination
                table. Note, that this will be cast to a number of rows and
                will only be used if that number is greater than target_rows.
            destination_dataset (str): The BigQuery dataset to populate the
                table that is the result of the copy operations.
                This is optional; chosing not to specify the destination
                dataset and table will result in an inplace copy.
            destination_table (str): The BigQuery table ID that you want to be
                target_gb or target_rows. This can be the same as source_table.
                It is a TableReference not a Table because it may not
                exist prior to this call.
        """
        self.location = location
        # Validate project argument.
        try:
            self.client = bigquery.Client(project=project)
            # This will error out if BigQuery not activated for this project.
            list(self.client.list_datasets())
            self.project = project
        except BadRequest:
            # BUG FIX: argparse.ArgumentError requires an `argument` first
            # parameter; calling it with only a message raised TypeError
            # instead of the intended error. Pass None for the argument.
            raise argparse.ArgumentError(
                None, "BigQuery is not setup in project: {}".format(project))
        source_table_ref = self.client.dataset(source_dataset).table(
            source_table)
        try:  # Validate source_table
            self.source_table = self.client.get_table(source_table_ref)
        except NotFound:
            # BUG FIX: same ArgumentError signature fix as above.
            raise argparse.ArgumentError(
                None, "Source table {} does not exist in {}.{}".format(
                    source_table, project, source_dataset))
        if destination_dataset and destination_table:
            self.dest_table_ref = \
                self.client.dataset(destination_dataset).table(
                    destination_table
                )
        else:  # Default to an inplace copy.
            self.dest_table_ref = self.source_table.reference
        if target_gb:
            # NOTE(review): assumes the source table is non-empty; an empty
            # table (num_bytes == 0) would raise ZeroDivisionError here.
            target_bytes = target_gb * 1024**3
            increase_pct = target_bytes / self.source_table.num_bytes
            self.target_rows = int(self.source_table.num_rows * increase_pct)
        else:
            self.target_rows = target_rows

    def resize(self):
        """
        This is the execute function of this class. It copies the source table
        into the destination table and then copies the destination table into
        itself until it reaches or exceeds the target_rows.
        """
        # How many rows short of our target are we?
        gap = self.target_rows - self.source_table.num_rows
        while gap > 0:  # Copy until we've reached or exceeded target_rows
            # API requests to get the latest table info.
            source_table = self.client.get_table(self.source_table)
            try:
                dest_table = self.client.get_table(self.dest_table_ref)
            except NotFound:
                dest_table = self.client.create_table(
                    bigquery.Table(self.dest_table_ref))
            # Get the latest size of the dest_table.
            # Note that for the first call these properties are None.
            dest_rows = dest_table.num_rows
            dest_bytes = dest_table.num_bytes
            dest_gb = dest_bytes / float(1024**3)
            # Recalculate the gap.
            if dest_rows:
                gap = self.target_rows - dest_rows
            else:
                gap = self.target_rows
            print(('{} rows in table of size {} GB, with a target of {}, '
                   'leaving a gap of {}'.format(dest_rows, round(dest_gb, 2),
                                                self.target_rows, gap)))
            # Greedily copy the largest of dest_table and source_table into
            # dest_table without going over the target rows. The last query
            # will be a subset of source_table via a limit query.
            if gap < source_table.num_rows:
                # This will be the last copy operation if target_rows is
                # not a power of 2 times the number of rows originally in the
                # source table. It is not a full copy.
                job_config = bigquery.QueryJobConfig()
                # Set the destination table
                job_config.destination = self.dest_table_ref
                job_config.write_disposition = 'WRITE_APPEND'
                job_config.allow_large_results = True
                sql = """
                    SELECT *
                    FROM `{}.{}.{}`
                    LIMIT {}
                """.format(self.project, self.source_table.dataset_id,
                           self.source_table.table_id, gap)
                # API request to BigQuery with query and config defined above.
                query_job = self.client.query(
                    sql,
                    # Location must match that of the dataset(s) referenced in
                    # the query and of the destination table.
                    location=self.location,
                    job_config=job_config)
                # Wait for query_job to finish.
                query_job.result()
            else:
                if source_table.num_rows < dest_table.num_rows < gap:
                    use_as_source_table = self.dest_table_ref
                else:  # source_table.num_rows < gap < dest_table.num_rows
                    use_as_source_table = self.source_table.reference
                copy_config = bigquery.CopyJobConfig()
                copy_config.write_disposition = 'WRITE_APPEND'
                copy_job = self.client.copy_table(use_as_source_table,
                                                  self.dest_table_ref,
                                                  job_config=copy_config)
                # Wait for copy_job to finish.
                copy_job.result()
def parse_data_resizer_args(argv):
    """
    Parse the command line arguments and return a BigQueryTableResizer
    configured from them.
    Args:
        argv: The command line arguments.
    """
    parser = argparse.ArgumentParser()
    # (flag, argparse keyword arguments), in declaration order.
    flag_specs = [
        ('--project',
         dict(dest='project', required=True,
              help='Name of the project containing source and '
                   'destination tables')),
        ('--source_dataset',
         dict(dest='source_dataset', required=True,
              help='Name of the dataset in which the source table is'
                   ' located')),
        ('--source_table',
         dict(dest='source_table', required=True,
              help='Name of the source table')),
        ('--destination_dataset',
         dict(dest='destination_dataset', required=False,
              help='Name of the dataset in which the destination '
                   'table is located')),
        ('--destination_table',
         dict(dest='destination_table', required=False,
              help='Name of the destination table')),
        ('--target_rows',
         dict(dest='target_rows', required=False, type=int, default=10000,
              help='Number of records (rows) desired in the '
                   'destination table')),
        ('--target_gb',
         dict(dest='target_gb', required=False, type=float, default=None,
              help='Size in GB desired for the destination table')),
        ('--location',
         dict(dest='location', required=False, default='US',
              help='The location of the BigQuery Tables.')),
    ]
    for flag, kwargs in flag_specs:
        parser.add_argument(flag, **kwargs)
    opts = parser.parse_args(argv)
    return BigQueryTableResizer(
        project=opts.project,
        source_dataset=opts.source_dataset,
        destination_dataset=opts.destination_dataset,
        source_table=opts.source_table,
        destination_table=opts.destination_table,
        target_rows=opts.target_rows,
        target_gb=opts.target_gb,
        location=opts.location)
def run(argv=None):
    """Build a resizer from command-line arguments and execute the copy."""
    parse_data_resizer_args(argv).resize()
# Script entry point: resize a BigQuery table per command-line arguments.
if __name__ == '__main__':
    run()
| |
# Copyright 2022 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for flax.linen."""
import dataclasses
import functools
import operator
from absl.testing import absltest
from flax.linen.module import override_named_call
import jax
from jax import random
from jax import lax
from jax.nn import initializers
import jax.numpy as jnp
import numpy as np
from typing import (Any, Tuple, Callable, Generic, Mapping, NamedTuple,
Sequence, TypeVar)
from flax import linen as nn
from flax import errors
from flax import struct
from flax.linen import compact
from flax.core import Scope, freeze, tracers
# Parse absl flags test_srcdir and test_tmpdir.
jax.config.parse_flags_with_absl()
def tree_equals(x, y):
  """Return True iff pytrees `x` and `y` have equal structure and leaves."""
  # BUG FIX: jax.tree_multimap was deprecated and removed from JAX; use the
  # equivalent jax.tree_util.tree_map (which accepts multiple trees).
  return jax.tree_util.tree_all(
      jax.tree_util.tree_map(operator.eq, x, y))
class DummyModule(nn.Module):
  """Adds a learnable 'bias' parameter (ones-initialized) to its input."""

  @compact
  def __call__(self, x):
    ones_bias = self.param('bias', initializers.ones, x.shape)
    result = x + ones_bias
    return result
class Dense(nn.Module):
  """Bias-free linear layer projecting inputs to `features` outputs."""
  features: int

  @compact
  def __call__(self, x):
    w = self.param('kernel',
                   initializers.lecun_normal(),
                   (x.shape[-1], self.features))
    return jnp.dot(x, w)
class ModuleTest(absltest.TestCase):
  def test_init_module(self):
    # Initializing a compact module twice on the same (rewound) scope must
    # give identical outputs and record the ones-initialized 'bias' param.
    rngkey = jax.random.PRNGKey(0)
    x = jnp.array([1.])
    scope = Scope({}, {'params': rngkey}, mutable=['params'])
    y = DummyModule(parent=scope)(x)
    params = scope.variables()['params']
    y2 = DummyModule(parent=scope.rewound())(x)
    np.testing.assert_allclose(y, y2)
    np.testing.assert_allclose(y, jnp.array([2.]))
    self.assertEqual(params, {'bias': jnp.array([1.])})
  def test_arg_module(self):
    # A module with a constructor argument: the kernel shape must follow
    # both the input width and the `features` argument.
    rngkey = jax.random.PRNGKey(0)
    x = jnp.ones((10,))
    scope = Scope({}, {'params': rngkey}, mutable=['params'])
    y = Dense(3, parent=scope)(x)
    params = scope.variables()['params']
    y2 = Dense(3, parent=scope.rewound())(x)
    np.testing.assert_allclose(y, y2)
    self.assertEqual(params['kernel'].shape, (10, 3))
  def test_util_fun(self):
    # Submodules created inside a helper method still get auto-numbered
    # names (Dense_0, Dense_1) under the compact caller.
    rngkey = jax.random.PRNGKey(0)
    class MLP(nn.Module):
      @compact
      def __call__(self, x):
        x = self._mydense(x)
        x = self._mydense(x)
        return x
      def _mydense(self, x):
        return Dense(3)(x)
    x = jnp.ones((10,))
    scope = Scope({}, {'params': rngkey}, mutable=['params'])
    y = MLP(parent=scope)(x)
    params = scope.variables()['params']
    y2 = MLP(parent=scope.rewound())(x)
    np.testing.assert_allclose(y, y2)
    param_shape = jax.tree_map(jnp.shape, params)
    self.assertEqual(param_shape,
                     {'Dense_0': {'kernel': (10, 3)},
                      'Dense_1': {'kernel': (3, 3)}})
  def test_nested_module_reuse(self):
    # Calling the same submodule instance twice must reuse its params, so
    # only one MLP_0 subtree appears even though mlp is invoked twice.
    rngkey = jax.random.PRNGKey(0)
    class MLP(nn.Module):
      @compact
      def __call__(self, x):
        x = self._mydense(x)
        x = self._mydense(x)
        return x
      def _mydense(self, x):
        return Dense(3)(x)
    class Top(nn.Module):
      @compact
      def __call__(self, x):
        mlp = MLP()
        y = mlp(x)
        z = mlp(x)
        return y + z
    x = jnp.ones((10,))
    scope = Scope({}, {'params': rngkey}, mutable=['params'])
    y = Top(parent=scope)(x)
    params = scope.variables()['params']
    y2 = Top(parent=scope.rewound())(x)
    np.testing.assert_allclose(y, y2)
    param_shape = jax.tree_map(jnp.shape, params)
    self.assertEqual(param_shape,
                     {'MLP_0':
                      {'Dense_0': {'kernel': (10, 3)},
                       'Dense_1': {'kernel': (3, 3)}}})
  def test_setup_dict_assignment(self):
    # Dict-valued setup attributes produce param names suffixed with the
    # dict key: lyrs1_a / lyrs1_b.
    rngkey = jax.random.PRNGKey(0)
    class MLP(nn.Module):
      def setup(self):
        self.lyrs1 = {'a': Dense(3), 'b': Dense(3),}
        self.lyrs2 = [Dense(3), Dense(3)]
      def __call__(self, x):
        y = self.lyrs1['a'](x)
        z = self.lyrs1['b'](y)
        #w = self.lyrs2[0](x)
        return z
    x = jnp.ones((10,))
    scope = Scope({}, {'params': rngkey}, mutable=['params'])
    y = MLP(parent=scope)(x)
    params = scope.variables()['params']
    y2 = MLP(parent=scope.rewound())(x)
    np.testing.assert_allclose(y, y2)
    param_shape = jax.tree_map(jnp.shape, params)
    self.assertEqual(param_shape,
                     {'lyrs1_a': {'kernel': (10, 3)},
                      'lyrs1_b': {'kernel': (3, 3)}})
  def test_setup_cloning(self):
    # Cloning a bound module whose setup defines submodules must not raise.
    class MLP(nn.Module):
      def setup(self):
        self.dense = Dense(3)
    scope = Scope({})
    MLPclone = MLP(parent=scope).clone()
  def test_submodule_attr(self):
    # Passing an already-attached submodule to another module must keep the
    # variables under the original owner (Wrapper), not the receiver.
    rngkey = jax.random.PRNGKey(0)
    class Inner(nn.Module):
      @compact
      def __call__(self):
        self.param('x', lambda rng: 40)
    class Outer(nn.Module):
      inner: nn.Module
      def __call__(self):
        return self.inner()
    class Wrapper(nn.Module):
      def setup(self):
        self.inner = Inner()
        self.outer = Outer(self.inner)
      def __call__(self):
        return self.outer()
    scope = Scope({'params': {}}, rngs={'params': rngkey}, mutable=['params'])
    # Make sure this doesn't raise "Can't attach to remote parent"
    wrapper = Wrapper(parent=scope)
    wrapper()
    # Make sure that variables are registered at the level of the
    # Wrapper submodule, not the Outer submodule.
    self.assertEqual(40, scope.variables()['params']['inner']['x'])
  def test_param_in_setup(self):
    # Params may be declared in setup() (instead of a compact __call__).
    rngkey = jax.random.PRNGKey(0)
    class DummyModule(nn.Module):
      xshape: Tuple[int]
      def setup(self):
        self.bias = self.param('bias', initializers.ones, self.xshape)
      def __call__(self, x):
        return x + self.bias
    x = jnp.array([1.])
    scope = Scope({}, {'params': rngkey}, mutable=['params'])
    y = DummyModule(x.shape, parent=scope)(x)
    params = scope.variables()['params']
    y2 = DummyModule(x.shape, parent=scope.rewound())(x)
    np.testing.assert_allclose(y, y2)
    np.testing.assert_allclose(y, jnp.array([2.]))
    self.assertEqual(params, {'bias': jnp.array([1.])})
  def test_init_outside_setup_without_compact(self):
    # Declaring params in a plain (non-compact) __call__ must be rejected.
    rngkey = jax.random.PRNGKey(0)
    class DummyModule(nn.Module):
      def __call__(self, x):
        bias = self.param('bias', initializers.ones, x.shape)
        return x + bias
    x = jnp.array([1.])
    scope = Scope({}, {'params': rngkey}, mutable=['params'])
    with self.assertRaisesRegex(ValueError, 'must be initialized.*setup'):
      y = DummyModule(parent=scope)(x)
  def test_init_outside_call(self):
    # Params can only be created in setup() or the compact method; a plain
    # helper method (foo) attempting it must be rejected.
    rngkey = jax.random.PRNGKey(0)
    class Dummy(nn.Module):
      @compact
      def __call__(self, x):
        bias = self.param('bias', initializers.ones, x.shape)
        return x + bias
      def foo(self, x):
        bias = self.param('bias', initializers.ones, x.shape)
        return x + bias
    x = jnp.array([1.])
    scope = Scope({}, {'params': rngkey}, mutable=['params'])
    with self.assertRaisesRegex(ValueError, 'must be initialized.*setup'):
      y = Dummy(parent=scope).foo(x)
  def test_setup_call_var_collision(self):
    # Declaring the same param name in both setup() and __call__ collides.
    rngkey = jax.random.PRNGKey(0)
    class Dummy(nn.Module):
      xshape: Tuple[int]
      def setup(self):
        self.bias = self.param('bias', initializers.ones, self.xshape)
      @compact
      def __call__(self, x):
        bias = self.param('bias', initializers.ones, x.shape)
        return x + self.bias
    x = jnp.array([1.])
    scope = Scope({}, {'params': rngkey}, mutable=['params'])
    msg = 'Could not create param "bias" in Module Dummy: Name in use'
    with self.assertRaisesRegex(errors.NameInUseError, msg):
      y = Dummy(x.shape, parent=scope)(x)
  def test_call_var_collision(self):
    # Declaring the same param name twice inside __call__ collides.
    rngkey = jax.random.PRNGKey(0)
    class Dummy(nn.Module):
      xshape: Tuple[int]
      @compact
      def __call__(self, x):
        bias = self.param('bias', initializers.ones, self.xshape)
        bias = self.param('bias', initializers.ones, self.xshape)
        return x + bias
    x = jnp.array([1.])
    scope = Scope({}, {'params': rngkey}, mutable=['params'])
    msg = 'Could not create param "bias" in Module Dummy: Name in use'
    with self.assertRaisesRegex(errors.NameInUseError, msg):
      y = Dummy(x.shape, parent=scope)(x)
  def test_setup_var_collision(self):
    # Declaring the same param name twice inside setup() collides.
    rngkey = jax.random.PRNGKey(0)
    class Dummy(nn.Module):
      xshape: Tuple[int]
      def setup(self):
        self.bias = self.param('bias', initializers.ones, self.xshape)
        self.bias = self.param('bias', initializers.ones, self.xshape)
      def __call__(self, x):
        return x + self.bias
    x = jnp.array([1.])
    scope = Scope({}, {'params': rngkey}, mutable=['params'])
    msg = 'Could not create param "bias" in Module Dummy: Name in use'
    with self.assertRaisesRegex(errors.NameInUseError, msg):
      y = Dummy(x.shape, parent=scope)(x)
  def test_setattr_name_var_disagreement_allowed_in_lists(self):
    # Param names inside a list attribute need not match the attribute name.
    rngkey = jax.random.PRNGKey(0)
    class Dummy(nn.Module):
      xshape: Tuple[int]
      def setup(self):
        self.biases = [
            self.param(f'bias_{i}', initializers.ones, self.xshape)
            for i in range(4)]
      def __call__(self, x):
        return x + self.biases[0]
    x = jnp.array([1.])
    scope = Scope({}, {'params': rngkey}, mutable=['params'])
    y = Dummy(x.shape, parent=scope)(x)
    self.assertEqual(y, jnp.array([2.]))
  def test_setattr_name_var_disagreement_allowed_in_dicts(self):
    # Param names inside a dict attribute need not match the attribute name.
    rngkey = jax.random.PRNGKey(0)
    class Dummy(nn.Module):
      xshape: Tuple[int]
      def setup(self):
        self.biases = {
            # NOTE that keys still must be strings. This is to make a possible
            # future transition to automatically derived parameter names when assigned
            # as a dict easier (like we currently have with submodules).
            # See a bit of discussion here: https://github.com/google/flax/issues/705#issuecomment-738761853
            str(i): self.param(f'bias_{i}', initializers.ones, self.xshape)
            for i in range(4)}
      def __call__(self, x):
        return x + self.biases['0']
    x = jnp.array([1.])
    scope = Scope({}, {'params': rngkey}, mutable=['params'])
    y = Dummy(x.shape, parent=scope)(x)
    self.assertEqual(y, jnp.array([2.]))
  def test_submodule_var_collision_with_scope(self):
    # A param and a submodule assigned to the same attribute name collide
    # at the scope level.
    rngkey = jax.random.PRNGKey(0)
    class Dummy(nn.Module):
      xshape: Tuple[int]
      def setup(self):
        self.bias = self.param('bias', initializers.ones, self.xshape)
        self.bias = DummyModule()
      def __call__(self, x):
        return x + self.bias
    x = jnp.array([1.])
    scope = Scope({}, {'params': rngkey}, mutable=['params'])
    msg = 'Duplicate use of scope name: "bias"'
    with self.assertRaisesWithLiteralMatch(ValueError, msg):
      y = Dummy(x.shape, parent=scope)(x)
  def test_submodule_var_collision_with_submodule(self):
    # A setup() param and a compact submodule sharing the name collide.
    rngkey = jax.random.PRNGKey(0)
    class Dummy(nn.Module):
      xshape: Tuple[int]
      def setup(self):
        self.bias = self.param('bias', initializers.ones, self.xshape)
      @compact
      def __call__(self, x):
        bias = DummyModule(name='bias')
        return x + self.bias
    x = jnp.array([1.])
    scope = Scope({}, {'params': rngkey}, mutable=['params'])
    msg = 'Could not create submodule "bias" in Module Dummy: Name in use'
    with self.assertRaisesRegex(errors.NameInUseError, msg):
      y = Dummy(x.shape, parent=scope)(x)
  def test_submodule_var_collision_with_params(self):
    # A setup() submodule and a compact param sharing the name collide.
    rngkey = jax.random.PRNGKey(0)
    class Dummy(nn.Module):
      xshape: Tuple[int]
      def setup(self):
        self.bias = DummyModule()
      @compact
      def __call__(self, x):
        bias = self.param('bias', initializers.ones, self.xshape)
        return x + self.bias
    x = jnp.array([1.])
    scope = Scope({}, {'params': rngkey}, mutable=['params'])
    msg = 'Could not create param "bias" in Module Dummy: Name in use'
    with self.assertRaisesRegex(errors.NameInUseError, msg):
      y = Dummy(x.shape, parent=scope)(x)
  def test_attr_empty_container(self):
    # Attributes holding empty containers must not break apply().
    class Foo(nn.Module):
      bar: Mapping[str, Any]
      @compact
      def __call__(self):
        pass
    Foo({"a": ()}).apply({})
  def test_attr_param_name_collision(self):
    # A dataclass attribute and a param sharing the name 'bias' collide.
    rngkey = jax.random.PRNGKey(0)
    class Dummy(nn.Module):
      bias: bool
      def setup(self):
        self.bias = self.param('bias', initializers.ones, (3, 3))
      def __call__(self, x):
        return x + self.bias
    x = jnp.array([1.])
    scope = Scope({}, {'params': rngkey}, mutable=['params'])
    msg = 'Could not create param "bias" in Module Dummy: Name in use'
    with self.assertRaisesRegex(errors.NameInUseError, msg):
      y = Dummy(x.shape, parent=scope)(x)
  def test_attr_submodule_name_collision(self):
    # A dataclass attribute and an explicitly-named submodule collide.
    rngkey = jax.random.PRNGKey(0)
    class Dummy(nn.Module):
      bias: bool
      def setup(self):
        self.bias = DummyModule(name='bias')
      def __call__(self, x):
        return self.bias(x)
    x = jnp.array([1.])
    scope = Scope({}, {'params': rngkey}, mutable=['params'])
    msg = 'Could not create submodule "bias" in Module Dummy: Name in use'
    with self.assertRaisesRegex(errors.NameInUseError, msg):
      y = Dummy(x.shape, parent=scope)(x)
  def test_only_one_compact_method(self):
    # Defining two @compact methods on one Module class must fail at class
    # definition time.
    msg = 'Only one method per class can be @compact'
    with self.assertRaisesRegex(errors.MultipleMethodsCompactError, msg):
      class Dummy(nn.Module):
        @compact
        def call1(self):
          pass
        @compact
        def call2(self):
          pass
  def test_only_one_compact_method_subclass(self):
    # Overriding the same @compact method in a subclass is allowed.
    class Dummy(nn.Module):
      @nn.compact
      def __call__(self):
        pass
    class SubDummy(Dummy):
      @nn.compact
      def __call__(self):
        super().__call__()
    scope = Scope(variables={})
    subdummy = SubDummy(parent=scope)
    # Make sure the @compact annotation is valid on both base class and subclass, as long
    # as its on the same method.
    subdummy()
  def test_forgotten_compact_annotation(self):
    # Creating a submodule inside a method that is neither setup() nor
    # @compact must raise a helpful error.
    class Bar(nn.Module):
      # user forgot to add @compact
      def __call__(self, x):
        return nn.Dense(1)(x)
    class Foo(nn.Module):
      @nn.compact
      def __call__(self, x):
        bar = Bar()
        x = bar(x)
        x = bar(x)
        return x
    msg = (r'Submodule Dense must be defined in `setup\(\)` or in a method '
           'wrapped in `@compact`')
    with self.assertRaisesRegex(errors.AssignSubModuleError, msg):
      Foo().init(random.PRNGKey(0), jnp.ones((1, 3)))
  def test_forgotten_compact_annotation_with_explicit_parent(self):
    # Passing parent=self explicitly does not bypass the compact/setup
    # requirement for submodule creation.
    class Bar(nn.Module):
      def __call__(self, x):
        return nn.Dense(1, parent=self)(x)
    class Foo(nn.Module):
      @nn.compact
      def __call__(self, x):
        bar = Bar()
        x = bar(x)
        x = bar(x)
        return x
    msg = (r'Submodule Dense must be defined in `setup\(\)` or in a method '
           'wrapped in `@compact`')
    with self.assertRaisesRegex(errors.AssignSubModuleError, msg):
      Foo().init(random.PRNGKey(0), jnp.ones((1, 3)))
  def test_numpy_array_shape_class_args(self):
    # Module attributes may be numpy arrays (e.g. layer widths) and still
    # work through init/apply.
    class MLP(nn.Module):
      widths: Sequence
      @nn.compact
      def __call__(self, x):
        for width in self.widths[:-1]:
          x = nn.relu(nn.Dense(width)(x))
        return nn.Dense(self.widths[-1])(x)
    test = MLP(np.array([3, 3], np.int32))
    params = test.init({'params': random.PRNGKey(42)}, jnp.ones((3, 3)))
    _ = test.apply(params, jnp.ones((3, 3)))
  def test_get_local_methods(self):
    # _get_local_method_names returns only plain instance methods defined
    # directly on the class (no staticmethods, classmethods, or inherited).
    class Base:
      @staticmethod
      def bar(x):
        return x
      @classmethod
      def baz(cls, x):
        return x
      def bleep(self, x):
        return x
    class Derived1(Base):
      @staticmethod
      def bar2(x):
        return x
      @classmethod
      def baz2(cls, x):
        return x
      def bloop(self, x):
        return x
    class Derived2(Derived1):
      pass
    self.assertEqual(nn.module._get_local_method_names(Base), ('bleep',))
    self.assertEqual(nn.module._get_local_method_names(Derived1), ('bloop',))
    self.assertEqual(
        nn.module._get_local_method_names(Derived1, exclude=('bloop',)), ())
    self.assertEqual(nn.module._get_local_method_names(Derived2), ())
  def test_inheritance_dataclass_attribs(self):
    # Subclassing a Module must merge dataclass fields in declaration order
    # and keep `parent`/`name` as the trailing fields.
    class Test(nn.Module):
      bar: int
      def __call__(self, x):
        return x
    class Test2(Test):
      baz: int
      def __call__(self, x):
        return x
    class Test3(Test):
      baz: int
      def __call__(self, x):
        return x
    class Test4(Test2):
      def __call__(self, x):
        return x
    key = random.PRNGKey(0)
    x = jnp.ones((5,))
    test1 = Test(bar=4)
    test2 = Test2(bar=4, baz=2)
    test3 = Test3(bar=4, baz=2)
    test4 = Test4(bar=5, baz=3)
    self.assertEqual(test1.init_with_output(key, x), (x, freeze({})))
    self.assertEqual(test2.init_with_output(key, x), (x, freeze({})))
    self.assertEqual(test3.init_with_output(key, x), (x, freeze({})))
    self.assertEqual(test4.init_with_output(key, x), (x, freeze({})))
    self.assertTrue(hasattr(test1, 'bar'))
    self.assertTrue(hasattr(test1, 'name'))
    self.assertTrue(hasattr(test1, 'parent'))
    self.assertTrue(hasattr(test2, 'bar'))
    self.assertTrue(hasattr(test2, 'baz'))
    self.assertTrue(hasattr(test2, 'name'))
    self.assertTrue(hasattr(test2, 'parent'))
    self.assertTrue(hasattr(test3, 'bar'))
    self.assertTrue(hasattr(test3, 'baz'))
    self.assertTrue(hasattr(test3, 'name'))
    self.assertTrue(hasattr(test3, 'parent'))
    self.assertTrue(hasattr(test4, 'bar'))
    self.assertTrue(hasattr(test4, 'baz'))
    self.assertTrue(hasattr(test4, 'name'))
    self.assertTrue(hasattr(test4, 'parent'))
    self.assertEqual(
        list(Test.__dataclass_fields__.keys()),
        ['bar', 'parent', 'name'])
    self.assertEqual(
        list(Test2.__dataclass_fields__.keys()),
        ['bar', 'baz', 'parent', 'name'])
    self.assertEqual(
        list(Test3.__dataclass_fields__.keys()),
        ['bar', 'baz', 'parent', 'name'])
    self.assertEqual(
        list(Test4.__dataclass_fields__.keys()),
        ['bar', 'baz', 'parent', 'name'])
  def test_get_suffix_value_pairs(self):
    # _get_suffix_value_pairs flattens containers into (suffix, leaf) pairs;
    # scalars and empty containers map to a single ('', value) pair.
    for x in [(), [], {}, None, 0, set()]:
      self.assertEqual(
          nn.module._get_suffix_value_pairs(x), [('', x)])
    self.assertEqual(
        nn.module._get_suffix_value_pairs(
            {'a': 1, 'b': 2}), [('_a', 1), ('_b', 2)])
    self.assertEqual(
        nn.module._get_suffix_value_pairs(
            [1, 2, 3]), [('_0', 1), ('_1', 2), ('_2', 3)])
    x1 = [nn.Dense(10), nn.relu, nn.Dense(10)]
    y1 = nn.module._get_suffix_value_pairs(x1)
    self.assertEqual(y1, [('_0', x1[0]), ('_1', x1[1]), ('_2', x1[2])])
    x2 = {'a': 1, 'b': {'c': nn.Dense(10), 'd': nn.relu}}
    y2 = nn.module._get_suffix_value_pairs(x2)
    self.assertEqual(y2,
        [('_a', 1), ('_b_c', x2['b']['c']), ('_b_d', x2['b']['d'])])
  def test_mixed_list_assignment_in_setup(self):
    # Lists in setup() may mix Modules and plain functions; only the Modules
    # get parameter subtrees (layers_0, layers_2).
    class Test(nn.Module):
      def setup(self):
        self.layers = [nn.Dense(10), nn.relu, nn.Dense(10)]
      def __call__(self, x):
        for lyr in self.layers:
          x = lyr(x)
        return x
    x = random.uniform(random.PRNGKey(0), (5,5))
    variables = Test().init(random.PRNGKey(0), jnp.ones((5,5)))
    y = Test().apply(variables, x)
    m0 = variables['params']['layers_0']['kernel']
    m1 = variables['params']['layers_2']['kernel']
    self.assertTrue(jnp.all(y == jnp.dot(nn.relu(jnp.dot(x, m0)), m1)))
def test_module_is_hashable(self):
module_a = nn.Dense(10)
module_a_2 = nn.Dense(10)
module_b = nn.Dense(5)
self.assertEqual(hash(module_a), hash(module_a_2))
self.assertNotEqual(hash(module_a), hash(module_b))
  def test_module_custom_hash(self):
    # A user-defined __hash__ on a Module must be honored (here it depends
    # only on x, so differing y values still hash equal).
    class Test(nn.Module):
      x: int = 3
      y: int = 5
      def __hash__(self):
        return 42 + self.x
    module_a = Test(1, 2)
    module_a_2 = Test(1, 5)
    module_b = Test(2, 2)
    self.assertEqual(hash(module_a), hash(module_a_2))
    self.assertNotEqual(hash(module_a), hash(module_b))
def test_module_with_scope_is_not_hashable(self):
module_a = nn.Dense(10, parent=Scope({}))
msg = 'Can\'t call __hash__ on modules that hold variables.'
with self.assertRaisesWithLiteralMatch(TypeError, msg):
hash(module_a)
  def test_module_trace(self):
    # repr() of a bound module must render the attribute/children tree the
    # same way during init and during apply.
    class MLP(nn.Module):
      act: Callable = nn.relu
      sizes: Sequence[int] = (3, 2)
      @nn.compact
      def __call__(self, x):
        for size in self.sizes:
          x = nn.Dense(size)(x)
          x = self.act(x)
        return repr(self)
    mlp = MLP()
    expected_trace = (
"""MLP(
    # attributes
    act = relu
    sizes = (3, 2)
    # children
    Dense_0 = Dense(
        # attributes
        features = 3
        use_bias = True
        dtype = float32
        param_dtype = float32
        precision = None
        kernel_init = init
        bias_init = zeros
    )
    Dense_1 = Dense(
        # attributes
        features = 2
        use_bias = True
        dtype = float32
        param_dtype = float32
        precision = None
        kernel_init = init
        bias_init = zeros
    )
)""")
    x = jnp.ones((1, 2))
    trace, variables = mlp.init_with_output(random.PRNGKey(0), x)
    self.assertEqual(trace, expected_trace)
    trace = mlp.apply(variables, x)
    self.assertEqual(trace, expected_trace)
  def test_module_apply_method(self):
    """apply(method=...) accepts bound/unbound methods and free functions."""
    class Foo(nn.Module):
      @nn.compact
      def __call__(self):
        pass
      def test(self):
        pass
    # We can use both instance and class methods in apply.
    Foo().apply({}, method=Foo.test)
    Foo().apply({}, method=Foo().test)
    # We also use a function that is not in the provided Module, although it
    # should have a first argument representing an instance of the Module (Foo
    # in this case).
    x = Foo().apply({}, method=lambda foo_instance: foo_instance)
    self.assertEqual(type(x), type(Foo()))
    # This is not allowed: a zero-argument lambda has no slot for the module.
    msg = 'Cannot call apply()'
    with self.assertRaisesRegex(errors.ApplyModuleInvalidMethodError, msg):
      Foo().apply({}, method=lambda: True)
    # Nor is a string naming a method the class does not define.
    with self.assertRaisesRegex(errors.ApplyModuleInvalidMethodError, msg):
      Foo().apply({}, method='allowed_apply_fn')
def test_call_unbound_compact_module_methods(self):
dense = Dense(3)
msg = r'Can\'t call compact methods on unbound modules'
with self.assertRaisesRegex(errors.CallCompactUnboundModuleError, msg):
dense(jnp.ones((1, )))
def test_call_unbound_has_variable(self):
class EmptyModule(nn.Module):
def foo(self):
self.has_variable('bar', 'baz')
empty = EmptyModule()
with self.assertRaisesRegex(ValueError, "variable.*unbound module"):
empty.foo()
def test_call_unbound_make_rng(self):
class EmptyModule(nn.Module):
def foo(self):
self.make_rng('bar')
empty = EmptyModule()
with self.assertRaisesRegex(ValueError, "RNGs.*unbound module"):
empty.foo()
def test_call_unbound_variables(self):
class EmptyModule(nn.Module):
def foo(self):
self.variables
empty = EmptyModule()
with self.assertRaisesRegex(ValueError, "variables.*unbound module"):
empty.foo()
def test_call_unbound_noncompact_module_methods(self):
class EmptyModule(nn.Module):
foo: int = 3
def bar(self):
return self.foo
empty = EmptyModule()
# It's fine to call methods of unbound methods that don't depend on
# attributes defined during `setup`.
self.assertEqual(empty.bar(), 3)
def test_call_unbound_noncompact_module_methods_depending_on_setup(self):
class EmptyModule(nn.Module):
def setup(self):
self.foo = 2
def bar(self):
return self.foo
empty = EmptyModule()
msg = r'"EmptyModule" object has no attribute "foo"'
with self.assertRaisesRegex(AttributeError, msg):
empty.bar()
def test_module_with_attrs(self):
class Foo(nn.Module):
bar: nn.Dense = dataclasses.field(init=False)
def setup(self):
self.bar = nn.Dense(3)
def __call__(self, x):
return self.bar(x)
foo = Foo()
x = jnp.ones((2,))
variables = foo.init(random.PRNGKey(0), x)
self.assertEqual(variables['params']['bar']['kernel'].shape, (2, 3))
  def test_noncompact_module_frozen(self):
    """Attribute assignment on a Module is only legal inside setup."""
    class Foo(nn.Module):
      def setup(self):
        self.i = 1  # This is allowed (for assigning submodules).
      def __call__(self):
        self.i = 2  # This is not allowed.
    msg = ('Can\'t set i=2 for Module of type Foo: Module instance is frozen '
           'outside of setup method.')
    with self.assertRaisesRegex(errors.SetAttributeFrozenModuleError, msg):
      Foo().init(random.PRNGKey(0))
  def test_compact_module_frozen(self):
    """Compact methods may not assign attributes either — the module is frozen."""
    class Foo(nn.Module):
      @nn.compact
      def __call__(self):
        self.i = 2
    msg = ('Can\'t set i=2 for Module of type Foo: Module instance is frozen '
           'outside of setup method.')
    with self.assertRaisesRegex(errors.SetAttributeFrozenModuleError, msg):
      Foo().init(random.PRNGKey(0))
  def test_submodule_frozen(self):
    """Mutating a constructed submodule's fields is also forbidden."""
    class Foo(nn.Module):
      @nn.compact
      def __call__(self):
        dense = nn.Dense(10)
        dense.features = 20  # <--- This is not allowed
    msg = ('Can\'t set features=20 for Module of type Dense: Module instance '
           'is frozen outside of setup method.')
    with self.assertRaisesRegex(errors.SetAttributeFrozenModuleError, msg):
      Foo().init(random.PRNGKey(0))
  def test_module_call_not_implemented(self):
    """init on a Module without __call__ raises AttributeError."""
    class Foo(nn.Module):
      pass
    msg = '"Foo" object has no attribute "__call__"'
    with self.assertRaisesRegex(AttributeError, msg):
      Foo().init(random.PRNGKey(0))
  def test_is_mutable_collection(self):
    """is_mutable_collection reflects the `mutable` argument passed to apply."""
    class EmptyModule(nn.Module):
      def __call__(self):
        return self.is_mutable_collection('test')
    empty = EmptyModule()
    # apply with mutable collections returns (output, state); index the output.
    self.assertTrue(empty.apply({}, mutable=['test'])[0])
    self.assertFalse(empty.apply({}, mutable=False))
  def test_module_lazy_getattr_setup(self):
    """Accessing a submodule's attribute triggers its setup lazily."""
    class A(nn.Module):
      def setup(self):
        self.d = nn.Dense(2)
      def __call__(self, x):
        return self.d(x)
    class B(nn.Module):
      def setup(self):
        self.a = A()
      def __call__(self, x):
        # `self.a.d` forces A.setup to run before A itself is called.
        y1 = self.a.d(x)
        y2 = self.a(x)
        return y1, y2
    key = random.PRNGKey(0)
    x = jnp.ones((2,))
    # Both paths must hit the same Dense instance, hence identical outputs.
    (y1, y2), p = B().init_with_output(key, x)
    np.testing.assert_array_equal(y1, y2)
  def test_module_lazy_dir_setup(self):
    """dir() on a submodule exposes attributes assigned in its (lazy) setup."""
    class A(nn.Module):
      def setup(self):
        self.d = nn.Dense(2)
      def __call__(self, x):
        return self.d(x)
    class B(nn.Module):
      def setup(self):
        self.a = A()
      def __call__(self, x):
        # dir() should force/reflect A.setup so that 'd' is visible.
        assert 'd' in dir(self.a)
        y1 = self.a.d(x)
        y2 = self.a(x)
        return y1, y2
    key = random.PRNGKey(0)
    x = jnp.ones((2,))
    _ = B().init_with_output(key, x)
  def test_module_unbound_getattr(self):
    """setup-assigned attributes are unavailable while a module is unbound."""
    class A(nn.Module):
      def setup(self):
        b = B()
        b.c  # B is unbound because it is not yet assigned to an attribute.
        self.b = b
      def __call__(self):
        pass
    class B(nn.Module):
      def setup(self):
        self.c = nn.Dense(2)
    msg = '"B" object has no attribute "c"'
    with self.assertRaisesRegex(AttributeError, msg):
      A().init(random.PRNGKey(0))
  def test_unbound_setup_call(self):
    """Calling an ordinary method on an unbound module does NOT run setup."""
    setup_called = False
    class A(nn.Module):
      def setup(self):
        nonlocal setup_called
        setup_called = True
      def test(self):
        pass
    A().test()
    self.assertFalse(setup_called)
  def test_module_pass_as_attr(self):
    """A module passed as a constructor attribute is adopted by the parent."""
    class A(nn.Module):
      def setup(self):
        self.b = B(nn.Dense(2))
      def __call__(self, x):
        return self.b(x)
    class B(nn.Module):
      foo: Any
      def __call__(self, x):
        return self.foo(x)
    variables = A().init(random.PRNGKey(0), jnp.ones((1,)))
    var_shapes = jax.tree_map(jnp.shape, variables)
    # The adopted Dense's params live under the attribute path b/foo.
    ref_var_shapes = freeze({
      'params': {
        'b': {
          'foo': {
            'bias': (2,),
            'kernel': (1, 2),
          }
        },
      },
    })
    self.assertTrue(tree_equals(var_shapes, ref_var_shapes))
  def test_module_pass_in_closure(self):
    """A module captured from an enclosing scope is adopted without renaming."""
    a = nn.Dense(2)
    class B(nn.Module):
      def setup(self):
        self.foo = a
      def __call__(self, x):
        return self.foo(x)
    variables = B().init(random.PRNGKey(0), jnp.ones((1,)))
    var_shapes = jax.tree_map(jnp.shape, variables)
    ref_var_shapes = freeze({
      'params': {
        'foo': {
          'bias': (2,),
          'kernel': (1, 2),
        }
      },
    })
    self.assertTrue(tree_equals(var_shapes, ref_var_shapes))
    # Adoption must not mutate the closed-over module's own name.
    self.assertEqual(a.name, None)
  def test_toplevel_submodule_adoption(self):
    """A pre-built module passed as an attribute nests its params under it."""
    class Encoder(nn.Module):
      n_layers: int
      ch: int
      def setup(self):
        self.layers = [nn.Dense(self.ch) for _ in range(self.n_layers)]
      def __call__(self, x):
        for layer in self.layers:
          x = layer(x)
          x = nn.relu(x)
        return x
    class Model(nn.Module):
      encoder: nn.Module
      n_out: int
      def setup(self):
        self.dense_out = nn.Dense(self.n_out)
      def __call__(self, x):
        x = self.encoder(x)
        return self.dense_out(x)
    # Define model.
    encoder = Encoder(n_layers=1, ch=8)
    model = Model(encoder=encoder, n_out=5)
    # Initialize.
    key = jax.random.PRNGKey(0)
    x = random.uniform(key, (4, 4))
    variables = model.init(key, x)
    y = model.apply(variables, x)
    self.assertEqual(y.shape, (4, 5))
    # The adopted encoder's params nest under its attribute name.
    var_shapes = jax.tree_map(jnp.shape, variables)
    ref_var_shapes = freeze({
      'params': {
        'dense_out': {
          'bias': (5,),
          'kernel': (8, 5),
        },
        'encoder': {
          'layers_0': {
            'bias': (8,),
            'kernel': (4, 8),
          },
        },
      },
    })
    self.assertTrue(tree_equals(var_shapes, ref_var_shapes))
def test_toplevel_submodule_adoption_pytree(self):
class A(nn.Module):
@nn.compact
def __call__(self, c, x):
counter = self.variable('counter', 'i', jnp.zeros, ())
counter.value += 1
x = nn.Dense(1)(x)
return c, x
class B(nn.Module):
A: Any
@nn.compact
def __call__(self, c, x):
return self.A['foo'](*self.A['bar'](c, x))
a = A()
As = {'foo': A(), 'bar': A()}
b = B(As)
key = random.PRNGKey(0)
x = jnp.ones((2, 2))
p = B(As).init(key, x, x)
print('apply', x.shape)
y, cntrs = b.apply(p, x, x, mutable='counter')
ref_cntrs = freeze({
'counter': {
'A_bar': {
'i': jnp.array(2.0),
},
'A_foo': {
'i': jnp.array(2.0),
},
},
})
self.assertTrue(jax.tree_util.tree_all(
jax.tree_multimap(
lambda x, y: np.testing.assert_allclose(x, y, atol=1e-7),
cntrs, ref_cntrs)
))
  def test_toplevel_submodule_adoption_sharing(self):
    """A module shared between siblings is stored once, not duplicated."""
    dense = functools.partial(nn.Dense, use_bias=False)
    class A(nn.Module):
      @nn.compact
      def __call__(self, x):
        return dense(2)(x)
    class B(nn.Module):
      a: nn.Module
      @nn.compact
      def __call__(self, x):
        return dense(2)(x) + self.a(x)
    class C(nn.Module):
      a: nn.Module
      b: nn.Module
      @nn.compact
      def __call__(self, x):
        return dense(2)(x) + self.b(x) + self.a(x)
    key = random.PRNGKey(0)
    x = jnp.ones((2, 2))
    a = A()
    b = B(a)
    c = C(a, b)
    p = c.init(key, x)
    var_shapes = jax.tree_map(jnp.shape, p)
    # `a` appears once under 'a'; `b` does not re-own a copy of it.
    ref_var_shapes = freeze({
      'params': {
        'Dense_0': {
          'kernel': (2, 2),
        },
        'a': {
          'Dense_0': {
            'kernel': (2, 2),
          },
        },
        'b': {
          'Dense_0': {
            'kernel': (2, 2),
          },
        },
      },
    })
    self.assertTrue(tree_equals(var_shapes, ref_var_shapes))
  def test_toplevel_named_submodule_adoption(self):
    """An adopted module keeps its attribute path even when given a name."""
    dense = functools.partial(nn.Dense, use_bias=False)
    class A(nn.Module):
      def setup(self):
        self.dense = dense(4)
      def __call__(self, x):
        return self.dense(x)
    class B(nn.Module):
      a: A
      def setup(self):
        self.proj = dense(6)
      def __call__(self, x):
        return self.proj(self.a(x))
    a = A(name='foo')
    b = B(a=a)
    k = jax.random.PRNGKey(0)
    x = jnp.zeros((5,5))
    init_vars = b.init(k, x)
    var_shapes = jax.tree_map(jnp.shape, init_vars)
    # Params nest under the attribute name 'a', not the given name 'foo'.
    ref_var_shapes = freeze({
      'params': {
        'a': {
          'dense': {
            'kernel': (5, 4),
          },
        },
        'proj': {
          'kernel': (4, 6),
        },
      },
    })
    self.assertTrue(tree_equals(var_shapes, ref_var_shapes))
  def test_toplevel_submodule_pytree_adoption_sharing(self):
    """One instance shared under several pytree keys owns a single state."""
    class A(nn.Module):
      @nn.compact
      def __call__(self, x):
        counter = self.variable('counter', 'i', jnp.zeros, ())
        counter.value += 1
        x = nn.Dense(1)(x)
        return x
    class B(nn.Module):
      A: Any
      @nn.compact
      def __call__(self, x):
        return self.A['foo'](x) + self.A['bar'](x) + self.A['baz'](x)
    key = random.PRNGKey(0)
    x = jnp.ones((2, 2))
    a = A()
    # All three keys alias the SAME instance `a`.
    As = {'foo': a, 'bar': a, 'baz': a}
    b = B(As)
    p = b.init(key, x)
    _, cntrs = b.apply(p, x, mutable='counter')
    # A single shared counter: 3 calls in init + 3 in apply -> 6.
    ref_cntrs = freeze({
      'counter': {
        'A_bar': {
          'i': jnp.array(6.0),
        },
      },
    })
    self.assertTrue(tree_equals(cntrs, ref_cntrs))
  def test_inner_class_def(self):
    """A class nested inside a Module subclass remains usable as a type."""
    class X(nn.Module):
      class Hyper(struct.PyTreeNode):
        a: int
      hyper: Hyper
      @nn.compact
      def __call__(self, x):
        return x+1
    self.assertTrue(isinstance(X.Hyper(a=1), X.Hyper))
  def test_sow(self):
    """sow appends to a tuple when 'intermediates' is mutable; no-op otherwise."""
    class Foo(nn.Module):
      @nn.compact
      def __call__(self, x, **sow_args):
        self.sow('intermediates', 'h', x, **sow_args)
        self.sow('intermediates', 'h', 2 * x, **sow_args)
        return 3 * x
    variables = Foo().init(random.PRNGKey(0), 1)
    # during init we should not collect intermediates by default...
    self.assertTrue('intermediates' not in variables)
    # ...unless we override mutable
    variables = Foo().init(random.PRNGKey(0), 1, mutable=True)
    self.assertEqual(variables, {
      'intermediates': {'h': (1, 2)}
    })
    _, state = Foo().apply({}, 1, mutable=['intermediates'])
    self.assertEqual(state, {
      'intermediates': {'h': (1, 2)}
    })
    # Custom init/reduce fns fold the sown values instead of appending them.
    _, state = Foo().apply(
        {}, 1,
        init_fn=lambda: 0,
        reduce_fn=lambda a, b: a + b,
        mutable=['intermediates'])
    self.assertEqual(state, {
      'intermediates': {'h': 3}
    })
    # With nothing mutable, sow is a no-op and only the output is returned.
    self.assertEqual(Foo().apply({}, 1), 3)
  def test_capture_intermediates(self):
    """capture_intermediates records method outputs; a filter fn scopes it."""
    class Bar(nn.Module):
      def test(self, x):
        return x + 1
    class Foo(nn.Module):
      @nn.compact
      def __call__(self, x):
        return Bar().test(x) + 1
    # True captures only __call__ outputs by default.
    _, state = Foo().apply({}, 1, capture_intermediates=True)
    self.assertEqual(state, {
      'intermediates': {'__call__': (3,)}
    })
    # A predicate over (module, method_name) captures matching modules' methods.
    fn = lambda mdl, _: isinstance(mdl, Bar)
    _, state = Foo().apply({}, 1, capture_intermediates=fn)
    self.assertEqual(state, {
      'intermediates': {'Bar_0': {'test': (2,)}}
    })
  def test_functional_apply(self):
    """nn.init_with_output / nn.apply lift a free function over a module."""
    class Foo(nn.Module):
      def setup(self):
        self.a = nn.Dense(3)
        self.b = nn.Dense(1)
    # A free function taking the module instance as its first argument.
    def f(foo, x):
      x = foo.a(x)
      return foo.b(x)
    foo = Foo()
    x = jnp.ones((4,))
    f_init = nn.init_with_output(f, foo)
    f_apply = nn.apply(f, foo)
    y1, variables = f_init(random.PRNGKey(0), x)
    y2 = f_apply(variables, x)
    self.assertEqual(y1, y2)
  def test_bind(self):
    """module.bind(variables) yields an interactively-usable bound module."""
    class Foo(nn.Module):
      def setup(self):
        self.a = nn.Dense(3)
        self.b = nn.Dense(1)
    def f(foo, x):
      x = foo.a(x)
      return foo.b(x)
    foo = Foo()
    x = jnp.ones((4,))
    f_init = nn.init_with_output(f, foo)
    y1, variables = f_init(random.PRNGKey(0), x)
    # Calling f directly on the bound module must match the functional path.
    y2 = f(foo.bind(variables), x)
    self.assertEqual(y1, y2)
  def test_bind_stateful(self):
    """bind(..., mutable=...) tracks state updates like apply's mutable arg."""
    class Foo(nn.Module):
      def setup(self):
        self.a = nn.Dense(3)
        self.bn = nn.BatchNorm()
        self.b = nn.Dense(1)
    def f(foo, x):
      x = foo.a(x)
      # use_running_average=False makes BatchNorm update batch_stats.
      x = foo.bn(x, use_running_average=False)
      return foo.b(x)
    foo = Foo()
    x = jnp.ones((4,))
    f_init = nn.init_with_output(f, foo)
    y1, variables = f_init(random.PRNGKey(0), x)
    foo_b = foo.bind(variables, mutable='batch_stats')
    y2 = f(foo_b, x)
    y3, new_state = nn.apply(f, foo, mutable='batch_stats')(variables, x)
    self.assertEqual(y1, y2)
    self.assertEqual(y2, y3)
    # The bound module's accumulated state must match apply's returned state.
    bs_1 = new_state['batch_stats']
    bs_2 = foo_b.variables['batch_stats']
    for x, y in zip(jax.tree_leaves(bs_1), jax.tree_leaves(bs_2)):
      np.testing.assert_allclose(x, y)
def test_passing_mutable_variables(self):
class Foo(nn.Module):
@nn.compact
def __call__(self, x):
return nn.Dense(2)(x)
x = jnp.ones((3,))
variables = Foo().init(random.PRNGKey(0), x)
variables = variables.unfreeze()
y = Foo().apply(variables, x)
self.assertEqual(y.shape, (2,))
  def test_super_compact(self):
    """super().__call__() inside a compact method shares one autoname counter."""
    class Foo(nn.Module):
      @nn.compact
      def __call__(self, x):
        return nn.Dense(4)(x)
    class Bar(Foo):
      @nn.compact
      def __call__(self, x):
        y = super().__call__(x)
        return nn.Dense(3)(y)
    k = random.PRNGKey(0)
    x = jnp.ones((4, 7))
    variables = Bar().init(k, x)
    shapes = jax.tree_map(np.shape, variables['params'])
    # Parent's Dense is Dense_0, child's is Dense_1 — a single counter.
    self.assertEqual(shapes, {
      'Dense_0': {'kernel': (7, 4), 'bias': (4,)},
      'Dense_1': {'kernel': (4, 3), 'bias': (3,)},
    })
    y = Bar().apply(variables, x)
    self.assertEqual(y.shape, (4, 3))
  def test_super_setup(self):
    """A subclass setup may extend the parent's via super().setup()."""
    class Foo(nn.Module):
      def setup(self):
        self.a = nn.Dense(4)
    class Bar(Foo):
      def setup(self):
        super().setup()
        self.b = nn.Dense(3)
      def __call__(self, x):
        y = self.a(x)
        return self.b(y)
    k = random.PRNGKey(0)
    x = jnp.ones((4, 7))
    variables = Bar().init(k, x)
    y = Bar().apply(variables, x)
    self.assertEqual(y.shape, (4, 3))
def test_freeze_attr(self):
class Foo(NamedTuple):
a: int
b: int
self.assertEqual(nn.module._freeze_attr([1, 2]), (1, 2))
xs = nn.module._freeze_attr(Foo(1, 2))
self.assertEqual(xs, (1, 2))
self.assertEqual(type(xs), Foo) # equality test for NamedTuple doesn't check class!
  def test_generic_multiple_inheritance(self):
    """Subscripted generic Modules are legal as annotations (either MRO order)."""
    T = TypeVar('T')
    class MyComponent(nn.Module, Generic[T]):
      pass
    class MyModule(nn.Module):
      submodule: MyComponent[jnp.ndarray]
    class MyComponent2(Generic[T], nn.Module):
      pass
    class MyModule2(nn.Module):
      submodule: MyComponent2[jnp.ndarray]
  # NOTE(review): "equivalance" is a typo for "equivalence"; renaming would
  # change the test id that runners/filters reference, so it is kept as-is.
  def test_named_call_rng_equivalance(self):
    """Initialization must produce identical params with named_call on or off."""
    model = nn.Dense(1, use_bias=False)
    with override_named_call(False):
      param = model.init(random.PRNGKey(0), np.ones((1, 1)))["params"]["kernel"]
    with override_named_call(True):
      param_2 = model.init(random.PRNGKey(0), np.ones((1, 1)))["params"]["kernel"]
    self.assertEqual(param, param_2)
def test_rng_reuse_after_rewind(self):
class C(nn.Module):
@nn.compact
def __call__(self):
# Some module that has dropouts in it, in general,
# it does more than just dropout!
return self.make_rng('dropout')
class A(nn.Module):
@nn.compact
def __call__(self):
# Some module that has dropouts in it, in general,
# it does more than just dropout!
return C()()
class B(nn.Module):
@nn.compact
def __call__(self):
a = A()
x0 = a()
x1 = a()
return jnp.alltrue(x0 == x1)
k = random.PRNGKey(0)
rng_equals = B().apply({}, rngs={'dropout': k})
self.assertFalse(rng_equals)
  def test_module_get_put_has_variable(self):
    """put/has/get_variable work in compact, non-compact, and setup contexts."""
    class A(nn.Module):
      @nn.compact
      def __call__(self, x):
        self.put_variable('test_col', 'a', x)
        assert self.has_variable('test_col', 'a')
        return self.get_variable('test_col', 'a')
    class B(nn.Module):
      def __call__(self, x):
        self.put_variable('test_col', 'a', x)
        assert self.has_variable('test_col', 'a')
        return self.get_variable('test_col', 'a')
    class C(nn.Module):
      def setup(self):
        self.put_variable('test_col', 'a', jnp.ones(2,))
        assert self.has_variable('test_col', 'a')
      def __call__(self):
        return self.get_variable('test_col', 'a')
    key = random.PRNGKey(0)
    x = jnp.ones((2,))
    y, vs = A().apply({}, x, mutable=['test_col'])
    np.testing.assert_array_equal(x, y)
    np.testing.assert_array_equal(x, vs['test_col']['a'])
    y, vs = B().apply({}, x, mutable=['test_col'])
    np.testing.assert_array_equal(x, y)
    np.testing.assert_array_equal(x, vs['test_col']['a'])
    y, vs = C().apply({}, mutable=['test_col'])
    np.testing.assert_array_equal(y, jnp.ones((2,)))
    np.testing.assert_array_equal(y, vs['test_col']['a'])
  def test_generic_module(self):
    """Instantiating a subscripted generic Module must not crash setup."""
    # See https://github.com/google/flax/issues/1899
    T = TypeVar('T')
    class C(nn.Module, Generic[T]):
      def f(self, t: T) -> T:
        return t
    class D(nn.Module):
      def setup(self):
        # The instantiation itself is the test; the value is unused.
        c = C[Any]()
      def __call__(self) -> None:
        pass
    rngs = {}
    D().init(rngs)
  def test_modifying_attribs_in_post_init(self):
    """__post_init__ may add new attributes and override declared fields."""
    class Foo(nn.Module):
      love: int = 99
      def __post_init__(self):
        # Adding a brand-new attribute before the Module machinery runs.
        self.hate = 100 - self.love
        super().__post_init__()
    foo = Foo()
    self.assertEqual(foo.love, 99)
    self.assertEqual(foo.hate, 1)
    class Bar(nn.Module):
      love: int = 99
      def __post_init__(self):
        # Overriding a declared dataclass field's value.
        self.love = 101
        super().__post_init__()
    bar = Bar()
    self.assertEqual(bar.love, 101)
def test_has_rng(self):
class Foo(nn.Module):
def __call__(self):
return self.has_rng('bar')
foo = Foo()
with self.assertRaisesRegex(ValueError, "RNGs.*unbound module"):
foo()
k = random.PRNGKey(0)
self.assertTrue(foo.apply({}, rngs={'bar': k}))
self.assertFalse(foo.apply({}, rngs={'baz': k}))
# Run the test suite when executed as a script.
if __name__ == '__main__':
  absltest.main()
| |
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio/grpc_core_dependencies.py.template`!!!
CORE_SOURCE_FILES = [
'src/core/lib/profiling/basic_timers.c',
'src/core/lib/profiling/stap_timers.c',
'src/core/lib/support/alloc.c',
'src/core/lib/support/arena.c',
'src/core/lib/support/atm.c',
'src/core/lib/support/avl.c',
'src/core/lib/support/backoff.c',
'src/core/lib/support/cmdline.c',
'src/core/lib/support/cpu_iphone.c',
'src/core/lib/support/cpu_linux.c',
'src/core/lib/support/cpu_posix.c',
'src/core/lib/support/cpu_windows.c',
'src/core/lib/support/env_linux.c',
'src/core/lib/support/env_posix.c',
'src/core/lib/support/env_windows.c',
'src/core/lib/support/histogram.c',
'src/core/lib/support/host_port.c',
'src/core/lib/support/log.c',
'src/core/lib/support/log_android.c',
'src/core/lib/support/log_linux.c',
'src/core/lib/support/log_posix.c',
'src/core/lib/support/log_windows.c',
'src/core/lib/support/mpscq.c',
'src/core/lib/support/murmur_hash.c',
'src/core/lib/support/stack_lockfree.c',
'src/core/lib/support/string.c',
'src/core/lib/support/string_posix.c',
'src/core/lib/support/string_util_windows.c',
'src/core/lib/support/string_windows.c',
'src/core/lib/support/subprocess_posix.c',
'src/core/lib/support/subprocess_windows.c',
'src/core/lib/support/sync.c',
'src/core/lib/support/sync_posix.c',
'src/core/lib/support/sync_windows.c',
'src/core/lib/support/thd.c',
'src/core/lib/support/thd_posix.c',
'src/core/lib/support/thd_windows.c',
'src/core/lib/support/time.c',
'src/core/lib/support/time_posix.c',
'src/core/lib/support/time_precise.c',
'src/core/lib/support/time_windows.c',
'src/core/lib/support/tls_pthread.c',
'src/core/lib/support/tmpfile_msys.c',
'src/core/lib/support/tmpfile_posix.c',
'src/core/lib/support/tmpfile_windows.c',
'src/core/lib/support/wrap_memcpy.c',
'src/core/lib/surface/init.c',
'src/core/lib/channel/channel_args.c',
'src/core/lib/channel/channel_stack.c',
'src/core/lib/channel/channel_stack_builder.c',
'src/core/lib/channel/compress_filter.c',
'src/core/lib/channel/connected_channel.c',
'src/core/lib/channel/deadline_filter.c',
'src/core/lib/channel/handshaker.c',
'src/core/lib/channel/handshaker_factory.c',
'src/core/lib/channel/handshaker_registry.c',
'src/core/lib/channel/http_client_filter.c',
'src/core/lib/channel/http_server_filter.c',
'src/core/lib/channel/max_age_filter.c',
'src/core/lib/channel/message_size_filter.c',
'src/core/lib/compression/compression.c',
'src/core/lib/compression/message_compress.c',
'src/core/lib/debug/trace.c',
'src/core/lib/http/format_request.c',
'src/core/lib/http/httpcli.c',
'src/core/lib/http/parser.c',
'src/core/lib/iomgr/closure.c',
'src/core/lib/iomgr/combiner.c',
'src/core/lib/iomgr/endpoint.c',
'src/core/lib/iomgr/endpoint_pair_posix.c',
'src/core/lib/iomgr/endpoint_pair_uv.c',
'src/core/lib/iomgr/endpoint_pair_windows.c',
'src/core/lib/iomgr/error.c',
'src/core/lib/iomgr/ev_epoll_linux.c',
'src/core/lib/iomgr/ev_poll_posix.c',
'src/core/lib/iomgr/ev_posix.c',
'src/core/lib/iomgr/exec_ctx.c',
'src/core/lib/iomgr/executor.c',
'src/core/lib/iomgr/iocp_windows.c',
'src/core/lib/iomgr/iomgr.c',
'src/core/lib/iomgr/iomgr_posix.c',
'src/core/lib/iomgr/iomgr_uv.c',
'src/core/lib/iomgr/iomgr_windows.c',
'src/core/lib/iomgr/load_file.c',
'src/core/lib/iomgr/network_status_tracker.c',
'src/core/lib/iomgr/polling_entity.c',
'src/core/lib/iomgr/pollset_set_uv.c',
'src/core/lib/iomgr/pollset_set_windows.c',
'src/core/lib/iomgr/pollset_uv.c',
'src/core/lib/iomgr/pollset_windows.c',
'src/core/lib/iomgr/resolve_address_posix.c',
'src/core/lib/iomgr/resolve_address_uv.c',
'src/core/lib/iomgr/resolve_address_windows.c',
'src/core/lib/iomgr/resource_quota.c',
'src/core/lib/iomgr/sockaddr_utils.c',
'src/core/lib/iomgr/socket_factory_posix.c',
'src/core/lib/iomgr/socket_mutator.c',
'src/core/lib/iomgr/socket_utils_common_posix.c',
'src/core/lib/iomgr/socket_utils_linux.c',
'src/core/lib/iomgr/socket_utils_posix.c',
'src/core/lib/iomgr/socket_utils_uv.c',
'src/core/lib/iomgr/socket_utils_windows.c',
'src/core/lib/iomgr/socket_windows.c',
'src/core/lib/iomgr/tcp_client_posix.c',
'src/core/lib/iomgr/tcp_client_uv.c',
'src/core/lib/iomgr/tcp_client_windows.c',
'src/core/lib/iomgr/tcp_posix.c',
'src/core/lib/iomgr/tcp_server_posix.c',
'src/core/lib/iomgr/tcp_server_utils_posix_common.c',
'src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.c',
'src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.c',
'src/core/lib/iomgr/tcp_server_uv.c',
'src/core/lib/iomgr/tcp_server_windows.c',
'src/core/lib/iomgr/tcp_uv.c',
'src/core/lib/iomgr/tcp_windows.c',
'src/core/lib/iomgr/time_averaged_stats.c',
'src/core/lib/iomgr/timer_generic.c',
'src/core/lib/iomgr/timer_heap.c',
'src/core/lib/iomgr/timer_uv.c',
'src/core/lib/iomgr/udp_server.c',
'src/core/lib/iomgr/unix_sockets_posix.c',
'src/core/lib/iomgr/unix_sockets_posix_noop.c',
'src/core/lib/iomgr/wakeup_fd_cv.c',
'src/core/lib/iomgr/wakeup_fd_eventfd.c',
'src/core/lib/iomgr/wakeup_fd_nospecial.c',
'src/core/lib/iomgr/wakeup_fd_pipe.c',
'src/core/lib/iomgr/wakeup_fd_posix.c',
'src/core/lib/iomgr/workqueue_uv.c',
'src/core/lib/iomgr/workqueue_windows.c',
'src/core/lib/json/json.c',
'src/core/lib/json/json_reader.c',
'src/core/lib/json/json_string.c',
'src/core/lib/json/json_writer.c',
'src/core/lib/slice/b64.c',
'src/core/lib/slice/percent_encoding.c',
'src/core/lib/slice/slice.c',
'src/core/lib/slice/slice_buffer.c',
'src/core/lib/slice/slice_hash_table.c',
'src/core/lib/slice/slice_intern.c',
'src/core/lib/slice/slice_string_helpers.c',
'src/core/lib/surface/alarm.c',
'src/core/lib/surface/api_trace.c',
'src/core/lib/surface/byte_buffer.c',
'src/core/lib/surface/byte_buffer_reader.c',
'src/core/lib/surface/call.c',
'src/core/lib/surface/call_details.c',
'src/core/lib/surface/call_log_batch.c',
'src/core/lib/surface/channel.c',
'src/core/lib/surface/channel_init.c',
'src/core/lib/surface/channel_ping.c',
'src/core/lib/surface/channel_stack_type.c',
'src/core/lib/surface/completion_queue.c',
'src/core/lib/surface/completion_queue_factory.c',
'src/core/lib/surface/event_string.c',
'src/core/lib/surface/lame_client.c',
'src/core/lib/surface/metadata_array.c',
'src/core/lib/surface/server.c',
'src/core/lib/surface/validate_metadata.c',
'src/core/lib/surface/version.c',
'src/core/lib/transport/bdp_estimator.c',
'src/core/lib/transport/byte_stream.c',
'src/core/lib/transport/connectivity_state.c',
'src/core/lib/transport/error_utils.c',
'src/core/lib/transport/metadata.c',
'src/core/lib/transport/metadata_batch.c',
'src/core/lib/transport/pid_controller.c',
'src/core/lib/transport/service_config.c',
'src/core/lib/transport/static_metadata.c',
'src/core/lib/transport/status_conversion.c',
'src/core/lib/transport/timeout_encoding.c',
'src/core/lib/transport/transport.c',
'src/core/lib/transport/transport_op_string.c',
'src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c',
'src/core/ext/transport/chttp2/transport/bin_decoder.c',
'src/core/ext/transport/chttp2/transport/bin_encoder.c',
'src/core/ext/transport/chttp2/transport/chttp2_plugin.c',
'src/core/ext/transport/chttp2/transport/chttp2_transport.c',
'src/core/ext/transport/chttp2/transport/frame_data.c',
'src/core/ext/transport/chttp2/transport/frame_goaway.c',
'src/core/ext/transport/chttp2/transport/frame_ping.c',
'src/core/ext/transport/chttp2/transport/frame_rst_stream.c',
'src/core/ext/transport/chttp2/transport/frame_settings.c',
'src/core/ext/transport/chttp2/transport/frame_window_update.c',
'src/core/ext/transport/chttp2/transport/hpack_encoder.c',
'src/core/ext/transport/chttp2/transport/hpack_parser.c',
'src/core/ext/transport/chttp2/transport/hpack_table.c',
'src/core/ext/transport/chttp2/transport/huffsyms.c',
'src/core/ext/transport/chttp2/transport/incoming_metadata.c',
'src/core/ext/transport/chttp2/transport/parsing.c',
'src/core/ext/transport/chttp2/transport/stream_lists.c',
'src/core/ext/transport/chttp2/transport/stream_map.c',
'src/core/ext/transport/chttp2/transport/varint.c',
'src/core/ext/transport/chttp2/transport/writing.c',
'src/core/ext/transport/chttp2/alpn/alpn.c',
'src/core/lib/http/httpcli_security_connector.c',
'src/core/lib/security/context/security_context.c',
'src/core/lib/security/credentials/composite/composite_credentials.c',
'src/core/lib/security/credentials/credentials.c',
'src/core/lib/security/credentials/credentials_metadata.c',
'src/core/lib/security/credentials/fake/fake_credentials.c',
'src/core/lib/security/credentials/google_default/credentials_generic.c',
'src/core/lib/security/credentials/google_default/google_default_credentials.c',
'src/core/lib/security/credentials/iam/iam_credentials.c',
'src/core/lib/security/credentials/jwt/json_token.c',
'src/core/lib/security/credentials/jwt/jwt_credentials.c',
'src/core/lib/security/credentials/jwt/jwt_verifier.c',
'src/core/lib/security/credentials/oauth2/oauth2_credentials.c',
'src/core/lib/security/credentials/plugin/plugin_credentials.c',
'src/core/lib/security/credentials/ssl/ssl_credentials.c',
'src/core/lib/security/transport/client_auth_filter.c',
'src/core/lib/security/transport/lb_targets_info.c',
'src/core/lib/security/transport/secure_endpoint.c',
'src/core/lib/security/transport/security_connector.c',
'src/core/lib/security/transport/security_handshaker.c',
'src/core/lib/security/transport/server_auth_filter.c',
'src/core/lib/security/transport/tsi_error.c',
'src/core/lib/security/util/json_util.c',
'src/core/lib/surface/init_secure.c',
'src/core/tsi/fake_transport_security.c',
'src/core/tsi/ssl_transport_security.c',
'src/core/tsi/transport_security.c',
'src/core/ext/transport/chttp2/server/chttp2_server.c',
'src/core/ext/transport/chttp2/client/secure/secure_channel_create.c',
'src/core/ext/client_channel/channel_connectivity.c',
'src/core/ext/client_channel/client_channel.c',
'src/core/ext/client_channel/client_channel_factory.c',
'src/core/ext/client_channel/client_channel_plugin.c',
'src/core/ext/client_channel/connector.c',
'src/core/ext/client_channel/http_connect_handshaker.c',
'src/core/ext/client_channel/http_proxy.c',
'src/core/ext/client_channel/lb_policy.c',
'src/core/ext/client_channel/lb_policy_factory.c',
'src/core/ext/client_channel/lb_policy_registry.c',
'src/core/ext/client_channel/parse_address.c',
'src/core/ext/client_channel/proxy_mapper.c',
'src/core/ext/client_channel/proxy_mapper_registry.c',
'src/core/ext/client_channel/resolver.c',
'src/core/ext/client_channel/resolver_factory.c',
'src/core/ext/client_channel/resolver_registry.c',
'src/core/ext/client_channel/retry_throttle.c',
'src/core/ext/client_channel/subchannel.c',
'src/core/ext/client_channel/subchannel_index.c',
'src/core/ext/client_channel/uri_parser.c',
'src/core/ext/transport/chttp2/client/chttp2_connector.c',
'src/core/ext/transport/chttp2/server/insecure/server_chttp2.c',
'src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c',
'src/core/ext/transport/chttp2/client/insecure/channel_create.c',
'src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c',
'src/core/ext/lb_policy/grpclb/grpclb.c',
'src/core/ext/lb_policy/grpclb/grpclb_channel_secure.c',
'src/core/ext/lb_policy/grpclb/load_balancer_api.c',
'src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c',
'third_party/nanopb/pb_common.c',
'third_party/nanopb/pb_decode.c',
'third_party/nanopb/pb_encode.c',
'src/core/ext/lb_policy/pick_first/pick_first.c',
'src/core/ext/lb_policy/round_robin/round_robin.c',
'src/core/ext/resolver/dns/c_ares/dns_resolver_ares.c',
'src/core/ext/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c',
'src/core/ext/resolver/dns/c_ares/grpc_ares_wrapper.c',
'src/core/ext/resolver/dns/native/dns_resolver.c',
'src/core/ext/resolver/sockaddr/sockaddr_resolver.c',
'src/core/ext/load_reporting/load_reporting.c',
'src/core/ext/load_reporting/load_reporting_filter.c',
'src/core/ext/census/base_resources.c',
'src/core/ext/census/context.c',
'src/core/ext/census/gen/census.pb.c',
'src/core/ext/census/gen/trace_context.pb.c',
'src/core/ext/census/grpc_context.c',
'src/core/ext/census/grpc_filter.c',
'src/core/ext/census/grpc_plugin.c',
'src/core/ext/census/initialize.c',
'src/core/ext/census/mlog.c',
'src/core/ext/census/operation.c',
'src/core/ext/census/placeholders.c',
'src/core/ext/census/resource.c',
'src/core/ext/census/trace_context.c',
'src/core/ext/census/tracing.c',
'src/core/plugin_registry/grpc_plugin_registry.c',
'src/boringssl/err_data.c',
'third_party/boringssl/crypto/aes/aes.c',
'third_party/boringssl/crypto/aes/mode_wrappers.c',
'third_party/boringssl/crypto/asn1/a_bitstr.c',
'third_party/boringssl/crypto/asn1/a_bool.c',
'third_party/boringssl/crypto/asn1/a_d2i_fp.c',
'third_party/boringssl/crypto/asn1/a_dup.c',
'third_party/boringssl/crypto/asn1/a_enum.c',
'third_party/boringssl/crypto/asn1/a_gentm.c',
'third_party/boringssl/crypto/asn1/a_i2d_fp.c',
'third_party/boringssl/crypto/asn1/a_int.c',
'third_party/boringssl/crypto/asn1/a_mbstr.c',
'third_party/boringssl/crypto/asn1/a_object.c',
'third_party/boringssl/crypto/asn1/a_octet.c',
'third_party/boringssl/crypto/asn1/a_print.c',
'third_party/boringssl/crypto/asn1/a_strnid.c',
'third_party/boringssl/crypto/asn1/a_time.c',
'third_party/boringssl/crypto/asn1/a_type.c',
'third_party/boringssl/crypto/asn1/a_utctm.c',
'third_party/boringssl/crypto/asn1/a_utf8.c',
'third_party/boringssl/crypto/asn1/asn1_lib.c',
'third_party/boringssl/crypto/asn1/asn1_par.c',
'third_party/boringssl/crypto/asn1/asn_pack.c',
'third_party/boringssl/crypto/asn1/f_enum.c',
'third_party/boringssl/crypto/asn1/f_int.c',
'third_party/boringssl/crypto/asn1/f_string.c',
'third_party/boringssl/crypto/asn1/t_bitst.c',
'third_party/boringssl/crypto/asn1/tasn_dec.c',
'third_party/boringssl/crypto/asn1/tasn_enc.c',
'third_party/boringssl/crypto/asn1/tasn_fre.c',
'third_party/boringssl/crypto/asn1/tasn_new.c',
'third_party/boringssl/crypto/asn1/tasn_typ.c',
'third_party/boringssl/crypto/asn1/tasn_utl.c',
'third_party/boringssl/crypto/asn1/x_bignum.c',
'third_party/boringssl/crypto/asn1/x_long.c',
'third_party/boringssl/crypto/base64/base64.c',
'third_party/boringssl/crypto/bio/bio.c',
'third_party/boringssl/crypto/bio/bio_mem.c',
'third_party/boringssl/crypto/bio/buffer.c',
'third_party/boringssl/crypto/bio/connect.c',
'third_party/boringssl/crypto/bio/fd.c',
'third_party/boringssl/crypto/bio/file.c',
'third_party/boringssl/crypto/bio/hexdump.c',
'third_party/boringssl/crypto/bio/pair.c',
'third_party/boringssl/crypto/bio/printf.c',
'third_party/boringssl/crypto/bio/socket.c',
'third_party/boringssl/crypto/bio/socket_helper.c',
'third_party/boringssl/crypto/bn/add.c',
'third_party/boringssl/crypto/bn/asm/x86_64-gcc.c',
'third_party/boringssl/crypto/bn/bn.c',
'third_party/boringssl/crypto/bn/bn_asn1.c',
'third_party/boringssl/crypto/bn/cmp.c',
'third_party/boringssl/crypto/bn/convert.c',
'third_party/boringssl/crypto/bn/ctx.c',
'third_party/boringssl/crypto/bn/div.c',
'third_party/boringssl/crypto/bn/exponentiation.c',
'third_party/boringssl/crypto/bn/gcd.c',
'third_party/boringssl/crypto/bn/generic.c',
'third_party/boringssl/crypto/bn/kronecker.c',
'third_party/boringssl/crypto/bn/montgomery.c',
'third_party/boringssl/crypto/bn/montgomery_inv.c',
'third_party/boringssl/crypto/bn/mul.c',
'third_party/boringssl/crypto/bn/prime.c',
'third_party/boringssl/crypto/bn/random.c',
'third_party/boringssl/crypto/bn/rsaz_exp.c',
'third_party/boringssl/crypto/bn/shift.c',
'third_party/boringssl/crypto/bn/sqrt.c',
'third_party/boringssl/crypto/buf/buf.c',
'third_party/boringssl/crypto/bytestring/asn1_compat.c',
'third_party/boringssl/crypto/bytestring/ber.c',
'third_party/boringssl/crypto/bytestring/cbb.c',
'third_party/boringssl/crypto/bytestring/cbs.c',
'third_party/boringssl/crypto/chacha/chacha.c',
'third_party/boringssl/crypto/cipher/aead.c',
'third_party/boringssl/crypto/cipher/cipher.c',
'third_party/boringssl/crypto/cipher/derive_key.c',
'third_party/boringssl/crypto/cipher/e_aes.c',
'third_party/boringssl/crypto/cipher/e_chacha20poly1305.c',
'third_party/boringssl/crypto/cipher/e_des.c',
'third_party/boringssl/crypto/cipher/e_null.c',
'third_party/boringssl/crypto/cipher/e_rc2.c',
'third_party/boringssl/crypto/cipher/e_rc4.c',
'third_party/boringssl/crypto/cipher/e_ssl3.c',
'third_party/boringssl/crypto/cipher/e_tls.c',
'third_party/boringssl/crypto/cipher/tls_cbc.c',
'third_party/boringssl/crypto/cmac/cmac.c',
'third_party/boringssl/crypto/conf/conf.c',
'third_party/boringssl/crypto/cpu-aarch64-linux.c',
'third_party/boringssl/crypto/cpu-arm-linux.c',
'third_party/boringssl/crypto/cpu-arm.c',
'third_party/boringssl/crypto/cpu-intel.c',
'third_party/boringssl/crypto/cpu-ppc64le.c',
'third_party/boringssl/crypto/crypto.c',
'third_party/boringssl/crypto/curve25519/curve25519.c',
'third_party/boringssl/crypto/curve25519/spake25519.c',
'third_party/boringssl/crypto/curve25519/x25519-x86_64.c',
'third_party/boringssl/crypto/des/des.c',
'third_party/boringssl/crypto/dh/check.c',
'third_party/boringssl/crypto/dh/dh.c',
'third_party/boringssl/crypto/dh/dh_asn1.c',
'third_party/boringssl/crypto/dh/params.c',
'third_party/boringssl/crypto/digest/digest.c',
'third_party/boringssl/crypto/digest/digests.c',
'third_party/boringssl/crypto/dsa/dsa.c',
'third_party/boringssl/crypto/dsa/dsa_asn1.c',
'third_party/boringssl/crypto/ec/ec.c',
'third_party/boringssl/crypto/ec/ec_asn1.c',
'third_party/boringssl/crypto/ec/ec_key.c',
'third_party/boringssl/crypto/ec/ec_montgomery.c',
'third_party/boringssl/crypto/ec/oct.c',
'third_party/boringssl/crypto/ec/p224-64.c',
'third_party/boringssl/crypto/ec/p256-64.c',
'third_party/boringssl/crypto/ec/p256-x86_64.c',
'third_party/boringssl/crypto/ec/simple.c',
'third_party/boringssl/crypto/ec/util-64.c',
'third_party/boringssl/crypto/ec/wnaf.c',
'third_party/boringssl/crypto/ecdh/ecdh.c',
'third_party/boringssl/crypto/ecdsa/ecdsa.c',
'third_party/boringssl/crypto/ecdsa/ecdsa_asn1.c',
'third_party/boringssl/crypto/engine/engine.c',
'third_party/boringssl/crypto/err/err.c',
'third_party/boringssl/crypto/evp/digestsign.c',
'third_party/boringssl/crypto/evp/evp.c',
'third_party/boringssl/crypto/evp/evp_asn1.c',
'third_party/boringssl/crypto/evp/evp_ctx.c',
'third_party/boringssl/crypto/evp/p_dsa_asn1.c',
'third_party/boringssl/crypto/evp/p_ec.c',
'third_party/boringssl/crypto/evp/p_ec_asn1.c',
'third_party/boringssl/crypto/evp/p_rsa.c',
'third_party/boringssl/crypto/evp/p_rsa_asn1.c',
'third_party/boringssl/crypto/evp/pbkdf.c',
'third_party/boringssl/crypto/evp/print.c',
'third_party/boringssl/crypto/evp/sign.c',
'third_party/boringssl/crypto/ex_data.c',
'third_party/boringssl/crypto/hkdf/hkdf.c',
'third_party/boringssl/crypto/hmac/hmac.c',
'third_party/boringssl/crypto/lhash/lhash.c',
'third_party/boringssl/crypto/md4/md4.c',
'third_party/boringssl/crypto/md5/md5.c',
'third_party/boringssl/crypto/mem.c',
'third_party/boringssl/crypto/modes/cbc.c',
'third_party/boringssl/crypto/modes/cfb.c',
'third_party/boringssl/crypto/modes/ctr.c',
'third_party/boringssl/crypto/modes/gcm.c',
'third_party/boringssl/crypto/modes/ofb.c',
'third_party/boringssl/crypto/newhope/error_correction.c',
'third_party/boringssl/crypto/newhope/newhope.c',
'third_party/boringssl/crypto/newhope/ntt.c',
'third_party/boringssl/crypto/newhope/poly.c',
'third_party/boringssl/crypto/newhope/precomp.c',
'third_party/boringssl/crypto/newhope/reduce.c',
'third_party/boringssl/crypto/obj/obj.c',
'third_party/boringssl/crypto/obj/obj_xref.c',
'third_party/boringssl/crypto/pem/pem_all.c',
'third_party/boringssl/crypto/pem/pem_info.c',
'third_party/boringssl/crypto/pem/pem_lib.c',
'third_party/boringssl/crypto/pem/pem_oth.c',
'third_party/boringssl/crypto/pem/pem_pk8.c',
'third_party/boringssl/crypto/pem/pem_pkey.c',
'third_party/boringssl/crypto/pem/pem_x509.c',
'third_party/boringssl/crypto/pem/pem_xaux.c',
'third_party/boringssl/crypto/pkcs8/p5_pbe.c',
'third_party/boringssl/crypto/pkcs8/p5_pbev2.c',
'third_party/boringssl/crypto/pkcs8/p8_pkey.c',
'third_party/boringssl/crypto/pkcs8/pkcs8.c',
'third_party/boringssl/crypto/poly1305/poly1305.c',
'third_party/boringssl/crypto/poly1305/poly1305_arm.c',
'third_party/boringssl/crypto/poly1305/poly1305_vec.c',
'third_party/boringssl/crypto/rand/deterministic.c',
'third_party/boringssl/crypto/rand/rand.c',
'third_party/boringssl/crypto/rand/urandom.c',
'third_party/boringssl/crypto/rand/windows.c',
'third_party/boringssl/crypto/rc4/rc4.c',
'third_party/boringssl/crypto/refcount_c11.c',
'third_party/boringssl/crypto/refcount_lock.c',
'third_party/boringssl/crypto/rsa/blinding.c',
'third_party/boringssl/crypto/rsa/padding.c',
'third_party/boringssl/crypto/rsa/rsa.c',
'third_party/boringssl/crypto/rsa/rsa_asn1.c',
'third_party/boringssl/crypto/rsa/rsa_impl.c',
'third_party/boringssl/crypto/sha/sha1.c',
'third_party/boringssl/crypto/sha/sha256.c',
'third_party/boringssl/crypto/sha/sha512.c',
'third_party/boringssl/crypto/stack/stack.c',
'third_party/boringssl/crypto/thread.c',
'third_party/boringssl/crypto/thread_none.c',
'third_party/boringssl/crypto/thread_pthread.c',
'third_party/boringssl/crypto/thread_win.c',
'third_party/boringssl/crypto/time_support.c',
'third_party/boringssl/crypto/x509/a_digest.c',
'third_party/boringssl/crypto/x509/a_sign.c',
'third_party/boringssl/crypto/x509/a_strex.c',
'third_party/boringssl/crypto/x509/a_verify.c',
'third_party/boringssl/crypto/x509/algorithm.c',
'third_party/boringssl/crypto/x509/asn1_gen.c',
'third_party/boringssl/crypto/x509/by_dir.c',
'third_party/boringssl/crypto/x509/by_file.c',
'third_party/boringssl/crypto/x509/i2d_pr.c',
'third_party/boringssl/crypto/x509/pkcs7.c',
'third_party/boringssl/crypto/x509/rsa_pss.c',
'third_party/boringssl/crypto/x509/t_crl.c',
'third_party/boringssl/crypto/x509/t_req.c',
'third_party/boringssl/crypto/x509/t_x509.c',
'third_party/boringssl/crypto/x509/t_x509a.c',
'third_party/boringssl/crypto/x509/x509.c',
'third_party/boringssl/crypto/x509/x509_att.c',
'third_party/boringssl/crypto/x509/x509_cmp.c',
'third_party/boringssl/crypto/x509/x509_d2.c',
'third_party/boringssl/crypto/x509/x509_def.c',
'third_party/boringssl/crypto/x509/x509_ext.c',
'third_party/boringssl/crypto/x509/x509_lu.c',
'third_party/boringssl/crypto/x509/x509_obj.c',
'third_party/boringssl/crypto/x509/x509_r2x.c',
'third_party/boringssl/crypto/x509/x509_req.c',
'third_party/boringssl/crypto/x509/x509_set.c',
'third_party/boringssl/crypto/x509/x509_trs.c',
'third_party/boringssl/crypto/x509/x509_txt.c',
'third_party/boringssl/crypto/x509/x509_v3.c',
'third_party/boringssl/crypto/x509/x509_vfy.c',
'third_party/boringssl/crypto/x509/x509_vpm.c',
'third_party/boringssl/crypto/x509/x509cset.c',
'third_party/boringssl/crypto/x509/x509name.c',
'third_party/boringssl/crypto/x509/x509rset.c',
'third_party/boringssl/crypto/x509/x509spki.c',
'third_party/boringssl/crypto/x509/x509type.c',
'third_party/boringssl/crypto/x509/x_algor.c',
'third_party/boringssl/crypto/x509/x_all.c',
'third_party/boringssl/crypto/x509/x_attrib.c',
'third_party/boringssl/crypto/x509/x_crl.c',
'third_party/boringssl/crypto/x509/x_exten.c',
'third_party/boringssl/crypto/x509/x_info.c',
'third_party/boringssl/crypto/x509/x_name.c',
'third_party/boringssl/crypto/x509/x_pkey.c',
'third_party/boringssl/crypto/x509/x_pubkey.c',
'third_party/boringssl/crypto/x509/x_req.c',
'third_party/boringssl/crypto/x509/x_sig.c',
'third_party/boringssl/crypto/x509/x_spki.c',
'third_party/boringssl/crypto/x509/x_val.c',
'third_party/boringssl/crypto/x509/x_x509.c',
'third_party/boringssl/crypto/x509/x_x509a.c',
'third_party/boringssl/crypto/x509v3/pcy_cache.c',
'third_party/boringssl/crypto/x509v3/pcy_data.c',
'third_party/boringssl/crypto/x509v3/pcy_lib.c',
'third_party/boringssl/crypto/x509v3/pcy_map.c',
'third_party/boringssl/crypto/x509v3/pcy_node.c',
'third_party/boringssl/crypto/x509v3/pcy_tree.c',
'third_party/boringssl/crypto/x509v3/v3_akey.c',
'third_party/boringssl/crypto/x509v3/v3_akeya.c',
'third_party/boringssl/crypto/x509v3/v3_alt.c',
'third_party/boringssl/crypto/x509v3/v3_bcons.c',
'third_party/boringssl/crypto/x509v3/v3_bitst.c',
'third_party/boringssl/crypto/x509v3/v3_conf.c',
'third_party/boringssl/crypto/x509v3/v3_cpols.c',
'third_party/boringssl/crypto/x509v3/v3_crld.c',
'third_party/boringssl/crypto/x509v3/v3_enum.c',
'third_party/boringssl/crypto/x509v3/v3_extku.c',
'third_party/boringssl/crypto/x509v3/v3_genn.c',
'third_party/boringssl/crypto/x509v3/v3_ia5.c',
'third_party/boringssl/crypto/x509v3/v3_info.c',
'third_party/boringssl/crypto/x509v3/v3_int.c',
'third_party/boringssl/crypto/x509v3/v3_lib.c',
'third_party/boringssl/crypto/x509v3/v3_ncons.c',
'third_party/boringssl/crypto/x509v3/v3_pci.c',
'third_party/boringssl/crypto/x509v3/v3_pcia.c',
'third_party/boringssl/crypto/x509v3/v3_pcons.c',
'third_party/boringssl/crypto/x509v3/v3_pku.c',
'third_party/boringssl/crypto/x509v3/v3_pmaps.c',
'third_party/boringssl/crypto/x509v3/v3_prn.c',
'third_party/boringssl/crypto/x509v3/v3_purp.c',
'third_party/boringssl/crypto/x509v3/v3_skey.c',
'third_party/boringssl/crypto/x509v3/v3_sxnet.c',
'third_party/boringssl/crypto/x509v3/v3_utl.c',
'third_party/boringssl/ssl/custom_extensions.c',
'third_party/boringssl/ssl/d1_both.c',
'third_party/boringssl/ssl/d1_lib.c',
'third_party/boringssl/ssl/d1_pkt.c',
'third_party/boringssl/ssl/d1_srtp.c',
'third_party/boringssl/ssl/dtls_method.c',
'third_party/boringssl/ssl/dtls_record.c',
'third_party/boringssl/ssl/handshake_client.c',
'third_party/boringssl/ssl/handshake_server.c',
'third_party/boringssl/ssl/s3_both.c',
'third_party/boringssl/ssl/s3_enc.c',
'third_party/boringssl/ssl/s3_lib.c',
'third_party/boringssl/ssl/s3_pkt.c',
'third_party/boringssl/ssl/ssl_aead_ctx.c',
'third_party/boringssl/ssl/ssl_asn1.c',
'third_party/boringssl/ssl/ssl_buffer.c',
'third_party/boringssl/ssl/ssl_cert.c',
'third_party/boringssl/ssl/ssl_cipher.c',
'third_party/boringssl/ssl/ssl_ecdh.c',
'third_party/boringssl/ssl/ssl_file.c',
'third_party/boringssl/ssl/ssl_lib.c',
'third_party/boringssl/ssl/ssl_rsa.c',
'third_party/boringssl/ssl/ssl_session.c',
'third_party/boringssl/ssl/ssl_stat.c',
'third_party/boringssl/ssl/t1_enc.c',
'third_party/boringssl/ssl/t1_lib.c',
'third_party/boringssl/ssl/tls13_both.c',
'third_party/boringssl/ssl/tls13_client.c',
'third_party/boringssl/ssl/tls13_enc.c',
'third_party/boringssl/ssl/tls13_server.c',
'third_party/boringssl/ssl/tls_method.c',
'third_party/boringssl/ssl/tls_record.c',
'third_party/zlib/adler32.c',
'third_party/zlib/compress.c',
'third_party/zlib/crc32.c',
'third_party/zlib/deflate.c',
'third_party/zlib/gzclose.c',
'third_party/zlib/gzlib.c',
'third_party/zlib/gzread.c',
'third_party/zlib/gzwrite.c',
'third_party/zlib/infback.c',
'third_party/zlib/inffast.c',
'third_party/zlib/inflate.c',
'third_party/zlib/inftrees.c',
'third_party/zlib/trees.c',
'third_party/zlib/uncompr.c',
'third_party/zlib/zutil.c',
'third_party/cares/cares/ares__close_sockets.c',
'third_party/cares/cares/ares__get_hostent.c',
'third_party/cares/cares/ares__read_line.c',
'third_party/cares/cares/ares__timeval.c',
'third_party/cares/cares/ares_cancel.c',
'third_party/cares/cares/ares_create_query.c',
'third_party/cares/cares/ares_data.c',
'third_party/cares/cares/ares_destroy.c',
'third_party/cares/cares/ares_expand_name.c',
'third_party/cares/cares/ares_expand_string.c',
'third_party/cares/cares/ares_fds.c',
'third_party/cares/cares/ares_free_hostent.c',
'third_party/cares/cares/ares_free_string.c',
'third_party/cares/cares/ares_getenv.c',
'third_party/cares/cares/ares_gethostbyaddr.c',
'third_party/cares/cares/ares_gethostbyname.c',
'third_party/cares/cares/ares_getnameinfo.c',
'third_party/cares/cares/ares_getopt.c',
'third_party/cares/cares/ares_getsock.c',
'third_party/cares/cares/ares_init.c',
'third_party/cares/cares/ares_library_init.c',
'third_party/cares/cares/ares_llist.c',
'third_party/cares/cares/ares_mkquery.c',
'third_party/cares/cares/ares_nowarn.c',
'third_party/cares/cares/ares_options.c',
'third_party/cares/cares/ares_parse_a_reply.c',
'third_party/cares/cares/ares_parse_aaaa_reply.c',
'third_party/cares/cares/ares_parse_mx_reply.c',
'third_party/cares/cares/ares_parse_naptr_reply.c',
'third_party/cares/cares/ares_parse_ns_reply.c',
'third_party/cares/cares/ares_parse_ptr_reply.c',
'third_party/cares/cares/ares_parse_soa_reply.c',
'third_party/cares/cares/ares_parse_srv_reply.c',
'third_party/cares/cares/ares_parse_txt_reply.c',
'third_party/cares/cares/ares_platform.c',
'third_party/cares/cares/ares_process.c',
'third_party/cares/cares/ares_query.c',
'third_party/cares/cares/ares_search.c',
'third_party/cares/cares/ares_send.c',
'third_party/cares/cares/ares_strcasecmp.c',
'third_party/cares/cares/ares_strdup.c',
'third_party/cares/cares/ares_strerror.c',
'third_party/cares/cares/ares_timeout.c',
'third_party/cares/cares/ares_version.c',
'third_party/cares/cares/ares_writev.c',
'third_party/cares/cares/bitncmp.c',
'third_party/cares/cares/inet_net_pton.c',
'third_party/cares/cares/inet_ntop.c',
'third_party/cares/cares/windows_port.c',
]
| |
# Copyright (c) 2015 SUSE Linux GmbH. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Configuration file setup for azurectl.
usage: azurectl setup account -h | --help
azurectl setup account configure --name=<account_name> --publish-settings-file=<file>
[--subscription-id=<subscriptionid>]
[--region=<region_name> --storage-account-name=<storagename> --container-name=<containername> --create]
azurectl setup account configure --name=<account_name> --management-pem-file=<file> --management-url=<url> --subscription-id=<subscriptionid>
[--region=<region_name> --storage-account-name=<storagename> --container-name=<containername> --create]
azurectl setup account region add --region=<region_name> --storage-account-name=<storagename> --container-name=<containername>
[--name=<account_name>]
azurectl setup account list
azurectl setup account default --name=<account_name>
azurectl setup account region default --region=<region_name>
[--name=<account_name>]
azurectl setup account remove --name=<account_name>
azurectl setup account region help
azurectl setup account help
commands:
configure
create new account config file
default
set the given account as default config
help
show manual page for account command
list
list configured account and region sections. Also list
information about default config file
region add
add new region section to the config file. If specified
the given account config file is used, otherwise the default
config file
region default
set new default region in config file. If specified
the given account config file is used, otherwise the default
config file
region help
show manual page for account region subcommand
remove
remove specified account config file
options:
--container-name=<containername>
specify default container name used with the storage account
in the selected region.
--create
process storage and container configuration and create the
storage account and the container in Azure
--management-pem-file=<file>
path to the pem file associated with a management certificate enabled on
this account
--management-url=<url>
URL of the management API where this account is available
--name=<account_name>
account name used for account config file lookup
--publish-settings-file=<file>
path to the Microsoft Azure account publish settings file
--storage-account-name=<storagename>
specify default storage account name in the selected region
--subscription-id=<subscriptionid>
subscription id, if more than one subscription is included in your
publish settings file, or if a publish settings file is not used
--region=<region_name>
Name of the geographic region in Azure.
"""
import os
# project
from azurectl.commands.base import CliTask
from azurectl.logger import log
from azurectl.help import Help
from azurectl.account.setup import AccountSetup
from azurectl.utils.collector import DataCollector
from azurectl.utils.output import DataOutput
from azurectl.config.parser import Config
from azurectl.storage.container import Container
from azurectl.storage.account import StorageAccount
from azurectl.account.service import AzureAccount
from azurectl.defaults import Defaults
from azurectl.azurectl_exceptions import (
AzureAccountConfigurationError
)
class SetupAccountTask(CliTask):
    """
    Process setup config commands

    Dispatches the ``azurectl setup account`` subcommands (configure,
    list, default, remove and the ``region`` subcommands) to the
    matching private handler.  ``self.command_args`` / ``self.global_args``
    are the parsed docopt arguments provided by CliTask.
    """
    def process(self):
        # Entry point invoked by the CLI framework.
        self.manual = Help()
        if self.__help():
            # help page was shown; nothing else to do
            return
        self.result = DataCollector()
        self.out = DataOutput(
            self.result,
            self.global_args['--output-format'],
            self.global_args['--output-style']
        )
        if self.command_args['list']:
            self.__list()
        elif self.command_args['default'] and not self.command_args['region']:
            # plain 'default' (not 'region default'): switch the default
            # account configuration file
            self.__default()
        else:
            # all remaining subcommands operate on one account's setup
            self.__load_account_setup(
                self.command_args['--name']
            )
            if self.command_args['remove']:
                self.__remove()
            elif self.command_args['configure']:
                self.__configure_account()
            elif self.command_args['region'] and self.command_args['add']:
                self.__region_add()
            elif self.command_args['region'] and self.command_args['default']:
                self.__set_region_default()
    def __help(self):
        # Show the manual page requested via 'help' / 'region help'.
        # Returns the Help instance when a page was shown (truthy), or
        # False so process() continues with normal command handling.
        if self.command_args['region'] and self.command_args['help']:
            self.manual.show('azurectl::setup::account::region')
        elif self.command_args['help']:
            self.manual.show('azurectl::setup::account')
        else:
            return False
        return self.manual
    def __load_account_setup(self, for_account=None):
        # Bind self.setup to the config file of the named account, or to
        # the default config file when for_account is None.
        self.setup = AccountSetup(
            Config.get_config_file(account_name=for_account)
        )
    def __default(self):
        # Make the named account's config file the default configuration.
        Config.set_default_config_file(
            account_name=self.command_args['--name']
        )
        log.info(
            'Account %s has been set as default configuration',
            self.command_args['--name']
        )
    def __check_account_existing_in_default_config(self):
        # Guard against configuring an account name that already has a
        # section in the default config file.
        default_config = None
        try:
            default_config = Config()
        except Exception:
            # ignore exception thrown if no config file exists
            pass
        if default_config:
            account_section_name = 'account:' + self.command_args['--name']
            if default_config.config.has_section(account_section_name):
                raise AzureAccountConfigurationError(
                    'Account %s already configured in file %s' % (
                        self.command_args['--name'],
                        Config.get_config_file()
                    )
                )
    def __configure_account(self):
        # Write a new account config file and, with --create, also create
        # the storage account and container in Azure.  On any failure of
        # the --create phase the just-written config file is rolled back
        # via __remove() before re-raising.
        self.__check_account_existing_in_default_config()
        self.setup.configure_account(
            self.command_args['--name'],
            self.command_args['--publish-settings-file'],
            self.command_args['--region'],
            self.command_args['--storage-account-name'],
            self.command_args['--container-name'],
            self.command_args['--subscription-id'],
            self.command_args['--management-pem-file'],
            self.command_args['--management-url']
        )
        self.setup.write()
        log.info(
            'Added account %s', self.command_args['--name']
        )
        if self.command_args['--create']:
            # re-load configuration so the newly written account is used
            self.global_args['--account'] = self.command_args['--name']
            self.load_config()
            self.account = AzureAccount(self.config)
            self.__load_account_setup(
                self.command_args['--name']
            )
            try:
                storage_account_name = \
                    self.command_args['--storage-account-name']
                storage_account = StorageAccount(self.account)
                if not storage_account.exists(storage_account_name):
                    storage_account_request_id = storage_account.create(
                        name=storage_account_name,
                        description=self.command_args['--name'],
                        label=self.command_args['--storage-account-name'],
                        account_type=Defaults.account_type_for_docopts(
                            self.command_args
                        )
                    )
                    # storage account creation is asynchronous in Azure
                    self.request_wait(storage_account_request_id)
                    log.info(
                        'Created %s storage account', storage_account_name
                    )
                else:
                    log.info(
                        'Storage account %s already exists',
                        storage_account_name
                    )
                container_name = self.command_args['--container-name']
                container = Container(self.account)
                if not container.exists(container_name):
                    container.create(container_name)
                    log.info(
                        'Created %s container', container_name
                    )
                else:
                    log.info(
                        'Container %s already exists', container_name
                    )
            except Exception as e:
                # roll back the config file written above, then surface
                # the original error wrapped in a domain exception
                self.__remove()
                raise AzureAccountConfigurationError(
                    '%s: %s' % (type(e).__name__, format(e))
                )
    def __region_add(self):
        # Add a region section to the loaded account config file.
        self.setup.add_region(
            self.command_args['--region'],
            self.command_args['--storage-account-name'],
            self.command_args['--container-name']
        )
        self.setup.write()
        log.info('Added region %s', self.command_args['--region'])
    def __set_region_default(self):
        # Mark the given region as default; only write the file when the
        # setup object reports an actual change.
        if self.setup.set_default_region(self.command_args['--region']):
            self.setup.write()
            log.info(
                'Region %s is now set as default',
                self.command_args['--region'],
            )
    def __remove(self):
        # Delete the loaded account config file.
        self.setup.remove()
        log.info('Removed account config file %s', self.setup.filename)
    def __list(self):
        # Collect account/region information from all config files and
        # display it together with the resolved default config file.
        config_files = Config.get_config_file_list()
        default_config_file = config_files[0] or '<missing>'
        if os.path.islink(default_config_file):
            # the default config file is a symlink to the real file
            default_config_file = os.readlink(default_config_file)
        self.result.add(
            'default_config_file', default_config_file
        )
        for config_file in config_files:
            if config_file and not os.path.islink(config_file):
                setup = AccountSetup(config_file)
                account_info = setup.list()
                if account_info:
                    self.result.add(config_file, account_info)
        self.out.display()
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, bleader
# Written by bleader <bleader@ratonland.org>
# Based on pkgin module written by Shaun Zinck <shaun.zinck at gmail.com>
# that was based on pacman module written by Afterburn <http://github.com/afterburn>
# that was based on apt module written by Matthew Williams <matthew@flowroute.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: pkgng
short_description: Package manager for FreeBSD >= 9.0
description:
- Manage binary packages for FreeBSD using 'pkgng' which
is available in versions after 9.0.
version_added: "1.2"
options:
name:
description:
- Name of package to install/remove.
required: true
state:
description:
- State of the package.
choices: [ 'present', 'absent' ]
required: false
default: present
cached:
description:
- Use local package base instead of fetching an updated one.
choices: [ 'yes', 'no' ]
required: false
default: no
annotation:
description:
- A comma-separated list of keyvalue-pairs of the form
C(<+/-/:><key>[=<value>]). A C(+) denotes adding an annotation, a
C(-) denotes removing an annotation, and C(:) denotes modifying an
annotation.
If setting or modifying annotations, a value must be provided.
required: false
version_added: "1.6"
pkgsite:
description:
- For pkgng versions before 1.1.4, specify packagesite to use
for downloading packages. If not specified, use settings from
C(/usr/local/etc/pkg.conf).
      - For newer pkgng versions, specify the name of a repository
configured in C(/usr/local/etc/pkg/repos).
required: false
rootdir:
description:
- For pkgng versions 1.5 and later, pkg will install all packages
within the specified root directory.
- Can not be used together with I(chroot) or I(jail) options.
required: false
chroot:
version_added: "2.1"
description:
- Pkg will chroot in the specified environment.
- Can not be used together with I(rootdir) or I(jail) options.
required: false
jail:
version_added: "2.4"
description:
- Pkg will execute in the given jail name or id.
- Can not be used together with I(chroot) or I(rootdir) options.
autoremove:
version_added: "2.2"
description:
- Remove automatically installed packages which are no longer needed.
required: false
choices: [ "yes", "no" ]
default: no
author: "bleader (@bleader)"
notes:
- When using pkgsite, be careful that already in cache packages won't be downloaded again.
'''
EXAMPLES = '''
# Install package foo
- pkgng:
name: foo
state: present
# Annotate package foo and bar
- pkgng:
name: foo,bar
annotation: '+test1=baz,-test2,:test3=foobar'
# Remove packages foo and bar
- pkgng:
name: foo,bar
state: absent
'''
import re
from ansible.module_utils.basic import AnsibleModule
def query_package(module, pkgng_path, name, dir_arg):
    """
    Return True when *name* is an installed package, False otherwise.

    Runs ``pkg <dir_arg> info -g -e <name>`` and maps a zero exit code
    to "installed".
    """
    rc, _, _ = module.run_command("%s %s info -g -e %s" % (pkgng_path, dir_arg, name))
    return rc == 0
def pkgng_older_than(module, pkgng_path, compare_version):
    """
    Return True when the installed pkg tool is older than *compare_version*.

    Parses ``pkg -v`` output (dots/underscores as separators) and compares
    component-wise; only the shared prefix of both version lists is compared.
    """
    rc, out, err = module.run_command("%s -v" % pkgng_path)
    version = [int(x) for x in re.split(r'[\._]', out)]
    limit = min(len(compare_version), len(version))
    idx = 0
    older = False
    while compare_version[idx] == version[idx]:
        idx += 1
        if idx == limit:
            # ran out of comparable components; treat as not older
            break
    else:
        # loop condition failed: first differing component decides
        older = compare_version[idx] > version[idx]
    return older
def remove_packages(module, pkgng_path, packages, dir_arg):
    """
    Remove every installed package from *packages*.

    Returns a (changed, message) tuple; fails the module if a package is
    still present after its delete command ran.
    """
    removed = 0
    # Loop package by package so a failure can name the exact package.
    for pkg in packages:
        # Skip packages that are not installed to begin with.
        if not query_package(module, pkgng_path, pkg, dir_arg):
            continue
        if not module.check_mode:
            rc, out, err = module.run_command("%s %s delete -y %s" % (pkgng_path, dir_arg, pkg))
        if not module.check_mode and query_package(module, pkgng_path, pkg, dir_arg):
            module.fail_json(msg="failed to remove %s: %s" % (pkg, out))
        removed += 1
    if removed > 0:
        return (True, "removed %s package(s)" % removed)
    return (False, "package(s) already absent")
def install_packages(module, pkgng_path, packages, cached, pkgsite, dir_arg):
    """
    Install every missing package from *packages*.

    *pkgsite* selects the package repository; *cached* skips the
    catalogue update.  Returns a (changed, message) tuple and fails the
    module when a package is still missing after its install command.
    """
    installed = 0
    # as of pkg-1.1.4, PACKAGESITE is deprecated in favor of repository
    # definitions in /usr/local/etc/pkg/repos
    old_pkgng = pkgng_older_than(module, pkgng_path, [1, 1, 4])
    if pkgsite != "":
        pkgsite = ("PACKAGESITE=%s" if old_pkgng else "-r %s") % (pkgsite)
    # This environment variable skips mid-install prompts,
    # setting them to their default values.
    batch_var = 'env BATCH=yes'
    if not module.check_mode and not cached:
        if old_pkgng:
            rc, out, err = module.run_command("%s %s update" % (pkgsite, pkgng_path))
        else:
            rc, out, err = module.run_command("%s %s update" % (pkgng_path, dir_arg))
        if rc != 0:
            module.fail_json(msg="Could not update catalogue")
    for pkg in packages:
        # Already installed packages are left untouched.
        if query_package(module, pkgng_path, pkg, dir_arg):
            continue
        if not module.check_mode:
            if old_pkgng:
                rc, out, err = module.run_command("%s %s %s install -g -U -y %s" % (batch_var, pkgsite, pkgng_path, pkg))
            else:
                rc, out, err = module.run_command("%s %s %s install %s -g -U -y %s" % (batch_var, pkgng_path, dir_arg, pkgsite, pkg))
        if not module.check_mode and not query_package(module, pkgng_path, pkg, dir_arg):
            module.fail_json(msg="failed to install %s: %s" % (pkg, out), stderr=err)
        installed += 1
    if installed > 0:
        return (True, "added %s package(s)" % (installed))
    return (False, "package(s) already present")
def annotation_query(module, pkgng_path, package, tag, dir_arg):
    """
    Return the current value of annotation *tag* on *package*.

    Parses ``pkg info -g -A`` output for a ``tag : value`` line and
    returns the value string, or False when the tag is not set.
    NOTE: the value pattern is ``\\w+``, so only word characters of the
    stored value are captured.
    """
    rc, out, err = module.run_command("%s %s info -g -A %s" % (pkgng_path, dir_arg, package))
    found = re.search(r'^\s*(?P<tag>%s)\s*:\s*(?P<value>\w+)' % tag, out, flags=re.MULTILINE)
    return found.group('value') if found else False
def annotation_add(module, pkgng_path, package, tag, value, dir_arg):
    """
    Ensure annotation *tag* is set to *value* on *package*.

    Returns True when the annotation was newly added, False when it
    already exists with the same value.  Fails the module when the
    annotation exists with a different value or the pkg command fails.
    """
    _value = annotation_query(module, pkgng_path, package, tag, dir_arg)
    if not _value:
        # Annotation does not exist, add it.
        rc, out, err = module.run_command('%s %s annotate -y -A %s %s "%s"'
                                          % (pkgng_path, dir_arg, package, tag, value))
        if rc != 0:
            module.fail_json(msg="could not annotate %s: %s"
                             % (package, out), stderr=err)
        return True
    elif _value != value:
        # Annotation exists, but value differs.
        # BUG FIX: keyword was misspelled 'mgs=' so fail_json() never
        # received the message; it must be passed as 'msg='.
        module.fail_json(
            msg="failed to annotate %s, because %s is already set to %s, but should be set to %s"
            % (package, tag, _value, value))
        return False
    else:
        # Annotation exists with the requested value, nothing to do.
        return False
def annotation_delete(module, pkgng_path, package, tag, value, dir_arg):
    """
    Remove annotation *tag* from *package* if it is set.

    Returns True when the annotation was deleted, False when it was not
    present.  *value* is accepted for a signature parallel to the other
    annotation operations but is not used.
    """
    existing = annotation_query(module, pkgng_path, package, tag, dir_arg)
    if not existing:
        return False
    rc, out, err = module.run_command('%s %s annotate -y -D %s %s'
                                      % (pkgng_path, dir_arg, package, tag))
    if rc != 0:
        module.fail_json(msg="could not delete annotation to %s: %s"
                         % (package, out), stderr=err)
    return True
def annotation_modify(module, pkgng_path, package, tag, value, dir_arg):
    """
    Change annotation *tag* on *package* to *value*.

    Returns True when the annotation was modified, False when it already
    holds *value*.  Fails the module when the tag does not exist or the
    pkg command fails.
    """
    _value = annotation_query(module, pkgng_path, package, tag, dir_arg)
    # BUG FIX: the original tested 'not value' (the requested new value)
    # instead of 'not _value' (the queried current value), so a missing
    # tag was never detected.
    if not _value:
        # No such tag
        module.fail_json(msg="could not change annotation to %s: tag %s does not exist"
                         % (package, tag))
    elif _value == value:
        # No change in value
        return False
    else:
        rc, out, err = module.run_command('%s %s annotate -y -M %s %s "%s"'
                                          % (pkgng_path, dir_arg, package, tag, value))
        if rc != 0:
            module.fail_json(msg="could not change annotation to %s: %s"
                             % (package, out), stderr=err)
        return True
def annotate_packages(module, pkgng_path, packages, annotation, dir_arg):
    """Apply a comma-separated list of annotation operations to *packages*.

    Each operation looks like "+tag=value" (add), "-tag" (delete) or
    ":tag=value" (modify).  Returns a (changed, message) tuple.
    """
    annotate_c = 0
    # BUG FIX: materialize as a list -- the previous map() object is an
    # iterator on Python 3, so it was exhausted after the first package.
    # BUG FIX: the operation character class is written [-+:] so "-" is a
    # literal; the old [\+-:] was a range from "+" to ":" that also matched
    # digits and punctuation.
    annotations = [
        re.match(r'(?P<operation>[-+:])(?P<tag>\w+)(=(?P<value>\w+))?',
                 _annotation).groupdict()
        for _annotation in re.split(r',', annotation)
    ]
    operation = {
        '+': annotation_add,
        '-': annotation_delete,
        ':': annotation_modify
    }
    for package in packages:
        for _annotation in annotations:
            # BUG FIX: dir_arg was previously not forwarded, making every
            # annotation call raise TypeError (all handlers take six args).
            if operation[_annotation['operation']](module, pkgng_path, package,
                                                   _annotation['tag'],
                                                   _annotation['value'],
                                                   dir_arg):
                annotate_c += 1
    if annotate_c > 0:
        return (True, "added %s annotations." % annotate_c)
    return (False, "changed no annotations")
def autoremove_packages(module, pkgng_path, dir_arg):
    """Autoremove packages that are no longer needed; return (changed, message)."""
    # Dry run first so check mode can report what would happen.
    rc, out, err = module.run_command("%s %s autoremove -n" % (pkgng_path, dir_arg))
    found = re.search('^Deinstallation has been requested for the following ([0-9]+) packages',
                      out, re.MULTILINE)
    count = int(found.group(1)) if found else 0
    if not count:
        return False, "no package(s) to autoremove"
    if not module.check_mode:
        rc, out, err = module.run_command("%s %s autoremove -y" % (pkgng_path, dir_arg))
    return True, "autoremoved %d package(s)" % count
def main():
    """Entry point: parse module parameters and perform the requested pkg actions."""
    module = AnsibleModule(
        argument_spec = dict(
            state = dict(default="present", choices=["present","absent"], required=False),
            name = dict(aliases=["pkg"], required=True, type='list'),
            cached = dict(default=False, type='bool'),
            annotation = dict(default="", required=False),
            pkgsite = dict(default="", required=False),
            rootdir = dict(default="", required=False, type='path'),
            chroot = dict(default="", required=False, type='path'),
            jail = dict(default="", required=False, type='str'),
            autoremove = dict(default=False, type='bool')),
        supports_check_mode = True,
        mutually_exclusive =[["rootdir", "chroot", "jail"]])
    pkgng_path = module.get_bin_path('pkg', True)
    p = module.params
    pkgs = p["name"]
    changed = False
    msgs = []
    # dir_arg selects an alternate install location for every pkg invocation;
    # rootdir/chroot/jail are mutually exclusive, so at most one branch below
    # sets it.
    dir_arg = ""
    if p["rootdir"] != "":
        # pkg only supports --rootdir from version 1.5 onwards.
        old_pkgng = pkgng_older_than(module, pkgng_path, [1, 5, 0])
        if old_pkgng:
            module.fail_json(msg="To use option 'rootdir' pkg version must be 1.5 or greater")
        else:
            dir_arg = "--rootdir %s" % (p["rootdir"])
    if p["chroot"] != "":
        dir_arg = '--chroot %s' % (p["chroot"])
    if p["jail"] != "":
        dir_arg = '--jail %s' % (p["jail"])
    if p["state"] == "present":
        _changed, _msg = install_packages(module, pkgng_path, pkgs, p["cached"], p["pkgsite"], dir_arg)
        changed = changed or _changed
        msgs.append(_msg)
    elif p["state"] == "absent":
        _changed, _msg = remove_packages(module, pkgng_path, pkgs, dir_arg)
        changed = changed or _changed
        msgs.append(_msg)
    # Autoremove and annotation handling run in addition to the main action.
    if p["autoremove"]:
        _changed, _msg = autoremove_packages(module, pkgng_path, dir_arg)
        changed = changed or _changed
        msgs.append(_msg)
    if p["annotation"]:
        _changed, _msg = annotate_packages(module, pkgng_path, pkgs, p["annotation"], dir_arg)
        changed = changed or _changed
        msgs.append(_msg)
    module.exit_json(changed=changed, msg=", ".join(msgs))
# Standard Ansible entry point: run the module only when executed directly.
if __name__ == '__main__':
    main()
| |
# pyOCD debugger
# Copyright (c) 2017-2019 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class TraceEvent(object):
    """! @brief Base trace event class.

    Carries a short description and a timestamp shared by all trace event types.
    """

    def __init__(self, desc="", ts=0):
        self._desc = desc
        self._timestamp = ts

    @property
    def timestamp(self):
        """! @brief The event's timestamp value."""
        return self._timestamp

    @timestamp.setter
    def timestamp(self, ts):
        self._timestamp = ts

    def __str__(self):
        return "[{}] {}".format(self.timestamp, self._desc)

    def __repr__(self):
        return "<{}: {}>".format(type(self).__name__, self)
class TraceOverflow(TraceEvent):
    """! @brief Trace overflow event.

    Indicates that the trace stream dropped data; carries no payload of its own.
    """

    def __init__(self, ts=0):
        super(TraceOverflow, self).__init__(desc="overflow", ts=ts)
class TraceTimestamp(TraceEvent):
    """! @brief Trace local timestamp event.

    Carries the timestamp control (TC) bits from the local timestamp packet in
    addition to the timestamp value itself.
    """

    def __init__(self, tc, ts=0):
        super(TraceTimestamp, self).__init__("timestamp", ts)
        # BUG FIX: the TC value passed by the caller was previously discarded
        # (self._tc was hard-coded to 0), so the tc property always returned 0.
        self._tc = tc

    @property
    def tc(self):
        """! @brief Timestamp control bits."""
        return self._tc

    def __str__(self):
        return "[{}] local timestamp TC={:#x} {}".format(self._timestamp, self.tc, self.timestamp)
class TraceITMEvent(TraceEvent):
    """! @brief Trace ITM stimulus port event."""

    def __init__(self, port, data, width, ts=0):
        super(TraceITMEvent, self).__init__("itm", ts)
        self._port = port
        self._data = data
        self._width = width

    @property
    def port(self):
        """! @brief Stimulus port number."""
        return self._port

    @property
    def data(self):
        """! @brief Value written to the stimulus port."""
        return self._data

    @property
    def width(self):
        """! @brief Transfer size in bytes (1, 2, or anything else formats as 4)."""
        return self._width

    def __str__(self):
        # Choose a hex field width that matches the transfer size; widths
        # other than 1 or 2 fall back to 32-bit formatting.
        narrow_formats = {1: "{:#04x}", 2: "{:#06x}"}
        d = narrow_formats.get(self.width, "{:#010x}").format(self.data)
        return "[{}] ITM: port={:d} data={}".format(self.timestamp, self.port, d)
class TraceEventCounter(TraceEvent):
    """! @brief Trace DWT counter overflow event."""

    CPI_MASK = 0x01
    EXC_MASK = 0x02
    SLEEP_MASK = 0x04
    LSU_MASK = 0x08
    FOLD_MASK = 0x10
    CYC_MASK = 0x20

    def __init__(self, counterMask, ts=0):
        super(TraceEventCounter, self).__init__("exception", ts)
        self._mask = counterMask

    @property
    def counter_mask(self):
        """! @brief Bitmask of the DWT counters that overflowed."""
        return self._mask

    def _get_event_desc(self, evt):
        # Build the description in a fixed order: Cyc, Fold, LSU, Sleep,
        # Exc, CPI.  Each set bit contributes " <name>".
        parts = []
        for mask, name in ((TraceEventCounter.CYC_MASK, "Cyc"),
                           (TraceEventCounter.FOLD_MASK, "Fold"),
                           (TraceEventCounter.LSU_MASK, "LSU"),
                           (TraceEventCounter.SLEEP_MASK, "Sleep"),
                           (TraceEventCounter.EXC_MASK, "Exc"),
                           (TraceEventCounter.CPI_MASK, "CPI")):
            if evt & mask:
                parts.append(" " + name)
        return "".join(parts)

    def __str__(self):
        return "[{}] DWT: Event:{}".format(self.timestamp, self._get_event_desc(self.counter_mask))
class TraceExceptionEvent(TraceEvent):
    """! @brief Exception trace event."""

    ENTERED = 1
    EXITED = 2
    RETURNED = 3

    ACTION_DESC = {
        ENTERED : "Entered",
        EXITED : "Exited",
        RETURNED : "Returned"
    }

    def __init__(self, exceptionNumber, exceptionName, action, ts=0):
        super(TraceExceptionEvent, self).__init__("exception", ts)
        self._number = exceptionNumber
        self._name = exceptionName
        self._action = action

    @property
    def exception_number(self):
        """! @brief Exception number."""
        return self._number

    @property
    def exception_name(self):
        """! @brief Exception name."""
        return self._name

    @property
    def action(self):
        """! @brief One of ENTERED, EXITED, or RETURNED."""
        return self._action

    def __str__(self):
        label = TraceExceptionEvent.ACTION_DESC.get(self.action, "<invalid action>")
        return "[{}] DWT: Exception #{:d} {} {}".format(
            self.timestamp, self.exception_number, label, self.exception_name)
class TracePeriodicPC(TraceEvent):
    """! @brief Periodic PC trace event."""

    def __init__(self, pc, ts=0):
        super(TracePeriodicPC, self).__init__(desc="pc", ts=ts)
        self._pc = pc

    @property
    def pc(self):
        """! @brief Sampled program counter value."""
        return self._pc

    def __str__(self):
        return "[{}] DWT: PC={:#010x}".format(self.timestamp, self.pc)
class TraceDataTraceEvent(TraceEvent):
    """! @brief DWT data trace event.

    Valid combinations:
    - PC value.
    - Bits[15:0] of a data address.
    - Data value, whether it was read or written, and the transfer size.
    - PC value, data value, whether it was read or written, and the transfer size.
    - Bits[15:0] of a data address, data value, whether it was read or written, and the transfer size.
    """

    def __init__(self, cmpn=None, pc=None, addr=None, value=None, rnw=None, sz=None, ts=0):
        super(TraceDataTraceEvent, self).__init__("data-trace", ts)
        self._cmpn = cmpn
        self._pc = pc
        self._addr = addr
        self._value = value
        self._rnw = rnw
        self._sz = sz

    @property
    def comparator(self):
        """! @brief DWT comparator number associated with this event."""
        return self._cmpn

    @property
    def pc(self):
        """! @brief PC value, or None when not present."""
        return self._pc

    @property
    def address(self):
        """! @brief Bits[15:0] of the data address, or None when not present."""
        return self._addr

    @property
    def value(self):
        """! @brief Data value, or None when not present."""
        return self._value

    @property
    def is_read(self):
        """! @brief True for a read access, False for a write."""
        return self._rnw

    @property
    def transfer_size(self):
        """! @brief Transfer size in bytes."""
        return self._sz

    def __str__(self):
        # PC takes precedence over address in the description; both may be absent.
        if self.pc is not None:
            msg = "PC={:#010x}".format(self.pc)
        elif self.address is not None:
            msg = "Addr[15:0]={:#06x}".format(self.address)
        else:
            msg = ""
        if self.value is not None:
            direction = "R" if self.is_read else "W"
            fmt = {1: " Value={}:{:#04x}", 2: " Value={}:{:#06x}"}.get(
                self.transfer_size, " Value={}:{:#010x}")
            msg += fmt.format(direction, self.value)
        return "[{}] DWT: Data Trace {}".format(self.timestamp, msg.strip())
| |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class appfwpolicy_lbvserver_binding(base_resource) :
    """ Binding class showing the lbvserver that can be bound to appfwpolicy.

    NOTE: the original generated code wrapped every accessor in a
    try/except block that only re-raised the caught exception; those
    wrappers added nothing and have been removed.
    """
    def __init__(self) :
        self._boundto = ""
        self._priority = 0
        self._activepolicy = 0
        self._gotopriorityexpression = ""
        self._labeltype = ""
        self._labelname = ""
        self._name = ""
        # Result count returned by count queries; accessed via __dict__ with
        # the literal key '___count' to bypass Python name mangling.
        self.___count = 0

    @property
    def name(self) :
        """Name of the policy.<br/>Minimum length = 1.
        """
        return self._name

    @name.setter
    def name(self, name) :
        """Name of the policy.<br/>Minimum length = 1
        """
        self._name = name

    @property
    def boundto(self) :
        """The entity name to which policy is bound.
        """
        return self._boundto

    @boundto.setter
    def boundto(self, boundto) :
        """The entity name to which policy is bound.
        """
        self._boundto = boundto

    @property
    def priority(self) :
        """Specifies the priority of the policy.
        """
        return self._priority

    @property
    def gotopriorityexpression(self) :
        """Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
        """
        return self._gotopriorityexpression

    @property
    def labeltype(self) :
        """Type of policy label invocation.<br/>Possible values = reqvserver, resvserver, policylabel.
        """
        return self._labeltype

    @property
    def activepolicy(self) :
        """Indicates whether policy is bound or not.
        """
        return self._activepolicy

    @property
    def labelname(self) :
        """Name of the label to invoke if the current policy rule evaluates to TRUE.
        """
        return self._labelname

    def _get_nitro_response(self, service, response) :
        """ converts nitro response into object and returns the object array in case of get request.
        """
        result = service.payload_formatter.string_to_resource(appfwpolicy_lbvserver_binding_response, response, self.__class__.__name__)
        if(result.errorcode != 0) :
            if (result.errorcode == 444) :
                # Session expired; clear it so the next call re-authenticates.
                service.clear_session(self)
            if result.severity :
                if (result.severity == "ERROR") :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            else :
                raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
        return result.appfwpolicy_lbvserver_binding

    def _get_object_name(self) :
        """ Returns the value of object identifier argument
        """
        if (self.name) :
            return str(self.name)
        return None

    @classmethod
    def get(cls, service, name) :
        """ Use this API to fetch appfwpolicy_lbvserver_binding resources.
        """
        obj = appfwpolicy_lbvserver_binding()
        obj.name = name
        return obj.get_resources(service)

    @classmethod
    def get_filtered(cls, service, name, filter_) :
        """ Use this API to fetch filtered set of appfwpolicy_lbvserver_binding resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        obj = appfwpolicy_lbvserver_binding()
        obj.name = name
        option_ = options()
        option_.filter = filter_
        return obj.getfiltered(service, option_)

    @classmethod
    def count(cls, service, name) :
        """ Use this API to count appfwpolicy_lbvserver_binding resources configured on NetScaler.
        """
        obj = appfwpolicy_lbvserver_binding()
        obj.name = name
        option_ = options()
        option_.count = True
        response = obj.get_resources(service, option_)
        if response :
            return response[0].__dict__['___count']
        return 0

    @classmethod
    def count_filtered(cls, service, name, filter_) :
        """ Use this API to count the filtered set of appfwpolicy_lbvserver_binding resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        obj = appfwpolicy_lbvserver_binding()
        obj.name = name
        option_ = options()
        option_.count = True
        option_.filter = filter_
        response = obj.getfiltered(service, option_)
        if response :
            return response[0].__dict__['___count']
        return 0

    class Labeltype:
        # Allowed values for the labeltype attribute.
        reqvserver = "reqvserver"
        resvserver = "resvserver"
        policylabel = "policylabel"
class appfwpolicy_lbvserver_binding_response(base_response) :
    """ Response container deserialized from appfwpolicy_lbvserver_binding GET requests. """
    def __init__(self, length=1) :
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # Pre-populate the result list with *length* empty binding objects for
        # the deserializer to fill in.  (The original assigned this attribute
        # twice; the redundant empty-list assignment was removed.)
        self.appfwpolicy_lbvserver_binding = [appfwpolicy_lbvserver_binding() for _ in range(length)]
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def _table_path_format_arguments(resource_group_name, account_name, subscription_id, table_name=None):
    """Serialize and validate the URL path arguments shared by all table requests.

    When *table_name* is None (the list operation), no "tableName" entry is added.
    """
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        "accountName": _SERIALIZER.url("account_name", account_name, 'str', max_length=24, min_length=3),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
    }
    if table_name is not None:
        path_format_arguments["tableName"] = _SERIALIZER.url("table_name", table_name, 'str', max_length=63, min_length=3, pattern=r'^[A-Za-z][A-Za-z0-9]{2,62}$')
    return path_format_arguments


def _build_table_request(method, default_url, path_format_arguments, **kwargs):
    """Assemble an HttpRequest with the api-version query parameter and JSON Accept header.

    Shared by the five build_*_request functions below, which previously each
    duplicated this body verbatim.
    """
    api_version = "2021-04-01"
    accept = "application/json"
    # Construct URL; an explicit template_url kwarg overrides the default.
    url = kwargs.pop("template_url", default_url)
    url = _format_url_section(url, **path_format_arguments)
    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method=method,
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )


def build_create_request(
    resource_group_name: str,
    account_name: str,
    subscription_id: str,
    table_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the PUT request that creates a table."""
    return _build_table_request(
        "PUT",
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/tableServices/default/tables/{tableName}',
        _table_path_format_arguments(resource_group_name, account_name, subscription_id, table_name),
        **kwargs
    )


def build_update_request(
    resource_group_name: str,
    account_name: str,
    subscription_id: str,
    table_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the PATCH request that updates a table."""
    return _build_table_request(
        "PATCH",
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/tableServices/default/tables/{tableName}',
        _table_path_format_arguments(resource_group_name, account_name, subscription_id, table_name),
        **kwargs
    )


def build_get_request(
    resource_group_name: str,
    account_name: str,
    subscription_id: str,
    table_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request that fetches a single table."""
    return _build_table_request(
        "GET",
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/tableServices/default/tables/{tableName}',
        _table_path_format_arguments(resource_group_name, account_name, subscription_id, table_name),
        **kwargs
    )


def build_delete_request(
    resource_group_name: str,
    account_name: str,
    subscription_id: str,
    table_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the DELETE request that removes a table."""
    return _build_table_request(
        "DELETE",
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/tableServices/default/tables/{tableName}',
        _table_path_format_arguments(resource_group_name, account_name, subscription_id, table_name),
        **kwargs
    )


def build_list_request(
    resource_group_name: str,
    account_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request that lists all tables in the account."""
    return _build_table_request(
        "GET",
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/tableServices/default/tables',
        _table_path_format_arguments(resource_group_name, account_name, subscription_id),
        **kwargs
    )
class TableOperations(object):
"""TableOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.storage.v2021_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def create(
self,
resource_group_name: str,
account_name: str,
table_name: str,
**kwargs: Any
) -> "_models.Table":
"""Creates a new table with the specified table name, under the specified account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param table_name: A table name must be unique within a storage account and must be between 3
and 63 characters.The name must comprise of only alphanumeric characters and it cannot begin
with a numeric character.
:type table_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Table, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_04_01.models.Table
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Table"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_create_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
table_name=table_name,
template_url=self.create.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Table', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/tableServices/default/tables/{tableName}'} # type: ignore
@distributed_trace
def update(
self,
resource_group_name: str,
account_name: str,
table_name: str,
**kwargs: Any
) -> "_models.Table":
"""Creates a new table with the specified table name, under the specified account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param table_name: A table name must be unique within a storage account and must be between 3
and 63 characters.The name must comprise of only alphanumeric characters and it cannot begin
with a numeric character.
:type table_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Table, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_04_01.models.Table
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Table"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_update_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
table_name=table_name,
template_url=self.update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Table', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/tableServices/default/tables/{tableName}'} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
account_name: str,
table_name: str,
**kwargs: Any
) -> "_models.Table":
"""Gets the table with the specified table name, under the specified account if it exists.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param table_name: A table name must be unique within a storage account and must be between 3
and 63 characters.The name must comprise of only alphanumeric characters and it cannot begin
with a numeric character.
:type table_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Table, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_04_01.models.Table
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Table"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
table_name=table_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Table', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/tableServices/default/tables/{tableName}'} # type: ignore
@distributed_trace
def delete(
self,
resource_group_name: str,
account_name: str,
table_name: str,
**kwargs: Any
) -> None:
"""Deletes the table with the specified table name, under the specified account if it exists.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param table_name: A table name must be unique within a storage account and must be between 3
and 63 characters.The name must comprise of only alphanumeric characters and it cannot begin
with a numeric character.
:type table_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
table_name=table_name,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/tableServices/default/tables/{tableName}'} # type: ignore
    @distributed_trace
    def list(
        self,
        resource_group_name: str,
        account_name: str,
        **kwargs: Any
    ) -> Iterable["_models.ListTableResource"]:
        """Gets a list of all the tables under the specified storage account.

        Results are paged; the returned iterator transparently follows
        ``next_link`` URLs until the service reports no further pages.

        :param resource_group_name: The name of the resource group within the user's subscription.
         The name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only.
        :type account_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ListTableResource or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.storage.v2021_04_01.models.ListTableResource]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ListTableResource"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # First page: build the request from the operation's templated URL.
            if not next_link:
                request = build_list_request(
                    resource_group_name=resource_group_name,
                    account_name=account_name,
                    subscription_id=self._config.subscription_id,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                # Subsequent pages: reuse the service-provided next_link verbatim
                # and force a GET, as the link may not carry a method.
                request = build_list_request(
                    resource_group_name=resource_group_name,
                    account_name=account_name,
                    subscription_id=self._config.subscription_id,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        def extract_data(pipeline_response):
            # Deserialize one page and hand ItemPaged the (next_link, items) pair.
            deserialized = self._deserialize("ListTableResource", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch one page; anything other than 200 is mapped to an ARM error.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/tableServices/default/tables'}  # type: ignore
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for CrossTowerOps."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from absl.testing import parameterized
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import cross_tower_ops as cross_tower_ops_lib
from tensorflow.contrib.distribute.python import multi_worker_test_base
from tensorflow.contrib.distribute.python import values as value_lib
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import device_util
def _make_per_device(values, devices):
  """Build a PerDevice value with each of `values` placed on its device."""
  device_list = cross_tower_ops_lib._get_devices_from(devices)
  assert len(values) == len(device_list)
  placed = {}
  for device, value in zip(device_list, values):
    with ops.device(device):
      placed[device] = array_ops.identity(value)
  return value_lib.PerDevice(placed)
# pylint: disable=g-doc-args,g-doc-return-or-yield
def _fake_mirrored(value, devices):
  """Create a faked Mirrored object for testing.

  All components of the returned Mirrored share the very same `value`
  object, which is not true of a real Mirrored.
  """
  device_list = cross_tower_ops_lib._get_devices_from(devices)
  return value_lib.Mirrored({device: value for device in device_list})
def _make_indexed_slices(values, indices, dense_shape, device):
  """Construct an IndexedSlices from python lists, placed on `device`."""
  with ops.device(device):
    return ops.IndexedSlices(
        values=constant_op.constant(values),
        indices=constant_op.constant(indices),
        dense_shape=constant_op.constant(dense_shape))
def _make_mirrored_indexed_slices(devices, values, indices, dense_shape):
  """Create a Mirrored of per-device copies of the same IndexedSlices."""
  components = {}
  for device in devices:
    components[device] = _make_indexed_slices(values, indices, dense_shape,
                                              device)
  return value_lib.Mirrored(components)
# Canonical CPU device string, used below as a reduction destination.
_cpu_device = "/device:CPU:0"
class CrossTowerOpsTestBase(test.TestCase, parameterized.TestCase):
  """Shared assertions and scenarios for CrossTowerOps tests."""
  def _assert_indexed_slices_equal(self, left, right):
    # Both sides must be IndexedSlices on the same (resolved) device and
    # must densify to equal tensors.
    self.assertIsInstance(left, ops.IndexedSlices)
    self.assertIsInstance(right, ops.IndexedSlices)
    self.assertEqual(device_util.resolve(left.device),
                     device_util.resolve(right.device))
    self.assertAllEqual(
        self.evaluate(ops.convert_to_tensor(left)),
        self.evaluate(ops.convert_to_tensor(right)))
  def _assert_values_equal(self, left, right):
    # Recursively compare lists and distributed values (Mirrored/PerDevice):
    # same type, same device set, and equal per-device components.
    if isinstance(left, list):
      for l, r in zip(left, right):
        self._assert_values_equal(l, r)
    else:
      self.assertEqual(type(left), type(right))
      self.assertEqual(left.devices, right.devices)
      if isinstance(list(left._index.values())[0], ops.IndexedSlices):
        for (d, v) in left._index.items():
          self._assert_indexed_slices_equal(v, right._index[d])
      elif context.executing_eagerly():
        self.assertEqual([v.numpy() for v in left._index.values()],
                         list(right._index.values()))
      else:
        with self.test_session() as sess:
          self.assertEqual(
              sess.run(list(left._index.values())), list(right._index.values()))
  def _testReductionAndBroadcast(self, cross_tower_ops, distribution):
    # Exercises reduce(), batch_reduce() and broadcast() against all
    # supported destination kinds (None, Mirrored, device string, list).
    devices = distribution.worker_devices
    values = [constant_op.constant(float(d)) for d in range(len(devices))]
    per_device = _make_per_device(values, devices)
    # Mean of 0 .. n-1.
    mean = (len(devices) - 1.) / 2.
    values_2 = [constant_op.constant(d + 1.0) for d in range(len(devices))]
    per_device_2 = _make_per_device(values_2, devices)
    mean_2 = mean + 1.
    destination_mirrored = _fake_mirrored(1., devices)
    destination_different = _fake_mirrored(1., _cpu_device)
    destination_str = _cpu_device
    destination_list = devices
    all_destinations = [
        None, destination_mirrored, destination_different, destination_str,
        destination_list
    ]
    # test reduce()
    for destinations in all_destinations:
      self._assert_values_equal(
          cross_tower_ops.reduce("mean", per_device, destinations=destinations),
          _fake_mirrored(mean, destinations or per_device))
      self._assert_values_equal(
          cross_tower_ops.reduce(
              "mean", per_device_2, destinations=destinations),
          _fake_mirrored(mean_2, destinations or per_device))
      self._assert_values_equal(
          cross_tower_ops.reduce("sum", per_device, destinations=destinations),
          _fake_mirrored(mean * len(devices), destinations or per_device))
      self._assert_values_equal(
          cross_tower_ops.reduce(
              "sum", per_device_2, destinations=destinations),
          _fake_mirrored(mean_2 * len(devices), destinations or per_device))
    # test batch_reduce()
    for d1, d2 in itertools.product(all_destinations, all_destinations):
      self._assert_values_equal(
          cross_tower_ops.batch_reduce(
              "mean", [(per_device, d1), (per_device_2, d2)]),
          [_fake_mirrored(mean, d1 or per_device),
           _fake_mirrored(mean_2, d2 or per_device_2)])
      self._assert_values_equal(
          cross_tower_ops.batch_reduce(
              "sum", [(per_device, d1), (per_device_2, d2)]),
          [_fake_mirrored(mean * len(devices), d1 or per_device),
           _fake_mirrored(mean_2 * len(devices), d2 or per_device_2)])
    # test broadcast()
    for destinations in all_destinations:
      if destinations is None:
        continue
      else:
        self._assert_values_equal(
            cross_tower_ops.broadcast(constant_op.constant(1.), destinations),
            _fake_mirrored(1., destinations))
class SingleWorkerCrossTowerOpsTest(CrossTowerOpsTestBase):
  """Tests cross-tower ops on a single worker's devices."""
  # TODO(yuefengz): decouple the num_gpus check from distribution in
  # combinations module so that we can pass in devices instead of a distribution
  # strategy.
  reduction_to_one_combinations = combinations.combine(
      cross_tower_ops=[
          combinations.NamedObject(
              "DefaultReductionToOneDeviceCrossTowerOps",
              cross_tower_ops_lib.ReductionToOneDeviceCrossTowerOps()),
          combinations.NamedObject(
              "ReductionToCPUDeviceCrossTowerOps",
              cross_tower_ops_lib.ReductionToOneDeviceCrossTowerOps(
                  reduce_to_device=_cpu_device)),
          combinations.NamedObject(
              "AccumulateNCrossTowerOp",
              cross_tower_ops_lib.ReductionToOneDeviceCrossTowerOps(
                  accumulation_fn=math_ops.accumulate_n)),
      ],
      distribution=[
          combinations.one_device_strategy,
          combinations.mirrored_strategy_with_gpu_and_cpu,
          combinations.mirrored_strategy_with_two_gpus
      ],
      mode=["graph", "eager"])
  allreduce_combinations = combinations.combine(
      cross_tower_ops=[
          combinations.NamedObject(
              "AllReduce",
              cross_tower_ops_lib.AllReduceCrossTowerOps("nccl", 1, 0, 0)),
          combinations.NamedObject(
              "HierarchicalCopy",
              cross_tower_ops_lib.AllReduceCrossTowerOps(
                  "hierarchical_copy", 8, 0, 0)),
          combinations.NamedObject(
              "AllReduceNoGradientRepacking",
              cross_tower_ops_lib.AllReduceCrossTowerOps("nccl", 0, 0, 0)),
          combinations.NamedObject(
              "HierarchicalCopyAggregateSmallTensors",
              cross_tower_ops_lib.AllReduceCrossTowerOps(
                  "hierarchical_copy", 0, 100, 10))
      ],
      distribution=[combinations.mirrored_strategy_with_two_gpus],
      mode=["graph", "eager"])
  @combinations.generate(reduction_to_one_combinations + allreduce_combinations)
  def testReductionAndBroadcast(self, cross_tower_ops, distribution):
    with distribution.scope():
      self._testReductionAndBroadcast(cross_tower_ops, distribution)
  def testChooseAlgorithm(self):
    # Each entry lists the peers a device has direct links to; this is the
    # DGX-1-like topology, for which hierarchical_copy should be chosen.
    device_links = [[1, 2, 3, 4], [0, 2, 3, 5], [0, 1, 3, 6], [0, 1, 2, 7],
                    [0, 5, 6, 7], [1, 4, 6, 7], [2, 4, 5, 7], [3, 4, 5, 6]]
    result = cross_tower_ops_lib._choose_all_reduce_algorithm(device_links)
    self.assertIsInstance(result, cross_tower_ops_lib.AllReduceCrossTowerOps)
    self.assertEqual(result._all_reduce_alg, "hierarchical_copy")
    self.assertEqual(result._num_packs, 8)
    # if there are only 4 devices
    device_links = [[1, 2, 3, 4], [0, 2, 3, 5], [0, 1, 3, 6], [0, 1, 2, 7]]
    result = cross_tower_ops_lib._choose_all_reduce_algorithm(device_links)
    self.assertIsInstance(result, cross_tower_ops_lib.AllReduceCrossTowerOps)
    self.assertEqual(result._all_reduce_alg, "nccl")
    self.assertEqual(result._num_packs, 1)
    # if devices links contain each device itself
    device_links = [[0, 1, 2, 3, 4], [0, 1, 2, 3, 5], [0, 1, 2, 3, 6],
                    [0, 1, 2, 3, 7], [0, 4, 5, 6, 7], [1, 4, 5, 6, 7],
                    [2, 4, 5, 6, 7], [3, 4, 5, 6, 7]]
    result = cross_tower_ops_lib._choose_all_reduce_algorithm(device_links)
    self.assertIsInstance(result, cross_tower_ops_lib.AllReduceCrossTowerOps)
    self.assertEqual(result._all_reduce_alg, "hierarchical_copy")
    self.assertEqual(result._num_packs, 8)
    # if not dgx1-like links
    device_links = [[0, 2, 3, 5], [0, 1, 3, 6], [0, 1, 2, 7], [0, 5, 6, 7],
                    [1, 4, 6, 7], [2, 4, 5, 7], [3, 4, 5, 6], [1, 2, 3, 4]]
    result = cross_tower_ops_lib._choose_all_reduce_algorithm(device_links)
    self.assertIsInstance(result, cross_tower_ops_lib.AllReduceCrossTowerOps)
    self.assertEqual(result._all_reduce_alg, "nccl")
    self.assertEqual(result._num_packs, 1)
  @combinations.generate(combinations.combine(
      mode=["graph", "eager"],
      required_gpus=1))
  def testSimpleReduceWithIndexedSlices(self):
    devices = ["/cpu:0", "/gpu:0"]
    t0 = _make_indexed_slices([[1., 2.]], [1], [5, 2], devices[0])
    t1 = _make_indexed_slices([[3., 4.], [5., 6.]], [1, 3], [5, 2], devices[1])
    per_device = value_lib.PerDevice({devices[0]: t0, devices[1]: t1})
    result = cross_tower_ops_lib._simple_reduce(per_device, devices[0],
                                                math_ops.add_n, "sum")
    # Test that the result is semantically equal to both the concatenated
    # IndexedSlices with and without duplicate indices.
    total_with_dups = _make_indexed_slices(
        [[1., 2.], [3., 4.], [5., 6.]], [1, 1, 3], [5, 2], devices[0])
    total_without_dups = _make_indexed_slices(
        [[4., 6.], [5., 6.]], [1, 3], [5, 2], devices[0])
    self._assert_indexed_slices_equal(total_with_dups, result)
    self._assert_indexed_slices_equal(total_without_dups, result)
  @combinations.generate(combinations.combine(
      cross_tower_ops_instance=[
          combinations.NamedObject(
              "ReductionToOneDeviceCrossTowerOps",
              cross_tower_ops_lib.ReductionToOneDeviceCrossTowerOps()),
          combinations.NamedObject(
              "AllReduceCrossTowerOps",
              cross_tower_ops_lib.AllReduceCrossTowerOps())
      ],
      method_string=["sum", "mean"],
      batch_reduce=[True, False],
      mode=["graph", "eager"],
      required_gpus=1))
  def testIndexedSlicesAllReduce(self, cross_tower_ops_instance,
                                 method_string, batch_reduce):
    devices = ["/cpu:0", "/gpu:0"]
    dense_shape = [5, 2]
    t0 = _make_indexed_slices([[1., 2.]], [1], dense_shape, devices[0])
    t1 = _make_indexed_slices(
        [[3., 4.], [5., 6.]], [1, 3], dense_shape, devices[1])
    per_device = value_lib.PerDevice({devices[0]: t0, devices[1]: t1})
    if batch_reduce:
      result = cross_tower_ops_instance.batch_reduce(method_string,
                                                     [(per_device, devices)])
    else:
      result = cross_tower_ops_instance.reduce(method_string, per_device,
                                               devices)
    total_indices_with_dups = [1, 1, 3]
    total_indices_without_dups = [1, 3]
    if method_string == "sum":
      total_values_with_dups = [[1., 2.], [3., 4.], [5., 6.]]
      total_values_without_dups = [[4., 6.], [5., 6.]]
    else:
      assert method_string == "mean"
      total_values_with_dups = [[0.5, 1.], [1.5, 2.], [2.5, 3.]]
      total_values_without_dups = [[2., 3.], [2.5, 3.]]
    total_mirrored_with_dups = _make_mirrored_indexed_slices(
        devices, total_values_with_dups, total_indices_with_dups, dense_shape)
    total_mirrored_without_dups = _make_mirrored_indexed_slices(
        devices, total_values_without_dups, total_indices_without_dups,
        dense_shape)
    # Test that the result is semantically equal to both the concatenated
    # IndexedSlices, as well as when the duplicate indices are summed up.
    if batch_reduce:
      total_mirrored_with_dups = [total_mirrored_with_dups]
      total_mirrored_without_dups = [total_mirrored_without_dups]
    self._assert_values_equal(total_mirrored_with_dups, result)
    self._assert_values_equal(total_mirrored_without_dups, result)
class MultiWorkerCrossTowerOpsTest(multi_worker_test_base.MultiWorkerTestBase,
                                   CrossTowerOpsTestBase):
  """Tests MultiWorkerAllReduce variants across a two-task worker job."""
  worker_devices = [
      "/job:worker/replica:0/task:0", "/job:worker/replica:0/task:1"
  ]
  multi_worker_allreduce_combinations = combinations.combine(
      cross_tower_ops=[
          combinations.NamedObject(
              "MultiWorkerAllReduce",
              cross_tower_ops_lib.MultiWorkerAllReduce(
                  worker_devices, 2, ("pscpu/pscpu", 2, -1), 0, 0, 0)),
          combinations.NamedObject(
              "MultiWorkerAllReducePack",
              cross_tower_ops_lib.MultiWorkerAllReduce(
                  worker_devices, 2, ("pscpu/pscpu", 2, -1), 1, 0, 0)),
          combinations.NamedObject(
              "MultiWorkerAllReduceAggregation",
              cross_tower_ops_lib.MultiWorkerAllReduce(
                  worker_devices, 2, ("pscpu/pscpu", 2, -1), 0, 100, 10)),
          combinations.NamedObject(
              "MultiWorkerAllReduceMultipleSpecs",
              cross_tower_ops_lib.MultiWorkerAllReduce(
                  worker_devices, 2, [("pscpu/pscpu", 2, 100),
                                      ("xring", 2, -1)], 0, 0, 0)),
      ],
      distribution=[
          combinations.multi_worker_strategy_with_cpu,
          combinations.multi_worker_strategy_with_one_gpu,
          combinations.multi_worker_strategy_with_two_gpus
      ],
      mode=["graph"])
  @combinations.generate(multi_worker_allreduce_combinations)
  def testReductionAndBroadcast(self, cross_tower_ops, distribution):
    with distribution.scope():
      self._testReductionAndBroadcast(cross_tower_ops, distribution)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| |
from __future__ import print_function
import types
import warnings
import sys
import traceback
import inspect
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash, Memory
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.base import (clone, ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin, BaseEstimator)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.lda import LDA
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.pipeline import make_pipeline
from sklearn.utils.validation import DataConversionWarning
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
# Lazily-initialized (X, y) Boston subset cache; see _boston_subset().
BOSTON = None
# Cross-decomposition estimators whose transform() returns (x_scores, y_scores).
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
def _yield_non_meta_checks(name, Estimator):
    """Yield checks applicable to every non-meta estimator.

    `name` is used to exempt estimators that are known not to satisfy a
    particular check.
    """
    yield check_estimators_dtypes
    yield check_fit_score_takes_y
    yield check_dtype_object
    yield check_estimators_fit_returns_self
    # Check that all estimator yield informative messages when
    # trained on empty datasets
    yield check_estimators_empty_data_messages
    if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
        # SpectralEmbedding is non-deterministic,
        # see issue #4236
        # cross-decomposition's "transform" returns X and Y
        yield check_pipeline_consistency
    if name not in ['Imputer']:
        # Test that all estimators check their input for NaN's and infs
        yield check_estimators_nan_inf
    if name not in ['GaussianProcess']:
        # FIXME!
        # in particular GaussianProcess!
        yield check_estimators_overwrite_params
    if hasattr(Estimator, 'sparsify'):
        yield check_sparsify_coefficients
    yield check_estimator_sparse_data
def _yield_classifier_checks(name, Classifier):
    """Yield checks specific to classifiers."""
    # test classifiers can handle non-array data
    yield check_classifier_data_not_an_array
    # test classifiers trained on a single label always return this label
    yield check_classifiers_one_label
    yield check_classifiers_classes
    yield check_classifiers_pickle
    yield check_estimators_partial_fit_n_features
    # basic consistency testing
    yield check_classifiers_train
    if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
        # TODO some complication with -1 label
            and name not in ["DecisionTreeClassifier",
                             "ExtraTreeClassifier"]):
        # We don't raise a warning in these classifiers, as
        # the column y interface is used by the forests.
        # test if classifiers can cope with y.shape = (n_samples, 1)
        yield check_classifiers_input_shapes
    # test if NotFittedError is raised
    yield check_estimators_unfitted
    if 'class_weight' in Classifier().get_params().keys():
        yield check_class_weight_classifiers
def _yield_regressor_checks(name, Regressor):
    """Yield checks specific to regressors."""
    # TODO: test with intercept
    # TODO: test with multiple responses
    # basic testing
    yield check_regressors_train
    yield check_regressor_data_not_an_array
    yield check_estimators_partial_fit_n_features
    yield check_regressors_no_decision_function
    # Test that estimators can be pickled, and once pickled
    # give the same answer as before.
    yield check_regressors_pickle
    if name != 'CCA':
        # check that the regressor handles int input
        yield check_regressors_int
    # Test if NotFittedError is raised
    yield check_estimators_unfitted
def _yield_transformer_checks(name, Transformer):
    """Yield checks specific to transformers."""
    # All transformers should either deal with sparse data or raise an
    # exception with type TypeError and an intelligible error message
    yield check_transformer_pickle
    if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
                    'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
        yield check_transformer_data_not_an_array
    # these don't actually fit the data, so don't raise errors
    if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer']:
        # basic tests
        yield check_transformer_general
        yield check_transformers_unfitted
def _yield_clustering_checks(name, Clusterer):
    """Yield checks specific to clustering estimators."""
    yield check_clusterer_compute_labels_predict
    if name not in ('WardAgglomeration', "FeatureAgglomeration"):
        # this is clustering on the features
        # let's not test that here.
        yield check_clustering
        yield check_estimators_partial_fit_n_features
def _yield_all_checks(name, Estimator):
    """Yield every check applicable to *Estimator*.

    Always yields the generic (non-meta) checks, then the checks for each
    sklearn mixin the estimator class inherits from.
    """
    for check in _yield_non_meta_checks(name, Estimator):
        yield check
    mixin_checks = (
        (ClassifierMixin, _yield_classifier_checks),
        (RegressorMixin, _yield_regressor_checks),
        (TransformerMixin, _yield_transformer_checks),
        (ClusterMixin, _yield_clustering_checks),
    )
    for mixin, yield_checks in mixin_checks:
        if issubclass(Estimator, mixin):
            for check in yield_checks(name, Estimator):
                yield check
def check_estimator(Estimator):
    """Check if estimator adheres to sklearn conventions.

    This estimator will run an extensive test-suite for input validation,
    shapes, etc.
    Additional tests for classifiers, regressors, clustering or transformers
    will be run if the Estimator class inherits from the corresponding mixin
    from sklearn.base.

    Parameters
    ----------
    Estimator : class
        Class to check.
    """
    # ``Estimator`` is the class itself, so its name is ``Estimator.__name__``.
    # ``Estimator.__class__.__name__`` would be the name of the *metaclass*
    # (usually "type"), which broke every name-based exemption in the
    # individual checks.
    name = Estimator.__name__
    check_parameters_default_constructible(name, Estimator)
    for check in _yield_all_checks(name, Estimator):
        check(name, Estimator)
def _boston_subset(n_samples=200):
    """Return a cached, shuffled, standardized subset of the Boston dataset.

    Note: the result is computed once and cached in the module-level BOSTON;
    `n_samples` only takes effect on the first call.
    """
    global BOSTON
    if BOSTON is None:
        boston = load_boston()
        X, y = boston.data, boston.target
        X, y = shuffle(X, y, random_state=0)
        X, y = X[:n_samples], y[:n_samples]
        X = StandardScaler().fit_transform(X)
        BOSTON = X, y
    return BOSTON
def set_fast_parameters(estimator):
    """Mutate *estimator*'s parameters in place so checks run quickly."""
    # speed up some estimators
    params = estimator.get_params()
    if ("n_iter" in params
            and estimator.__class__.__name__ != "TSNE"):
        estimator.set_params(n_iter=5)
    if "max_iter" in params:
        # NMF
        if estimator.max_iter is not None:
            estimator.set_params(max_iter=min(5, estimator.max_iter))
        # LinearSVR
        if estimator.__class__.__name__ == 'LinearSVR':
            estimator.set_params(max_iter=20)
    if "n_resampling" in params:
        # randomized lasso
        estimator.set_params(n_resampling=5)
    if "n_estimators" in params:
        # especially gradient boosting with default 100
        estimator.set_params(n_estimators=min(5, estimator.n_estimators))
    if "max_trials" in params:
        # RANSAC
        estimator.set_params(max_trials=10)
    if "n_init" in params:
        # K-Means
        estimator.set_params(n_init=2)
    if estimator.__class__.__name__ == "SelectFdr":
        # be tolerant of noisy datasets (not actually speed)
        estimator.set_params(alpha=.5)
    if estimator.__class__.__name__ == "TheilSenRegressor":
        estimator.max_subpopulation = 100
    if isinstance(estimator, BaseRandomProjection):
        # Due to the jl lemma and often very few samples, the number
        # of components of the random matrix projection will be probably
        # greater than the number of features.
        # So we impose a smaller number (avoid "auto" mode)
        estimator.set_params(n_components=1)
    if isinstance(estimator, SelectKBest):
        # SelectKBest has a default of k=10
        # which is more feature than we have in most case.
        estimator.set_params(k=1)
class NotAnArray(object):
    """An object that is convertible to an array via the ``__array__`` hook."""
    def __init__(self, data):
        self.data = data
    def __array__(self, dtype=None):
        # numpy invokes this when coercing the object to an ndarray.
        return self.data
def _is_32bit():
    """Detect if process is 32bit Python."""
    pointer_bits = 8 * struct.calcsize('P')
    return pointer_bits == 32
def check_estimator_sparse_data(name, Estimator):
    """Check the estimator either handles CSR input or fails with a clear TypeError."""
    rng = np.random.RandomState(0)
    X = rng.rand(40, 10)
    X[X < .8] = 0
    X = sparse.csr_matrix(X)
    y = (4 * rng.rand(40)).astype(np.int)
    # catch deprecation warnings
    with warnings.catch_warnings():
        if name in ['Scaler', 'StandardScaler']:
            # centering a sparse matrix would densify it
            estimator = Estimator(with_mean=False)
        else:
            estimator = Estimator()
    set_fast_parameters(estimator)
    # fit and predict
    try:
        estimator.fit(X, y)
        if hasattr(estimator, "predict"):
            estimator.predict(X)
        if hasattr(estimator, 'predict_proba'):
            estimator.predict_proba(X)
    except TypeError as e:
        # A TypeError is acceptable only if it mentions sparse input.
        if 'sparse' not in repr(e):
            print("Estimator %s doesn't seem to fail gracefully on "
                  "sparse data: error message state explicitly that "
                  "sparse input is not supported if this is not the case."
                  % name)
            raise
    except Exception:
        print("Estimator %s doesn't seem to fail gracefully on "
              "sparse data: it should raise a TypeError if sparse input "
              "is explicitly not supported." % name)
        raise
def check_dtype_object(name, Estimator):
    # check that estimators treat dtype object as numeric if possible
    rng = np.random.RandomState(0)
    X = rng.rand(40, 10).astype(object)
    y = (X[:, 0] * 4).astype(np.int)
    y = multioutput_estimator_convert_y_2d(name, y)
    with warnings.catch_warnings():
        estimator = Estimator()
    set_fast_parameters(estimator)
    estimator.fit(X, y)
    if hasattr(estimator, "predict"):
        estimator.predict(X)
    if hasattr(estimator, "transform"):
        estimator.transform(X)
    try:
        # object-dtype labels may legitimately be rejected, but only with
        # the dedicated "Unknown label type" message.
        estimator.fit(X, y.astype(object))
    except Exception as e:
        if "Unknown label type" not in str(e):
            raise
    # A truly non-numeric cell must raise a TypeError.
    X[0, 0] = {'foo': 'bar'}
    msg = "argument must be a string or a number"
    assert_raises_regex(TypeError, msg, estimator.fit, X, y)
def check_transformer_general(name, Transformer):
    """Run the transformer checks on blob data, as arrays and as python lists."""
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)
    X = StandardScaler().fit_transform(X)
    X -= X.min()
    for data_X, data_y in ((X, y), (X.tolist(), y.tolist())):
        _check_transformer(name, Transformer, data_X, data_y)
def check_transformer_data_not_an_array(name, Transformer):
    """Run the transformer checks with inputs wrapped in NotAnArray."""
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)
    X = StandardScaler().fit_transform(X)
    # Keep the data non negative for estimators such as NMF.
    X -= X.min() - .1
    wrapped_X = NotAnArray(X)
    wrapped_y = NotAnArray(np.asarray(y))
    _check_transformer(name, Transformer, wrapped_X, wrapped_y)
def check_transformers_unfitted(name, Transformer):
    """Check transform() on an unfitted transformer raises an error."""
    X, y = _boston_subset()
    with warnings.catch_warnings(record=True):
        transformer = Transformer()
    assert_raises((AttributeError, ValueError), transformer.transform, X)
def _check_transformer(name, Transformer, X, y):
    """Fit *Transformer* and check fit_transform/transform consistency on (X, y)."""
    if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
        # Those transformers yield non-deterministic output when executed on
        # a 32bit Python. The same transformers are stable on 64bit Python.
        # FIXME: try to isolate a minimalistic reproduction case only depending
        # on numpy & scipy and/or maybe generate a test dataset that does not
        # cause such unstable behaviors.
        msg = name + ' is non deterministic on 32bit Python'
        raise SkipTest(msg)
    n_samples, n_features = np.asarray(X).shape
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        transformer = Transformer()
    set_random_state(transformer)
    set_fast_parameters(transformer)
    # fit
    if name in CROSS_DECOMPOSITION:
        # cross-decomposition needs a 2-column target
        y_ = np.c_[y, y]
        y_[::2, 1] *= 2
    else:
        y_ = y
    transformer.fit(X, y_)
    X_pred = transformer.fit_transform(X, y=y_)
    if isinstance(X_pred, tuple):
        for x_pred in X_pred:
            assert_equal(x_pred.shape[0], n_samples)
    else:
        assert_equal(X_pred.shape[0], n_samples)
    if hasattr(transformer, 'transform'):
        if name in CROSS_DECOMPOSITION:
            X_pred2 = transformer.transform(X, y_)
            X_pred3 = transformer.fit_transform(X, y=y_)
        else:
            X_pred2 = transformer.transform(X)
            X_pred3 = transformer.fit_transform(X, y=y_)
        if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
            for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
                assert_array_almost_equal(
                    x_pred, x_pred2, 2,
                    "fit_transform and transform outcomes not consistent in %s"
                    % Transformer)
                assert_array_almost_equal(
                    x_pred, x_pred3, 2,
                    "consecutive fit_transform outcomes not consistent in %s"
                    % Transformer)
        else:
            assert_array_almost_equal(
                X_pred, X_pred2, 2,
                "fit_transform and transform outcomes not consistent in %s"
                % Transformer)
            assert_array_almost_equal(
                X_pred, X_pred3, 2,
                "consecutive fit_transform outcomes not consistent in %s"
                % Transformer)
        # raises error on malformed input for transform
        if hasattr(X, 'T'):
            # If it's not an array, it does not have a 'T' property
            assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, Estimator):
    """Check that make_pipeline(Estimator()) gives same results as the estimator."""
    if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
        # Those transformers yield non-deterministic output when executed on
        # a 32bit Python. The same transformers are stable on 64bit Python.
        # FIXME: try to isolate a minimalistic reproduction case only depending
        # scipy and/or maybe generate a test dataset that does not
        # cause such unstable behaviors.
        msg = name + ' is non deterministic on 32bit Python'
        raise SkipTest(msg)
    # check that make_pipeline(est) gives same score as est
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)
    X -= X.min()
    y = multioutput_estimator_convert_y_2d(name, y)
    estimator = Estimator()
    set_fast_parameters(estimator)
    set_random_state(estimator)
    pipeline = make_pipeline(estimator)
    estimator.fit(X, y)
    pipeline.fit(X, y)
    funcs = ["score", "fit_transform"]
    for func_name in funcs:
        func = getattr(estimator, func_name, None)
        if func is not None:
            func_pipeline = getattr(pipeline, func_name)
            result = func(X, y)
            result_pipe = func_pipeline(X, y)
            assert_array_almost_equal(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, Estimator):
    # check that all estimators accept an optional y
    # in fit and score so they can be used in pipelines
    rnd = np.random.RandomState(0)
    X = rnd.uniform(size=(10, 3))
    y = np.arange(10) % 3
    y = multioutput_estimator_convert_y_2d(name, y)
    estimator = Estimator()
    set_fast_parameters(estimator)
    set_random_state(estimator)
    funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]
    for func_name in funcs:
        func = getattr(estimator, func_name, None)
        if func is not None:
            func(X, y)
            args = inspect.getargspec(func).args
            # args is ['self', 'X', 'y', ...] for these methods, so the
            # target must be the parameter at index 2.
            assert_true(args[2] in ["y", "Y"])
@ignore_warnings
def check_estimators_dtypes(name, Estimator):
    """Check the estimator fits/predicts on float32, float64, int32 and int64 input."""
    rnd = np.random.RandomState(0)
    X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
    X_train_64 = X_train_32.astype(np.float64)
    X_train_int_64 = X_train_32.astype(np.int64)
    X_train_int_32 = X_train_32.astype(np.int32)
    y = X_train_int_64[:, 0]
    y = multioutput_estimator_convert_y_2d(name, y)
    for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
        with warnings.catch_warnings(record=True):
            estimator = Estimator()
        set_fast_parameters(estimator)
        set_random_state(estimator, 1)
        estimator.fit(X_train, y)
        for method in ["predict", "transform", "decision_function",
                       "predict_proba"]:
            if hasattr(estimator, method):
                getattr(estimator, method)(X_train)
def check_estimators_empty_data_messages(name, Estimator):
    """Check that fitting on empty data raises an informative ValueError."""
    e = Estimator()
    set_fast_parameters(e)
    set_random_state(e, 1)
    X_zero_samples = np.empty(0).reshape(0, 3)
    # The precise message can change depending on whether X or y is
    # validated first. Let us test the type of exception only:
    assert_raises(ValueError, e.fit, X_zero_samples, [])
    X_zero_features = np.empty(0).reshape(3, 0)
    # the following y should be accepted by both classifiers and regressors
    # and ignored by unsupervised models
    y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1]))
    msg = "0 feature(s) (shape=(3, 0)) while a minimum of 1 is required."
    assert_raise_message(ValueError, msg, e.fit, X_zero_features, y)
def check_estimators_nan_inf(name, Estimator):
    """Check that estimators reject NaN and inf values in X.

    fit, predict and transform on data containing NaN or inf must raise a
    ValueError whose message mentions the offending value.  Any other
    exception type — or no exception at all — is a test failure.
    """
    rnd = np.random.RandomState(0)
    X_train_finite = rnd.uniform(size=(10, 3))
    X_train_nan = rnd.uniform(size=(10, 3))
    X_train_nan[0, 0] = np.nan
    X_train_inf = rnd.uniform(size=(10, 3))
    X_train_inf[0, 0] = np.inf
    y = np.ones(10)
    y[:5] = 0
    y = multioutput_estimator_convert_y_2d(name, y)
    error_string_fit = "Estimator doesn't check for NaN and inf in fit."
    error_string_predict = ("Estimator doesn't check for NaN and inf in"
                            " predict.")
    error_string_transform = ("Estimator doesn't check for NaN and inf in"
                              " transform.")
    for X_train in [X_train_nan, X_train_inf]:
        # catch deprecation warnings
        with warnings.catch_warnings(record=True):
            estimator = Estimator()
            set_fast_parameters(estimator)
            set_random_state(estimator, 1)
            # try to fit
            try:
                estimator.fit(X_train, y)
            except ValueError as e:
                if 'inf' not in repr(e) and 'NaN' not in repr(e):
                    print(error_string_fit, Estimator, e)
                    traceback.print_exc(file=sys.stdout)
                    raise e
            except Exception as exc:
                print(error_string_fit, Estimator, exc)
                traceback.print_exc(file=sys.stdout)
                raise exc
            else:
                raise AssertionError(error_string_fit, Estimator)
            # actually fit
            estimator.fit(X_train_finite, y)

            # predict
            if hasattr(estimator, "predict"):
                try:
                    estimator.predict(X_train)
                except ValueError as e:
                    if 'inf' not in repr(e) and 'NaN' not in repr(e):
                        print(error_string_predict, Estimator, e)
                        traceback.print_exc(file=sys.stdout)
                        raise e
                except Exception as exc:
                    print(error_string_predict, Estimator, exc)
                    traceback.print_exc(file=sys.stdout)
                    # BUG FIX: the unexpected exception was previously
                    # swallowed here, silently passing the check.
                    raise exc
                else:
                    raise AssertionError(error_string_predict, Estimator)

            # transform
            if hasattr(estimator, "transform"):
                try:
                    estimator.transform(X_train)
                except ValueError as e:
                    if 'inf' not in repr(e) and 'NaN' not in repr(e):
                        print(error_string_transform, Estimator, e)
                        traceback.print_exc(file=sys.stdout)
                        raise e
                except Exception as exc:
                    print(error_string_transform, Estimator, exc)
                    traceback.print_exc(file=sys.stdout)
                    # BUG FIX: same silent-swallow as in the predict branch.
                    raise exc
                else:
                    raise AssertionError(error_string_transform, Estimator)
def check_transformer_pickle(name, Transformer):
    """Transformer output must survive a pickle round-trip."""
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)
    n_samples, n_features = X.shape
    X = StandardScaler().fit_transform(X)
    X -= X.min()
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        transformer = Transformer()
    if not hasattr(transformer, 'transform'):
        return
    set_random_state(transformer)
    set_fast_parameters(transformer)

    # Cross-decomposition methods need a two-column target.
    if name in CROSS_DECOMPOSITION:
        rng = np.random.RandomState(seed=12345)
        y_ = np.vstack([y, 2 * y + rng.randint(2, size=len(y))]).T
    else:
        y_ = y

    transformer.fit(X, y_)
    X_pred = transformer.fit(X, y_).transform(X)
    # The unpickled copy must reproduce the stored transform exactly.
    restored = pickle.loads(pickle.dumps(transformer))
    assert_array_almost_equal(restored.transform(X), X_pred)
def check_estimators_partial_fit_n_features(name, Alg):
    """partial_fit must reject a change in the number of features."""
    if not hasattr(Alg, 'partial_fit'):
        return
    X, y = make_blobs(n_samples=50, random_state=1)
    X -= X.min()
    with warnings.catch_warnings(record=True):
        estimator = Alg()
        set_fast_parameters(estimator)
        if isinstance(estimator, ClassifierMixin):
            # Classifiers need the full label set up front.
            estimator.partial_fit(X, y, classes=np.unique(y))
        else:
            estimator.partial_fit(X, y)
    # A second call with one feature dropped must fail loudly.
    assert_raises(ValueError, estimator.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
    """Smoke-test a clustering algorithm on well-separated blobs.

    Checks the shape of ``labels_``, a minimum adjusted Rand score, and
    that ``fit_predict`` agrees with ``fit`` followed by ``labels_``.
    """
    X, y = make_blobs(n_samples=50, random_state=1)
    X, y = shuffle(X, y, random_state=7)
    X = StandardScaler().fit_transform(X)
    n_samples, n_features = X.shape
    # catch deprecation and neighbors warnings
    with warnings.catch_warnings(record=True):
        alg = Alg()
        set_fast_parameters(alg)
        if hasattr(alg, "n_clusters"):
            alg.set_params(n_clusters=3)
        set_random_state(alg)
        if name == 'AffinityPropagation':
            alg.set_params(preference=-100)
            alg.set_params(max_iter=100)
        # fit
        alg.fit(X)
        # with lists
        alg.fit(X.tolist())
    assert_equal(alg.labels_.shape, (n_samples,))
    pred = alg.labels_
    assert_greater(adjusted_rand_score(pred, y), 0.4)
    # fit another time with ``fit_predict`` and compare results
    # BUG FIX: this previously used ``is`` (identity) against a string
    # literal, which only worked through CPython string interning.
    if name == 'SpectralClustering':
        # there is no way to make Spectral clustering deterministic :(
        return
    set_random_state(alg)
    with warnings.catch_warnings(record=True):
        pred2 = alg.fit_predict(X)
    assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
    """Check that predict is invariant of compute_labels"""
    X, y = make_blobs(n_samples=20, random_state=0)
    clusterer = Clusterer()
    if not hasattr(clusterer, "compute_labels"):
        return
    # MiniBatchKMeans is the only such clusterer at present.
    if hasattr(clusterer, "random_state"):
        clusterer.set_params(random_state=0)
    labels_with = clusterer.fit(X).predict(X)
    clusterer.set_params(compute_labels=False)
    labels_without = clusterer.fit(X).predict(X)
    assert_array_equal(labels_with, labels_without)
def check_classifiers_one_label(name, Classifier):
    """Train and predict on a dataset containing a single class.

    A classifier must either handle the degenerate one-class problem
    (predicting the lone class), or refuse with a ValueError whose
    message mentions 'class'.  Any other failure mode is surfaced.
    """
    error_string_fit = "Classifier can't train when only one class is present."
    error_string_predict = ("Classifier can't predict when only one class is "
                            "present.")
    rnd = np.random.RandomState(0)
    X_train = rnd.uniform(size=(10, 3))
    X_test = rnd.uniform(size=(10, 3))
    # All-ones target: a single class for the whole training set.
    y = np.ones(10)
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        classifier = Classifier()
        set_fast_parameters(classifier)
        # try to fit
        try:
            classifier.fit(X_train, y)
        except ValueError as e:
            if 'class' not in repr(e):
                print(error_string_fit, Classifier, e)
                traceback.print_exc(file=sys.stdout)
                raise e
            else:
                # Expected, informative refusal: nothing more to check.
                return
        except Exception as exc:
            print(error_string_fit, Classifier, exc)
            traceback.print_exc(file=sys.stdout)
            raise exc
        # predict
        try:
            # The lone training class must be predicted for every sample.
            assert_array_equal(classifier.predict(X_test), y)
        except Exception as exc:
            print(error_string_predict, Classifier, exc)
            raise exc
def check_classifiers_train(name, Classifier):
    """Fit a classifier on multi-class and binary blobs and check its API.

    Verifies training accuracy, output shapes, and the mutual consistency
    of ``predict``, ``decision_function`` and ``predict_proba``, plus that
    malformed input raises ValueError.
    """
    X_m, y_m = make_blobs(random_state=0)
    X_m, y_m = shuffle(X_m, y_m, random_state=7)
    X_m = StandardScaler().fit_transform(X_m)
    # generate binary problem from multi-class one
    y_b = y_m[y_m != 2]
    X_b = X_m[y_m != 2]
    for (X, y) in [(X_m, y_m), (X_b, y_b)]:
        # catch deprecation warnings
        classes = np.unique(y)
        n_classes = len(classes)
        n_samples, n_features = X.shape
        with warnings.catch_warnings(record=True):
            classifier = Classifier()
            if name in ['BernoulliNB', 'MultinomialNB']:
                X -= X.min()
            set_fast_parameters(classifier)
            set_random_state(classifier)
            # raises error on malformed input for fit
            assert_raises(ValueError, classifier.fit, X, y[:-1])

            # fit
            classifier.fit(X, y)
            # with lists
            classifier.fit(X.tolist(), y.tolist())
            assert_true(hasattr(classifier, "classes_"))
            y_pred = classifier.predict(X)
            assert_equal(y_pred.shape, (n_samples,))
            # training set performance
            if name not in ['BernoulliNB', 'MultinomialNB']:
                assert_greater(accuracy_score(y, y_pred), 0.83)

            # raises error on malformed input for predict
            assert_raises(ValueError, classifier.predict, X.T)
            if hasattr(classifier, "decision_function"):
                try:
                    # decision_function agrees with predict
                    decision = classifier.decision_function(X)
                    # BUG FIX: the int comparisons below used ``is``, which
                    # only worked through CPython small-int caching; use
                    # ``==`` for value equality.
                    if n_classes == 2:
                        assert_equal(decision.shape, (n_samples,))
                        dec_pred = (decision.ravel() > 0).astype(np.int)
                        assert_array_equal(dec_pred, y_pred)
                    if (n_classes == 3
                            and not isinstance(classifier, BaseLibSVM)):
                        # 1on1 of LibSVM works differently
                        assert_equal(decision.shape, (n_samples, n_classes))
                        assert_array_equal(np.argmax(decision, axis=1), y_pred)
                    # raises error on malformed input for decision_function
                    # (the duplicated assert_raises call was removed)
                    assert_raises(ValueError,
                                  classifier.decision_function, X.T)
                except NotImplementedError:
                    pass
            if hasattr(classifier, "predict_proba"):
                # predict_proba agrees with predict
                y_prob = classifier.predict_proba(X)
                assert_equal(y_prob.shape, (n_samples, n_classes))
                assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
                # check that probas for all classes sum to one
                assert_array_almost_equal(np.sum(y_prob, axis=1),
                                          np.ones(n_samples))
                # raises error on malformed input for predict_proba
                assert_raises(ValueError, classifier.predict_proba, X.T)
def check_estimators_fit_returns_self(name, Estimator):
    """fit must return the estimator itself (to allow method chaining)."""
    X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
    y = multioutput_estimator_convert_y_2d(name, y)
    X -= X.min()  # some estimators want non-negative input
    estimator = Estimator()
    set_fast_parameters(estimator)
    set_random_state(estimator)
    assert_true(estimator.fit(X, y) is estimator)
def check_estimators_unfitted(name, Estimator):
    """Check that predict raises an exception in an unfitted estimator.

    Unfitted estimators should raise either AttributeError or ValueError.
    The specific exception type NotFittedError inherits from both and can
    therefore be adequately raised for that purpose.
    """
    # Common test for Regressors as well as Classifiers
    X, y = _boston_subset()
    with warnings.catch_warnings(record=True):
        est = Estimator()
    msg = "fit"
    # Probe every prediction-like entry point the estimator exposes.
    for method_name in ('predict', 'decision_function',
                        'predict_proba', 'predict_log_proba'):
        method = getattr(est, method_name, None)
        if method is not None:
            assert_raise_message((AttributeError, ValueError), msg,
                                 method, X)
def check_classifiers_input_shapes(name, Classifier):
    """Fitting with a column-vector y must warn and match the 1-D result.

    A 2-D ``y`` of shape (n_samples, 1) should trigger exactly one
    DataConversionWarning, and the resulting predictions must equal those
    obtained with the equivalent 1-D ``y``.
    """
    iris = load_iris()
    X, y = iris.data, iris.target
    X, y = shuffle(X, y, random_state=1)
    X = StandardScaler().fit_transform(X)
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        classifier = Classifier()
    set_fast_parameters(classifier)
    set_random_state(classifier)
    # fit
    classifier.fit(X, y)
    y_pred = classifier.predict(X)
    # Re-seed so the 2-D fit below is comparable to the 1-D one.
    set_random_state(classifier)
    # Check that when a 2D y is given, a DataConversionWarning is
    # raised
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always", DataConversionWarning)
        warnings.simplefilter("ignore", RuntimeWarning)
        classifier.fit(X, y[:, np.newaxis])
    msg = "expected 1 DataConversionWarning, got: %s" % (
        ", ".join([str(w_x) for w_x in w]))
    assert_equal(len(w), 1, msg)
    assert_array_equal(y_pred, classifier.predict(X))
def check_classifiers_classes(name, Classifier):
    """Fit with string (and object-dtype) labels and check ``classes_``."""
    X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
    X, y = shuffle(X, y, random_state=7)
    X = StandardScaler().fit_transform(X)
    # Shift to strictly positive values for estimators such as NMF.
    X -= X.min() - .1
    y_names = np.array(["one", "two", "three"])[y]
    for labels in [y_names, y_names.astype('O')]:
        if name in ["LabelPropagation", "LabelSpreading"]:
            # TODO some complication with -1 label
            y_ = y
        else:
            y_ = labels
        classes = np.unique(y_)
        # catch deprecation warnings
        with warnings.catch_warnings(record=True):
            classifier = Classifier()
            if name == 'BernoulliNB':
                classifier.set_params(binarize=X.mean())
            set_fast_parameters(classifier)
            classifier.fit(X, y_)
        y_pred = classifier.predict(X)
        # Every training label must appear among the predictions.
        assert_array_equal(np.unique(y_), np.unique(y_pred))
        if np.any(classifier.classes_ != classes):
            print("Unexpected classes_ attribute for %r: "
                  "expected %s, got %s" %
                  (classifier, classes, classifier.classes_))
def check_classifiers_pickle(name, Classifier):
    """Classifier predictions must survive a pickle round-trip."""
    X, y = make_blobs(random_state=0)
    X, y = shuffle(X, y, random_state=7)
    X -= X.min()
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        classifier = Classifier()
        set_fast_parameters(classifier)
        # raises error on malformed input for fit
        assert_raises(ValueError, classifier.fit, X, y[:-1])
        # fit
        classifier.fit(X, y)
        y_pred = classifier.predict(X)
        # The unpickled copy must reproduce the stored predictions.
        restored = pickle.loads(pickle.dumps(classifier))
        assert_array_almost_equal(restored.predict(X), y_pred)
def check_regressors_int(name, Regressor):
    """Integer and float targets must yield (nearly) the same predictions."""
    X, _ = _boston_subset()
    X = X[:50]
    rnd = np.random.RandomState(0)
    y = multioutput_estimator_convert_y_2d(name,
                                           rnd.randint(3, size=X.shape[0]))
    rnd = np.random.RandomState(0)
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        # separate estimators to control random seeds
        reg_int = Regressor()
        reg_float = Regressor()
        for reg in (reg_int, reg_float):
            set_fast_parameters(reg)
        for reg in (reg_int, reg_float):
            set_random_state(reg)
        if name in CROSS_DECOMPOSITION:
            # Cross-decomposition methods want a two-column target.
            y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))]).T
        else:
            y_ = y
        # fit once on integer targets, once on their float copy
        reg_int.fit(X, y_)
        pred_int = reg_int.predict(X)
        reg_float.fit(X, y_.astype(np.float))
        pred_float = reg_float.predict(X)
        assert_array_almost_equal(pred_int, pred_float, 2, name)
def check_regressors_train(name, Regressor):
    """Fit a regressor on (scaled) Boston data and sanity-check its output."""
    X, y = _boston_subset()
    y = StandardScaler().fit_transform(y)  # X is already scaled
    y = multioutput_estimator_convert_y_2d(name, y)
    rng = np.random.RandomState(0)
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        reg = Regressor()
        set_fast_parameters(reg)
        if not hasattr(reg, 'alphas') and hasattr(reg, 'alpha'):
            # linear regressors need to set alpha, but not generalized CV ones
            reg.alpha = 0.01
        if name == 'PassiveAggressiveRegressor':
            reg.C = 0.01

        # raises error on malformed input for fit
        assert_raises(ValueError, reg.fit, X, y[:-1])

        # Cross-decomposition methods want a two-column target.
        if name in CROSS_DECOMPOSITION:
            y_ = np.vstack([y, 2 * y + rng.randint(2, size=len(y))]).T
        else:
            y_ = y
        set_random_state(reg)
        reg.fit(X, y_)
        reg.fit(X.tolist(), y_.tolist())
        y_pred = reg.predict(X)
        assert_equal(y_pred.shape, y_.shape)

        # TODO: find out why PLS and CCA fail. RANSAC is random
        # and furthermore assumes the presence of outliers, hence
        # skipped
        if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
            print(reg)
            assert_greater(reg.score(X, y_), 0.5)
def check_regressors_pickle(name, Regressor):
    """Regressor predictions must survive a pickle round-trip."""
    X, y = _boston_subset()
    y = StandardScaler().fit_transform(y)  # X is already scaled
    y = multioutput_estimator_convert_y_2d(name, y)
    rng = np.random.RandomState(0)
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        reg = Regressor()
        set_fast_parameters(reg)
        if not hasattr(reg, 'alphas') and hasattr(reg, 'alpha'):
            # linear regressors need to set alpha, but not generalized CV ones
            reg.alpha = 0.01
        if name in CROSS_DECOMPOSITION:
            # Cross-decomposition methods want a two-column target.
            y_ = np.vstack([y, 2 * y + rng.randint(2, size=len(y))]).T
        else:
            y_ = y
        reg.fit(X, y_)
        y_pred = reg.predict(X)
        # The unpickled copy must reproduce the stored predictions.
        restored = pickle.loads(pickle.dumps(reg))
        assert_array_almost_equal(restored.predict(X), y_pred)
@ignore_warnings
def check_regressors_no_decision_function(name, Regressor):
    """Regressors exposing classifier-only methods must warn deprecation."""
    rng = np.random.RandomState(0)
    X = rng.normal(size=(10, 4))
    y = multioutput_estimator_convert_y_2d(name, X[:, 0])
    regressor = Regressor()
    set_fast_parameters(regressor)
    if hasattr(regressor, "n_components"):
        # FIXME CCA, PLS is not robust to rank 1 effects
        regressor.n_components = 1
    regressor.fit(X, y)
    for func_name in ("decision_function", "predict_proba",
                      "predict_log_proba"):
        func = getattr(regressor, func_name, None)
        if func is None:
            # doesn't have function
            continue
        # has function. Should raise deprecation warning
        assert_warns_message(DeprecationWarning, func_name, func, X)
def check_class_weight_classifiers(name, Classifier):
    """An overwhelming class weight must dominate the predictions."""
    if name == "NuSVC":
        # the sparse version has a parameter that doesn't do anything
        raise SkipTest
    if name.endswith("NB"):
        # NaiveBayes classifiers have a somewhat different interface.
        # FIXME SOON!
        raise SkipTest

    for n_centers in [2, 3]:
        # create a very noisy dataset
        X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
                                                            random_state=0)
        n_centers = len(np.unique(y_train))
        # Give class 0 a huge weight and everyone else a tiny one.
        class_weight = dict.fromkeys(range(n_centers), 0.0001)
        class_weight[0] = 1000
        with warnings.catch_warnings(record=True):
            classifier = Classifier(class_weight=class_weight)
            if hasattr(classifier, "n_iter"):
                classifier.set_params(n_iter=100)
            if hasattr(classifier, "min_weight_fraction_leaf"):
                classifier.set_params(min_weight_fraction_leaf=0.01)
            set_random_state(classifier)
            classifier.fit(X_train, y_train)
            y_pred = classifier.predict(X_test)
            # Class 0 should win almost every test sample.
            assert_greater(np.mean(y_pred == 0), 0.89)
def check_class_weight_balanced_classifiers(name, Classifier, X_train, y_train,
                                            X_test, y_test, weights):
    """class_weight='balanced' must beat uniform weights (weighted F1)."""
    with warnings.catch_warnings(record=True):
        classifier = Classifier()
        if hasattr(classifier, "n_iter"):
            classifier.set_params(n_iter=100)
        set_random_state(classifier)

        # baseline: uniform class weights
        classifier.fit(X_train, y_train)
        y_pred_uniform = classifier.predict(X_test)

        # rebalanced fit on the same data
        classifier.set_params(class_weight='balanced')
        classifier.fit(X_train, y_train)
        y_pred_balanced = classifier.predict(X_test)
        assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'),
                       f1_score(y_test, y_pred_uniform, average='weighted'))
def check_class_weight_balanced_linear_classifier(name, Classifier):
    """Test class weights with non-contiguous class labels."""
    X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
                  [1.0, 1.0], [1.0, 0.0]])
    y = np.array([1, 1, 1, -1, -1])

    with warnings.catch_warnings(record=True):
        classifier = Classifier()
        if hasattr(classifier, "n_iter"):
            # This is a very small dataset, default n_iter are likely to prevent
            # convergence
            classifier.set_params(n_iter=1000)
        set_random_state(classifier)

        # Let the model compute the class frequencies
        classifier.set_params(class_weight='balanced')
        coef_balanced = classifier.fit(X, y).coef_.copy()

        # Count each label occurrence to reweight manually
        n_samples = len(y)
        n_classes = float(len(np.unique(y)))
        class_weight = {label: n_samples / (np.sum(y == label) * n_classes)
                        for label in (1, -1)}
        classifier.set_params(class_weight=class_weight)
        coef_manual = classifier.fit(X, y).coef_.copy()
        # Manual reweighting must reproduce the 'balanced' coefficients.
        assert_array_almost_equal(coef_balanced, coef_manual)
def check_estimators_overwrite_params(name, Estimator):
    """Check that fit() does not change or mutate constructor parameters.

    Every value returned by get_params() before fitting must compare
    equal (by checksum) to the value after fitting.
    """
    X, y = make_blobs(random_state=0, n_samples=9)
    y = multioutput_estimator_convert_y_2d(name, y)
    # some want non-negative input
    X -= X.min()
    with warnings.catch_warnings(record=True):
        # catch deprecation warnings
        estimator = Estimator()
        set_fast_parameters(estimator)
        set_random_state(estimator)
        # Make a physical copy of the orginal estimator parameters before fitting.
        params = estimator.get_params()
        original_params = deepcopy(params)
        # Fit the model
        estimator.fit(X, y)
        # Compare the state of the model parameters with the original parameters
        new_params = estimator.get_params()
        for param_name, original_value in original_params.items():
            new_value = new_params[param_name]
            # We should never change or mutate the internal state of input
            # parameters by default. To check this we use the joblib.hash function
            # that introspects recursively any subobjects to compute a checksum.
            # The only exception to this rule of immutable constructor parameters
            # is possible RandomState instance but in this check we explicitly
            # fixed the random_state params recursively to be integer seeds.
            # NOTE(review): `hash` here is presumably joblib.hash imported at
            # module level (the builtin would fail on ndarrays) — confirm the
            # import at the top of this file.
            assert_equal(hash(new_value), hash(original_value),
                         "Estimator %s should not change or mutate "
                         " the parameter %s from %s to %s during fit."
                         % (name, param_name, original_value, new_value))
def check_sparsify_coefficients(name, Estimator):
    """sparsify() must keep coef_ sparse and predictions unchanged,
    including after a pickle round-trip."""
    X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
                  [-1, -2], [2, 2], [-2, -2]])
    y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
    est = Estimator()
    est.fit(X, y)
    pred_orig = est.predict(X)

    # test sparsify with dense inputs
    est.sparsify()
    # check the sparsified estimator itself and a pickle round-trip of it
    for candidate in (est, pickle.loads(pickle.dumps(est))):
        assert_true(sparse.issparse(candidate.coef_))
        assert_array_equal(candidate.predict(X), pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
    """Classifiers must accept array-likes that are not ndarrays."""
    X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
    y = multioutput_estimator_convert_y_2d(name, [1, 1, 1, 2, 2, 2])
    check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
    """Regressors must accept array-likes that are not ndarrays."""
    X, y = _boston_subset(n_samples=50)
    y = multioutput_estimator_convert_y_2d(name, y)
    check_estimators_data_not_an_array(name, Estimator, X, y)
def check_estimators_data_not_an_array(name, Estimator, X, y):
    """Results must not depend on X/y being real numpy arrays.

    Fits one estimator on data wrapped in ``NotAnArray`` and another on
    the raw arrays, then compares their predictions.
    """
    if name in CROSS_DECOMPOSITION:
        raise SkipTest
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        # separate estimators to control random seeds
        est_wrapped = Estimator()
        est_plain = Estimator()
        for est in (est_wrapped, est_plain):
            set_fast_parameters(est)
        for est in (est_wrapped, est_plain):
            set_random_state(est)
        X_ = NotAnArray(np.asarray(X))
        y_ = NotAnArray(np.asarray(y))
        # fit on the wrapped data, then on the raw data
        est_wrapped.fit(X_, y_)
        pred_wrapped = est_wrapped.predict(X_)
        est_plain.fit(X, y)
        pred_plain = est_plain.predict(X)
        assert_array_almost_equal(pred_wrapped, pred_plain, 2, name)
def check_parameters_default_constructible(name, Estimator):
    """Check that the estimator is default-constructible and that
    ``__init__`` only stores its parameters without modifying them.

    Also exercises ``clone``, ``repr`` and ``set_params``, and verifies
    that every constructor default has a simple, comparable type and
    matches the corresponding ``get_params()`` value.
    """
    classifier = LDA()
    # test default-constructibility
    # get rid of deprecation warnings
    with warnings.catch_warnings(record=True):
        if name in META_ESTIMATORS:
            estimator = Estimator(classifier)
        else:
            estimator = Estimator()
        # test cloning
        clone(estimator)
        # test __repr__
        repr(estimator)
        # test that set_params returns self
        assert_true(estimator.set_params() is estimator)
        # test if init does nothing but set parameters
        # this is important for grid_search etc.
        # We get the default parameters from init and then
        # compare these against the actual values of the attributes.
        # this comes from getattr. Gets rid of deprecation decorator.
        init = getattr(estimator.__init__, 'deprecated_original',
                       estimator.__init__)
        try:
            args, varargs, kws, defaults = inspect.getargspec(init)
        except TypeError:
            # init is not a python function.
            # true for mixins
            return
        params = estimator.get_params()
        if name in META_ESTIMATORS:
            # they need a non-default argument
            args = args[2:]
        else:
            args = args[1:]
        if args:
            # non-empty list
            assert_equal(len(args), len(defaults))
        else:
            return
        for arg, default in zip(args, defaults):
            assert_in(type(default), [str, int, float, bool, tuple, type(None),
                                      np.float64, types.FunctionType, Memory])
            if arg not in params.keys():
                # deprecated parameter, not in get_params
                assert_true(default is None)
                continue
            # ndarray defaults need element-wise comparison
            if isinstance(params[arg], np.ndarray):
                assert_array_equal(params[arg], default)
            else:
                assert_equal(params[arg], default)
def multioutput_estimator_convert_y_2d(name, y):
    """Return ``y`` reshaped to 2-D for estimators that require it.

    The MultiTask* estimators raise ValueError on a 1-D target, so hand
    them a single-column 2-D view; everyone else gets ``y`` unchanged.
    """
    multitask_names = ('MultiTaskElasticNetCV', 'MultiTaskLassoCV',
                       'MultiTaskLasso', 'MultiTaskElasticNet')
    if name in multitask_names:
        return y[:, np.newaxis]
    return y
def check_non_transformer_estimators_n_iter(name, estimator,
                                            multi_output=False):
    """Iterative solvers must report ``n_iter_`` > 0 after fitting."""
    iris = load_iris()
    X, y_ = iris.data, iris.target
    if multi_output:
        y_ = y_[:, np.newaxis]
    set_random_state(estimator, 0)
    # AffinityPropagation is unsupervised and takes no target.
    fit_args = (X,) if name == 'AffinityPropagation' else (X, y_)
    estimator.fit(*fit_args)
    assert_greater(estimator.n_iter_, 0)
def check_transformer_n_iter(name, estimator):
    """Iterative transformers must report ``n_iter_`` > 1 after fitting."""
    if name in CROSS_DECOMPOSITION:
        # Check using default data
        X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
        y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
    else:
        X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                           random_state=0, n_features=2, cluster_std=0.1)
        X -= X.min() - 0.1
    set_random_state(estimator, 0)
    estimator.fit(X, y_)
    if name in CROSS_DECOMPOSITION:
        # These return a n_iter per component.
        for n_iter in estimator.n_iter_:
            assert_greater(n_iter, 1)
    else:
        assert_greater(estimator.n_iter_, 1)
def check_get_params_invariance(name, estimator):
    """get_params(deep=False) must be a subset of get_params(deep=True)."""
    class T(BaseEstimator):
        """Mock classifier
        """

        def __init__(self):
            pass

        def fit(self, X, y):
            return self

    if name in ('FeatureUnion', 'Pipeline'):
        e = estimator([('clf', T())])
    # BUG FIX: the tuple below was missing a comma, so
    # ('GridSearchCV' 'RandomizedSearchCV') was one concatenated string and
    # ``name in`` performed substring matching instead of tuple membership.
    elif name in ('GridSearchCV', 'RandomizedSearchCV'):
        return
    else:
        e = estimator()

    shallow_params = e.get_params(deep=False)
    deep_params = e.get_params(deep=True)

    assert_true(all(item in deep_params.items() for item in
                    shallow_params.items()))
| |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Classes and utilites to keep track of files associated to an analysis.
The main class is `FileArchive`, which keep track of all the files associated to an analysis.
The `FileHandle` helper class encapsulates information on a particular file.
"""
from __future__ import absolute_import, division, print_function
import os
import sys
import time
import tempfile
from shutil import copyfile
from collections import OrderedDict
import numpy as np
# from numpy.core import defchararray
from astropy.table import Table, Column
from fermipy.fits_utils import write_tables_to_fits
def get_timestamp():
    """Return the current Unix time, truncated to an integer."""
    return int(time.time())
def get_unique_match(table, colname, value):
    """Return the index of the single row whose ``colname`` equals ``value``.

    Raises KeyError unless exactly one row matches.
    """
    column = table[colname]
    # FIXME, This is here for python 3.5, where astropy is now returning
    # bytes instead of str: compare string-typed columns as str.
    if column.dtype.kind in ['S', 'U']:
        mask = column.astype(str) == value
    else:
        mask = column == value
    n_match = mask.sum()
    if n_match != 1:
        raise KeyError("%i rows in column %s match value %s" %
                       (n_match, colname, value))
    # argmax of a boolean mask gives the index of the (single) True entry
    return np.argmax(mask)
# @unique
# class FileStatus(Enum):
class FileStatus(object):
    """Enumeration of file status types"""
    no_file = 0       # Not present in the system at all
    expected = 1      # Will be produced by a scheduled job
    exists = 2        # Present on disk
    missing = 3       # Should be present, but is not
    superseded = 4    # Present, but replaced by a newer version
    temp_removed = 5  # Was temporary and has since been deleted
# class FileFlags(Enum):
class FileFlags(object):
    """Bit masks to indicate file types"""
    no_flags = 0        # No flags set
    input_mask = 1      # Input to a job
    output_mask = 2     # Output from a job
    rm_mask = 4         # Removed by a job
    gz_mask = 8         # Compressed by a job
    internal_mask = 16  # Internal to a job
    stageable = 32      # May be staged to / from the scratch area

    # Convenience combinations used by FileDict queries
    in_ch_mask = input_mask | output_mask | rm_mask | internal_mask
    out_ch_mask = output_mask | rm_mask | internal_mask
    in_stage_mask = input_mask | stageable
    out_stage_mask = output_mask | stageable
    rmint_mask = rm_mask | internal_mask
class FileDict(object):
    """Small class to keep track of files used & created by a link.

    Parameters
    ----------
    file_args : dict
        Dictionary mapping argument to `FileFlags` enum

    file_dict : dict
        Dictionary mapping file path to `FileFlags` enum
    """

    def __init__(self, **kwargs):
        """C'tor"""
        self.file_args = kwargs.get('file_args', {})
        self.file_dict = {}

    def latch_file_info(self, args):
        """Extract the file paths from a set of arguments
        """
        self.file_dict.clear()
        for arg_name, flags in self.file_args.items():
            try:
                file_path = args[arg_name]
            except KeyError:
                # Argument not supplied: nothing to record.
                continue
            if file_path is None:
                continue
            # 'args' is special: its value is a list (or whitespace-
            # separated string) of file paths rather than a single path.
            if arg_name[0:4] == 'args':
                if isinstance(file_path, list):
                    tokens = file_path
                elif isinstance(file_path, str):
                    tokens = file_path.split()
                else:
                    raise TypeError(
                        "Args has type %s, expect list or str" % type(file_path))
                for token in tokens:
                    self.file_dict[token.replace('.gz', '')] = flags
            else:
                self.file_dict[file_path.replace('.gz', '')] = flags

    def update(self, file_dict):
        """Update self with values from a dictionary
        mapping file path [str] to `FileFlags` enum """
        for path, flags in file_dict.items():
            if path in self.file_dict:
                self.file_dict[path] |= flags
            else:
                self.file_dict[path] = flags

    def items(self):
        """Return iterator over self.file_dict"""
        return self.file_dict.items()

    @property
    def input_files(self):
        """Return a list of the input files needed by this link.

        For `Link` sub-classes this will return the union
        of all the input files of each internal `Link`.

        That is to say this will include files produced by one
        `Link` in a `Chain` and used as input to another `Link` in the `Chain`
        """
        # Any file carrying the input bit counts.
        return [path for path, flags in self.file_dict.items()
                if flags & FileFlags.input_mask]

    @property
    def output_files(self):
        """Return a list of the output files produced by this link.

        For `Link` sub-classes this will return the union
        of all the output files of each internal `Link`.

        That is to say this will include files produced by one
        `Link` in a `Chain` and used as input to another `Link` in the `Chain`
        """
        # Any file carrying the output bit counts.
        return [path for path, flags in self.file_dict.items()
                if flags & FileFlags.output_mask]

    @property
    def chain_input_files(self):
        """Return a list of the input files needed by this chain.

        For `Link` sub-classes this will return only those files
        that were not created by any internal `Link`
        """
        # Input files that are not also produced, removed or internal
        # (i.e., not made by some other step in the chain).
        return [path for path, flags in self.file_dict.items()
                if flags & FileFlags.in_ch_mask == FileFlags.input_mask]

    @property
    def chain_output_files(self):
        """Return a list of the all the output files produced by this link.

        For `Link` sub-classes this will return only those files
        that were not marked as internal files or marked for removal.
        """
        # Output files that are neither internal nor slated for removal.
        return [path for path, flags in self.file_dict.items()
                if flags & FileFlags.out_ch_mask == FileFlags.output_mask]

    @property
    def input_files_to_stage(self):
        """Return a list of the input files needed by this link.

        For `Link` sub-classes this will return the union
        of all the input files of each internal `Link`.

        That is to say this will include files produced by one
        `Link` in a `Chain` and used as input to another `Link` in the `Chain`
        """
        # Input files that are also flagged stageable.
        return [path for path, flags in self.file_dict.items()
                if flags & FileFlags.in_stage_mask == FileFlags.in_stage_mask]

    @property
    def output_files_to_stage(self):
        """Return a list of the output files produced by this link.

        For `Link` sub-classes this will return the union
        of all the output files of each internal `Link`.

        That is to say this will include files produced by one
        `Link` in a `Chain` and used as input to another `Link` in the `Chain`
        """
        # Output files that are also flagged stageable.
        return [path for path, flags in self.file_dict.items()
                if flags & FileFlags.out_stage_mask == FileFlags.out_stage_mask]

    @property
    def internal_files(self):
        """Return a list of the intermediate files produced by this link.

        This returns all files that were explicitly marked as internal files.
        """
        return [path for path, flags in self.file_dict.items()
                if flags & FileFlags.internal_mask]

    @property
    def temp_files(self):
        """Return a list of the temporary files produced by this link.

        This returns all files that were explicitly marked for removal.
        """
        return [path for path, flags in self.file_dict.items()
                if flags & FileFlags.rm_mask]

    @property
    def gzip_files(self):
        """Return a list of the files compressed by this link.

        This returns all files that were explicitly marked for compression.
        """
        return [path for path, flags in self.file_dict.items()
                if flags & FileFlags.gz_mask]

    def print_summary(self, stream=sys.stdout, indent=""):
        """Print a summary of the files in this file dict.

        This version explictly counts the union of all input and output files.
        """
        summary_lines = [
            ("%sTotal files : %i\n", len(self.file_dict)),
            ("%s Input files : %i\n", len(self.input_files)),
            ("%s Output files : %i\n", len(self.output_files)),
            ("%s Internal files : %i\n", len(self.internal_files)),
            ("%s Temp files : %i\n", len(self.temp_files)),
        ]
        for template, count in summary_lines:
            stream.write(template % (indent, count))

    def print_chain_summary(self, stream=sys.stdout, indent=""):
        """Print a summary of the files in this file dict.

        This version uses chain_input_files and chain_output_files to
        count the input and output files.
        """
        summary_lines = [
            ("%sTotal files : %i\n", len(self.file_dict)),
            ("%s Input files : %i\n", len(self.chain_input_files)),
            ("%s Output files : %i\n", len(self.chain_output_files)),
            ("%s Internal files : %i\n", len(self.internal_files)),
            ("%s Temp files : %i\n", len(self.temp_files)),
        ]
        for template, count in summary_lines:
            stream.write(template % (indent, count))
class FileStageManager(object):
    """Small class to deal with staging files to and from a scratch area """

    def __init__(self, scratchdir, workdir):
        """C'tor

        Parameters
        ----------
        scratchdir : str
            Area under which a per-user temporary directory is created.
        workdir : str
            Local working directory whose layout the scratch area mirrors.
        """
        try:
            self.scratchdir = tempfile.mkdtemp(prefix=os.environ['USER'] + '.',
                                               dir=scratchdir)
        except OSError:
            # Scratch area unavailable (e.g. not on a batch node); fall
            # back to a dummy path so path mapping still works.
            self.scratchdir = os.path.join(
                scratchdir, os.environ['USER'], 'dummy')
        self.workdir = os.path.abspath(workdir)

    def split_local_path(self, local_file):
        """Split the local path into a directory name and a file name

        If local_file is in self.workdir or a subdirectory of it,
        the directory will consist of the relative path from workdir.
        If local_file is not in self.workdir, directory will be empty.

        Returns (dirname, basename)
        """
        abspath = os.path.abspath(local_file)
        # Use a true prefix test.  The original substring search
        # (abspath.find(workdir) >= 0, then str.replace) also matched the
        # workdir name appearing anywhere inside the path and could mangle
        # unrelated paths.
        if abspath.startswith(self.workdir + os.sep):
            relpath = abspath[len(self.workdir) + 1:]
            return (os.path.dirname(relpath), os.path.basename(relpath))
        return ('', os.path.basename(local_file))

    def construct_scratch_path(self, dirname, basename):
        """Construct and return a path in the scratch area.

        This will be <self.scratchdir>/<dirname>/<basename>
        """
        return os.path.join(self.scratchdir, dirname, basename)

    def get_scratch_path(self, local_file):
        """Construct and return a path in the scratch area from a local file."""
        (local_dirname, local_basename) = self.split_local_path(local_file)
        return self.construct_scratch_path(local_dirname, local_basename)

    def map_files(self, local_files):
        """Build a dictionary mapping local paths to scratch paths.

        Parameters
        ----------
        local_files : list
            List of filenames to be mapped to scratch area

        Returns dict
            Mapping local_file : fullpath of scratch file
        """
        return dict((local_file, self.get_scratch_path(local_file))
                    for local_file in local_files)

    @staticmethod
    def make_scratch_dirs(file_mapping, dry_run=True):
        """Make any directories needed in the scratch area"""
        # De-duplicate the target directories with a set (the original
        # used a dict with dummy True values).
        scratch_dirs = set(os.path.dirname(value)
                           for value in file_mapping.values())
        for scratch_dirname in scratch_dirs:
            if dry_run:
                # Echo the equivalent shell command; 'mkdir -p' matches the
                # makedirs call below (the original printed 'mkdir -f',
                # which is not a valid mkdir flag).
                print("mkdir -p %s" % (scratch_dirname))
            else:
                try:
                    os.makedirs(scratch_dirname)
                except OSError:
                    # Directory already exists; nothing to do.
                    pass

    @staticmethod
    def copy_to_scratch(file_mapping, dry_run=True):
        """Copy input files to scratch area """
        for key, value in file_mapping.items():
            # Missing inputs are skipped silently; downstream links will
            # complain if they are really needed.
            if not os.path.exists(key):
                continue
            print("copy %s %s" % (key, value))
            if not dry_run:
                copyfile(key, value)
        return file_mapping

    @staticmethod
    def copy_from_scratch(file_mapping, dry_run=True):
        """Copy output files from scratch area """
        for key, value in file_mapping.items():
            print("copy %s %s" % (value, key))
            if not dry_run:
                try:
                    # Make sure the destination directory exists.
                    os.makedirs(os.path.dirname(key))
                except OSError:
                    pass
                copyfile(value, key)
        return file_mapping
class FileHandle(object):
    """Class to keep track of information about a single file.

    Parameters
    ----------
    key : int
        Unique id for this particular file
    creator : int
        Unique id for the job that created this file
    timestamp : int
        File creation time cast as an int
    status : `FileStatus`
        Enum giving current status of file
    flags : `FileFlags`
        Mask giving flags set on this file
    path : str
        Path to file
    """
    def __init__(self, **kwargs):
        """C'tor

        Take values of class members from keyword arguments.
        'path' is required; all other members have defaults.
        """
        self.key = kwargs.get('key', -1)
        self.creator = kwargs.get('creator', -1)
        self.timestamp = kwargs.get('timestamp', 0)
        self.status = kwargs.get('status', FileStatus.no_file)
        self.flags = kwargs.get('flags', FileFlags.no_flags)
        self.path = kwargs['path']
        # A leading '@' is treated as a marker and stripped from the path.
        # NOTE(review): the meaning of the '@' prefix is set by the callers
        # that build these paths — confirm before relying on it.
        if self.path[0] == '@':
            self.path = self.path[1:]
    @staticmethod
    def make_table(file_dict):
        """Build and return an `astropy.table.Table` to store `FileHandle`"""
        # One typed column per FileHandle member; paths are capped at
        # 256 characters by the 'S256' dtype.
        col_key = Column(name='key', dtype=int)
        col_path = Column(name='path', dtype='S256')
        col_creator = Column(name='creator', dtype=int)
        col_timestamp = Column(name='timestamp', dtype=int)
        col_status = Column(name='status', dtype=int)
        col_flags = Column(name='flags', dtype=int)
        columns = [col_key, col_path, col_creator,
                   col_timestamp, col_status, col_flags]
        table = Table(data=columns)
        for val in file_dict.values():
            val.append_to_table(table)
        return table
    @classmethod
    def make_dict(cls, table):
        """Build and return a dict of `FileHandle` from an `astropy.table.Table`

        The dictionary is keyed by FileHandle.key, which is a unique integer for each file
        """
        ret_dict = {}
        for row in table:
            file_handle = cls.create_from_row(row)
            ret_dict[file_handle.key] = file_handle
        return ret_dict
    @classmethod
    def create_from_row(cls, table_row):
        """Build and return a `FileHandle` from an `astropy.table.row.Row` """
        kwargs = {}
        for key in table_row.colnames:
            kwargs[key] = table_row[key]
        try:
            return cls(**kwargs)
        except KeyError:
            # 'path' was missing from the row: dump the kwargs for debugging.
            # NOTE(review): this swallows the error and implicitly returns
            # None, so callers may receive a None handle.
            print(kwargs)
    def check_status(self, basepath=None):
        """Check on the status of this particular file

        Looks for the file (or its gzipped twin) on disk, updates
        `self.status` accordingly, and returns the updated status.
        """
        if basepath is None:
            fullpath = self.path
        else:
            fullpath = os.path.join(basepath, self.path)
        exists = os.path.exists(fullpath)
        if not exists:
            # Files flagged for compression may exist as '<path>.gz'.
            if self.flags & FileFlags.gz_mask != 0:
                fullpath += '.gz'
                exists = os.path.exists(fullpath)
        if exists:
            # A superseded file stays superseded even if still on disk.
            if self.status == FileStatus.superseded:
                pass
            else:
                self.status = FileStatus.exists
        else:
            if self.status in [FileStatus.no_file,
                               FileStatus.expected,
                               FileStatus.missing,
                               FileStatus.temp_removed]:
                # Files marked as removable intermediates are expected to
                # be gone; record that rather than 'missing'.
                if self.flags & FileFlags.rmint_mask != 0:
                    self.status = FileStatus.temp_removed
                # NOTE(review): this branch is unreachable — self.status was
                # just tested to be in the list above, so it cannot equal
                # FileStatus.exists here.  Possibly a plain 'else' (mark as
                # missing) was intended.
                elif self.status == FileStatus.exists:
                    self.status = FileStatus.missing
            elif self.status == FileStatus.exists:
                # Previously present file has disappeared.
                self.status = FileStatus.temp_removed
        return self.status
    def append_to_table(self, table):
        """Add this instance as a row on a `astropy.table.Table` """
        table.add_row(dict(path=self.path,
                           key=self.key,
                           creator=self.creator,
                           timestamp=self.timestamp,
                           status=self.status,
                           flags=self.flags))
    def update_table_row(self, table, row_idx):
        """Update the values in an `astropy.table.Table` for this instances"""
        table[row_idx]['path'] = self.path
        table[row_idx]['key'] = self.key
        table[row_idx]['creator'] = self.creator
        table[row_idx]['timestamp'] = self.timestamp
        table[row_idx]['status'] = self.status
        table[row_idx]['flags'] = self.flags
class FileArchive(object):
    """Class that keeps track of the status of files used in an analysis

    Parameters
    ----------
    table_file : str
        Path to the file used to persist this `FileArchive`
    table : `astropy.table.Table`
        Persistent representation of this `FileArchive`
    cache : `OrderedDict`
        Transient representation of this `FileArchive`
    base_path : str
        Base file path for all files in this `FileArchive`
    """
    # Singleton instance
    _archive = None

    def __init__(self, **kwargs):
        """C'tor

        Takes self.base_path from kwargs['base_path']
        Reads kwargs['file_archive_table']
        """
        self._table_file = None
        self._table = None
        self._cache = OrderedDict()
        self._base_path = kwargs['base_path']
        self._read_table_file(kwargs['file_archive_table'])

    def __getitem__(self, key):
        """Return the `FileHandle` whose local path is key"""
        return self._cache[key]

    @property
    def table_file(self):
        """Return the path to the file used to persist this `FileArchive` """
        return self._table_file

    @property
    def table(self):
        """Return the persistent representation of this `FileArchive` """
        return self._table

    @property
    def cache(self):
        """Return the transient representation of this `FileArchive` """
        return self._cache

    @property
    def base_path(self):
        """Return the base file path for all files in this `FileArchive` """
        return self._base_path

    def _get_fullpath(self, filepath):
        """Return filepath with the base_path prefixed """
        # Absolute paths are returned unchanged.
        if filepath[0] == '/':
            return filepath
        return os.path.join(self._base_path, filepath)

    def _get_localpath(self, filepath):
        """Return the filepath with the base_path prefix removed """
        # Strip only a true prefix: the original str.replace() would also
        # have removed the base path from the middle of an unrelated path.
        if filepath.startswith(self._base_path):
            return filepath[len(self._base_path):]
        return filepath

    def _fill_cache(self):
        """Fill the cache from the `astropy.table.Table`"""
        for irow in range(len(self._table)):
            file_handle = self._make_file_handle(irow)
            self._cache[file_handle.path] = file_handle

    def _read_table_file(self, table_file):
        """Read an `astropy.table.Table` to set up the archive"""
        self._table_file = table_file
        if os.path.exists(self._table_file):
            self._table = Table.read(self._table_file)
        else:
            # No archive on disk yet: start from an empty table.
            self._table = FileHandle.make_table({})
        self._fill_cache()

    def _make_file_handle(self, row_idx):
        """Build and return a `FileHandle` object from an `astropy.table.row.Row` """
        row = self._table[row_idx]
        return FileHandle.create_from_row(row)

    def get_handle(self, filepath):
        """Get the `FileHandle` object associated to a particular file """
        localpath = self._get_localpath(filepath)
        return self._cache[localpath]

    def register_file(self, filepath, creator, status=FileStatus.no_file, flags=FileFlags.no_flags):
        """Register a file in the archive.

        If the file already exists, this raises a `KeyError`

        Parameters
        ----------
        filepath : str
            The path to the file
        creator : int
            A unique key for the job that created this file
        status : `FileStatus`
            Enumeration giving current status of file
        flags : `FileFlags`
            Enumeration giving flags set on this file

        Returns `FileHandle`
        """
        # Check whether the file is already registered.  The original code
        # raised the "already exists" KeyError inside the same try-block
        # that caught KeyError, so the error was silently swallowed and
        # duplicate registrations went undetected.
        try:
            self.get_handle(filepath)
        except KeyError:
            pass
        else:
            raise KeyError("File %s already exists in archive" % filepath)
        localpath = self._get_localpath(filepath)
        if status == FileStatus.exists:
            # Make sure the file really exists
            fullpath = self._get_fullpath(filepath)
            if not os.path.exists(fullpath):
                print("register_file called on missing file %s" % fullpath)
                status = FileStatus.missing
                timestamp = 0
            else:
                timestamp = int(os.stat(fullpath).st_mtime)
        else:
            timestamp = 0
        # Keys are 1-based row numbers in the persistent table.
        key = len(self._table) + 1
        file_handle = FileHandle(path=localpath,
                                 key=key,
                                 creator=creator,
                                 timestamp=timestamp,
                                 status=status,
                                 flags=flags)
        file_handle.append_to_table(self._table)
        self._cache[localpath] = file_handle
        return file_handle

    def update_file(self, filepath, creator, status):
        """Update a file in the archive

        If the file does not exist, this raises a `KeyError`

        Parameters
        ----------
        filepath : str
            The path to the file
        creator : int
            A unique key for the job that created this file
        status : `FileStatus`
            Enumeration giving current status of file

        Returns `FileHandle`
        """
        file_handle = self.get_handle(filepath)
        if status in [FileStatus.exists, FileStatus.superseded]:
            # Make sure the file really exists.  `FileHandle` has no
            # 'fullpath' attribute, so build the full path from the
            # handle's base-relative path (the original dereferenced
            # file_handle.fullpath, an AttributeError).
            fullpath = self._get_fullpath(file_handle.path)
            if not os.path.exists(fullpath):
                raise ValueError("File %s does not exist" % fullpath)
            timestamp = int(os.stat(fullpath).st_mtime)
        else:
            timestamp = 0
        file_handle.creator = creator
        file_handle.timestamp = timestamp
        file_handle.status = status
        # Keys are 1-based, table rows 0-based.
        file_handle.update_table_row(self._table, file_handle.key - 1)
        return file_handle

    def get_file_ids(self, file_list, creator=None,
                     status=FileStatus.no_file, file_dict=None):
        """Get or create a list of file ids based on file names

        Parameters
        ----------
        file_list : list
            The paths to the file
        creator : int
            A unique key for the job that created these files
        status : `FileStatus`
            Enumeration giving current status of files
        file_dict : `FileDict`
            Mask giving flags set on this file

        Returns list of integers
        """
        ret_list = []
        for fname in file_list:
            if file_dict is None:
                flags = FileFlags.no_flags
            else:
                flags = file_dict.file_dict[fname]
            try:
                fhandle = self.get_handle(fname)
            except KeyError:
                # Unknown file: register it on the fly, falling back to an
                # anonymous creator id when none was given.
                if creator is None:
                    creator = -1
                fhandle = self.register_file(fname, creator, status, flags)
            ret_list.append(fhandle.key)
        return ret_list

    def get_file_paths(self, id_list):
        """Get a list of file paths based of a set of ids

        Parameters
        ----------
        id_list : list
            List of integer file keys

        Returns list of file paths
        """
        if id_list is None or len(id_list) == 0:
            return []
        # Accept plain python lists as well as numpy arrays; the original
        # 'id_list - 1' only worked for arrays.  Keys are 1-based.
        idx = np.asarray(id_list) - 1
        try:
            path_array = self._table[idx]['path']
        except IndexError:
            print("IndexError ", len(self._table), id_list)
            path_array = []
        return [path for path in path_array]

    def write_table_file(self, table_file=None):
        """Write the table to self._table_file"""
        if self._table is None:
            raise RuntimeError("No table to write")
        if table_file is not None:
            self._table_file = table_file
        if self._table_file is None:
            raise RuntimeError("No output file specified for table")
        write_tables_to_fits(self._table_file, [self._table], clobber=True,
                             namelist=['FILE_ARCHIVE'])

    def update_file_status(self):
        """Update the status of all the files in the archive"""
        nfiles = len(self.cache.keys())
        # One counter per FileStatus value (0..5).
        status_vect = np.zeros((6), int)
        sys.stdout.write("Updating status of %i files: " % nfiles)
        sys.stdout.flush()
        for i, key in enumerate(self.cache.keys()):
            # Simple progress marker every 200 files.
            if i % 200 == 0:
                sys.stdout.write('.')
                sys.stdout.flush()
            fhandle = self.cache[key]
            fhandle.check_status(self._base_path)
            fhandle.update_table_row(self._table, fhandle.key - 1)
            status_vect[fhandle.status] += 1
        sys.stdout.write("!\n")
        sys.stdout.flush()
        sys.stdout.write("Summary:\n")
        sys.stdout.write("  no_file: %i\n" % status_vect[0])
        sys.stdout.write("  expected: %i\n" % status_vect[1])
        sys.stdout.write("  exists: %i\n" % status_vect[2])
        sys.stdout.write("  missing: %i\n" % status_vect[3])
        sys.stdout.write("  superseded: %i\n" % status_vect[4])
        sys.stdout.write("  temp_removed: %i\n" % status_vect[5])

    @classmethod
    def get_archive(cls):
        """Return the singleton `FileArchive` instance """
        return cls._archive

    @classmethod
    def build_archive(cls, **kwargs):
        """Return the singleton `FileArchive` instance, building it if needed"""
        if cls._archive is None:
            cls._archive = cls(**kwargs)
        return cls._archive
def main_browse():
    """Command-line entry point for browsing a `FileArchive`."""
    import argparse

    parser = argparse.ArgumentParser(usage="file_archive.py [options]",
                                     description="Browse a job archive")
    # (flag, dest, default, help) for each supported option.
    options = [
        ('--files', 'file_archive_table', 'file_archive_temp.fits',
         "File archive file"),
        ('--base', 'base_path', os.path.abspath('.'),
         "File archive base path"),
    ]
    for flag, dest, default, helptext in options:
        parser.add_argument(flag, action='store', dest=dest,
                            type=str, default=default, help=helptext)
    args = parser.parse_args(sys.argv[1:])
    FileArchive.build_archive(**vars(args))
# Run the command-line archive browser when executed as a script.
if __name__ == '__main__':
    main_browse()
| |
"""
Find intermediate evaluation results in assert statements through builtin AST.
This should replace oldinterpret.py eventually.
"""
import sys
import ast
import py
from _pytest.assertion import util
from _pytest.assertion.reinterpret import BuiltinAssertionError
if sys.platform.startswith("java"):
    # See http://bugs.jython.org/issue1497
    # On Jython the node classes are enumerated by name and matched by
    # exact class, rather than via isinstance against ast.expr/ast.stmt.
    _exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict",
              "ListComp", "GeneratorExp", "Yield", "Compare", "Call",
              "Repr", "Num", "Str", "Attribute", "Subscript", "Name",
              "List", "Tuple")
    _stmts = ("FunctionDef", "ClassDef", "Return", "Delete", "Assign",
              "AugAssign", "Print", "For", "While", "If", "With", "Raise",
              "TryExcept", "TryFinally", "Assert", "Import", "ImportFrom",
              "Exec", "Global", "Expr", "Pass", "Break", "Continue")
    _expr_nodes = set(getattr(ast, name) for name in _exprs)
    _stmt_nodes = set(getattr(ast, name) for name in _stmts)
    def _is_ast_expr(node):
        # True if node's class is one of the enumerated expression classes.
        return node.__class__ in _expr_nodes
    def _is_ast_stmt(node):
        # True if node's class is one of the enumerated statement classes.
        return node.__class__ in _stmt_nodes
else:
    def _is_ast_expr(node):
        # CPython: every expression node derives from ast.expr.
        return isinstance(node, ast.expr)
    def _is_ast_stmt(node):
        # CPython: every statement node derives from ast.stmt.
        return isinstance(node, ast.stmt)
# ast.Starred (star-expressions in calls) only exists on Python 3.
try:
    _Starred = ast.Starred
except AttributeError:
    # Python 2. Define a dummy class so isinstance() will always be False.
    class _Starred(object): pass
class Failure(Exception):
    """Raised when interpreting an AST node fails.

    Attributes:
        explanation: partial explanation text accumulated so far.
        cause: the ``sys.exc_info()`` triple active at construction time
            (all ``None`` when built outside an exception handler).
    """
    def __init__(self, explanation=""):
        self.explanation = explanation
        self.cause = sys.exc_info()
def interpret(source, frame, should_fail=False):
    """Re-evaluate *source* in *frame* and return the failure text.

    Returns None when the re-interpretation does not fail; when
    *should_fail* is set, returns a hint message in that case instead.
    """
    tree = ast.parse(source)
    try:
        DebugInterpreter(frame).visit(tree)
    except Failure:
        # py2/py3-compatible access to the active exception.
        return getfailure(sys.exc_info()[1])
    if should_fail:
        return ("(assertion failed, but when it was re-run for "
                "printing intermediate values, it did not fail. Suggestions: "
                "compute assert expression before the assert or use --assert=plain)")
def run(offending_line, frame=None):
    """Interpret *offending_line*; defaults to the caller's frame."""
    target = py.code.Frame(sys._getframe(1)) if frame is None else frame
    return interpret(offending_line, target)
def getfailure(e):
    """Render a `Failure` as the final assertion-failure text.

    The causing exception's value is appended to the first explanation
    line and its class name is prepended; the redundant
    ``AssertionError: `` prefix is stripped from plain assert failures.
    """
    explanation = util.format_explanation(e.explanation)
    exc_type, exc_value = e.cause[0], e.cause[1]
    if str(exc_value):
        first, sep, rest = explanation.partition('\n')
        explanation = "%s << %s%s%s" % (first, exc_value, sep, rest)
    text = "%s: %s" % (exc_type.__name__, explanation)
    if text.startswith('AssertionError: assert '):
        # len('AssertionError: ') == 16
        text = text[16:]
    return text
# Maps AST binary/comparison operator node classes to the source-level
# symbol used when rebuilding the text of the expression being explained.
operator_map = {
    ast.BitOr : "|",
    ast.BitXor : "^",
    ast.BitAnd : "&",
    ast.LShift : "<<",
    ast.RShift : ">>",
    ast.Add : "+",
    ast.Sub : "-",
    ast.Mult : "*",
    ast.Div : "/",
    ast.FloorDiv : "//",
    ast.Mod : "%",
    ast.Eq : "==",
    ast.NotEq : "!=",
    ast.Lt : "<",
    ast.LtE : "<=",
    ast.Gt : ">",
    ast.GtE : ">=",
    ast.Pow : "**",
    ast.Is : "is",
    ast.IsNot : "is not",
    ast.In : "in",
    ast.NotIn : "not in"
}
# Maps AST unary operator node classes to a %-format pattern applied to
# the operand's explanation (and to its re-evaluation source).
unary_map = {
    ast.Not : "not %s",
    ast.Invert : "~%s",
    ast.USub : "-%s",
    ast.UAdd : "+%s"
}
class DebugInterpreter(ast.NodeVisitor):
    """Interpret AST nodes to glean useful debugging information.

    Each ``visit_*`` method returns an ``(explanation, result)`` pair:
    a source-like text describing the sub-expression and the value it
    evaluated to inside ``self.frame``.  Evaluation errors are wrapped
    in `Failure`.
    """
    def __init__(self, frame):
        # Frame whose globals/locals are used to re-evaluate expressions.
        self.frame = frame
    def generic_visit(self, node):
        # Fallback when we don't have a special implementation.
        if _is_ast_expr(node):
            # Re-evaluate the whole expression in the target frame.
            mod = ast.Expression(node)
            co = self._compile(mod)
            try:
                result = self.frame.eval(co)
            except Exception:
                raise Failure()
            explanation = self.frame.repr(result)
            return explanation, result
        elif _is_ast_stmt(node):
            # NOTE(review): ast.Module grew a second 'type_ignores'
            # argument in Python 3.8; this call targets older versions.
            mod = ast.Module([node])
            co = self._compile(mod, "exec")
            try:
                self.frame.exec_(co)
            except Exception:
                raise Failure()
            # Statements produce no value.
            return None, None
        else:
            raise AssertionError("can't handle %s" %(node,))
    def _compile(self, source, mode="eval"):
        """Compile an AST (or source string) for execution in the frame."""
        return compile(source, "<assertion interpretation>", mode)
    def visit_Expr(self, expr):
        """An expression statement: explain its inner expression."""
        return self.visit(expr.value)
    def visit_Module(self, mod):
        """Visit each top-level statement; explanations are discarded."""
        for stmt in mod.body:
            self.visit(stmt)
    def visit_Name(self, name):
        """Explain a name lookup; non-local names keep their bare name."""
        explanation, result = self.generic_visit(name)
        # See if the name is local.
        source = "%r in locals() is not globals()" % (name.id,)
        co = self._compile(source)
        try:
            local = self.frame.eval(co)
        except Exception:
            # have to assume it isn't
            local = None
        if local is None or not self.frame.is_true(local):
            return name.id, result
        return explanation, result
    def visit_Compare(self, comp):
        """Explain a (possibly chained) comparison.

        Mirrors interpreter semantics: evaluation stops at the first
        link that is false.
        """
        left = comp.left
        left_explanation, left_result = self.visit(left)
        for op, next_op in zip(comp.ops, comp.comparators):
            next_explanation, next_result = self.visit(next_op)
            op_symbol = operator_map[op.__class__]
            explanation = "%s %s %s" % (left_explanation, op_symbol,
                                        next_explanation)
            # Re-evaluate just this link with the operands re-bound.
            source = "__exprinfo_left %s __exprinfo_right" % (op_symbol,)
            co = self._compile(source)
            try:
                result = self.frame.eval(co, __exprinfo_left=left_result,
                                         __exprinfo_right=next_result)
            except Exception:
                raise Failure(explanation)
            try:
                if not self.frame.is_true(result):
                    break
            except KeyboardInterrupt:
                raise
            except:
                break
            left_explanation, left_result = next_explanation, next_result
        # Let a comparison-specific hook (e.g. rich diffing) override the
        # generic explanation of the deciding link.
        if util._reprcompare is not None:
            res = util._reprcompare(op_symbol, left_result, next_result)
            if res:
                explanation = res
        return explanation, result
    def visit_BoolOp(self, boolop):
        """Explain and/or, stopping at the operand that decides it."""
        is_or = isinstance(boolop.op, ast.Or)
        explanations = []
        for operand in boolop.values:
            explanation, result = self.visit(operand)
            explanations.append(explanation)
            # Short-circuit: 'or' stops on the first truthy value,
            # 'and' on the first falsy one.
            if result == is_or:
                break
        name = is_or and " or " or " and "
        explanation = "(" + name.join(explanations) + ")"
        return explanation, result
    def visit_UnaryOp(self, unary):
        """Explain a unary operation (not, ~, -, +)."""
        pattern = unary_map[unary.op.__class__]
        operand_explanation, operand_result = self.visit(unary.operand)
        explanation = pattern % (operand_explanation,)
        co = self._compile(pattern % ("__exprinfo_expr",))
        try:
            result = self.frame.eval(co, __exprinfo_expr=operand_result)
        except Exception:
            raise Failure(explanation)
        return explanation, result
    def visit_BinOp(self, binop):
        """Explain a binary arithmetic/bitwise operation."""
        left_explanation, left_result = self.visit(binop.left)
        right_explanation, right_result = self.visit(binop.right)
        symbol = operator_map[binop.op.__class__]
        explanation = "(%s %s %s)" % (left_explanation, symbol,
                                      right_explanation)
        source = "__exprinfo_left %s __exprinfo_right" % (symbol,)
        co = self._compile(source)
        try:
            result = self.frame.eval(co, __exprinfo_left=left_result,
                                     __exprinfo_right=right_result)
        except Exception:
            raise Failure(explanation)
        return explanation, result
    def visit_Call(self, call):
        """Explain a call.

        Each argument is explained, its value re-bound to a synthetic
        ``__exprinfo_*`` name, and the call is re-executed against those
        names.
        """
        func_explanation, func = self.visit(call.func)
        arg_explanations = []
        ns = {"__exprinfo_func" : func}
        arguments = []
        for arg in call.args:
            arg_explanation, arg_result = self.visit(arg)
            if isinstance(arg, _Starred):
                # Python 3 *args in the call position.
                arg_name = "__exprinfo_star"
                ns[arg_name] = arg_result
                arguments.append("*%s" % (arg_name,))
                arg_explanations.append("*%s" % (arg_explanation,))
            else:
                arg_name = "__exprinfo_%s" % (len(ns),)
                ns[arg_name] = arg_result
                arguments.append(arg_name)
                arg_explanations.append(arg_explanation)
        for keyword in call.keywords:
            arg_explanation, arg_result = self.visit(keyword.value)
            if keyword.arg:
                arg_name = "__exprinfo_%s" % (len(ns),)
                keyword_source = "%s=%%s" % (keyword.arg)
                arguments.append(keyword_source % (arg_name,))
                arg_explanations.append(keyword_source % (arg_explanation,))
            else:
                # keyword.arg is None for **kwargs on Python 3.5+.
                arg_name = "__exprinfo_kwds"
                arguments.append("**%s" % (arg_name,))
                arg_explanations.append("**%s" % (arg_explanation,))
            ns[arg_name] = arg_result
        # Legacy (pre-3.5) AST carried *args/**kwargs as separate fields.
        if getattr(call, 'starargs', None):
            arg_explanation, arg_result = self.visit(call.starargs)
            arg_name = "__exprinfo_star"
            ns[arg_name] = arg_result
            arguments.append("*%s" % (arg_name,))
            arg_explanations.append("*%s" % (arg_explanation,))
        if getattr(call, 'kwargs', None):
            arg_explanation, arg_result = self.visit(call.kwargs)
            arg_name = "__exprinfo_kwds"
            ns[arg_name] = arg_result
            arguments.append("**%s" % (arg_name,))
            arg_explanations.append("**%s" % (arg_explanation,))
        args_explained = ", ".join(arg_explanations)
        explanation = "%s(%s)" % (func_explanation, args_explained)
        args = ", ".join(arguments)
        source = "__exprinfo_func(%s)" % (args,)
        co = self._compile(source)
        try:
            result = self.frame.eval(co, **ns)
        except Exception:
            raise Failure(explanation)
        # Show the call's value alongside the call expression itself.
        pattern = "%s\n{%s = %s\n}"
        rep = self.frame.repr(result)
        explanation = pattern % (rep, rep, explanation)
        return explanation, result
    def _is_builtin_name(self, name):
        """Return whether *name* resolves neither locally nor globally."""
        pattern = "%r not in globals() and %r not in locals()"
        source = pattern % (name.id, name.id)
        co = self._compile(source)
        try:
            return self.frame.eval(co)
        except Exception:
            return False
    def visit_Attribute(self, attr):
        """Explain an attribute load, retrying with name mangling."""
        if not isinstance(attr.ctx, ast.Load):
            return self.generic_visit(attr)
        source_explanation, source_result = self.visit(attr.value)
        explanation = "%s.%s" % (source_explanation, attr.attr)
        source = "__exprinfo_expr.%s" % (attr.attr,)
        co = self._compile(source)
        try:
            try:
                result = self.frame.eval(co, __exprinfo_expr=source_result)
            except AttributeError:
                # Maybe the attribute name needs to be mangled?
                if not attr.attr.startswith("__") or attr.attr.endswith("__"):
                    raise
                source = "getattr(__exprinfo_expr.__class__, '__name__', '')"
                co = self._compile(source)
                class_name = self.frame.eval(co, __exprinfo_expr=source_result)
                mangled_attr = "_" + class_name +  attr.attr
                source = "__exprinfo_expr.%s" % (mangled_attr,)
                co = self._compile(source)
                result = self.frame.eval(co, __exprinfo_expr=source_result)
        except Exception:
            raise Failure(explanation)
        explanation = "%s\n{%s = %s.%s\n}" % (self.frame.repr(result),
                                              self.frame.repr(result),
                                              source_explanation, attr.attr)
        # Check if the attr is from an instance.
        source = "%r in getattr(__exprinfo_expr, '__dict__', {})"
        source = source % (attr.attr,)
        co = self._compile(source)
        try:
            from_instance = self.frame.eval(co, __exprinfo_expr=source_result)
        except Exception:
            from_instance = None
        if from_instance is None or self.frame.is_true(from_instance):
            rep = self.frame.repr(result)
            pattern = "%s\n{%s = %s\n}"
            explanation = pattern % (rep, rep, explanation)
        return explanation, result
    def visit_Assert(self, assrt):
        """Explain an assert; raises `Failure` when the test is false."""
        test_explanation, test_result = self.visit(assrt.test)
        explanation = "assert %s" % (test_explanation,)
        if not self.frame.is_true(test_result):
            try:
                # Raise inside an except so Failure captures the
                # AssertionError via sys.exc_info().
                raise BuiltinAssertionError
            except Exception:
                raise Failure(explanation)
        return explanation, test_result
    def visit_Assign(self, assign):
        """Explain an assignment, then perform it in the frame."""
        value_explanation, value_result = self.visit(assign.value)
        explanation = "... = %s" % (value_explanation,)
        # Re-run the assignment with the already-computed value bound to
        # a synthetic name, so the RHS is not evaluated twice.
        name = ast.Name("__exprinfo_expr", ast.Load(),
                        lineno=assign.value.lineno,
                        col_offset=assign.value.col_offset)
        new_assign = ast.Assign(assign.targets, name, lineno=assign.lineno,
                                col_offset=assign.col_offset)
        mod = ast.Module([new_assign])
        co = self._compile(mod, "exec")
        try:
            self.frame.exec_(co, __exprinfo_expr=value_result)
        except Exception:
            raise Failure(explanation)
        return explanation, value_result
| |
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'ArpResolutionHistoryStatusEnum' : _MetaInfoEnum('ArpResolutionHistoryStatusEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper',
{
'status-none':'STATUS_NONE',
'status-resolution-request':'STATUS_RESOLUTION_REQUEST',
'status-resolved-reply':'STATUS_RESOLVED_REPLY',
'status-resolved-grat-arp':'STATUS_RESOLVED_GRAT_ARP',
'status-resolved-request':'STATUS_RESOLVED_REQUEST',
'status-resolved-lc-sync':'STATUS_RESOLVED_LC_SYNC',
'status-resolved-lc-sync-purge-delay':'STATUS_RESOLVED_LC_SYNC_PURGE_DELAY',
'status-resolved-client':'STATUS_RESOLVED_CLIENT',
'status-removed-client':'STATUS_REMOVED_CLIENT',
'status-already-resolved':'STATUS_ALREADY_RESOLVED',
'status-failed':'STATUS_FAILED',
'status-dropped-interface-down':'STATUS_DROPPED_INTERFACE_DOWN',
'status-dropped-broadcast-disabled':'STATUS_DROPPED_BROADCAST_DISABLED',
'status-dropped-interface-unavailable':'STATUS_DROPPED_INTERFACE_UNAVAILABLE',
'status-dropped-bad-subnet':'STATUS_DROPPED_BAD_SUBNET',
'status-dropped-dynamic-learning-disabled':'STATUS_DROPPED_DYNAMIC_LEARNING_DISABLED',
'status-dropped-out-of-subnet-disabled':'STATUS_DROPPED_OUT_OF_SUBNET_DISABLED',
'status-removed-client-sweep':'STATUS_REMOVED_CLIENT_SWEEP',
'status-added-client':'STATUS_ADDED_CLIENT',
'status-added-v1':'STATUS_ADDED_V1',
'status-removed-v1':'STATUS_REMOVED_V1',
'status-resolved-peer-sync':'STATUS_RESOLVED_PEER_SYNC',
}, 'Cisco-IOS-XR-ipv4-arp-oper', _yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-oper']),
'IpArpBagEncapEnum' : _MetaInfoEnum('IpArpBagEncapEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper',
{
'none':'NONE',
'arpa':'ARPA',
'snap':'SNAP',
'ieee802-1q':'IEEE802_1Q',
'srp':'SRP',
'srpa':'SRPA',
'srpb':'SRPB',
}, 'Cisco-IOS-XR-ipv4-arp-oper', _yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-oper']),
'ArpGmpBagEncapEnum' : _MetaInfoEnum('ArpGmpBagEncapEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper',
{
'none':'NONE',
'arpa':'ARPA',
'snap':'SNAP',
'ieee802-1q':'IEEE802_1Q',
'srp':'SRP',
'srpa':'SRPA',
'srpb':'SRPB',
}, 'Cisco-IOS-XR-ipv4-arp-oper', _yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-oper']),
'IpArpBagMediaEnum' : _MetaInfoEnum('IpArpBagMediaEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper',
{
'media-arpa':'MEDIA_ARPA',
'media-srp':'MEDIA_SRP',
'media-unknown':'MEDIA_UNKNOWN',
}, 'Cisco-IOS-XR-ipv4-arp-oper', _yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-oper']),
'ArpGmpBagEntryEnum' : _MetaInfoEnum('ArpGmpBagEntryEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper',
{
'null':'NULL',
'static':'STATIC',
'alias':'ALIAS',
}, 'Cisco-IOS-XR-ipv4-arp-oper', _yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-oper']),
'IpArpBagStateEnum' : _MetaInfoEnum('IpArpBagStateEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper',
{
'state-none':'STATE_NONE',
'state-interface':'STATE_INTERFACE',
'state-standby':'STATE_STANDBY',
'state-static':'STATE_STATIC',
'state-alias':'STATE_ALIAS',
'state-mobile':'STATE_MOBILE',
'state-incomplete':'STATE_INCOMPLETE',
'state-deleted':'STATE_DELETED',
'state-dynamic':'STATE_DYNAMIC',
'state-probe':'STATE_PROBE',
'state-purge-delayed':'STATE_PURGE_DELAYED',
'state-dhcp':'STATE_DHCP',
'state-vxlan':'STATE_VXLAN',
'state-sat':'STATE_SAT',
'state-r-sync':'STATE_R_SYNC',
'state-max':'STATE_MAX',
}, 'Cisco-IOS-XR-ipv4-arp-oper', _yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-oper']),
'ArpGmp.VrfInfos.VrfInfo' : {
'meta_info' : _MetaInfoClass('ArpGmp.VrfInfos.VrfInfo',
False,
[
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' VRF name for the default VRF use 'default'
''',
'vrf_name',
'Cisco-IOS-XR-ipv4-arp-oper', True),
_MetaInfoClassMember('rsi-handle', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' RSI registration handle
''',
'rsi_handle',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('rsi-handle-high', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' RSI registration handle (top 32-bits)
''',
'rsi_handle_high',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('table-id', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' IPv4 unicast table ID
''',
'table_id',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('vrf-id-number', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' VRF ID
''',
'vrf_id_number',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('vrf-name-xr', ATTRIBUTE, 'str' , None, None,
[], [],
''' VRF Name
''',
'vrf_name_xr',
'Cisco-IOS-XR-ipv4-arp-oper', False),
],
'Cisco-IOS-XR-ipv4-arp-oper',
'vrf-info',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper'
),
},
'ArpGmp.VrfInfos' : {
'meta_info' : _MetaInfoClass('ArpGmp.VrfInfos',
False,
[
_MetaInfoClassMember('vrf-info', REFERENCE_LIST, 'VrfInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper', 'ArpGmp.VrfInfos.VrfInfo',
[], [],
''' VRF related ARP-GMP operational data
''',
'vrf_info',
'Cisco-IOS-XR-ipv4-arp-oper', False),
],
'Cisco-IOS-XR-ipv4-arp-oper',
'vrf-infos',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper'
),
},
'ArpGmp.Vrfs.Vrf.ConfiguredIpAddresses.ConfiguredIpAddress' : {
'meta_info' : _MetaInfoClass('ArpGmp.Vrfs.Vrf.ConfiguredIpAddresses.ConfiguredIpAddress',
False,
[
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' Configured ARP-GMP IP
''',
'address',
'Cisco-IOS-XR-ipv4-arp-oper', True),
_MetaInfoClassMember('encapsulation-type', REFERENCE_ENUM_CLASS, 'ArpGmpBagEncapEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper', 'ArpGmpBagEncapEnum',
[], [],
''' Encap type
''',
'encapsulation_type',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('entry-type', REFERENCE_ENUM_CLASS, 'ArpGmpBagEntryEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper', 'ArpGmpBagEntryEnum',
[], [],
''' Entry type static/alias
''',
'entry_type',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('hardware-address', ATTRIBUTE, 'str' , None, None,
[], ['[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'],
''' Hardware address
''',
'hardware_address',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('ip-address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IP address
''',
'ip_address',
'Cisco-IOS-XR-ipv4-arp-oper', False),
],
'Cisco-IOS-XR-ipv4-arp-oper',
'configured-ip-address',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper'
),
},
'ArpGmp.Vrfs.Vrf.ConfiguredIpAddresses' : {
'meta_info' : _MetaInfoClass('ArpGmp.Vrfs.Vrf.ConfiguredIpAddresses',
False,
[
_MetaInfoClassMember('configured-ip-address', REFERENCE_LIST, 'ConfiguredIpAddress' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper', 'ArpGmp.Vrfs.Vrf.ConfiguredIpAddresses.ConfiguredIpAddress',
[], [],
''' ARP-GMP configured IP address information
''',
'configured_ip_address',
'Cisco-IOS-XR-ipv4-arp-oper', False),
],
'Cisco-IOS-XR-ipv4-arp-oper',
'configured-ip-addresses',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper'
),
},
'ArpGmp.Vrfs.Vrf.Routes.Route' : {
'meta_info' : _MetaInfoClass('ArpGmp.Vrfs.Vrf.Routes.Route',
False,
[
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IP address
''',
'address',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('interface-name', REFERENCE_LEAFLIST, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3}\\d+)|(([a-zA-Z0-9_]*\\d+/){4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Interface names
''',
'interface_name',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('interface-name-xr', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3}\\d+)|(([a-zA-Z0-9_]*\\d+/){4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Interface name (first element of InterfaceNames
array)
''',
'interface_name_xr',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('ip-address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IP address
''',
'ip_address',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('prefix-length', ATTRIBUTE, 'int' , None, None,
[(0, 32)], [],
''' Prefix length
''',
'prefix_length',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('prefix-length-xr', ATTRIBUTE, 'int' , None, None,
[(0, 255)], [],
''' IP address length
''',
'prefix_length_xr',
'Cisco-IOS-XR-ipv4-arp-oper', False),
],
'Cisco-IOS-XR-ipv4-arp-oper',
'route',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper'
),
},
'ArpGmp.Vrfs.Vrf.Routes' : {
'meta_info' : _MetaInfoClass('ArpGmp.Vrfs.Vrf.Routes',
False,
[
_MetaInfoClassMember('route', REFERENCE_LIST, 'Route' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper', 'ArpGmp.Vrfs.Vrf.Routes.Route',
[], [],
''' ARP GMP route information
''',
'route',
'Cisco-IOS-XR-ipv4-arp-oper', False),
],
'Cisco-IOS-XR-ipv4-arp-oper',
'routes',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper'
),
},
'ArpGmp.Vrfs.Vrf.InterfaceConfiguredIps.InterfaceConfiguredIp.AssociatedConfigurationEntry' : {
'meta_info' : _MetaInfoClass('ArpGmp.Vrfs.Vrf.InterfaceConfiguredIps.InterfaceConfiguredIp.AssociatedConfigurationEntry',
False,
[
_MetaInfoClassMember('encapsulation-type', REFERENCE_ENUM_CLASS, 'ArpGmpBagEncapEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper', 'ArpGmpBagEncapEnum',
[], [],
''' Encap type
''',
'encapsulation_type',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('entry-type', REFERENCE_ENUM_CLASS, 'ArpGmpBagEntryEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper', 'ArpGmpBagEntryEnum',
[], [],
''' Entry type static/alias
''',
'entry_type',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('hardware-address', ATTRIBUTE, 'str' , None, None,
[], ['[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'],
''' Hardware address
''',
'hardware_address',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('ip-address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IP address
''',
'ip_address',
'Cisco-IOS-XR-ipv4-arp-oper', False),
],
'Cisco-IOS-XR-ipv4-arp-oper',
'associated-configuration-entry',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper'
),
},
'ArpGmp.Vrfs.Vrf.InterfaceConfiguredIps.InterfaceConfiguredIp' : {
'meta_info' : _MetaInfoClass('ArpGmp.Vrfs.Vrf.InterfaceConfiguredIps.InterfaceConfiguredIp',
False,
[
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' Configured ARP-GMP IP
''',
'address',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('associated-configuration-entry', REFERENCE_CLASS, 'AssociatedConfigurationEntry' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper', 'ArpGmp.Vrfs.Vrf.InterfaceConfiguredIps.InterfaceConfiguredIp.AssociatedConfigurationEntry',
[], [],
''' Associated configuration entry
''',
'associated_configuration_entry',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3}\\d+)|(([a-zA-Z0-9_]*\\d+/){4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Interface name
''',
'interface_name',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('interface-name-xr', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3}\\d+)|(([a-zA-Z0-9_]*\\d+/){4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Interface name
''',
'interface_name_xr',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('reference-count', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Route reference count
''',
'reference_count',
'Cisco-IOS-XR-ipv4-arp-oper', False),
],
'Cisco-IOS-XR-ipv4-arp-oper',
'interface-configured-ip',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper'
),
},
'ArpGmp.Vrfs.Vrf.InterfaceConfiguredIps' : {
'meta_info' : _MetaInfoClass('ArpGmp.Vrfs.Vrf.InterfaceConfiguredIps',
False,
[
_MetaInfoClassMember('interface-configured-ip', REFERENCE_LIST, 'InterfaceConfiguredIp' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper', 'ArpGmp.Vrfs.Vrf.InterfaceConfiguredIps.InterfaceConfiguredIp',
[], [],
''' ARP GMP interface and associated configured
IP data
''',
'interface_configured_ip',
'Cisco-IOS-XR-ipv4-arp-oper', False),
],
'Cisco-IOS-XR-ipv4-arp-oper',
'interface-configured-ips',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper'
),
},
'ArpGmp.Vrfs.Vrf' : {
'meta_info' : _MetaInfoClass('ArpGmp.Vrfs.Vrf',
False,
[
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' VRF name for the default VRF use 'default'
''',
'vrf_name',
'Cisco-IOS-XR-ipv4-arp-oper', True),
_MetaInfoClassMember('configured-ip-addresses', REFERENCE_CLASS, 'ConfiguredIpAddresses' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper', 'ArpGmp.Vrfs.Vrf.ConfiguredIpAddresses',
[], [],
''' Table of ARP-GMP configured IP addresses
information
''',
'configured_ip_addresses',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('interface-configured-ips', REFERENCE_CLASS, 'InterfaceConfiguredIps' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper', 'ArpGmp.Vrfs.Vrf.InterfaceConfiguredIps',
[], [],
''' Table of ARP GMP interface and associated
configured IP data
''',
'interface_configured_ips',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('routes', REFERENCE_CLASS, 'Routes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper', 'ArpGmp.Vrfs.Vrf.Routes',
[], [],
''' Table of ARP GMP route information
''',
'routes',
'Cisco-IOS-XR-ipv4-arp-oper', False),
],
'Cisco-IOS-XR-ipv4-arp-oper',
'vrf',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper'
),
},
'ArpGmp.Vrfs' : {
'meta_info' : _MetaInfoClass('ArpGmp.Vrfs',
False,
[
_MetaInfoClassMember('vrf', REFERENCE_LIST, 'Vrf' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper', 'ArpGmp.Vrfs.Vrf',
[], [],
''' Per VRF ARP-GMP operational data
''',
'vrf',
'Cisco-IOS-XR-ipv4-arp-oper', False),
],
'Cisco-IOS-XR-ipv4-arp-oper',
'vrfs',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper'
),
},
'ArpGmp' : {
'meta_info' : _MetaInfoClass('ArpGmp',
False,
[
_MetaInfoClassMember('vrf-infos', REFERENCE_CLASS, 'VrfInfos' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper', 'ArpGmp.VrfInfos',
[], [],
''' Table of VRF related ARP-GMP operational data
''',
'vrf_infos',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('vrfs', REFERENCE_CLASS, 'Vrfs' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper', 'ArpGmp.Vrfs',
[], [],
''' Table of per VRF ARP-GMP operational data
''',
'vrfs',
'Cisco-IOS-XR-ipv4-arp-oper', False),
],
'Cisco-IOS-XR-ipv4-arp-oper',
'arp-gmp',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper'
),
},
'Arp.Nodes.Node.ResolutionHistoryDynamic.ArpEntry' : {
'meta_info' : _MetaInfoClass('Arp.Nodes.Node.ResolutionHistoryDynamic.ArpEntry',
False,
[
_MetaInfoClassMember('client-id', ATTRIBUTE, 'int' , None, None,
[(-2147483648, 2147483647)], [],
''' Resolving Client ID
''',
'client_id',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('entry-state', ATTRIBUTE, 'int' , None, None,
[(-2147483648, 2147483647)], [],
''' ARP entry state
''',
'entry_state',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('idb-interface-name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3}\\d+)|(([a-zA-Z0-9_]*\\d+/){4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Interface
''',
'idb_interface_name',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('ipv4-address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IPv4 address
''',
'ipv4_address',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('mac-address', ATTRIBUTE, 'str' , None, None,
[], ['[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'],
''' MAC address
''',
'mac_address',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('nsec-timestamp', ATTRIBUTE, 'long' , None, None,
[(0, 18446744073709551615L)], [],
''' Timestamp for entry in nanoseconds since Epoch,
i.e. since 00:00:00 UTC, January 1, 1970
''',
'nsec_timestamp',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('resolution-request-count', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Resolution Request count
''',
'resolution_request_count',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'ArpResolutionHistoryStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper', 'ArpResolutionHistoryStatusEnum',
[], [],
''' Resolution status
''',
'status',
'Cisco-IOS-XR-ipv4-arp-oper', False),
],
'Cisco-IOS-XR-ipv4-arp-oper',
'arp-entry',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper'
),
},
'Arp.Nodes.Node.ResolutionHistoryDynamic' : {
'meta_info' : _MetaInfoClass('Arp.Nodes.Node.ResolutionHistoryDynamic',
False,
[
_MetaInfoClassMember('arp-entry', REFERENCE_LIST, 'ArpEntry' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper', 'Arp.Nodes.Node.ResolutionHistoryDynamic.ArpEntry',
[], [],
''' Resolution history array
''',
'arp_entry',
'Cisco-IOS-XR-ipv4-arp-oper', False),
],
'Cisco-IOS-XR-ipv4-arp-oper',
'resolution-history-dynamic',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper'
),
},
'Arp.Nodes.Node.TrafficVrfs.TrafficVrf' : {
'meta_info' : _MetaInfoClass('Arp.Nodes.Node.TrafficVrfs.TrafficVrf',
False,
[
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' VRF name
''',
'vrf_name',
'Cisco-IOS-XR-ipv4-arp-oper', True),
_MetaInfoClassMember('alias-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total alias entries in the cache
''',
'alias_entries',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('arp-packet-interface-out-of-subnet', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total arp packets on interface due to out of
subnet
''',
'arp_packet_interface_out_of_subnet',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('arp-packet-node-out-of-subnet', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total ARP packets on node due to out of subnet
''',
'arp_packet_node_out_of_subnet',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('dhcp-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total DHCP entries in the cache
''',
'dhcp_entries',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('dynamic-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total dynamic entries in the cache
''',
'dynamic_entries',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('gratuitous-replies-sent', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total Gratuituous ARP replies sent
''',
'gratuitous_replies_sent',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('idb-structures', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total idb structures on this node
''',
'idb_structures',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('interface-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total interface entries in the cache
''',
'interface_entries',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('ip-packets-dropped-interface', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total ip packets droped on this interface
''',
'ip_packets_dropped_interface',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('ip-packets-dropped-node', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total ip packets droped on this node
''',
'ip_packets_dropped_node',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('local-proxy-replies-sent', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total Local Proxy ARP replies sent
''',
'local_proxy_replies_sent',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('no-buffer-errors', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total errors for no buffer
''',
'no_buffer_errors',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('out-of-memory-errors', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total errors for out of memory
''',
'out_of_memory_errors',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('proxy-replies-sent', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total Proxy ARP replies sent
''',
'proxy_replies_sent',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('replies-received', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total ARP replies received
''',
'replies_received',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('replies-sent', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total ARP replies sent
''',
'replies_sent',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('requests-received', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total ARP requests received
''',
'requests_received',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('requests-sent', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total ARP requests sent
''',
'requests_sent',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('resolution-replies-received', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total ARP resolution replies received
''',
'resolution_replies_received',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('resolution-requests-dropped', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' total ARP resolution requests dropped
''',
'resolution_requests_dropped',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('resolution-requests-received', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total ARP resolution requests received
''',
'resolution_requests_received',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('standby-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total standby entries in the cache
''',
'standby_entries',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('static-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total static entries in the cache
''',
'static_entries',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('total-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total ARP entries in the cache
''',
'total_entries',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('vxlan-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total VXLAN entries in the cache
''',
'vxlan_entries',
'Cisco-IOS-XR-ipv4-arp-oper', False),
],
'Cisco-IOS-XR-ipv4-arp-oper',
'traffic-vrf',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper'
),
},
'Arp.Nodes.Node.TrafficVrfs' : {
'meta_info' : _MetaInfoClass('Arp.Nodes.Node.TrafficVrfs',
False,
[
_MetaInfoClassMember('traffic-vrf', REFERENCE_LIST, 'TrafficVrf' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper', 'Arp.Nodes.Node.TrafficVrfs.TrafficVrf',
[], [],
''' Per VRF traffic data
''',
'traffic_vrf',
'Cisco-IOS-XR-ipv4-arp-oper', False),
],
'Cisco-IOS-XR-ipv4-arp-oper',
'traffic-vrfs',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper'
),
},
'Arp.Nodes.Node.TrafficNode' : {
'meta_info' : _MetaInfoClass('Arp.Nodes.Node.TrafficNode',
False,
[
_MetaInfoClassMember('alias-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total alias entries in the cache
''',
'alias_entries',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('arp-packet-interface-out-of-subnet', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total arp packets on interface due to out of
subnet
''',
'arp_packet_interface_out_of_subnet',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('arp-packet-node-out-of-subnet', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total ARP packets on node due to out of subnet
''',
'arp_packet_node_out_of_subnet',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('dhcp-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total DHCP entries in the cache
''',
'dhcp_entries',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('dynamic-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total dynamic entries in the cache
''',
'dynamic_entries',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('gratuitous-replies-sent', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total Gratuituous ARP replies sent
''',
'gratuitous_replies_sent',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('idb-structures', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total idb structures on this node
''',
'idb_structures',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('interface-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total interface entries in the cache
''',
'interface_entries',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('ip-packets-dropped-interface', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total ip packets droped on this interface
''',
'ip_packets_dropped_interface',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('ip-packets-dropped-node', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total ip packets droped on this node
''',
'ip_packets_dropped_node',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('local-proxy-replies-sent', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total Local Proxy ARP replies sent
''',
'local_proxy_replies_sent',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('no-buffer-errors', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total errors for no buffer
''',
'no_buffer_errors',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('out-of-memory-errors', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total errors for out of memory
''',
'out_of_memory_errors',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('proxy-replies-sent', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total Proxy ARP replies sent
''',
'proxy_replies_sent',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('replies-received', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total ARP replies received
''',
'replies_received',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('replies-sent', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total ARP replies sent
''',
'replies_sent',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('requests-received', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total ARP requests received
''',
'requests_received',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('requests-sent', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total ARP requests sent
''',
'requests_sent',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('resolution-replies-received', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total ARP resolution replies received
''',
'resolution_replies_received',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('resolution-requests-dropped', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' total ARP resolution requests dropped
''',
'resolution_requests_dropped',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('resolution-requests-received', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total ARP resolution requests received
''',
'resolution_requests_received',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('standby-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total standby entries in the cache
''',
'standby_entries',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('static-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total static entries in the cache
''',
'static_entries',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('total-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total ARP entries in the cache
''',
'total_entries',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('vxlan-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total VXLAN entries in the cache
''',
'vxlan_entries',
'Cisco-IOS-XR-ipv4-arp-oper', False),
],
'Cisco-IOS-XR-ipv4-arp-oper',
'traffic-node',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper'
),
},
'Arp.Nodes.Node.ResolutionHistoryClient.ArpEntry' : {
'meta_info' : _MetaInfoClass('Arp.Nodes.Node.ResolutionHistoryClient.ArpEntry',
False,
[
_MetaInfoClassMember('client-id', ATTRIBUTE, 'int' , None, None,
[(-2147483648, 2147483647)], [],
''' Resolving Client ID
''',
'client_id',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('entry-state', ATTRIBUTE, 'int' , None, None,
[(-2147483648, 2147483647)], [],
''' ARP entry state
''',
'entry_state',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('idb-interface-name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3}\\d+)|(([a-zA-Z0-9_]*\\d+/){4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Interface
''',
'idb_interface_name',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('ipv4-address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IPv4 address
''',
'ipv4_address',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('mac-address', ATTRIBUTE, 'str' , None, None,
[], ['[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'],
''' MAC address
''',
'mac_address',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('nsec-timestamp', ATTRIBUTE, 'long' , None, None,
[(0, 18446744073709551615L)], [],
''' Timestamp for entry in nanoseconds since Epoch,
i.e. since 00:00:00 UTC, January 1, 1970
''',
'nsec_timestamp',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('resolution-request-count', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Resolution Request count
''',
'resolution_request_count',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'ArpResolutionHistoryStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper', 'ArpResolutionHistoryStatusEnum',
[], [],
''' Resolution status
''',
'status',
'Cisco-IOS-XR-ipv4-arp-oper', False),
],
'Cisco-IOS-XR-ipv4-arp-oper',
'arp-entry',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper'
),
},
'Arp.Nodes.Node.ResolutionHistoryClient' : {
'meta_info' : _MetaInfoClass('Arp.Nodes.Node.ResolutionHistoryClient',
False,
[
_MetaInfoClassMember('arp-entry', REFERENCE_LIST, 'ArpEntry' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper', 'Arp.Nodes.Node.ResolutionHistoryClient.ArpEntry',
[], [],
''' Resolution history array
''',
'arp_entry',
'Cisco-IOS-XR-ipv4-arp-oper', False),
],
'Cisco-IOS-XR-ipv4-arp-oper',
'resolution-history-client',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper'
),
},
'Arp.Nodes.Node.Entries.Entry' : {
'meta_info' : _MetaInfoClass('Arp.Nodes.Node.Entries.Entry',
False,
[
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IP Address of ARP entry
''',
'address',
'Cisco-IOS-XR-ipv4-arp-oper', True),
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3}\\d+)|(([a-zA-Z0-9_]*\\d+/){4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Interface name
''',
'interface_name',
'Cisco-IOS-XR-ipv4-arp-oper', True),
_MetaInfoClassMember('age', ATTRIBUTE, 'long' , None, None,
[(0, 18446744073709551615L)], [],
''' Age of this entry
''',
'age',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('encapsulation-type', REFERENCE_ENUM_CLASS, 'IpArpBagEncapEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper', 'IpArpBagEncapEnum',
[], [],
''' Source encapsulation type
''',
'encapsulation_type',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('hardware-address', ATTRIBUTE, 'str' , None, None,
[], ['[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'],
''' Hardware address
''',
'hardware_address',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('hardware-length', ATTRIBUTE, 'int' , None, None,
[(0, 255)], [],
''' Source hardware length
''',
'hardware_length',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('media-type', REFERENCE_ENUM_CLASS, 'IpArpBagMediaEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper', 'IpArpBagMediaEnum',
[], [],
''' Media type for this entry
''',
'media_type',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('state', REFERENCE_ENUM_CLASS, 'IpArpBagStateEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper', 'IpArpBagStateEnum',
[], [],
''' State of this entry
''',
'state',
'Cisco-IOS-XR-ipv4-arp-oper', False),
],
'Cisco-IOS-XR-ipv4-arp-oper',
'entry',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper'
),
},
'Arp.Nodes.Node.Entries' : {
'meta_info' : _MetaInfoClass('Arp.Nodes.Node.Entries',
False,
[
_MetaInfoClassMember('entry', REFERENCE_LIST, 'Entry' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper', 'Arp.Nodes.Node.Entries.Entry',
[], [],
''' ARP entry
''',
'entry',
'Cisco-IOS-XR-ipv4-arp-oper', False),
],
'Cisco-IOS-XR-ipv4-arp-oper',
'entries',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper'
),
},
'Arp.Nodes.Node.TrafficInterfaces.TrafficInterface' : {
'meta_info' : _MetaInfoClass('Arp.Nodes.Node.TrafficInterfaces.TrafficInterface',
False,
[
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3}\\d+)|(([a-zA-Z0-9_]*\\d+/){4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Interface name
''',
'interface_name',
'Cisco-IOS-XR-ipv4-arp-oper', True),
_MetaInfoClassMember('alias-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total alias entries in the cache
''',
'alias_entries',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('arp-packet-interface-out-of-subnet', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total arp packets on interface due to out of
subnet
''',
'arp_packet_interface_out_of_subnet',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('arp-packet-node-out-of-subnet', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total ARP packets on node due to out of subnet
''',
'arp_packet_node_out_of_subnet',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('dhcp-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total DHCP entries in the cache
''',
'dhcp_entries',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('dynamic-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total dynamic entries in the cache
''',
'dynamic_entries',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('gratuitous-replies-sent', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total Gratuituous ARP replies sent
''',
'gratuitous_replies_sent',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('idb-structures', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total idb structures on this node
''',
'idb_structures',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('interface-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total interface entries in the cache
''',
'interface_entries',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('ip-packets-dropped-interface', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total ip packets droped on this interface
''',
'ip_packets_dropped_interface',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('ip-packets-dropped-node', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total ip packets droped on this node
''',
'ip_packets_dropped_node',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('local-proxy-replies-sent', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total Local Proxy ARP replies sent
''',
'local_proxy_replies_sent',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('no-buffer-errors', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total errors for no buffer
''',
'no_buffer_errors',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('out-of-memory-errors', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total errors for out of memory
''',
'out_of_memory_errors',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('proxy-replies-sent', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total Proxy ARP replies sent
''',
'proxy_replies_sent',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('replies-received', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total ARP replies received
''',
'replies_received',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('replies-sent', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total ARP replies sent
''',
'replies_sent',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('requests-received', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total ARP requests received
''',
'requests_received',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('requests-sent', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total ARP requests sent
''',
'requests_sent',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('resolution-replies-received', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total ARP resolution replies received
''',
'resolution_replies_received',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('resolution-requests-dropped', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' total ARP resolution requests dropped
''',
'resolution_requests_dropped',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('resolution-requests-received', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total ARP resolution requests received
''',
'resolution_requests_received',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('standby-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total standby entries in the cache
''',
'standby_entries',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('static-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total static entries in the cache
''',
'static_entries',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('total-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total ARP entries in the cache
''',
'total_entries',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('vxlan-entries', ATTRIBUTE, 'int' , None, None,
[(0, 4294967295)], [],
''' Total VXLAN entries in the cache
''',
'vxlan_entries',
'Cisco-IOS-XR-ipv4-arp-oper', False),
],
'Cisco-IOS-XR-ipv4-arp-oper',
'traffic-interface',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper'
),
},
'Arp.Nodes.Node.TrafficInterfaces' : {
'meta_info' : _MetaInfoClass('Arp.Nodes.Node.TrafficInterfaces',
False,
[
_MetaInfoClassMember('traffic-interface', REFERENCE_LIST, 'TrafficInterface' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper', 'Arp.Nodes.Node.TrafficInterfaces.TrafficInterface',
[], [],
''' Per interface traffic data
''',
'traffic_interface',
'Cisco-IOS-XR-ipv4-arp-oper', False),
],
'Cisco-IOS-XR-ipv4-arp-oper',
'traffic-interfaces',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper'
),
},
'Arp.Nodes.Node' : {
'meta_info' : _MetaInfoClass('Arp.Nodes.Node',
False,
[
_MetaInfoClassMember('node-name', ATTRIBUTE, 'str' , None, None,
[], ['([a-zA-Z0-9_]*\\d+/){1,2}([a-zA-Z0-9_]*\\d+)'],
''' Node name
''',
'node_name',
'Cisco-IOS-XR-ipv4-arp-oper', True),
_MetaInfoClassMember('entries', REFERENCE_CLASS, 'Entries' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper', 'Arp.Nodes.Node.Entries',
[], [],
''' Table of ARP entries
''',
'entries',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('resolution-history-client', REFERENCE_CLASS, 'ResolutionHistoryClient' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper', 'Arp.Nodes.Node.ResolutionHistoryClient',
[], [],
''' Per node client-installed ARP resolution
history data
''',
'resolution_history_client',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('resolution-history-dynamic', REFERENCE_CLASS, 'ResolutionHistoryDynamic' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper', 'Arp.Nodes.Node.ResolutionHistoryDynamic',
[], [],
''' Per node dynamically-resolved ARP resolution
history data
''',
'resolution_history_dynamic',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('traffic-interfaces', REFERENCE_CLASS, 'TrafficInterfaces' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper', 'Arp.Nodes.Node.TrafficInterfaces',
[], [],
''' ARP Traffic information per interface
''',
'traffic_interfaces',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('traffic-node', REFERENCE_CLASS, 'TrafficNode' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper', 'Arp.Nodes.Node.TrafficNode',
[], [],
''' Per node ARP Traffic data
''',
'traffic_node',
'Cisco-IOS-XR-ipv4-arp-oper', False),
_MetaInfoClassMember('traffic-vrfs', REFERENCE_CLASS, 'TrafficVrfs' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper', 'Arp.Nodes.Node.TrafficVrfs',
[], [],
''' ARP Traffic information per VRF
''',
'traffic_vrfs',
'Cisco-IOS-XR-ipv4-arp-oper', False),
],
'Cisco-IOS-XR-ipv4-arp-oper',
'node',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper'
),
},
'Arp.Nodes' : {
'meta_info' : _MetaInfoClass('Arp.Nodes',
False,
[
_MetaInfoClassMember('node', REFERENCE_LIST, 'Node' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper', 'Arp.Nodes.Node',
[], [],
''' Per-node ARP operational data
''',
'node',
'Cisco-IOS-XR-ipv4-arp-oper', False),
],
'Cisco-IOS-XR-ipv4-arp-oper',
'nodes',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper'
),
},
'Arp' : {
'meta_info' : _MetaInfoClass('Arp',
False,
[
_MetaInfoClassMember('nodes', REFERENCE_CLASS, 'Nodes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper', 'Arp.Nodes',
[], [],
''' Table of per-node ARP operational data
''',
'nodes',
'Cisco-IOS-XR-ipv4-arp-oper', False),
],
'Cisco-IOS-XR-ipv4-arp-oper',
'arp',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-arp-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_arp_oper'
),
},
}
# Wire each generated meta-info class to its parent container so the YDK
# runtime can walk the schema hierarchy.  For every entry the parent path is
# simply the child path with its last dotted component removed, so derive it
# instead of spelling out both sides of each assignment.
for _child_path in [
    'ArpGmp.VrfInfos.VrfInfo',
    'ArpGmp.Vrfs.Vrf.ConfiguredIpAddresses.ConfiguredIpAddress',
    'ArpGmp.Vrfs.Vrf.Routes.Route',
    'ArpGmp.Vrfs.Vrf.InterfaceConfiguredIps.InterfaceConfiguredIp.AssociatedConfigurationEntry',
    'ArpGmp.Vrfs.Vrf.InterfaceConfiguredIps.InterfaceConfiguredIp',
    'ArpGmp.Vrfs.Vrf.ConfiguredIpAddresses',
    'ArpGmp.Vrfs.Vrf.Routes',
    'ArpGmp.Vrfs.Vrf.InterfaceConfiguredIps',
    'ArpGmp.Vrfs.Vrf',
    'ArpGmp.VrfInfos',
    'ArpGmp.Vrfs',
    'Arp.Nodes.Node.ResolutionHistoryDynamic.ArpEntry',
    'Arp.Nodes.Node.TrafficVrfs.TrafficVrf',
    'Arp.Nodes.Node.ResolutionHistoryClient.ArpEntry',
    'Arp.Nodes.Node.Entries.Entry',
    'Arp.Nodes.Node.TrafficInterfaces.TrafficInterface',
    'Arp.Nodes.Node.ResolutionHistoryDynamic',
    'Arp.Nodes.Node.TrafficVrfs',
    'Arp.Nodes.Node.TrafficNode',
    'Arp.Nodes.Node.ResolutionHistoryClient',
    'Arp.Nodes.Node.Entries',
    'Arp.Nodes.Node.TrafficInterfaces',
    'Arp.Nodes.Node',
    'Arp.Nodes',
]:
    _parent_path = _child_path.rsplit('.', 1)[0]
    _meta_table[_child_path]['meta_info'].parent = _meta_table[_parent_path]['meta_info']
| |
import os.path as osp
import chainer
import chainer.functions as F
import chainer.links as L
import numpy as np
from .. import data
from .. import initializers
class FCN8s(chainer.Chain):
    """Fully convolutional network with stride-8 output (FCN-8s).

    VGG16-style convolutional backbone with skip connections from the
    pool3 and pool4 feature maps; the class score map is fused at three
    resolutions and upsampled with learned deconvolutions.
    NOTE(review): layer layout appears to match the Caffe reference FCN
    implementation (hence the fixed crop offsets below) — confirm against
    the converted checkpoint served by ``download()``.
    """
    # Default location of the Caffe-converted pretrained weights loaded by
    # callers after ``download()``.
    pretrained_model = osp.expanduser(
        '~/data/models/chainer/fcn8s_from_caffe.npz')
    def __init__(self, n_class=21):
        """Build the network.

        Args:
            n_class (int): Number of output classes. Default 21 —
                presumably PASCAL VOC (20 classes + background); confirm.
        """
        self.n_class = n_class
        # Conv/fc layers start from all-zero weights and biases: real values
        # are expected to come from a pretrained checkpoint (download()) or
        # from init_from_fcn16s().
        kwargs = {
            'initialW': chainer.initializers.Zero(),
            'initial_bias': chainer.initializers.Zero(),
        }
        super(FCN8s, self).__init__()
        with self.init_scope():
            # pad=100 on the very first conv enlarges the feature maps so the
            # fixed crops in __call__ (offsets 5, 9, 31) remain in bounds for
            # arbitrary input sizes — standard FCN trick.
            self.conv1_1 = L.Convolution2D(3, 64, 3, 1, 100, **kwargs)
            self.conv1_2 = L.Convolution2D(64, 64, 3, 1, 1, **kwargs)
            self.conv2_1 = L.Convolution2D(64, 128, 3, 1, 1, **kwargs)
            self.conv2_2 = L.Convolution2D(128, 128, 3, 1, 1, **kwargs)
            self.conv3_1 = L.Convolution2D(128, 256, 3, 1, 1, **kwargs)
            self.conv3_2 = L.Convolution2D(256, 256, 3, 1, 1, **kwargs)
            self.conv3_3 = L.Convolution2D(256, 256, 3, 1, 1, **kwargs)
            self.conv4_1 = L.Convolution2D(256, 512, 3, 1, 1, **kwargs)
            self.conv4_2 = L.Convolution2D(512, 512, 3, 1, 1, **kwargs)
            self.conv4_3 = L.Convolution2D(512, 512, 3, 1, 1, **kwargs)
            self.conv5_1 = L.Convolution2D(512, 512, 3, 1, 1, **kwargs)
            self.conv5_2 = L.Convolution2D(512, 512, 3, 1, 1, **kwargs)
            self.conv5_3 = L.Convolution2D(512, 512, 3, 1, 1, **kwargs)
            # fc6/fc7 are the VGG fully-connected layers expressed as
            # convolutions (7x7 and 1x1), making the net fully convolutional.
            self.fc6 = L.Convolution2D(512, 4096, 7, 1, 0, **kwargs)
            self.fc7 = L.Convolution2D(4096, 4096, 1, 1, 0, **kwargs)
            self.score_fr = L.Convolution2D(4096, n_class, 1, 1, 0, **kwargs)
            # Upsampling deconvolutions: no bias, initialized by the local
            # initializers.UpsamplingDeconvWeight (presumably bilinear-style
            # interpolation weights — confirm in ../initializers).
            self.upscore2 = L.Deconvolution2D(
                n_class, n_class, 4, 2, 0, nobias=True,
                initialW=initializers.UpsamplingDeconvWeight())
            self.upscore8 = L.Deconvolution2D(
                n_class, n_class, 16, 8, 0, nobias=True,
                initialW=initializers.UpsamplingDeconvWeight())
            # 1x1 score heads for the skip connections.
            self.score_pool3 = L.Convolution2D(256, n_class, 1, 1, 0, **kwargs)
            self.score_pool4 = L.Convolution2D(512, n_class, 1, 1, 0, **kwargs)
            self.upscore_pool4 = L.Deconvolution2D(
                n_class, n_class, 4, 2, 0, nobias=True,
                initialW=initializers.UpsamplingDeconvWeight())
    def __call__(self, x, t=None):
        """Forward pass.

        Args:
            x: Input image batch; conv1_1 expects 3 channels.
            t: Integer label map, or None. When None the model must not be
                in train mode; only ``self.score`` is set and None is
                returned.

        Returns:
            Softmax cross-entropy loss when ``t`` is given, else None.

        Raises:
            ValueError: If the computed loss is NaN.
        """
        # conv1
        h = F.relu(self.conv1_1(x))
        conv1_1 = h
        h = F.relu(self.conv1_2(conv1_1))
        conv1_2 = h
        h = F.max_pooling_2d(conv1_2, 2, stride=2, pad=0)
        pool1 = h  # 1/2
        # conv2
        h = F.relu(self.conv2_1(pool1))
        conv2_1 = h
        h = F.relu(self.conv2_2(conv2_1))
        conv2_2 = h
        h = F.max_pooling_2d(conv2_2, 2, stride=2, pad=0)
        pool2 = h  # 1/4
        # conv3
        h = F.relu(self.conv3_1(pool2))
        conv3_1 = h
        h = F.relu(self.conv3_2(conv3_1))
        conv3_2 = h
        h = F.relu(self.conv3_3(conv3_2))
        conv3_3 = h
        h = F.max_pooling_2d(conv3_3, 2, stride=2, pad=0)
        pool3 = h  # 1/8
        # conv4
        h = F.relu(self.conv4_1(pool3))
        h = F.relu(self.conv4_2(h))
        h = F.relu(self.conv4_3(h))
        h = F.max_pooling_2d(h, 2, stride=2, pad=0)
        pool4 = h  # 1/16
        # conv5
        h = F.relu(self.conv5_1(pool4))
        h = F.relu(self.conv5_2(h))
        h = F.relu(self.conv5_3(h))
        h = F.max_pooling_2d(h, 2, stride=2, pad=0)
        pool5 = h  # 1/32
        # fc6
        h = F.relu(self.fc6(pool5))
        h = F.dropout(h, ratio=.5)
        fc6 = h  # 1/32
        # fc7
        h = F.relu(self.fc7(fc6))
        h = F.dropout(h, ratio=.5)
        fc7 = h  # 1/32
        # score_fr
        h = self.score_fr(fc7)
        score_fr = h  # 1/32
        # score_pool3
        h = self.score_pool3(pool3)
        score_pool3 = h  # 1/8
        # score_pool4
        h = self.score_pool4(pool4)
        score_pool4 = h  # 1/16
        # upscore2
        h = self.upscore2(score_fr)
        upscore2 = h  # 1/16
        # score_pool4c: crop score_pool4 to upscore2's spatial size (the
        # fixed offset 5 compensates for the pad=100 in conv1_1).
        h = score_pool4[:, :,
                        5:5 + upscore2.shape[2],
                        5:5 + upscore2.shape[3]]
        score_pool4c = h  # 1/16
        # fuse_pool4
        h = upscore2 + score_pool4c
        fuse_pool4 = h  # 1/16
        # upscore_pool4
        h = self.upscore_pool4(fuse_pool4)
        upscore_pool4 = h  # 1/8
        # score_pool3c (comment fixed: this is the pool3 crop, not pool4)
        h = score_pool3[:, :,
                        9:9 + upscore_pool4.shape[2],
                        9:9 + upscore_pool4.shape[3]]
        score_pool3c = h  # 1/8
        # fuse_pool3
        h = upscore_pool4 + score_pool3c
        fuse_pool3 = h  # 1/8
        # upscore8
        h = self.upscore8(fuse_pool3)
        upscore8 = h  # 1/1
        # score: crop back to the input's spatial size.
        h = upscore8[:, :, 31:31 + x.shape[2], 31:31 + x.shape[3]]
        score = h  # 1/1
        self.score = score
        if t is None:
            # Inference mode: caller reads self.score instead of a loss.
            assert not chainer.config.train
            return
        loss = F.softmax_cross_entropy(score, t, normalize=False)
        if np.isnan(float(loss.data)):
            raise ValueError('Loss is nan.')
        chainer.report({'loss': loss}, self)
        return loss
    def init_from_fcn16s(self, fcn16s):
        """Copy weights from an FCN-16s model for layers with matching names.

        Layers present in fcn16s but absent here are silently skipped;
        shape mismatches raise AssertionError.
        """
        for l1 in fcn16s.children():
            try:
                l2 = getattr(self, l1.name)
            except Exception:
                # No layer of that name in this model; skip it.
                continue
            assert l1.W.shape == l2.W.shape
            l2.W.data[...] = l1.W.data[...]
            if l2.b is not None:
                assert l1.b.shape == l2.b.shape
                l2.b.data[...] = l1.b.data[...]
    @classmethod
    def download(cls):
        """Download the pretrained checkpoint to ``cls.pretrained_model``.

        Returns the cached file path (see data.cached_download).
        """
        return data.cached_download(
            url='https://drive.google.com/uc?id=0B9P1L--7Wd2vb0cxV0VhcG1Lb28',
            path=cls.pretrained_model,
            md5='256c2a8235c1c65e62e48d3284fbd384',
        )
    def predict(self, imgs):
        """Run inference on an iterable of images.

        Each image is processed individually (batch of one) with train mode
        and backprop disabled; returns a list of per-pixel argmax label maps
        as CPU arrays.
        """
        lbls = []
        for img in imgs:
            with chainer.no_backprop_mode(), \
                    chainer.using_config('train', False):
                x = self.xp.asarray(img[None])
                self.__call__(x)
                lbl = chainer.functions.argmax(self.score, axis=1)
                lbl = chainer.cuda.to_cpu(lbl.array[0])
                lbls.append(lbl)
        return lbls
class FCN8sAtOnce(FCN8s):
    """FCN-8s variant trained "at once" (all stages jointly).

    Identical architecture to FCN8s; the only forward-pass difference is
    that the pool3/pool4 skip inputs are scaled down (by 1e-4 and 1e-2)
    before their score heads so the whole net can be trained in one stage.
    """
    # Checkpoint for the at-once-trained weights (overrides FCN8s's path).
    pretrained_model = osp.expanduser(
        '~/data/models/chainer/fcn8s-atonce_from_caffe.npz')
    def __call__(self, x, t=None):
        """Forward pass; same contract as FCN8s.__call__.

        Returns the softmax cross-entropy loss when ``t`` is given,
        otherwise sets ``self.score`` and returns None.
        """
        # conv1
        h = F.relu(self.conv1_1(x))
        conv1_1 = h
        h = F.relu(self.conv1_2(conv1_1))
        conv1_2 = h
        h = F.max_pooling_2d(conv1_2, 2, stride=2, pad=0)
        pool1 = h  # 1/2
        # conv2
        h = F.relu(self.conv2_1(pool1))
        conv2_1 = h
        h = F.relu(self.conv2_2(conv2_1))
        conv2_2 = h
        h = F.max_pooling_2d(conv2_2, 2, stride=2, pad=0)
        pool2 = h  # 1/4
        # conv3
        h = F.relu(self.conv3_1(pool2))
        conv3_1 = h
        h = F.relu(self.conv3_2(conv3_1))
        conv3_2 = h
        h = F.relu(self.conv3_3(conv3_2))
        conv3_3 = h
        h = F.max_pooling_2d(conv3_3, 2, stride=2, pad=0)
        pool3 = h  # 1/8
        # conv4
        h = F.relu(self.conv4_1(pool3))
        h = F.relu(self.conv4_2(h))
        h = F.relu(self.conv4_3(h))
        h = F.max_pooling_2d(h, 2, stride=2, pad=0)
        pool4 = h  # 1/16
        # conv5
        h = F.relu(self.conv5_1(pool4))
        h = F.relu(self.conv5_2(h))
        h = F.relu(self.conv5_3(h))
        h = F.max_pooling_2d(h, 2, stride=2, pad=0)
        pool5 = h  # 1/32
        # fc6
        h = F.relu(self.fc6(pool5))
        h = F.dropout(h, ratio=.5)
        fc6 = h  # 1/32
        # fc7
        h = F.relu(self.fc7(fc6))
        h = F.dropout(h, ratio=.5)
        fc7 = h  # 1/32
        # score_fr
        h = self.score_fr(fc7)
        score_fr = h  # 1/32
        # score_pool3
        scale_pool3 = 0.0001 * pool3  # XXX: scale to train at once
        h = self.score_pool3(scale_pool3)
        score_pool3 = h  # 1/8
        # score_pool4
        scale_pool4 = 0.01 * pool4  # XXX: scale to train at once
        h = self.score_pool4(scale_pool4)
        score_pool4 = h  # 1/16
        # upscore2
        h = self.upscore2(score_fr)
        upscore2 = h  # 1/16
        # score_pool4c: crop score_pool4 to upscore2's spatial size.
        h = score_pool4[:, :,
                        5:5 + upscore2.shape[2],
                        5:5 + upscore2.shape[3]]
        score_pool4c = h  # 1/16
        # fuse_pool4
        h = upscore2 + score_pool4c
        fuse_pool4 = h  # 1/16
        # upscore_pool4
        h = self.upscore_pool4(fuse_pool4)
        upscore_pool4 = h  # 1/8
        # score_pool3c (comment fixed: this is the pool3 crop, not pool4)
        h = score_pool3[:, :,
                        9:9 + upscore_pool4.shape[2],
                        9:9 + upscore_pool4.shape[3]]
        score_pool3c = h  # 1/8
        # fuse_pool3
        h = upscore_pool4 + score_pool3c
        fuse_pool3 = h  # 1/8
        # upscore8
        h = self.upscore8(fuse_pool3)
        upscore8 = h  # 1/1
        # score: crop back to the input's spatial size.
        h = upscore8[:, :, 31:31 + x.shape[2], 31:31 + x.shape[3]]
        score = h  # 1/1
        self.score = score
        if t is None:
            # Inference mode: caller reads self.score instead of a loss.
            assert not chainer.config.train
            return
        loss = F.softmax_cross_entropy(score, t, normalize=False)
        if np.isnan(float(loss.data)):
            raise ValueError('Loss is nan.')
        chainer.report({'loss': loss}, self)
        return loss
    def init_from_vgg16(self, vgg16):
        """Initialize conv and fc layers from a VGG16 model.

        conv* layers must match shapes exactly; fc6/fc7 only need equal
        element counts because VGG's dense weights are reshaped into this
        model's convolutional fc6/fc7 kernels.
        """
        for l in self.children():
            if l.name.startswith('conv'):
                l1 = getattr(vgg16, l.name)
                l2 = getattr(self, l.name)
                assert l1.W.shape == l2.W.shape
                assert l1.b.shape == l2.b.shape
                l2.W.data[...] = l1.W.data[...]
                l2.b.data[...] = l1.b.data[...]
            elif l.name in ['fc6', 'fc7']:
                l1 = getattr(vgg16, l.name)
                l2 = getattr(self, l.name)
                assert l1.W.size == l2.W.size
                assert l1.b.size == l2.b.size
                # Dense -> conv: reinterpret the flat weights as kernels.
                l2.W.data[...] = l1.W.data.reshape(l2.W.shape)[...]
                l2.b.data[...] = l1.b.data.reshape(l2.b.shape)[...]
    @classmethod
    def download(cls):
        """Download the at-once pretrained checkpoint; returns its path."""
        return data.cached_download(
            url='https://drive.google.com/uc?id=0B9P1L--7Wd2vZ1RJdXotZkNhSEk',
            path=cls.pretrained_model,
            md5='5f3ffdc7fae1066606e1ef45cfda548f',
        )
| |
from datetime import datetime, timedelta, tzinfo
import unittest
import pytz
import re
#noinspection PyUnresolvedReferences
from nose.tools import assert_equal, assert_raises # you need it for tests in form of continuations
import six
from flask_restful import inputs
def test_reverse_rfc822_datetime():
    """RFC 822 date strings parse to the expected UTC datetimes."""
    cases = (
        ("Sat, 01 Jan 2011 00:00:00 -0000", datetime(2011, 1, 1, tzinfo=pytz.utc)),
        ("Sat, 01 Jan 2011 23:59:59 -0000", datetime(2011, 1, 1, 23, 59, 59, tzinfo=pytz.utc)),
        ("Sat, 01 Jan 2011 21:59:59 -0200", datetime(2011, 1, 1, 23, 59, 59, tzinfo=pytz.utc)),
    )
    for raw, parsed in cases:
        yield assert_equal, inputs.datetime_from_rfc822(raw), parsed
def test_reverse_iso8601_datetime():
    """ISO 8601 date strings parse to the expected UTC datetimes."""
    cases = (
        ("2011-01-01T00:00:00+00:00", datetime(2011, 1, 1, tzinfo=pytz.utc)),
        ("2011-01-01T23:59:59+00:00", datetime(2011, 1, 1, 23, 59, 59, tzinfo=pytz.utc)),
        ("2011-01-01T23:59:59.001000+00:00", datetime(2011, 1, 1, 23, 59, 59, 1000, tzinfo=pytz.utc)),
        ("2011-01-01T23:59:59+02:00", datetime(2011, 1, 1, 21, 59, 59, tzinfo=pytz.utc)),
    )
    for raw, parsed in cases:
        yield assert_equal, inputs.datetime_from_iso8601(raw), parsed
def test_urls():
    """Valid URLs pass through inputs.url unchanged."""
    valid_urls = (
        'http://www.djangoproject.com/',
        'http://localhost/',
        'http://example.com/',
        'http://www.example.com/',
        'http://www.example.com:8000/test',
        'http://valid-with-hyphens.com/',
        'http://subdomain.example.com/',
        'http://200.8.9.10/',
        'http://200.8.9.10:8000/test',
        'http://valid-----hyphens.com/',
        'http://example.com?something=value',
        'http://example.com/index.php?something=value&another=value2',
        'http://foo:bar@example.com',
        'http://foo:@example.com',
        'http://foo:@2001:db8:85a3::8a2e:370:7334',
        'http://foo2:qd1%r@example.com',
    )
    for candidate in valid_urls:
        yield assert_equal, inputs.url(candidate), candidate
def check_bad_url_raises(value):
    """Assert that inputs.url rejects *value* with the expected message."""
    try:
        inputs.url(value)
    except ValueError as exc:
        assert_equal(six.text_type(exc), u"{0} is not a valid URL".format(value))
    else:
        assert False, "shouldn't get here"
def test_bad_urls():
    """Malformed URLs are rejected with the plain 'not a valid URL' error."""
    bad_values = (
        'foo',
        'http://',
        'http://example',
        'http://example.',
        'http://.com',
        'http://invalid-.com',
        'http://-invalid.com',
        'http://inv-.alid-.com',
        'http://inv-.-alid.com',
        'foo bar baz',
        u'foo \u2713',
        'http://@foo:bar@example.com',
        'http://:bar@example.com',
        'http://bar:bar:bar@example.com',
    )
    for bad_value in bad_values:
        yield check_bad_url_raises, bad_value
def test_bad_url_error_message():
    """Scheme-less URLs get the 'Did you mean: http://...' suggestion."""
    suggestion_cases = (
        'google.com',
        'domain.google.com',
        'kevin:pass@google.com/path?query',
        u'google.com/path?\u2713',
    )
    for case in suggestion_cases:
        yield check_url_error_message, case
def check_url_error_message(value):
    """Assert inputs.url rejects *value* and suggests the http:// form."""
    try:
        inputs.url(value)
    except ValueError as exc:
        assert_equal(six.text_type(exc),
                     (u"{0} is not a valid URL. Did you mean: http://{0}".format(value)))
    else:
        assert False, u"inputs.url({0}) should raise an exception".format(value)
def test_regex_bad_input():
    """inputs.regex validators raise ValueError on non-matching strings.

    Bug fix: the original yielded ``lambda: num_only(value)``, which captures
    ``value`` by reference.  Python closures late-bind, so every deferred
    nose test invoked the validator with the loop's final value (''),
    silently leaving 'abc', '123abc' and 'abc123' untested.  Yielding the
    callable and its argument separately binds each value at yield time.
    """
    cases = (
        'abc',
        '123abc',
        'abc123',
        '',
    )
    num_only = inputs.regex(r'^[0-9]+$')
    for value in cases:
        # assert_raises(ValueError, num_only, value): value bound per case.
        yield assert_raises, ValueError, num_only, value
def test_regex_good_input():
    """Digit-only strings pass the numeric regex validator unchanged."""
    num_only = inputs.regex(r'^[0-9]+$')
    for digits in ('123', '1234567890', '00000'):
        yield assert_equal, num_only(digits), digits
def test_regex_bad_pattern():
    """A malformed pattern fails when the parser is created, not on use."""
    assert_raises(re.error, lambda: inputs.regex('['))
class TypesTestCase(unittest.TestCase):
    """Unit tests for the simple scalar parsers in flask_restful.inputs:
    boolean, date, natural, positive and int_range."""
    def test_boolean_false(self):
        assert_equal(inputs.boolean("False"), False)
    def test_boolean_is_false_for_0(self):
        assert_equal(inputs.boolean("0"), False)
    def test_boolean_true(self):
        assert_equal(inputs.boolean("true"), True)
    def test_boolean_is_true_for_1(self):
        assert_equal(inputs.boolean("1"), True)
    def test_boolean_upper_case(self):
        assert_equal(inputs.boolean("FaLSE"), False)
    # NOTE(review): duplicate of test_boolean_upper_case — consider removing
    # or giving it a distinct input.
    def test_boolean(self):
        assert_equal(inputs.boolean("FaLSE"), False)
    def test_boolean_with_python_bool(self):
        """Input that is already a native python `bool` should be passed through
        without extra processing."""
        assert_equal(inputs.boolean(True), True)
        assert_equal(inputs.boolean(False), False)
    def test_bad_boolean(self):
        assert_raises(ValueError, lambda: inputs.boolean("blah"))
    def test_date_later_than_1900(self):
        assert_equal(inputs.date("1900-01-01"), datetime(1900, 1, 1))
    def test_date_input_error(self):
        # Month 13 is out of range.
        assert_raises(ValueError, lambda: inputs.date("2008-13-13"))
    def test_date_input(self):
        assert_equal(inputs.date("2008-08-01"), datetime(2008, 8, 1))
    # NOTE(review): "natual" is a typo for "natural" in the two method names
    # below; kept as-is to avoid changing test identifiers.
    def test_natual_negative(self):
        assert_raises(ValueError, lambda: inputs.natural(-1))
    def test_natural(self):
        assert_equal(3, inputs.natural(3))
    def test_natual_string(self):
        assert_raises(ValueError, lambda: inputs.natural('foo'))
    def test_positive(self):
        assert_equal(1, inputs.positive(1))
        assert_equal(10000, inputs.positive(10000))
    def test_positive_zero(self):
        # positive() excludes zero, unlike natural().
        assert_raises(ValueError, lambda: inputs.positive(0))
    def test_positive_negative_input(self):
        assert_raises(ValueError, lambda: inputs.positive(-1))
    def test_int_range_good(self):
        int_range = inputs.int_range(1, 5)
        assert_equal(3, int_range(3))
    def test_int_range_inclusive(self):
        # The upper bound itself is accepted.
        int_range = inputs.int_range(1, 5)
        assert_equal(5, int_range(5))
    def test_int_range_low(self):
        int_range = inputs.int_range(0, 5)
        assert_raises(ValueError, lambda: int_range(-1))
    def test_int_range_high(self):
        int_range = inputs.int_range(0, 5)
        assert_raises(ValueError, lambda: int_range(6))
def test_isointerval():
    """inputs.iso8601interval parses interval strings into (start, end)
    UTC datetime pairs.

    Covers: start/duration, start/end, implicit UTC, timezone conversion,
    and expansion of a single date(time) to an interval at its resolution
    (second/minute/hour/day).
    """
    intervals = [
        (
            # Full precision with explicit UTC.
            "2013-01-01T12:30:00Z/P1Y2M3DT4H5M6S",
            (
                datetime(2013, 1, 1, 12, 30, 0, tzinfo=pytz.utc),
                datetime(2014, 3, 5, 16, 35, 6, tzinfo=pytz.utc),
            ),
        ),
        (
            # Full precision with alternate UTC indication
            "2013-01-01T12:30+00:00/P2D",
            (
                datetime(2013, 1, 1, 12, 30, 0, tzinfo=pytz.utc),
                datetime(2013, 1, 3, 12, 30, 0, tzinfo=pytz.utc),
            ),
        ),
        (
            # Implicit UTC with time
            "2013-01-01T15:00/P1M",
            (
                datetime(2013, 1, 1, 15, 0, 0, tzinfo=pytz.utc),
                datetime(2013, 1, 31, 15, 0, 0, tzinfo=pytz.utc),
            ),
        ),
        (
            # TZ conversion
            "2013-01-01T17:00-05:00/P2W",
            (
                datetime(2013, 1, 1, 22, 0, 0, tzinfo=pytz.utc),
                datetime(2013, 1, 15, 22, 0, 0, tzinfo=pytz.utc),
            ),
        ),
        (
            # Date upgrade to midnight-midnight period
            "2013-01-01/P3D",
            (
                datetime(2013, 1, 1, 0, 0, 0, tzinfo=pytz.utc),
                datetime(2013, 1, 4, 0, 0, 0, 0, tzinfo=pytz.utc),
            ),
        ),
        (
            # Start/end with UTC
            "2013-01-01T12:00:00Z/2013-02-01T12:00:00Z",
            (
                datetime(2013, 1, 1, 12, 0, 0, tzinfo=pytz.utc),
                datetime(2013, 2, 1, 12, 0, 0, tzinfo=pytz.utc),
            ),
        ),
        (
            # Start/end with time upgrade
            "2013-01-01/2013-06-30",
            (
                datetime(2013, 1, 1, tzinfo=pytz.utc),
                datetime(2013, 6, 30, tzinfo=pytz.utc),
            ),
        ),
        (
            # Start/end with TZ conversion
            "2013-02-17T12:00:00-07:00/2013-02-28T15:00:00-07:00",
            (
                datetime(2013, 2, 17, 19, 0, 0, tzinfo=pytz.utc),
                datetime(2013, 2, 28, 22, 0, 0, tzinfo=pytz.utc),
            ),
        ),
        # Resolution expansion for single date(time)
        (
            # Second with UTC
            "2013-01-01T12:30:45Z",
            (
                datetime(2013, 1, 1, 12, 30, 45, tzinfo=pytz.utc),
                datetime(2013, 1, 1, 12, 30, 46, tzinfo=pytz.utc),
            ),
        ),
        (
            # Second with tz conversion
            "2013-01-01T12:30:45+02:00",
            (
                datetime(2013, 1, 1, 10, 30, 45, tzinfo=pytz.utc),
                datetime(2013, 1, 1, 10, 30, 46, tzinfo=pytz.utc),
            ),
        ),
        (
            # Second with implicit UTC
            "2013-01-01T12:30:45",
            (
                datetime(2013, 1, 1, 12, 30, 45, tzinfo=pytz.utc),
                datetime(2013, 1, 1, 12, 30, 46, tzinfo=pytz.utc),
            ),
        ),
        (
            # Minute with UTC
            "2013-01-01T12:30+00:00",
            (
                datetime(2013, 1, 1, 12, 30, tzinfo=pytz.utc),
                datetime(2013, 1, 1, 12, 31, tzinfo=pytz.utc),
            ),
        ),
        (
            # Minute with conversion
            "2013-01-01T12:30+04:00",
            (
                datetime(2013, 1, 1, 8, 30, tzinfo=pytz.utc),
                datetime(2013, 1, 1, 8, 31, tzinfo=pytz.utc),
            ),
        ),
        (
            # Minute with implicit UTC
            "2013-01-01T12:30",
            (
                datetime(2013, 1, 1, 12, 30, tzinfo=pytz.utc),
                datetime(2013, 1, 1, 12, 31, tzinfo=pytz.utc),
            ),
        ),
        (
            # Hour, explicit UTC
            "2013-01-01T12Z",
            (
                datetime(2013, 1, 1, 12, tzinfo=pytz.utc),
                datetime(2013, 1, 1, 13, tzinfo=pytz.utc),
            ),
        ),
        (
            # Hour with offset
            "2013-01-01T12-07:00",
            (
                datetime(2013, 1, 1, 19, tzinfo=pytz.utc),
                datetime(2013, 1, 1, 20, tzinfo=pytz.utc),
            ),
        ),
        (
            # Hour with implicit UTC
            "2013-01-01T12",
            (
                datetime(2013, 1, 1, 12, tzinfo=pytz.utc),
                datetime(2013, 1, 1, 13, tzinfo=pytz.utc),
            ),
        ),
        (
            # Interval with trailing zero fractional seconds should
            # be accepted.
            "2013-01-01T12:00:00.0/2013-01-01T12:30:00.000000",
            (
                datetime(2013, 1, 1, 12, tzinfo=pytz.utc),
                datetime(2013, 1, 1, 12, 30, tzinfo=pytz.utc),
            ),
        ),
    ]
    for value, expected in intervals:
        yield assert_equal, inputs.iso8601interval(value), expected
def test_invalid_isointerval_error():
    """A malformed interval must raise ValueError with the documented message."""
    try:
        inputs.iso8601interval('2013-01-01/blah')
    except ValueError as error:
        # The message is part of the public contract; compare it exactly.
        assert_equal(
            str(error),
            "Invalid argument: 2013-01-01/blah. argument must be a valid ISO8601 "
            "date/time interval.",
        )
        return
    assert False, 'Should raise a ValueError'
def test_bad_isointervals():
    """Yield one assert_raises check per clearly invalid interval string."""
    for bad_interval in ('2013-01T14:', '', 'asdf', '01/01/2013'):
        yield (
            assert_raises,
            ValueError,
            inputs.iso8601interval,
            bad_interval,
        )
# Allow running this module directly. NOTE(review): the tests above are
# nose-style yield tests; unittest.main() will only pick up unittest-style
# tests — confirm the intended runner.
if __name__ == '__main__':
    unittest.main()
| |
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
#
# SPDX-License-Identifier: GPL-2.0
# Test various network-related functionality, such as the dhcp, ping, and
# tftpboot commands.
import pytest
import u_boot_utils
"""
Note: This test relies on boardenv_* containing configuration values to define
the network environment available for testing. Without this configuration, the
tests will be automatically skipped.
For example:
# Boolean indicating whether the Ethernet device is attached to USB, and hence
# USB enumeration needs to be performed prior to network tests.
# This variable may be omitted if its value is False.
env__net_uses_usb = False
# Boolean indicating whether the Ethernet device is attached to PCI, and hence
# PCI enumeration needs to be performed prior to network tests.
# This variable may be omitted if its value is False.
env__net_uses_pci = True
# True if a DHCP server is attached to the network, and should be tested.
# If DHCP testing is not possible or desired, this variable may be omitted or
# set to False.
env__net_dhcp_server = True
# A list of environment variables that should be set in order to configure a
# static IP. If solely relying on DHCP, this variable may be omitted or set to
# an empty list.
env__net_static_env_vars = [
("ipaddr", "10.0.0.100"),
("netmask", "255.255.255.0"),
("serverip", "10.0.0.1"),
]
# Details regarding a file that may be read from a TFTP server. This variable
# may be omitted or set to None if TFTP testing is not possible or desired.
env__net_tftp_readable_file = {
"fn": "ubtest-readable.bin",
"addr": 0x10000000,
"size": 5058624,
"crc32": "c2244b26",
}
# Details regarding a file that may be read from a NFS server. This variable
# may be omitted or set to None if NFS testing is not possible or desired.
env__net_nfs_readable_file = {
"fn": "ubtest-readable.bin",
"addr": 0x10000000,
"size": 5058624,
"crc32": "c2244b26",
}
"""
net_set_up = False
def test_net_pre_commands(u_boot_console):
    """Run any board-specific commands needed to enable network hardware.

    These commands are provided by the boardenv_* file; see the comment at
    the beginning of this file.
    """
    env = u_boot_console.config.env
    if env.get('env__net_uses_usb', False):
        u_boot_console.run_command('usb start')
    if env.get('env__net_uses_pci', False):
        u_boot_console.run_command('pci enum')
@pytest.mark.buildconfigspec('cmd_dhcp')
def test_net_dhcp(u_boot_console):
    """Exercise the dhcp command against the attached DHCP server.

    The boardenv_* file may be used to enable/disable this test; see the
    comment at the beginning of this file.
    """
    if not u_boot_console.config.env.get('env__net_dhcp_server', False):
        pytest.skip('No DHCP server available')

    # Disable autoload so dhcp only binds an address and does not attempt a
    # follow-on image download.
    u_boot_console.run_command('setenv autoload no')
    response = u_boot_console.run_command('dhcp')
    assert 'DHCP client bound to address ' in response

    # Record that the network is now usable for the subsequent tests.
    global net_set_up
    net_set_up = True
@pytest.mark.buildconfigspec('net')
def test_net_setup_static(u_boot_console):
    """Set up a static IP configuration from the boardenv_* file.

    See the comment at the beginning of this file for the expected
    configuration format.
    """
    env_vars = u_boot_console.config.env.get('env__net_static_env_vars', None)
    if not env_vars:
        pytest.skip('No static network configuration is defined')

    for name, value in env_vars:
        u_boot_console.run_command('setenv %s %s' % (name, value))

    # Record that the network is now usable for the subsequent tests.
    global net_set_up
    net_set_up = True
@pytest.mark.buildconfigspec('cmd_ping')
def test_net_ping(u_boot_console):
    """Ping $serverip and check the command reports the host alive.

    $serverip is set up by either test_net_dhcp or test_net_setup_static.
    """
    if not net_set_up:
        pytest.skip('Network not initialized')

    response = u_boot_console.run_command('ping $serverip')
    assert 'is alive' in response
@pytest.mark.buildconfigspec('cmd_net')
def test_net_tftpboot(u_boot_console):
    """Download a file over TFTP and validate its size and, optionally, CRC32.

    The details of the file to download are provided by the boardenv_* file;
    see the comment at the beginning of this file.
    """
    if not net_set_up:
        pytest.skip('Network not initialized')

    fcfg = u_boot_console.config.env.get('env__net_tftp_readable_file', None)
    if not fcfg:
        pytest.skip('No TFTP readable file to read')

    load_addr = fcfg.get('addr', None)
    if not load_addr:
        # No explicit address configured: load 4 MiB above the RAM base.
        load_addr = u_boot_utils.find_ram_base(u_boot_console) + (1024 * 1024 * 4)

    output = u_boot_console.run_command('tftpboot %x %s' % (load_addr, fcfg['fn']))

    # The transfer report must mention the expected byte count when known.
    expected = 'Bytes transferred = '
    nbytes = fcfg.get('size', None)
    if nbytes:
        expected += '%d' % nbytes
    assert expected in output

    # Optionally verify the payload with the crc32 command, when both the
    # expected value and the command are available.
    want_crc = fcfg.get('crc32', None)
    if want_crc and \
            u_boot_console.config.buildconfig.get('config_cmd_crc32', 'n') == 'y':
        output = u_boot_console.run_command('crc32 %x $filesize' % load_addr)
        assert want_crc in output
@pytest.mark.buildconfigspec('cmd_nfs')
def test_net_nfs(u_boot_console):
    """Download a file over NFS and validate its size and, optionally, CRC32.

    The details of the file to download are provided by the boardenv_* file;
    see the comment at the beginning of this file.
    """
    if not net_set_up:
        pytest.skip('Network not initialized')

    fcfg = u_boot_console.config.env.get('env__net_nfs_readable_file', None)
    if not fcfg:
        pytest.skip('No NFS readable file to read')

    load_addr = fcfg.get('addr', None)
    if not load_addr:
        # No explicit address configured: load 4 MiB above the RAM base.
        load_addr = u_boot_utils.find_ram_base(u_boot_console) + (1024 * 1024 * 4)

    output = u_boot_console.run_command('nfs %x %s' % (load_addr, fcfg['fn']))

    # The transfer report must mention the expected byte count when known.
    expected = 'Bytes transferred = '
    nbytes = fcfg.get('size', None)
    if nbytes:
        expected += '%d' % nbytes
    assert expected in output

    # Optionally verify the payload with the crc32 command, when both the
    # expected value and the command are available.
    want_crc = fcfg.get('crc32', None)
    if want_crc and \
            u_boot_console.config.buildconfig.get('config_cmd_crc32', 'n') == 'y':
        output = u_boot_console.run_command('crc32 %x $filesize' % load_addr)
        assert want_crc in output
| |
"""
=================================================
Linear fascicle evaluation (LiFE)
=================================================
Evaluating the results of tractography algorithms is one of the biggest
challenges for diffusion MRI. One proposal for evaluation of tractography
results is to use a forward model that predicts the signal from each of a set of
streamlines, and then fit a linear model to these simultaneous predictions
[Pestilli2014]_.
We will use streamlines generated using probabilistic tracking on CSA
peaks. For brevity, we will include in this example only streamlines going
through the corpus callosum connecting left to right superior frontal
cortex. The process of tracking and finding these streamlines is fully
demonstrated in the `streamline_tools.py` example. If this example has been
run, we can read the streamlines from file. Otherwise, we'll run that example
first, by importing it. This provides us with all of the variables that were
created in that example:
"""
import numpy as np
import os.path as op
import nibabel as nib
import dipy.core.optimize as opt

if not op.exists('lr-superiorfrontal.trk'):
    # Running the streamline_tools example writes the .trk file; the star
    # import also brings in the variables used below (hardi_img, gtab,
    # labels, cc_slice, t1_data, data).
    from streamline_tools import *
else:
    # We'll need to know where the corpus callosum is from these variables:
    from dipy.data import (read_stanford_labels,
                           fetch_stanford_t1,
                           read_stanford_t1)
    hardi_img, gtab, labels_img = read_stanford_labels()
    labels = labels_img.get_data()
    # Voxels labeled 2 are used as the corpus-callosum ROI below.
    cc_slice = labels == 2
    fetch_stanford_t1()
    t1 = read_stanford_t1()
    t1_data = t1.get_data()
    data = hardi_img.get_data()

# Read the candidates from file in voxel space:
candidate_sl = [s[0] for s in nib.trackvis.read('lr-superiorfrontal.trk',
                                                points_space='voxel')[0]]
"""
The streamlines that are entered into the model are termed 'candidate
streamlines' (or a 'candidate connectome'):
"""
"""
Let's visualize the initial candidate group of streamlines in 3D, relative to the
anatomical structure of this brain:
"""
from dipy.viz.colormap import line_colors
from dipy.viz import fvtk

# Candidate streamlines rendered as tubes, colored via line_colors.
candidate_streamlines_actor = fvtk.streamtube(candidate_sl,
                                              line_colors(candidate_sl))
# Corpus-callosum ROI rendered as a yellow contour surface.
cc_ROI_actor = fvtk.contour(cc_slice, levels=[1], colors=[(1., 1., 0.)],
                            opacities=[1.])

# Two T1 slices (one per axis) for anatomical context.
vol_actor = fvtk.slicer(t1_data)
vol_actor.display(40, None, None)
vol_actor2 = vol_actor.copy()
vol_actor2.display(None, None, 35)

# Add display objects to canvas
ren = fvtk.ren()
fvtk.add(ren, candidate_streamlines_actor)
fvtk.add(ren, cc_ROI_actor)
fvtk.add(ren, vol_actor)
fvtk.add(ren, vol_actor2)

# Render a single frame to a PNG file.
fvtk.record(ren, n_frames=1, out_path='life_candidates.png',
            size=(800, 800))
"""
.. figure:: life_candidates.png
:align: center
**Candidate connectome before life optimization**
"""
"""
Next, we initialize a LiFE model. We import the `dipy.tracking.life` module,
which contains the classes and functions that implement the model:
"""
import dipy.tracking.life as life

# The LiFE forward model is parameterized by the acquisition's gradient table.
fiber_model = life.FiberModel(gtab)
"""
Since we read the streamlines from a file, already in the voxel space, we do not
need to transform them into this space. Otherwise, if the streamline coordinates
were in the world space (relative to the scanner iso-center, or relative to the
mid-point of the AC-PC-connecting line), we would use this::
inv_affine = np.linalg.inv(hardi_img.get_affine())
the inverse transformation from world space to the voxel space as the affine for
the following model fit.
The next step is to fit the model, producing a `FiberFit` class instance, that
stores the data, as well as the results of the fitting procedure.
The LiFE model posits that the signal in the diffusion MRI volume can be
explained by the streamlines, by the equation
.. math::
y = X\beta
Where $y$ is the diffusion MRI signal, $\beta$ are a set of weights on the
streamlines and $X$ is a design matrix. This matrix has the dimensions $m$ by
$n$, where $m=n_{voxels} \cdot n_{directions}$, and $n_{voxels}$ is the set of
voxels in the ROI that contains the streamlines considered in this model. The
$i^{th}$ column of the matrix contains the expected contributions of the
$i^{th}$ streamline (arbitrarily ordered) to each of the voxels. $X$ is a sparse
matrix, because each streamline traverses only a small percentage of the
voxels. The expected contributions of the streamline are calculated using a
forward model, where each node of the streamline is modeled as a cylindrical
fiber compartment with Gaussian diffusion, using the diffusion tensor model. See
[Pestilli2014]_ for more detail on the model, and variations of this model.
"""
fiber_fit = fiber_model.fit(data, candidate_sl, affine=np.eye(4))
"""
The `FiberFit` class instance holds various properties of the model fit. For
example, it has the weights $\beta$, that are assigned to each streamline. In
most cases, a tractography through some region will include redundant
streamlines, and these streamlines will have $\beta_i$ that are 0.
"""
import matplotlib.pyplot as plt
import matplotlib

# Histogram of the fitted streamline weights (beta); redundant streamlines
# are expected to receive a weight of 0.
fig, ax = plt.subplots(1)
ax.hist(fiber_fit.beta, bins=100, histtype='step')
ax.set_xlabel('Fiber weights')
ax.set_ylabel('# fibers')
fig.savefig('beta_histogram.png')
"""
.. figure:: beta_histogram.png
:align: center
**LiFE streamline weights**
"""
"""
We use $\beta$ to filter out these redundant streamlines, and generate an
optimized group of streamlines:
"""
# Keep only the streamlines assigned a strictly positive weight by the fit.
optimized_sl = list(np.array(candidate_sl)[np.where(fiber_fit.beta>0)[0]])
# Re-render the same scene with the optimized streamline set.
ren = fvtk.ren()
fvtk.add(ren, fvtk.streamtube(optimized_sl, line_colors(optimized_sl)))
fvtk.add(ren, cc_ROI_actor)
fvtk.add(ren, vol_actor)
fvtk.record(ren, n_frames=1, out_path='life_optimized.png',
            size=(800, 800))
"""
.. figure:: life_optimized.png
:align: center
**Streamlines selected via LiFE optimization**
"""
"""
The new set of streamlines should do well in fitting the data, and redundant
streamlines have presumably been removed (in this case, about 50% of the
streamlines).
But how well does the model do in explaining the diffusion data? We can
quantify that: the `FiberFit` class instance has a `predict` method, which can
be used to invert the model and predict back either the data that was used to
fit the model, or other unseen data (e.g. in cross-validation, see
:ref:`kfold_xval`).
Without arguments, the `.predict()` method will predict the diffusion signal
for the same gradient table that was used in the fit data, but `gtab` and `S0`
key-word arguments can be used to predict for other acquisition schemes and
other baseline non-diffusion-weighted signals.
"""
model_predict = fiber_fit.predict()
"""
We will focus on the error in prediction of the diffusion-weighted data, and
calculate the root of the mean squared error.
"""
# Per-voxel RMSE of the prediction. The first 10 volumes are excluded from
# the mean; presumably these are non-diffusion-weighted (b0) volumes — TODO
# confirm against the acquisition's gradient table.
model_error = model_predict - fiber_fit.data
model_rmse = np.sqrt(np.mean(model_error[:, 10:] ** 2, -1))
"""
As a baseline against which we can compare, we calculate another error term. In
this case, we assume that the weight for each streamline is equal
to zero. This produces the naive prediction of the mean of the signal in each
voxel.
"""
# Baseline model: set every streamline weight to zero, so the prediction in
# each voxel reduces to the mean signal (see the rescaling below).
beta_baseline = np.zeros(fiber_fit.beta.shape[0])
pred_weighted = np.reshape(opt.spdot(fiber_fit.life_matrix, beta_baseline),
                           (fiber_fit.vox_coords.shape[0],
                            np.sum(~gtab.b0s_mask)))
mean_pred = np.empty((fiber_fit.vox_coords.shape[0], gtab.bvals.shape[0]))
S0 = fiber_fit.b0_signal
"""
Since the fitting is done in the demeaned S/S0 domain, we need
to add back the mean and then multiply by S0 in every voxel:
"""
# b0 volumes are predicted as S0 itself; diffusion-weighted volumes get the
# (zero) weighted prediction plus the per-voxel mean, scaled back by S0
# because the fit was done in the demeaned S/S0 domain.
mean_pred[..., gtab.b0s_mask] = S0[:, None]
mean_pred[..., ~gtab.b0s_mask] =\
    (pred_weighted + fiber_fit.mean_signal[:, None]) * S0[:, None]
mean_error = mean_pred - fiber_fit.data
mean_rmse = np.sqrt(np.mean(mean_error ** 2, -1))
"""
First, we can compare the overall distribution of errors between these two
alternative models of the ROI. We show the distribution of differences in error
(improvement through model fitting, relative to the baseline model). Here,
positive values denote an improvement in error with model fit, relative to
without the model fit.
"""
# Histogram of per-voxel improvement (baseline RMSE minus model RMSE);
# positive values mean the LiFE fit reduced the error in that voxel.
fig, ax = plt.subplots(1)
ax.hist(mean_rmse - model_rmse, bins=100, histtype='step')
ax.text(0.2, 0.9, 'Median RMSE, mean model: %.2f' % np.median(mean_rmse),
        horizontalalignment='left',
        verticalalignment='center', transform=ax.transAxes)
ax.text(0.2, 0.8, 'Median RMSE, LiFE: %.2f' % np.median(model_rmse),
        horizontalalignment='left',
        verticalalignment='center', transform=ax.transAxes)
ax.set_xlabel('RMS Error')
ax.set_ylabel('# voxels')
fig.savefig('error_histograms.png')
"""
.. figure:: error_histograms.png
:align: center
**Improvement in error with fitting of the LiFE model**.
"""
"""
Second, we can show the spatial distribution of the two error terms,
and of the improvement with the model fit:
"""
# Project the per-voxel error measures back into full volumes (NaN outside
# the voxels covered by the model) so they can be overlaid on T1 slices.
vol_model = np.ones(data.shape[:3]) * np.nan
vol_model[fiber_fit.vox_coords[:, 0],
          fiber_fit.vox_coords[:, 1],
          fiber_fit.vox_coords[:, 2]] = model_rmse
vol_mean = np.ones(data.shape[:3]) * np.nan
vol_mean[fiber_fit.vox_coords[:, 0],
         fiber_fit.vox_coords[:, 1],
         fiber_fit.vox_coords[:, 2]] = mean_rmse
vol_improve = np.ones(data.shape[:3]) * np.nan
vol_improve[fiber_fit.vox_coords[:, 0],
            fiber_fit.vox_coords[:, 1],
            fiber_fit.vox_coords[:, 2]] = mean_rmse - model_rmse

# Index of the slice (along the first axis) to display.
sl_idx = 49
from mpl_toolkits.axes_grid1 import AxesGrid
fig = plt.figure()
fig.subplots_adjust(left=0.05, right=0.95)
# Three panels sharing axes, each with its own colorbar on top.
ax = AxesGrid(fig, 111,
              nrows_ncols=(1, 3),
              label_mode="1",
              share_all=True,
              cbar_location="top",
              cbar_mode="each",
              cbar_size="10%",
              cbar_pad="5%")
# Each panel: T1 slice in the bone colormap with an error map overlaid.
ax[0].matshow(np.rot90(t1_data[sl_idx, :, :]), cmap=matplotlib.cm.bone)
im = ax[0].matshow(np.rot90(vol_model[sl_idx, :, :]), cmap=matplotlib.cm.hot)
ax.cbar_axes[0].colorbar(im)
ax[1].matshow(np.rot90(t1_data[sl_idx, :, :]), cmap=matplotlib.cm.bone)
im = ax[1].matshow(np.rot90(vol_mean[sl_idx, :, :]), cmap=matplotlib.cm.hot)
ax.cbar_axes[1].colorbar(im)
ax[2].matshow(np.rot90(t1_data[sl_idx, :, :]), cmap=matplotlib.cm.bone)
im = ax[2].matshow(np.rot90(vol_improve[sl_idx, :, :]), cmap=matplotlib.cm.RdBu)
ax.cbar_axes[2].colorbar(im)
# Hide tick marks on all panels.
for lax in ax:
    lax.set_xticks([])
    lax.set_yticks([])
fig.savefig("spatial_errors.png")
"""
.. figure:: spatial_errors.png
:align: center
**Spatial distribution of error and improvement**
"""
"""
This image demonstrates that in many places, fitting the LiFE model results in
substantial reduction of the error.
Note that for full-brain tractographies *LiFE* can require large amounts of
memory. For detailed memory profiling of the algorithm, based on the
streamlines generated in :ref:`example_probabilistic_fiber_tracking`, see `this
IPython notebook
<http://nbviewer.ipython.org/gist/arokem/bc29f34ebc97510d9def>`_.
For the Matlab implementation of LiFE, head over to `Franco Pestilli's github
webpage <http://francopestilli.github.io/life/>`_.
References
~~~~~~~~~~~~~~~~~~~~~~
.. [Pestilli2014] Pestilli, F., Yeatman, J, Rokem, A. Kay, K. and Wandell
B.A. (2014). Validation and statistical inference in living
connectomes. Nature Methods 11:
1058-1063. doi:10.1038/nmeth.3098
.. include:: ../links_names.inc
"""
| |
"""
The db layer module is used to load information from the static database
data into a static data object. Acts as an ORM.
"""
import logging
import sqlite3
import sys
from typing import Dict
import tcod as libtcod
import CreatureRogue.settings as settings
from CreatureRogue.data_layer.ailment import Ailment
from CreatureRogue.data_layer.color import Color
from CreatureRogue.data_layer.data import StaticGameData, ACCURACY_STAT, EVASION_STAT
from CreatureRogue.data_layer.encounter import Encounter
from CreatureRogue.data_layer.growth_rate import GrowthRate
from CreatureRogue.data_layer.location import Location
from CreatureRogue.data_layer.location_area import LocationArea
from CreatureRogue.data_layer.move_data import MoveData
from CreatureRogue.data_layer.move_target import MoveTarget
from CreatureRogue.data_layer.pokeball import Pokeball
from CreatureRogue.data_layer.region import Region
from CreatureRogue.data_layer.species import Species
from CreatureRogue.data_layer.stat import Stat
from CreatureRogue.data_layer.type import Type
from CreatureRogue.data_layer.type_chart import TypeChart
from CreatureRogue.data_layer.xp_lookup import XpLookup
from CreatureRogue.data_layer.map_loader import MapDataTileType
class Loader:
    """Loads the static game database into in-memory data-layer objects.

    Acts as a lightweight ORM: each ``_load_*`` helper reads one table (or
    join) from the sqlite database and converts the rows into the matching
    data-layer class, keyed by database id.
    """

    def __init__(self, db_file):
        # Path to the sqlite database file containing the static data.
        self.db_file = db_file

    def load_static_data(self) -> StaticGameData:
        """
        Given a database file we want to load all of the static
        data into the application so that it can be quickly accessed as
        required.

        Exits the process with status 1 if the database cannot be read.
        """
        try:
            with sqlite3.connect(self.db_file) as conn:
                # XP
                growth_rates = self._load_growth_rates(conn)
                xp_lookup = self._load_xp_lookup(conn, growth_rates)

                # Types
                types = self._load_types(conn)
                type_chart = self._load_type_chart(conn, types)

                # Stats
                stats = self._load_stats(conn)

                # Ailments
                ailments = self._load_ailments(conn)

                # Moves
                move_targets = self._load_move_targets(conn)
                moves = self._load_moves(conn, types, stats, move_targets, ailments)

                # Pokeballs
                pokeballs = self._load_pokeballs(conn)

                # Species
                colors = self._load_colors(conn)
                species = self._load_species(conn, types, colors, stats, growth_rates, moves)

                # Regions/Areas
                regions = self._load_regions(conn)
                locations = self._load_locations(conn, regions)
                location_areas = self._load_location_areas(conn, locations, species)

                # Map Data Tile Types
                map_data_tile_types = self._load_map_data_tile_types(conn)
        except sqlite3.Error as err:
            # Bug fix: the original passed ``err`` as a %-format argument to a
            # message with no placeholders, which made logging report a
            # formatting error instead of the exception details.
            logging.error("An error occurred attempting to pull data from the database: %s", err)
            sys.exit(1)

        return StaticGameData(species, types, type_chart, moves, stats, colors, growth_rates, move_targets,
                              regions, locations, location_areas, xp_lookup, pokeballs, ailments, map_data_tile_types)

    @staticmethod
    def _load_ailments(conn) -> Dict[int, Ailment]:
        """Load the move ailments, keyed by ailment id."""
        logging.info("Loading ailments")
        ailments = {}
        cur = conn.cursor()
        # Parameterized query instead of string formatting.
        cur.execute(
            'SELECT id, name FROM move_meta_ailments '
            'INNER JOIN move_meta_ailment_names '
            'ON move_meta_ailments.id = move_meta_ailment_names.move_meta_ailment_id '
            'WHERE local_language_id=?',
            (settings.LOCAL_LANGUAGE_ID,))
        for ailment_id, name in cur.fetchall():
            ailments[ailment_id] = Ailment(ailment_id, name)
        return ailments

    @staticmethod
    def _load_stats(conn) -> Dict[int, Stat]:
        """Load the stat definitions, keyed by stat id."""
        logging.info("Loading stats")
        stats = {}
        cur = conn.cursor()
        cur.execute(
            'SELECT id, name, short_name FROM stats '
            'INNER JOIN stat_names ON stats.id = stat_names.stat_id '
            'WHERE local_language_id=?',
            (settings.LOCAL_LANGUAGE_ID,))
        for stat_id, name, short_name in cur.fetchall():
            stats[stat_id] = Stat(name, short_name)
        return stats

    @staticmethod
    def _load_colors(conn) -> Dict[int, Color]:
        """Load the named species display colors, keyed by color id."""
        logging.info("Loading colors")
        colors = {}
        cur = conn.cursor()
        cur.execute(
            'SELECT id, name, red, green, blue FROM pokemon_colors '
            'INNER JOIN pokemon_color_names '
            'ON pokemon_colors.id = pokemon_color_names.pokemon_color_id '
            'WHERE local_language_id=?',
            (settings.LOCAL_LANGUAGE_ID,))
        for color_id, name, red, green, blue in cur.fetchall():
            colors[color_id] = Color(name, red, green, blue)
        return colors

    @staticmethod
    def _load_growth_rates(conn) -> Dict[int, GrowthRate]:
        """Load the XP growth-rate definitions, keyed by growth-rate id."""
        logging.info("Loading growth rates")
        growth_rates = {}
        cur = conn.cursor()
        cur.execute('SELECT id, identifier FROM growth_rates')
        for gr_id, name in cur.fetchall():
            growth_rates[gr_id] = GrowthRate(name)
        return growth_rates

    @staticmethod
    def _load_xp_lookup(conn, growth_rates: Dict[int, GrowthRate]) -> XpLookup:
        """Build the {growth rate: {level: xp required}} lookup table."""
        logging.info("Loading xp lookup")
        xp_lookup = {growth_rates[growth_rate_id]: {} for growth_rate_id in growth_rates}
        cur = conn.cursor()
        cur.execute('SELECT growth_rate_id, level, experience FROM experience ORDER BY growth_rate_id, level')
        for growth_rate_id, level, xp in cur.fetchall():
            xp_lookup[growth_rates[growth_rate_id]][level] = xp
        return XpLookup(xp_lookup)

    @staticmethod
    def _load_pokeballs(conn) -> Dict[int, Pokeball]:
        """Load the pokeball definitions, keyed by pokeball id.

        Top/bottom colors are stored as 'r,g,b' strings in the database and
        converted to libtcod colors here.
        """
        logging.info("Loading pokeballs")
        pokeballs = {}
        cur = conn.cursor()
        cur.execute('SELECT id, name, catch_rate, top_color, bottom_color, display_char FROM pokeballs')
        for pokeball_id, name, catch_rate, top_color, bottom_color, display_char in cur.fetchall():
            r_top, g_top, b_top = [int(part) for part in top_color.split(',')]
            r_bottom, g_bottom, b_bottom = [int(part) for part in bottom_color.split(',')]
            pokeballs[pokeball_id] = Pokeball(pokeball_id, name, catch_rate,
                                              libtcod.Color(r_top, g_top, b_top),
                                              libtcod.Color(r_bottom, g_bottom, b_bottom),
                                              display_char)
        return pokeballs

    @staticmethod
    def _load_move_targets(conn) -> Dict[int, MoveTarget]:
        """Load the move target descriptions, keyed by target id."""
        logging.info("Loading move targets")
        targets = {}
        cur = conn.cursor()
        cur.execute(
            'SELECT id, identifier, name, description FROM move_targets '
            'INNER JOIN move_target_prose ON id = move_target_id '
            'WHERE local_language_id=?',
            (settings.LOCAL_LANGUAGE_ID,))
        for mt_id, identifier, name, description in cur.fetchall():
            targets[mt_id] = MoveTarget(identifier, name, description)
        return targets

    @staticmethod
    def _load_types(conn) -> Dict[int, Type]:
        """Load the elemental types, keyed by type id."""
        logging.info("Loading types")
        types = {}
        cur = conn.cursor()
        cur.execute(
            'SELECT types.id, name FROM types '
            'INNER JOIN type_names ON type_id = types.id '
            'WHERE local_language_id=?',
            (settings.LOCAL_LANGUAGE_ID,))
        for type_id, name in cur.fetchall():
            types[type_id] = Type(name)
        return types

    @staticmethod
    def _load_type_chart(conn, types: Dict[int, Type]) -> TypeChart:
        """Load type effectiveness as a nested {attacker: {defender: factor}} chart."""
        logging.info("Loading type chart")
        chart = {}
        cur = conn.cursor()
        cur.execute('SELECT damage_type_id, target_type_id, damage_factor FROM type_efficacy')
        for damage_type_id, target_type_id, damage_factor in cur.fetchall():
            damage_type = types[damage_type_id]
            target_type = types[target_type_id]
            chart.setdefault(damage_type, {})[target_type] = int(damage_factor)
        return TypeChart(chart)

    @staticmethod
    def _load_moves(conn, types: Dict[int, Type], stats: Dict[int, Stat], move_targets: Dict[int, MoveTarget], ailments: Dict[int, Ailment]) -> Dict[int, MoveData]:
        """Load all moves, resolving type/stat/target/ailment references.

        Damage class 2 moves use the physical attack/defense stats, class 3
        the special ones; any other class has no attack/defense stats.
        """
        logging.info("Loading moves")
        moves = {}
        cur = conn.cursor()
        cur.execute('SELECT * FROM move_data')
        for move_id, name, pp, type_id, power, damage_class_id, accuracy, min_hits, max_hits, target_id, ailment_id in cur.fetchall():
            if damage_class_id == 2:  # Physical
                attack_stat = stats[2]
                defense_stat = stats[3]
            elif damage_class_id == 3:  # Special
                attack_stat = stats[4]
                defense_stat = stats[5]
            else:  # Non-damaging
                attack_stat = None
                defense_stat = None
            accuracy_stat = stats[7]
            evasion_stat = stats[8]

            # Stat modifications the move applies, defaulting to 0 per stat.
            stat_cur = conn.cursor()
            stat_cur.execute('SELECT stat_id, change FROM move_meta_stat_changes WHERE move_id = ?',
                             (move_id,))
            stat_effects = {stats[stat]: 0 for stat in stats}
            for stat_id, change in stat_cur.fetchall():
                stat_effects[stats[stat_id]] = change

            moves[move_id] = MoveData(name, pp, types[type_id], power, accuracy, min_hits, max_hits, stat_effects,
                                      attack_stat, defense_stat, accuracy_stat, evasion_stat,
                                      move_targets[target_id], ailments[ailment_id])
        return moves

    @staticmethod
    def _load_species(conn, types: Dict[int, Type], colors: Dict[int, Color], stats: Dict[int, Stat], growth_rates: Dict[int, GrowthRate], moves: Dict[int, MoveData]) -> Dict[int, Species]:
        """Load every species in the configured pokedex, keyed by species id."""
        logging.info("Loading species")
        species = {}
        cur = conn.cursor()
        cur.execute(
            'SELECT species_id, creature_id, pokedex_number, name, height, weight, base_experience, color_id, '
            'growth_rate_id, flavor_text, genus, capture_rate FROM creature_species_data '
            'WHERE pokedex_id = ? AND local_language_id = ?',
            (settings.POKEDEX_ID, settings.LOCAL_LANGUAGE_ID))
        for species_id, creature_id, pokedex_number, name, height, weight, base_exp, color_id, growth_rate_id, flavor_text, genus, capture_rate in cur.fetchall():
            types_cur = conn.cursor()
            types_cur.execute('SELECT type_id FROM pokemon_types WHERE pokemon_id = ?', (creature_id,))
            species_types = [types[row[0]] for row in types_cur]

            stats_cur = conn.cursor()
            stats_cur.execute(
                'SELECT stat_id, base_stat FROM pokemon_stats '
                'INNER JOIN stats ON stats.id = pokemon_stats.stat_id '
                'WHERE pokemon_id = ?',
                (creature_id,))
            species_stats = {stats[row[0]]: row[1] for row in stats_cur}
            # Accuracy and evasion have no base value in the database; they
            # always start at 1.
            species_stats[stats[EVASION_STAT]] = 1
            species_stats[stats[ACCURACY_STAT]] = 1

            # Moves learned per level; method id 1 is assumed to be level-up
            # learning — TODO confirm against the move methods table.
            moves_cur = conn.cursor()
            moves_cur.execute(
                'SELECT move_id, level FROM pokemon_moves '
                'WHERE pokemon_move_method_id=1 AND pokemon_id=? AND version_group_id = ?',
                (creature_id, settings.VERSION_GROUP_ID))
            level_moves = {n: [] for n in range(1, 101)}
            for move_id, level in moves_cur.fetchall():
                level_moves[level].append(moves[move_id])

            species[species_id] = Species(pokedex_number, name, height, weight, species_types, species_stats,
                                          base_exp, growth_rates[growth_rate_id], name[0:1], colors[color_id],  # TODO - What if the creature has no name?
                                          level_moves, flavor_text, genus, capture_rate)
        return species

    @staticmethod
    def _load_regions(conn) -> Dict[int, Region]:
        """Load the world regions, keyed by region id."""
        logging.info("Loading regions")
        regions = {}
        cur = conn.cursor()
        cur.execute(
            'SELECT id, identifier, name FROM regions '
            'INNER JOIN region_names ON regions.id = region_names.region_id '
            'WHERE local_language_id=?',
            (settings.LOCAL_LANGUAGE_ID,))
        for region_id, identifier, name in cur.fetchall():
            regions[region_id] = Region(region_id=region_id, identifier=identifier, name=name)
        return regions

    @staticmethod
    def _load_locations(conn, regions: Dict[int, Region]) -> Dict[int, Location]:
        """Load locations (only those attached to a region), keyed by location id."""
        logging.info("Loading locations")
        locations = {}
        cur = conn.cursor()
        cur.execute(
            'SELECT id, identifier, name, region_id FROM locations '
            'INNER JOIN location_names ON locations.id = location_names.location_id '
            'WHERE local_language_id=? AND NOT region_id IS NULL',
            (settings.LOCAL_LANGUAGE_ID,))
        for location_id, identifier, name, region_id in cur.fetchall():
            locations[location_id] = Location(identifier, name, regions[region_id])
        return locations

    @staticmethod
    def _load_location_areas(conn, locations: Dict[int, Location], species: Dict[int, Species]) -> Dict[int, LocationArea]:
        """Load location areas together with their walking encounter lists.

        NOTE(review): the original also issued a query against
        location_area_encounter_rates for each area but never read the
        result; that dead query has been removed.
        """
        logging.info("Loading location areas")
        location_areas = {}
        cur = conn.cursor()
        cur.execute(
            'SELECT location_areas.id, location_areas.identifier, location_area_prose.name, '
            'location_areas.location_id FROM location_areas '
            'INNER JOIN location_area_prose ON location_areas.id = location_area_prose.location_area_id '
            'WHERE NOT location_areas.location_id IS NULL AND local_language_id=?',
            (settings.LOCAL_LANGUAGE_ID,))
        for area_id, identifier, name, location_id in cur.fetchall():
            enc_cur = conn.cursor()
            enc_cur.execute(
                'SELECT species_id, MIN(min_level), MAX(max_level), MAX(rarity), encounter_method_id '
                'FROM encounters '
                'INNER JOIN pokemon on pokemon_id = pokemon.id '
                'INNER JOIN encounter_slots ON encounter_slots.id = encounters.encounter_slot_id '
                'WHERE location_area_id = ? '
                'GROUP BY pokemon_id, encounter_method_id',
                (area_id,))
            walk_encs = []
            for species_id, min_level, max_level, rarity, method_id in enc_cur.fetchall():
                # Only walking encounters (method id 1) are used; other
                # methods are currently ignored.
                if method_id == 1:
                    walk_encs.append(Encounter(species[species_id], min_level, max_level, rarity))
            location_areas[area_id] = LocationArea(identifier, name, locations[location_id], walk_encs)
        return location_areas

    @staticmethod
    def _load_map_data_tile_types(conn) -> Dict[int, MapDataTileType]:
        """Load the map tile types, keyed by tile type id."""
        logging.info("Loading tile types")
        tile_types = {}
        cur = conn.cursor()
        cur.execute("SELECT id, display_character, red, green, blue, traversable, name FROM region_map_data_cell_types")
        for tile_type_id, display_character, red, green, blue, traversable, name in cur.fetchall():
            tile_types[tile_type_id] = MapDataTileType(name=name, red=red, green=green, blue=blue,
                                                       traversable=traversable, display_character=display_character)
        return tile_types
| |
"""Utilities for writing code that runs on Python 2 and 3"""
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.1.0"

# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3

# Interpreter-specific aliases: string/integer/class/text/binary types and
# the platform's maximum container size.
if PY3:
    string_types = str,
    integer_types = int,
    class_types = type,
    text_type = str
    binary_type = bytes

    MAXSIZE = sys.maxsize
else:
    string_types = basestring,
    integer_types = (int, long)
    class_types = (type, types.ClassType)
    text_type = unicode
    binary_type = str

    # It's possible to have sizeof(long) != sizeof(Py_ssize_t). Python 2 has
    # no sys.maxsize, so probe it: len() coerces __len__'s result to
    # Py_ssize_t, and 2**31 overflows that on a 32-bit build.
    class X(object):
        def __len__(self):
            return 1 << 31
    try:
        len(X())
    except OverflowError:
        # 32-bit
        MAXSIZE = int((1 << 31) - 1)
    else:
        # 64-bit
        MAXSIZE = int((1 << 63) - 1)
    del X
def _add_doc(func, doc):
    """Attach *doc* as the docstring of *func*."""
    func.__doc__ = doc
def _import_module(name):
    """Import a (possibly dotted) module path and return the leaf module."""
    # __import__ returns the top-level package; the fully-qualified module is
    # fetched from sys.modules instead.
    __import__(name)
    return sys.modules[name]
class _LazyDescr(object):
    """Descriptor that resolves its value on first attribute access.

    Subclasses implement ``_resolve``; the resolved object is cached on the
    instance and the descriptor then removes itself from the owning class.
    """

    def __init__(self, name):
        self.name = name

    def __get__(self, obj, tp):
        value = self._resolve()
        # Cache the result on the instance, then drop the descriptor from the
        # class so subsequent lookups bypass __get__ entirely.
        setattr(obj, self.name, value)
        delattr(tp, self.name)
        return value
class MovedModule(_LazyDescr):
    """Lazy descriptor for a module that was renamed between Python 2 and 3."""

    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        if PY3:
            # On Python 3 the new module name defaults to the attribute name.
            self.mod = name if new is None else new
        else:
            self.mod = old

    def _resolve(self):
        return _import_module(self.mod)
class MovedAttribute(_LazyDescr):
    """Lazy descriptor for an attribute that moved between modules."""

    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            if new_mod is None:
                new_mod = name
            self.mod = new_mod
            # The new attribute name falls back to the old one, and finally
            # to the moved attribute's own name.
            if new_attr is None:
                new_attr = name if old_attr is None else old_attr
            self.attr = new_attr
        else:
            self.mod = old_mod
            self.attr = name if old_attr is None else old_attr

    def _resolve(self):
        module = _import_module(self.mod)
        return getattr(module, self.attr)
class _MovedItems(types.ModuleType):
    """Lazy loading of moved objects"""
# Registry of every renamed module/attribute; each entry is installed on
# _MovedItems below as a lazy descriptor, so nothing is imported until used.
_moved_attributes = [
    MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
    MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
    MovedAttribute("map", "itertools", "builtins", "imap", "map"),
    MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
    MovedAttribute("reduce", "__builtin__", "functools"),
    MovedAttribute("StringIO", "StringIO", "io"),
    MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
    MovedModule("builtins", "__builtin__"),
    MovedModule("configparser", "ConfigParser"),
    MovedModule("copyreg", "copy_reg"),
    MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
    MovedModule("http_cookies", "Cookie", "http.cookies"),
    MovedModule("html_entities", "htmlentitydefs", "html.entities"),
    MovedModule("html_parser", "HTMLParser", "html.parser"),
    MovedModule("http_client", "httplib", "http.client"),
    MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
    MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
    MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
    MovedModule("cPickle", "cPickle", "pickle"),
    MovedModule("queue", "Queue"),
    MovedModule("reprlib", "repr"),
    MovedModule("socketserver", "SocketServer"),
    MovedModule("tkinter", "Tkinter"),
    MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
    MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
    MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
    MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
    MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
    MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
    MovedModule("tkinter_colorchooser", "tkColorChooser",
                "tkinter.colorchooser"),
    MovedModule("tkinter_commondialog", "tkCommonDialog",
                "tkinter.commondialog"),
    MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_font", "tkFont", "tkinter.font"),
    MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
                "tkinter.simpledialog"),
    MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
    MovedModule("winreg", "_winreg"),
]
# Install each descriptor on the class (class-level access triggers _LazyDescr).
for attr in _moved_attributes:
    setattr(_MovedItems, attr.name, attr)
del attr
# Register the pseudo-module so "from django.utils.six.moves import x" works.
moves = sys.modules["django.utils.six.moves"] = _MovedItems("moves")
def add_move(move):
    """Add an item to six.moves."""
    entry_name = move.name
    setattr(_MovedItems, entry_name, move)
def remove_move(name):
    """Remove item from six.moves."""
    try:
        delattr(_MovedItems, name)
    except AttributeError:
        # Fall back to a value cached directly on the moves module instance.
        if name not in moves.__dict__:
            raise AttributeError("no such move, %r" % (name,))
        del moves.__dict__[name]
# Names of function/method/dict-iteration attributes that were renamed in
# Python 3; the accessor helpers below dispatch through these strings.
if PY3:
    _meth_func = "__func__"
    _meth_self = "__self__"
    _func_code = "__code__"
    _func_defaults = "__defaults__"
    _iterkeys = "keys"
    _itervalues = "values"
    _iteritems = "items"
else:
    _meth_func = "im_func"
    _meth_self = "im_self"
    _func_code = "func_code"
    _func_defaults = "func_defaults"
    _iterkeys = "iterkeys"
    _itervalues = "itervalues"
    _iteritems = "iteritems"
if PY3:
    # Python 3 has no unbound methods; plain functions pass through.
    def get_unbound_function(unbound):
        return unbound
    advance_iterator = next
    # callable() was removed in early Python 3; emulate via __call__ lookup.
    def callable(obj):
        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
else:
    def get_unbound_function(unbound):
        return unbound.im_func
    # Python 2 iterators use .next() rather than the next() builtin.
    def advance_iterator(it):
        return it.next()
    callable = callable
_add_doc(get_unbound_function,
         """Get the function out of a possibly unbound function""")
# Version-neutral accessors built on the attribute-name constants above.
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
def iterkeys(d):
    """Return an iterator over the keys of a dictionary."""
    # _iterkeys is "keys" on Python 3 and "iterkeys" on Python 2.
    return getattr(d, _iterkeys)()
def itervalues(d):
    """Return an iterator over the values of a dictionary."""
    return getattr(d, _itervalues)()
def iteritems(d):
    """Return an iterator over the (key, value) pairs of a dictionary."""
    return getattr(d, _iteritems)()
if PY3:
    # b() / u() emulate the 2.x b"" / u"" literal prefixes.
    def b(s):
        return s.encode("latin-1")
    def u(s):
        return s
    if sys.version_info[1] <= 1:
        def int2byte(i):
            return bytes((i,))
    else:
        # This is about 2x faster than the implementation above on 3.2+
        int2byte = operator.methodcaller("to_bytes", 1, "big")
    import io
    StringIO = io.StringIO
    BytesIO = io.BytesIO
else:
    def b(s):
        return s
    def u(s):
        return unicode(s, "unicode_escape")
    int2byte = chr
    import StringIO
    StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
if PY3:
    import builtins
    exec_ = getattr(builtins, "exec")
    def reraise(tp, value, tb=None):
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
    print_ = getattr(builtins, "print")
    del builtins
else:
    # Python 2: "exec" is a statement, so wrap it in a function. The
    # statement is hidden inside a string so this file still parses on
    # Python 3.
    def exec_(code, globs=None, locs=None):
        """Execute code in a namespace."""
        if globs is None:
            # Default to the caller's globals/locals.
            frame = sys._getframe(1)
            globs = frame.f_globals
            if locs is None:
                locs = frame.f_locals
            del frame
        elif locs is None:
            locs = globs
        exec("""exec code in globs, locs""")
    # Three-argument raise is a syntax error on Python 3, so define
    # reraise through exec_.
    exec_("""def reraise(tp, value, tb=None):
    raise tp, value, tb
""")
    # Reimplementation of the Python 3 print() builtin for Python 2.
    def print_(*args, **kwargs):
        """The new-style print function."""
        fp = kwargs.pop("file", sys.stdout)
        if fp is None:
            return
        def write(data):
            if not isinstance(data, basestring):
                data = str(data)
            fp.write(data)
        # If any argument or separator is unicode, print everything as
        # unicode to avoid implicit-encoding errors.
        want_unicode = False
        sep = kwargs.pop("sep", None)
        if sep is not None:
            if isinstance(sep, unicode):
                want_unicode = True
            elif not isinstance(sep, str):
                raise TypeError("sep must be None or a string")
        end = kwargs.pop("end", None)
        if end is not None:
            if isinstance(end, unicode):
                want_unicode = True
            elif not isinstance(end, str):
                raise TypeError("end must be None or a string")
        if kwargs:
            raise TypeError("invalid keyword arguments to print()")
        if not want_unicode:
            for arg in args:
                if isinstance(arg, unicode):
                    want_unicode = True
                    break
        if want_unicode:
            newline = unicode("\n")
            space = unicode(" ")
        else:
            newline = "\n"
            space = " "
        if sep is None:
            sep = space
        if end is None:
            end = newline
        for i, arg in enumerate(args):
            if i:
                write(sep)
            write(arg)
        write(end)
_add_doc(reraise, """Reraise an exception.""")
def with_metaclass(meta, base=object):
    """Create a base class with a metaclass.

    Inheriting from the returned class gives subclasses the metaclass
    *meta* with syntax that works on both Python 2 and 3.
    """
    bases = (base,)
    return meta("NewBase", bases, {})
### Additional customizations for Django ###
# MultiValueDict's list-iteration method was renamed alongside dict methods.
if PY3:
    _iterlists = "lists"
else:
    _iterlists = "iterlists"
def iterlists(d):
    """Return an iterator over the values of a MultiValueDict."""
    return getattr(d, _iterlists)()
# dummy_thread was renamed to _dummy_thread in Python 3.
add_move(MovedModule("_dummy_thread", "dummy_thread"))
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
from typing import List, Optional
import hvac
if sys.version_info >= (3, 8):
from functools import cached_property
else:
from cached_property import cached_property
from hvac.exceptions import InvalidPath, VaultError
from requests import Response
from airflow.utils.log.logging_mixin import LoggingMixin
# Default location of the service-account JWT inside a Kubernetes pod.
DEFAULT_KUBERNETES_JWT_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/token'
# KV secrets-engine version used when the caller does not specify one.
DEFAULT_KV_ENGINE_VERSION = 2
# KV secrets-engine versions accepted by _VaultClient.
VALID_KV_VERSIONS: List[int] = [1, 2]
# Vault authentication back-ends supported by _VaultClient.
VALID_AUTH_TYPES: List[str] = [
    'approle',
    'aws_iam',
    'azure',
    'github',
    'gcp',
    'kubernetes',
    'ldap',
    'radius',
    'token',
    'userpass',
]
class _VaultClient(LoggingMixin):
    """
    Retrieves Authenticated client from Hashicorp Vault. This is purely internal class promoting
    authentication code reuse between the Hook and the SecretBackend, it should not be used directly in
    Airflow DAGs. Use VaultBackend for backend integration and Hook in case you want to communicate
    with VaultHook using standard Airflow Connection definition.
    :param url: Base URL for the Vault instance being addressed.
    :param auth_type: Authentication Type for Vault. Default is ``token``. Available values are in
        ('approle', 'aws_iam', 'azure', 'github', 'gcp', 'kubernetes', 'ldap', 'radius', 'token', 'userpass')
    :param auth_mount_point: It can be used to define mount_point for authentication chosen
        Default depends on the authentication method used.
    :param mount_point: The "path" the secret engine was mounted on. Default is "secret". Note that
        this mount_point is not used for authentication if authentication is done via a
        different engine. For authentication mount_points see, auth_mount_point.
    :param kv_engine_version: Selects the version of the engine to run (``1`` or ``2``, default: ``2``).
    :param token: Authentication token to include in requests sent to Vault
        (for ``token`` and ``github`` auth_type).
    :param token_path: path to file containing authentication token to include in requests sent to Vault
        (for ``token`` and ``github`` auth_type).
    :param username: Username for Authentication (for ``ldap`` and ``userpass`` auth_types).
    :param password: Password for Authentication (for ``ldap`` and ``userpass`` auth_types).
    :param key_id: Key ID for Authentication (for ``aws_iam`` and ``azure`` auth_type).
    :param secret_id: Secret ID for Authentication (for ``approle``, ``aws_iam`` and ``azure`` auth_types).
    :param role_id: Role ID for Authentication (for ``approle``, ``aws_iam`` auth_types).
    :param kubernetes_role: Role for Authentication (for ``kubernetes`` auth_type).
    :param kubernetes_jwt_path: Path for kubernetes jwt token (for ``kubernetes`` auth_type, default:
        ``/var/run/secrets/kubernetes.io/serviceaccount/token``).
    :param gcp_key_path: Path to Google Cloud Service Account key file (JSON) (for ``gcp`` auth_type).
        Mutually exclusive with gcp_keyfile_dict
    :param gcp_keyfile_dict: Dictionary of keyfile parameters. (for ``gcp`` auth_type).
        Mutually exclusive with gcp_key_path
    :param gcp_scopes: Comma-separated string containing OAuth2 scopes (for ``gcp`` auth_type).
    :param azure_tenant_id: The tenant id for the Azure Active Directory (for ``azure`` auth_type).
    :param azure_resource: The configured URL for the application registered in Azure Active Directory
        (for ``azure`` auth_type).
    :param radius_host: Host for radius (for ``radius`` auth_type).
    :param radius_secret: Secret for radius (for ``radius`` auth_type).
    :param radius_port: Port for radius (for ``radius`` auth_type).
    """

    def __init__(
        self,
        url: Optional[str] = None,
        auth_type: str = 'token',
        auth_mount_point: Optional[str] = None,
        mount_point: str = "secret",
        kv_engine_version: Optional[int] = None,
        token: Optional[str] = None,
        token_path: Optional[str] = None,
        username: Optional[str] = None,
        password: Optional[str] = None,
        key_id: Optional[str] = None,
        secret_id: Optional[str] = None,
        role_id: Optional[str] = None,
        kubernetes_role: Optional[str] = None,
        # Use the module-level constant instead of duplicating the literal.
        kubernetes_jwt_path: Optional[str] = DEFAULT_KUBERNETES_JWT_PATH,
        gcp_key_path: Optional[str] = None,
        gcp_keyfile_dict: Optional[dict] = None,
        gcp_scopes: Optional[str] = None,
        azure_tenant_id: Optional[str] = None,
        azure_resource: Optional[str] = None,
        radius_host: Optional[str] = None,
        radius_secret: Optional[str] = None,
        radius_port: Optional[int] = None,
        **kwargs,
    ):
        super().__init__()
        # Validate configuration eagerly so misconfiguration surfaces at
        # construction time rather than on the first Vault request.
        if kv_engine_version and kv_engine_version not in VALID_KV_VERSIONS:
            raise VaultError(
                f"The version is not supported: {kv_engine_version}. "
                f"It should be one of {VALID_KV_VERSIONS}"
            )
        if auth_type not in VALID_AUTH_TYPES:
            raise VaultError(
                f"The auth_type is not supported: {auth_type}. " f"It should be one of {VALID_AUTH_TYPES}"
            )
        if auth_type == "token" and not token and not token_path:
            raise VaultError("The 'token' authentication type requires 'token' or 'token_path'")
        if auth_type == "github" and not token and not token_path:
            raise VaultError("The 'github' authentication type requires 'token' or 'token_path'")
        if auth_type == "approle" and not role_id:
            raise VaultError("The 'approle' authentication type requires 'role_id'")
        if auth_type == "kubernetes":
            if not kubernetes_role:
                raise VaultError("The 'kubernetes' authentication type requires 'kubernetes_role'")
            if not kubernetes_jwt_path:
                raise VaultError("The 'kubernetes' authentication type requires 'kubernetes_jwt_path'")
        if auth_type == "azure":
            if not azure_resource:
                raise VaultError("The 'azure' authentication type requires 'azure_resource'")
            if not azure_tenant_id:
                raise VaultError("The 'azure' authentication type requires 'azure_tenant_id'")
        if auth_type == "radius":
            if not radius_host:
                raise VaultError("The 'radius' authentication type requires 'radius_host'")
            if not radius_secret:
                raise VaultError("The 'radius' authentication type requires 'radius_secret'")
        # Fall back to the module-level default rather than a hard-coded 2.
        self.kv_engine_version = kv_engine_version if kv_engine_version else DEFAULT_KV_ENGINE_VERSION
        self.url = url
        self.auth_type = auth_type
        self.kwargs = kwargs
        self.token = token
        self.token_path = token_path
        self.auth_mount_point = auth_mount_point
        self.mount_point = mount_point
        self.username = username
        self.password = password
        self.key_id = key_id
        self.secret_id = secret_id
        self.role_id = role_id
        self.kubernetes_role = kubernetes_role
        self.kubernetes_jwt_path = kubernetes_jwt_path
        self.gcp_key_path = gcp_key_path
        self.gcp_keyfile_dict = gcp_keyfile_dict
        self.gcp_scopes = gcp_scopes
        self.azure_tenant_id = azure_tenant_id
        self.azure_resource = azure_resource
        self.radius_host = radius_host
        self.radius_secret = radius_secret
        self.radius_port = radius_port

    @property
    def client(self):
        """
        Authentication to Vault can expire. This wrapper function checks that
        it is still authenticated to Vault, and invalidates the cache if this
        is not the case.
        :rtype: hvac.Client
        :return: Vault Client
        """
        if not self._client.is_authenticated():
            # Invalidate the cache:
            # https://github.com/pydanny/cached-property#invalidating-the-cache
            self.__dict__.pop('_client', None)
        return self._client

    @cached_property
    def _client(self) -> hvac.Client:
        """
        Return an authenticated Hashicorp Vault client.
        :rtype: hvac.Client
        :return: Vault Client
        """
        _client = hvac.Client(url=self.url, **self.kwargs)
        # Dispatch to the auth helper matching the configured auth_type.
        if self.auth_type == "approle":
            self._auth_approle(_client)
        elif self.auth_type == 'aws_iam':
            self._auth_aws_iam(_client)
        elif self.auth_type == 'azure':
            self._auth_azure(_client)
        elif self.auth_type == "gcp":
            self._auth_gcp(_client)
        elif self.auth_type == "github":
            self._auth_github(_client)
        elif self.auth_type == "kubernetes":
            self._auth_kubernetes(_client)
        elif self.auth_type == "ldap":
            self._auth_ldap(_client)
        elif self.auth_type == "radius":
            self._auth_radius(_client)
        elif self.auth_type == "token":
            self._set_token(_client)
        elif self.auth_type == "userpass":
            self._auth_userpass(_client)
        else:
            raise VaultError(f"Authentication type '{self.auth_type}' not supported")
        if _client.is_authenticated():
            return _client
        else:
            raise VaultError("Vault Authentication Error!")

    def _auth_userpass(self, _client: hvac.Client) -> None:
        if self.auth_mount_point:
            _client.auth_userpass(
                username=self.username, password=self.password, mount_point=self.auth_mount_point
            )
        else:
            _client.auth_userpass(username=self.username, password=self.password)

    def _auth_radius(self, _client: hvac.Client) -> None:
        if self.auth_mount_point:
            _client.auth.radius.configure(
                host=self.radius_host,
                secret=self.radius_secret,
                port=self.radius_port,
                mount_point=self.auth_mount_point,
            )
        else:
            _client.auth.radius.configure(
                host=self.radius_host, secret=self.radius_secret, port=self.radius_port
            )

    def _auth_ldap(self, _client: hvac.Client) -> None:
        if self.auth_mount_point:
            _client.auth.ldap.login(
                username=self.username, password=self.password, mount_point=self.auth_mount_point
            )
        else:
            _client.auth.ldap.login(username=self.username, password=self.password)

    def _auth_kubernetes(self, _client: hvac.Client) -> None:
        if not self.kubernetes_jwt_path:
            raise VaultError("The kubernetes_jwt_path should be set here. This should not happen.")
        with open(self.kubernetes_jwt_path) as f:
            jwt = f.read().strip()
        if self.auth_mount_point:
            _client.auth_kubernetes(role=self.kubernetes_role, jwt=jwt, mount_point=self.auth_mount_point)
        else:
            _client.auth_kubernetes(role=self.kubernetes_role, jwt=jwt)

    def _auth_github(self, _client: hvac.Client) -> None:
        if self.auth_mount_point:
            _client.auth.github.login(token=self.token, mount_point=self.auth_mount_point)
        else:
            _client.auth.github.login(token=self.token)

    def _auth_gcp(self, _client: hvac.Client) -> None:
        # Imported lazily: the google provider is optional at runtime.
        from airflow.providers.google.cloud.utils.credentials_provider import (
            _get_scopes,
            get_credentials_and_project_id,
        )

        scopes = _get_scopes(self.gcp_scopes)
        credentials, _ = get_credentials_and_project_id(
            key_path=self.gcp_key_path, keyfile_dict=self.gcp_keyfile_dict, scopes=scopes
        )
        if self.auth_mount_point:
            _client.auth.gcp.configure(credentials=credentials, mount_point=self.auth_mount_point)
        else:
            _client.auth.gcp.configure(credentials=credentials)

    def _auth_azure(self, _client: hvac.Client) -> None:
        if self.auth_mount_point:
            _client.auth.azure.configure(
                tenant_id=self.azure_tenant_id,
                resource=self.azure_resource,
                client_id=self.key_id,
                client_secret=self.secret_id,
                mount_point=self.auth_mount_point,
            )
        else:
            _client.auth.azure.configure(
                tenant_id=self.azure_tenant_id,
                resource=self.azure_resource,
                client_id=self.key_id,
                client_secret=self.secret_id,
            )

    def _auth_aws_iam(self, _client: hvac.Client) -> None:
        if self.auth_mount_point:
            _client.auth_aws_iam(
                access_key=self.key_id,
                secret_key=self.secret_id,
                role=self.role_id,
                mount_point=self.auth_mount_point,
            )
        else:
            _client.auth_aws_iam(access_key=self.key_id, secret_key=self.secret_id, role=self.role_id)

    def _auth_approle(self, _client: hvac.Client) -> None:
        if self.auth_mount_point:
            _client.auth.approle.login(
                role_id=self.role_id, secret_id=self.secret_id, mount_point=self.auth_mount_point
            )
        else:
            _client.auth.approle.login(role_id=self.role_id, secret_id=self.secret_id)

    def _set_token(self, _client: hvac.Client) -> None:
        if self.token_path:
            with open(self.token_path) as f:
                _client.token = f.read().strip()
        else:
            _client.token = self.token

    def get_secret(self, secret_path: str, secret_version: Optional[int] = None) -> Optional[dict]:
        """
        Get secret value from the KV engine.
        :param secret_path: The path of the secret.
        :param secret_version: Specifies the version of Secret to return. If not set, the latest
            version is returned. (Can only be used in case of version 2 of KV).
        See https://hvac.readthedocs.io/en/stable/usage/secrets_engines/kv_v1.html
        and https://hvac.readthedocs.io/en/stable/usage/secrets_engines/kv_v2.html for details.
        :return: secret stored in the vault as a dictionary
        """
        try:
            if self.kv_engine_version == 1:
                if secret_version:
                    raise VaultError("Secret version can only be used with version 2 of the KV engine")
                response = self.client.secrets.kv.v1.read_secret(
                    path=secret_path, mount_point=self.mount_point
                )
            else:
                response = self.client.secrets.kv.v2.read_secret_version(
                    path=secret_path, mount_point=self.mount_point, version=secret_version
                )
        except InvalidPath:
            self.log.debug("Secret not found %s with mount point %s", secret_path, self.mount_point)
            return None
        # KV v2 nests the payload one level deeper than v1.
        return_data = response["data"] if self.kv_engine_version == 1 else response["data"]["data"]
        return return_data

    def get_secret_metadata(self, secret_path: str) -> Optional[dict]:
        """
        Reads secret metadata (including versions) from the engine. It is only valid for KV version 2.
        :param secret_path: The path of the secret.
        :rtype: dict
        :return: secret metadata. This is a Dict containing metadata for the secret.
        See https://hvac.readthedocs.io/en/stable/usage/secrets_engines/kv_v2.html for details.
        """
        if self.kv_engine_version == 1:
            raise VaultError("Metadata might only be used with version 2 of the KV engine.")
        try:
            return self.client.secrets.kv.v2.read_secret_metadata(
                path=secret_path, mount_point=self.mount_point
            )
        except InvalidPath:
            self.log.debug("Secret not found %s with mount point %s", secret_path, self.mount_point)
            return None

    def get_secret_including_metadata(
        self, secret_path: str, secret_version: Optional[int] = None
    ) -> Optional[dict]:
        """
        Reads secret including metadata. It is only valid for KV version 2.
        See https://hvac.readthedocs.io/en/stable/usage/secrets_engines/kv_v2.html for details.
        :param secret_path: The path of the secret.
        :param secret_version: Specifies the version of Secret to return. If not set, the latest
            version is returned. (Can only be used in case of version 2 of KV).
        :rtype: dict
        :return: The key info. This is a Dict with "data" mapping keeping secret
            and "metadata" mapping keeping metadata of the secret.
        """
        if self.kv_engine_version == 1:
            raise VaultError("Metadata might only be used with version 2 of the KV engine.")
        try:
            return self.client.secrets.kv.v2.read_secret_version(
                path=secret_path, mount_point=self.mount_point, version=secret_version
            )
        except InvalidPath:
            self.log.debug(
                "Secret not found %s with mount point %s and version %s",
                secret_path,
                self.mount_point,
                secret_version,
            )
            return None

    def create_or_update_secret(
        self, secret_path: str, secret: dict, method: Optional[str] = None, cas: Optional[int] = None
    ) -> Response:
        """
        Creates or updates secret.
        :param secret_path: The path of the secret.
        :param secret: Secret to create or update for the path specified
        :param method: Optional parameter to explicitly request a POST (create) or PUT (update) request to
            the selected kv secret engine. If no argument is provided for this parameter, hvac attempts to
            intelligently determine which method is appropriate. Only valid for KV engine version 1
        :param cas: Set the "cas" value to use a Check-And-Set operation. If not set the write will be
            allowed. If set to 0 a write will only be allowed if the key doesn't exist.
            If the index is non-zero the write will only be allowed if the key's current version
            matches the version specified in the cas parameter. Only valid for KV engine version 2.
        :rtype: requests.Response
        :return: The response of the create_or_update_secret request.
        See https://hvac.readthedocs.io/en/stable/usage/secrets_engines/kv_v1.html
        and https://hvac.readthedocs.io/en/stable/usage/secrets_engines/kv_v2.html for details.
        """
        # method/cas are mutually exclusive with the other engine version.
        if self.kv_engine_version == 2 and method:
            raise VaultError("The method parameter is only valid for version 1")
        if self.kv_engine_version == 1 and cas:
            raise VaultError("The cas parameter is only valid for version 2")
        if self.kv_engine_version == 1:
            response = self.client.secrets.kv.v1.create_or_update_secret(
                secret_path=secret_path, secret=secret, mount_point=self.mount_point, method=method
            )
        else:
            response = self.client.secrets.kv.v2.create_or_update_secret(
                secret_path=secret_path, secret=secret, mount_point=self.mount_point, cas=cas
            )
        return response
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import io
import json
import os
import tempfile
import unittest
from contextlib import redirect_stdout
from airflow import models
from airflow.cli import cli_parser
from airflow.cli.commands import user_command
# Fixture e-mail addresses; users registered under these addresses are
# removed between tests so each test starts from a clean state.
TEST_USER1_EMAIL = 'test-user1@example.com'
TEST_USER2_EMAIL = 'test-user2@example.com'
def _does_user_belong_to_role(appbuilder, email, rolename):
user = appbuilder.sm.find_user(email=email)
role = appbuilder.sm.find_role(rolename)
if user and role:
return role in user.roles
return False
class TestCliUsers(unittest.TestCase):
    """End-to-end tests for the ``airflow users`` CLI sub-commands,
    exercising user_command against a real (test-mode) webserver app."""

    @classmethod
    def setUpClass(cls):
        cls.dagbag = models.DagBag(include_examples=True)
        cls.parser = cli_parser.get_parser()

    def setUp(self):
        from airflow.www import app as application

        self.app = application.create_app(testing=True)
        self.appbuilder = self.app.appbuilder  # pylint: disable=no-member
        self.clear_roles_and_roles()

    def tearDown(self):
        self.clear_roles_and_roles()

    def clear_roles_and_roles(self):
        """Delete fixture users and roles so every test starts clean.

        NOTE(review): despite the name this clears *users* and roles.
        """
        for email in [TEST_USER1_EMAIL, TEST_USER2_EMAIL]:
            test_user = self.appbuilder.sm.find_user(email=email)
            if test_user:
                self.appbuilder.sm.del_register_user(test_user)
        for role_name in ['FakeTeamA', 'FakeTeamB']:
            if self.appbuilder.sm.find_role(role_name):
                self.appbuilder.sm.delete_role(role_name)

    def test_cli_create_user_random_password(self):
        """'users create --use-random-password' should succeed."""
        args = self.parser.parse_args(
            [
                'users',
                'create',
                '--username',
                'test1',
                '--lastname',
                'doe',
                '--firstname',
                'jon',
                '--email',
                'jdoe@foo.com',
                '--role',
                'Viewer',
                '--use-random-password',
            ]
        )
        user_command.users_create(args)

    def test_cli_create_user_supplied_password(self):
        """'users create --password <pw>' should succeed."""
        args = self.parser.parse_args(
            [
                'users',
                'create',
                '--username',
                'test2',
                '--lastname',
                'doe',
                '--firstname',
                'jon',
                '--email',
                'jdoe@apache.org',
                '--role',
                'Viewer',
                '--password',
                'test',
            ]
        )
        user_command.users_create(args)

    def test_cli_delete_user(self):
        """A created user can be deleted via 'users delete'."""
        args = self.parser.parse_args(
            [
                'users',
                'create',
                '--username',
                'test3',
                '--lastname',
                'doe',
                '--firstname',
                'jon',
                '--email',
                'jdoe@example.com',
                '--role',
                'Viewer',
                '--use-random-password',
            ]
        )
        user_command.users_create(args)
        args = self.parser.parse_args(
            [
                'users',
                'delete',
                '--username',
                'test3',
            ]
        )
        user_command.users_delete(args)

    def test_cli_list_users(self):
        """'users list' output should mention every created username."""
        for i in range(0, 3):
            args = self.parser.parse_args(
                [
                    'users',
                    'create',
                    '--username',
                    f'user{i}',
                    '--lastname',
                    'doe',
                    '--firstname',
                    'jon',
                    '--email',
                    f'jdoe+{i}@gmail.com',
                    '--role',
                    'Viewer',
                    '--use-random-password',
                ]
            )
            user_command.users_create(args)
        # Capture the command's stdout to inspect the listing.
        with redirect_stdout(io.StringIO()) as stdout:
            user_command.users_list(self.parser.parse_args(['users', 'list']))
            stdout = stdout.getvalue()
        for i in range(0, 3):
            self.assertIn(f'user{i}', stdout)

    def test_cli_list_users_with_args(self):
        """'users list --output json' should not raise."""
        user_command.users_list(self.parser.parse_args(['users', 'list', '--output', 'json']))

    def test_cli_import_users(self):
        """'users import' creates users and re-import overwrites their roles."""

        def assert_user_in_roles(email, roles):
            for role in roles:
                self.assertTrue(_does_user_belong_to_role(self.appbuilder, email, role))

        def assert_user_not_in_roles(email, roles):
            for role in roles:
                self.assertFalse(_does_user_belong_to_role(self.appbuilder, email, role))

        assert_user_not_in_roles(TEST_USER1_EMAIL, ['Admin', 'Op'])
        assert_user_not_in_roles(TEST_USER2_EMAIL, ['Public'])
        users = [
            {
                "username": "imported_user1",
                "lastname": "doe1",
                "firstname": "jon",
                "email": TEST_USER1_EMAIL,
                "roles": ["Admin", "Op"],
            },
            {
                "username": "imported_user2",
                "lastname": "doe2",
                "firstname": "jon",
                "email": TEST_USER2_EMAIL,
                "roles": ["Public"],
            },
        ]
        self._import_users_from_file(users)
        assert_user_in_roles(TEST_USER1_EMAIL, ['Admin', 'Op'])
        assert_user_in_roles(TEST_USER2_EMAIL, ['Public'])
        # Re-import with swapped roles: import should replace, not merge.
        users = [
            {
                "username": "imported_user1",
                "lastname": "doe1",
                "firstname": "jon",
                "email": TEST_USER1_EMAIL,
                "roles": ["Public"],
            },
            {
                "username": "imported_user2",
                "lastname": "doe2",
                "firstname": "jon",
                "email": TEST_USER2_EMAIL,
                "roles": ["Admin"],
            },
        ]
        self._import_users_from_file(users)
        assert_user_not_in_roles(TEST_USER1_EMAIL, ['Admin', 'Op'])
        assert_user_in_roles(TEST_USER1_EMAIL, ['Public'])
        assert_user_not_in_roles(TEST_USER2_EMAIL, ['Public'])
        assert_user_in_roles(TEST_USER2_EMAIL, ['Admin'])

    def test_cli_export_users(self):
        """'users export' round-trips: exported users can be re-imported."""
        user1 = {
            "username": "imported_user1",
            "lastname": "doe1",
            "firstname": "jon",
            "email": TEST_USER1_EMAIL,
            "roles": ["Public"],
        }
        user2 = {
            "username": "imported_user2",
            "lastname": "doe2",
            "firstname": "jon",
            "email": TEST_USER2_EMAIL,
            "roles": ["Admin"],
        }
        self._import_users_from_file([user1, user2])
        users_filename = self._export_users_to_file()
        with open(users_filename, mode='r') as file:
            retrieved_users = json.loads(file.read())
        os.remove(users_filename)
        # ensure that an export can be imported
        self._import_users_from_file(retrieved_users)

        def find_by_username(username):
            matches = [u for u in retrieved_users if u['username'] == username]
            if not matches:
                self.fail(f"Couldn't find user with username {username}")
            matches[0].pop('id')  # this key not required for import
            return matches[0]

        self.assertEqual(find_by_username('imported_user1'), user1)
        self.assertEqual(find_by_username('imported_user2'), user2)

    def _import_users_from_file(self, user_list):
        """Write *user_list* to a temp JSON file and run 'users import' on it."""
        json_file_content = json.dumps(user_list)
        f = tempfile.NamedTemporaryFile(delete=False)
        try:
            f.write(json_file_content.encode())
            f.flush()
            args = self.parser.parse_args(['users', 'import', f.name])
            user_command.users_import(args)
        finally:
            os.remove(f.name)

    def _export_users_to_file(self):
        """Run 'users export' into a temp file; caller is responsible for
        removing the returned file."""
        f = tempfile.NamedTemporaryFile(delete=False)
        args = self.parser.parse_args(['users', 'export', f.name])
        user_command.users_export(args)
        return f.name

    def test_cli_add_user_role(self):
        """'users add-role' grants a role the user did not have."""
        args = self.parser.parse_args(
            [
                'users',
                'create',
                '--username',
                'test4',
                '--lastname',
                'doe',
                '--firstname',
                'jon',
                '--email',
                TEST_USER1_EMAIL,
                '--role',
                'Viewer',
                '--use-random-password',
            ]
        )
        user_command.users_create(args)
        self.assertFalse(
            _does_user_belong_to_role(appbuilder=self.appbuilder, email=TEST_USER1_EMAIL, rolename='Op'),
            "User should not yet be a member of role 'Op'",
        )
        args = self.parser.parse_args(['users', 'add-role', '--username', 'test4', '--role', 'Op'])
        user_command.users_manage_role(args, remove=False)
        self.assertTrue(
            _does_user_belong_to_role(appbuilder=self.appbuilder, email=TEST_USER1_EMAIL, rolename='Op'),
            "User should have been added to role 'Op'",
        )

    def test_cli_remove_user_role(self):
        """'users remove-role' revokes a role the user holds."""
        args = self.parser.parse_args(
            [
                'users',
                'create',
                '--username',
                'test4',
                '--lastname',
                'doe',
                '--firstname',
                'jon',
                '--email',
                TEST_USER1_EMAIL,
                '--role',
                'Viewer',
                '--use-random-password',
            ]
        )
        user_command.users_create(args)
        self.assertTrue(
            _does_user_belong_to_role(appbuilder=self.appbuilder, email=TEST_USER1_EMAIL, rolename='Viewer'),
            "User should have been created with role 'Viewer'",
        )
        args = self.parser.parse_args(['users', 'remove-role', '--username', 'test4', '--role', 'Viewer'])
        user_command.users_manage_role(args, remove=True)
        self.assertFalse(
            _does_user_belong_to_role(appbuilder=self.appbuilder, email=TEST_USER1_EMAIL, rolename='Viewer'),
            "User should have been removed from role 'Viewer'",
        )
| |
# global imports
import numpy as np
from future.builtins import range
# local imports
import correlation_toolbox.helper as cthlp
'''
Documentation:
Correlation toolbox for AnalogSignals (binned data) of format
data = np.array([[t1,t2,...,tn], # unit1
[t1,t2,...,tn], # unit2
.
.
[t1,t2,...,tn]]) # unitN
Exception: compound_crossspec takes list of data, i.e. [data1,data2,...] as input.
'''
def mean(data, units=False, time=False):
    '''
    Compute mean of data
    **Args**:
       data: numpy.ndarray; 1st axis unit, 2nd axis time
       units: bool; average over units
       time: bool; average over time
    **Return**:
       if units=False and time=False: error,
       if units=True: 1 dim numpy.ndarray; time series
       if time=True: 1 dim numpy.ndarray; series of unit means across time
       if units=True and time=True: float; unit and time mean
    **Examples**:
       >>> mean(np.array([[1,2,3],[4,5,6]]),units=True)
       Out[1]: np.array([2.5,3.5,4.5])
       >>> mean(np.array([[1,2,3],[4,5,6]]),time=True)
       Out[1]: np.array([2.,5.])
       >>> mean(np.array([[1,2,3],[4,5,6]]),units=True,time=True)
       Out[1]: 3.5
    '''
    assert(units is not False or time is not False)
    # Branch on truthiness rather than `is True`: identity checks silently
    # returned None for truthy non-bool flags (e.g. units=1) that pass the
    # assert above.
    if units and time:
        return np.mean(data)
    elif units:
        return np.mean(data, axis=0)
    elif time:
        return np.mean(data, axis=1)
def compound_mean(data):
    '''
    Compute the mean of the compound/sum signal.
    data is first summed across units and averaged across time.

    **Args**:
        data: numpy.ndarray; 1st axis unit, 2nd axis time

    **Return**:
        float; time-averaged compound/sum signal

    **Examples**:
        >>> compound_mean(np.array([[1,2,3],[4,5,6]]))
        Out[1]: 7.0
    '''
    # population sum per time bin, then the time average
    compound_signal = np.sum(data, axis=0)
    return np.mean(compound_signal)
def variance(data, units=False, time=False):
    '''
    Compute the variance of data.

    **Args**:
        data: numpy.ndarray; 1st axis unit, 2nd axis time
        units: bool; variance across units
        time: bool; variance across time

    **Return**:
        if units=True: 1 dim numpy.ndarray; variance across units per time bin
        if time=True: 1 dim numpy.ndarray; single unit variances across time
        if units=True and time=True: float; mean of single unit variances
                                     across time

    **Examples**:
        >>> variance(np.array([[1,2,3],[4,5,6]]),units=True)
        Out[1]: np.array([ 2.25, 2.25, 2.25])
        >>> variance(np.array([[1,2,3],[4,5,6]]),time=True)
        Out[1]: np.array([ 0.66666667, 0.66666667])
        >>> variance(np.array([[1,2,3],[4,5,6]]),units=True,time=True)
        Out[1]: 0.66666666666666663
    '''
    # at least one axis has to be requested
    assert(units is not False or time is not False)
    if units is True and time is True:
        # average across units of the per-unit variances across time
        return np.mean(np.var(data, axis=1))
    if units is True and time is False:
        return np.var(data, axis=0)
    if units is False and time is True:
        return np.var(data, axis=1)
def compound_variance(data):
    '''
    Compute the variance of the compound/sum signal.
    data is first summed across units, then the variance across time
    is calculated.

    **Args**:
        data: numpy.ndarray; 1st axis unit, 2nd axis time

    **Return**:
        float; variance across time of compound/sum signal

    **Examples**:
        >>> compound_variance(np.array([[1,2,3],[4,5,6]]))
        Out[1]: 2.6666666666666665
    '''
    compound_signal = np.sum(data, axis=0)
    return np.var(compound_signal)
def spectrogram(data, tbin, twindow, Df=None, units=False, N=None, measure='power'):
    '''Calculate (smoothed) spectrogram of data.

    The time series is cut into consecutive non-overlapping windows of
    length twindow and the requested spectral measure is evaluated on each
    window.  If units is True, spectra are averaged across units.

    **Args**:
        data: numpy.ndarray; binned time series, 1st axis unit, 2nd axis time
        tbin: float; bin size
        twindow: float; size of the window used for the spectra
        Df: float/None; width of smoothing kernel, None -> no smoothing
        units: bool; if True, average over units
        N: int/None; population size, if None it is calculated from data
        measure: str; measure to be used: 'power', 'cross' or 'compound_power'

    **Return**:
        (freq, sg): tuple
        freq: frequencies of the evaluated windows ([] if no window fits)
        sg: numpy.ndarray; one spectral estimate per window

    **Raises**:
        NotImplementedError: for an unknown measure
    '''
    # Validate the measure upfront so an unknown value fails even when the
    # data is too short for a single window (the original only raised after
    # falling through all branches).
    if measure not in ('power', 'cross', 'compound_power'):
        raise NotImplementedError('Unknown measure: %s.' % measure)
    steps_window = int(np.floor(twindow / tbin))
    n_windows = int(np.floor(1. * len(data[0]) / steps_window))
    sg = []
    freq = []
    # single loop; only the per-window estimator differs between measures
    for i in range(n_windows):
        window = data[:, i * steps_window:(i + 1) * steps_window]
        if measure == 'power':
            freq, spec = powerspec(window, tbin, Df=Df, units=units, N=N)
        elif measure == 'cross':
            freq, spec = crossspec(window, tbin, Df=Df, units=units, N=N)
        else:  # 'compound_power'
            freq, spec = compound_powerspec(window, tbin, Df=Df)
        sg.append(spec)
    return freq, np.array(sg)
def powerspec(data, tbin, Df=None, units=False, N=None):
    '''
    Calculate (smoothed) power spectra of all timeseries in data.
    If units=True, power spectra are averaged across units.
    Note that averaging is done on power spectra rather than data.
    Power spectra are normalized by the length T of the time series
    -> no scaling with T.

    **Args**:
        data: numpy.ndarray; 1st axis unit, 2nd axis time
        tbin: float; binsize in ms
        Df: float/None; window width of sliding rectangular filter
            (smoothing), None -> no smoothing
        units: bool; average power spectrum across units
        N: int/None; population size used for the average, default len(data)

    **Return**:
        (freq, POW): tuple
        freq: numpy.ndarray; frequencies
        POW: if units=False: 2 dim numpy.ndarray; 1st axis unit, 2nd axis frequency
             if units=True: 1 dim numpy.ndarray; frequency series

    **Examples**:
        >>> powerspec(np.array([analog_sig1,analog_sig2]),tbin, Df=Df)
        Out[1]: (freq,POW)
        >>> POW.shape
        Out[2]: (2,len(analog_sig1))
        >>> powerspec(np.array([analog_sig1,analog_sig2]),tbin, Df=Df, units=True)
        Out[1]: (freq,POW)
        >>> POW.shape
        Out[2]: (len(analog_sig1),)
    '''
    if N is None:
        N = len(data)
    freq, DATA = cthlp.calculate_fft(data, tbin)
    df = freq[1] - freq[0]
    # duration represented by the spectrum; used for the final normalization
    T = tbin * len(freq)
    # raw power: squared magnitude of the Fourier coefficients
    POW = np.power(np.abs(DATA),2)
    if Df is not None:
        # smooth each spectrum with a sliding rectangular window of width Df
        POW = [cthlp.movav(x, Df, df) for x in POW]
        # discard the lowest frequencies, which are distorted by the filter
        cut = int(Df / df)
        freq = freq[cut:]
        POW = np.array([x[cut:] for x in POW])
        POW = np.abs(POW)
    assert(len(freq) == len(POW[0]))
    if units is True:
        # population average of the single-unit spectra (1/N * sum)
        POW = 1./N*np.sum(POW, axis=0)
        assert(len(freq) == len(POW))
    POW *= 1. / T * 1e3  # normalization, power independent of T
    return freq, POW
def compound_powerspec(data, tbin, Df=None):
    '''
    Calculate the power spectrum of the compound/sum signal.
    data is first summed across units, then the power spectrum is
    calculated.  The spectrum is normalized by the length T of the time
    series -> no scaling with T.

    **Args**:
        data: numpy.ndarray; 1st axis unit, 2nd axis time
        tbin: float; binsize in ms
        Df: float/None; window width of sliding rectangular filter
            (smoothing), None -> no smoothing

    **Return**:
        (freq, POW): tuple
        freq: numpy.ndarray; frequencies
        POW: 1 dim numpy.ndarray; frequency series

    **Examples**:
        >>> compound_powerspec(np.array([analog_sig1,analog_sig2]),tbin, Df=Df)
        Out[1]: (freq,POW)
        >>> POW.shape
        Out[2]: (len(analog_sig1),)
    '''
    # treat the population sum as a single "unit" and reuse powerspec;
    # units=True then simply strips the leading axis of length one
    compound_signal = [np.sum(data, axis=0)]
    return powerspec(compound_signal, tbin, Df=Df, units=True)
def crossspec(data, tbin, Df=None, units=False, N=None):
    '''
    Calculate (smoothed) cross spectra of data.
    If units=True, cross spectra are averaged across units.
    Note that averaging is done on cross spectra rather than data.
    Cross spectra are normalized by the length T of the time series
    -> no scaling with T.
    Note that the average cross spectrum (units=True) is calculated
    efficiently via compound and single unit power spectra.

    **Args**:
        data: numpy.ndarray; 1st axis unit, 2nd axis time
        tbin: float; binsize in ms
        Df: float/None; window width of sliding rectangular filter
            (smoothing), None -> no smoothing
        units: bool; average cross spectrum
        N: int/None; population size, default len(data)

    **Return**:
        (freq, CRO): tuple
        freq: numpy.ndarray; frequencies
        CRO: if units=True: 1 dim numpy.ndarray; frequency series
             if units=False: 3 dim numpy.ndarray; 1st axis first unit,
             2nd axis second unit, 3rd axis frequency

    **Examples**:
        >>> crossspec(np.array([analog_sig1,analog_sig2]),tbin, Df=Df)
        Out[1]: (freq,CRO)
        >>> CRO.shape
        Out[2]: (2,2,len(analog_sig1))
        >>> crossspec(np.array([analog_sig1,analog_sig2]),tbin, Df=Df, units=True)
        Out[1]: (freq,CRO)
        >>> CRO.shape
        Out[2]: (len(analog_sig1),)
    '''
    if N is None:
        N = len(data)
    if units is True:
        # smoothing and normalization take place in powerspec
        # and compound_powerspec
        freq, POW = powerspec(data, tbin, Df=Df, units=True, N=N)
        freq_com, CPOW = compound_powerspec(data, tbin, Df=Df)
        # both spectra must live on the same frequency grid
        assert(len(freq) == len(freq_com))
        assert(np.min(freq) == np.min(freq_com))
        assert(np.max(freq) == np.max(freq_com))
        # average pairwise cross spectrum from the identity
        # CPOW = sum_i POW_i + sum_{i != j} CRO_ij, with POW the unit average
        CRO = 1. / (1. * N * (N - 1.)) * (CPOW - 1. * N * POW)
        assert(len(freq) == len(CRO))
    else:
        freq, DATA = cthlp.calculate_fft(data, tbin)
        T = tbin * len(freq)
        df = freq[1] - freq[0]
        if Df is not None:
            # discard the lowest frequencies, distorted by the smoothing filter
            cut = int(Df / df)
            freq = freq[cut:]
        CRO = np.zeros((N, N, len(freq)), dtype=complex)
        # only the lower triangle is computed; the upper triangle follows
        # from conjugate symmetry CRO_ji = conj(CRO_ij)
        for i in range(N):
            for j in range(i + 1):
                tempij = DATA[i] * DATA[j].conj()
                if Df is not None:
                    tempij = cthlp.movav(tempij, Df, df)[cut:]
                CRO[i, j] = tempij
                CRO[j, i] = CRO[i, j].conj()
        assert(len(freq) == len(CRO[0, 0]))
        CRO *= 1. / T * 1e3  # normalization
    return freq, CRO
def compound_crossspec(a_data, tbin, Df=None):
    '''
    Calculate cross spectra of compound signals.
    a_data is a list of datasets (a_data = [data1,data2,...]).
    For each dataset in a_data, the compound signal is calculated
    and the crossspectra between these compound signals is computed.

    **Args**:
        a_data: list of numpy.ndarrays; array: 1st axis unit, 2nd axis time
        tbin: float; binsize in ms
        Df: float/None; window width of sliding rectangular filter
            (smoothing), None -> no smoothing

    **Return**:
        (freq, CRO): tuple
        freq: numpy.ndarray; frequencies
        CRO: 3 dim numpy.ndarray; 1st axis first compound signal,
             2nd axis second compound signal, 3rd axis frequency

    **Examples**:
        >>> compound_crossspec([np.array([analog_sig1,analog_sig2]),
        ...                     np.array([analog_sig3,analog_sig4])], tbin, Df=Df)
        Out[1]: (freq,CRO)
        >>> CRO.shape
        Out[2]: (2,2,len(analog_sig1))
    '''
    # one compound (population-sum) signal per dataset
    compound_signals = np.array([np.sum(data, axis=0) for data in a_data])
    return crossspec(compound_signals, tbin, Df, units=False)
def autocorrfunc(freq, power):
    '''
    Calculate autocorrelation function(s) for given power spectrum/spectra.

    **Args**:
        freq: 1 dim numpy.ndarray; frequencies
        power: 1 or 2 dim numpy.ndarray; power spectra,
               1st axis units, 2nd axis frequencies

    **Return**:
        (time, autof): tuple
        time: 1 dim numpy.ndarray; times
        autof: autocorrelation function(s), same leading shape as power

    **Examples**:
        ---
    '''
    # bin width (ms) implied by the Nyquist frequency
    tbin = 1. / (2. * np.max(freq)) * 1e3
    time = np.arange(-len(freq) / 2. + 1, len(freq) / 2. + 1) * tbin

    def _to_lag_order(raw):
        # rotate the inverse FFT so negative lags precede the positive ones
        mid = int(len(raw) / 2.)
        return np.hstack([raw[mid + 1:], raw[:mid + 1]])

    if len(np.shape(power)) > 1:
        # one autocorrelation function per unit
        autof = np.zeros((len(power), len(freq)))
        for i, spectrum in enumerate(power):
            autof[i] = _to_lag_order(np.real(np.fft.ifft(spectrum)))
        assert(len(time) == len(autof[0]))
    else:
        autof = _to_lag_order(np.real(np.fft.ifft(power)))
        assert(len(time) == len(autof))
    # normalization is done in powerspec()
    return time, autof
def autocorrfunc_time(spike_trains, tau_max, bin_size, T, units=False):
    '''
    Calculate autocorrelation function(s) for given spike trains.

    **Args**:
        spike_trains: 2 dim array-like; 1st axis units, 2nd axis spike times
        tau_max: float; maximal time lag of the correlation function
                 (internally adjusted so that tau_max - bin_size/2 is a
                 multiple of bin_size)
        bin_size: float; width of one lag bin
        T: float; total duration of the recording (same unit as spike times)
        units: bool; if True, average the histogram across units

    **Return**:
        (time, auto): tuple
        time: 1 dim numpy.ndarray; lag-bin centers
        auto: if units=False: 2 dim numpy.ndarray (unit x lag)
              if units=True: 1 dim numpy.ndarray (lag)

    **Raises**:
        RuntimeError: if tau_max is not smaller than T/2
    '''
    if 2*tau_max >= T :
        raise RuntimeError('tau_max has to be smaller than T/2')
    nr_units = len(spike_trains)
    spike_trains = [np.asarray(st) for st in spike_trains]
    # adjust tau_max such that tau_max-bin_size/2 is a multiple of the bin_size
    N = int(tau_max/bin_size-0.5)
    tau_max = N * bin_size + bin_size/2.
    nr_bins = 2*N + 1
    if units == False:
        auto = np.zeros((nr_units,nr_bins))
    else:
        auto = np.zeros(nr_bins)
    # remove the time interval tau_max from the beginning and end of the
    # reference spike train, to avoid edge effects due to the finiteness
    # of the spike train
    trimmed_spikes = [st[np.where(st>tau_max)] for st in spike_trains]
    trimmed_spikes = [ts[np.where(ts<T-tau_max)] for ts in trimmed_spikes]
    # loop over spike trains
    for i,ts in enumerate(trimmed_spikes):
        # loop over spikes in one spike train
        for spike in ts:
            # time differences between this spike and all other spikes
            diff = spike_trains[i]-spike
            diff = diff[np.where(abs(diff)<=tau_max)]
            # map each difference onto its lag bin
            diff = (tau_max+diff)/bin_size
            diff = diff.astype(int)
            # np.add.at accumulates correctly for repeated bin indices;
            # the previous fancy-indexed "+=" counted each bin at most once
            # per reference spike, undercounting dense trains
            if units == False:
                np.add.at(auto[i], diff, 1)
            else:
                np.add.at(auto, diff, 1/float(nr_units))
    auto = auto * 1000.0
    t_start = -tau_max + bin_size/2.
    t_end = tau_max - bin_size/2.
    time = np.arange(t_start,t_end+bin_size,bin_size)
    # scale counts by the effective observation time (1000/(T-2*tau_max))
    auto /= (T-2*tau_max)
    return time, auto
def crosscorrfunc(freq, cross):
    '''
    Calculate crosscorrelation function(s) for given cross spectra.

    **Args**:
        freq: 1 dim numpy.ndarray; frequencies
        cross: 1 or 3 dim numpy.ndarray; cross spectra, 1st axis units,
               2nd axis units, 3rd axis frequencies

    **Return**:
        (time, crossf): tuple
        time: 1 dim numpy.ndarray; times
        crossf: crosscorrelation function(s), same leading shape as cross

    **Examples**:
        ---
    '''
    # bin width (ms) implied by the Nyquist frequency
    tbin = 1. / (2. * np.max(freq)) * 1e3
    time = np.arange(-len(freq) / 2. + 1, len(freq) / 2. + 1) * tbin

    def _to_lag_order(raw):
        # rotate the inverse FFT so negative lags precede the positive ones
        mid = int(len(raw) / 2.)
        return np.hstack([raw[mid + 1:], raw[:mid + 1]])

    # check whether cross contains many cross spectra
    if len(np.shape(cross)) > 1:
        N = len(cross)
        crossf = np.zeros((N, N, len(freq)))
        for i in range(N):
            for j in range(N):
                crossf[i, j] = _to_lag_order(np.real(np.fft.ifft(cross[i, j])))
        assert(len(time) == len(crossf[0, 0]))
    else:
        crossf = _to_lag_order(np.real(np.fft.ifft(cross)))
        assert(len(time) == len(crossf))
    # normalization happens in the cross spectrum
    return time, crossf
def corrcoef(time, crossf, integration_window=0.):
    '''
    Calculate the correlation coefficient for given auto- and
    crosscorrelation functions.
    Standard settings yield the zero lag correlation coefficient.
    Setting integration_window > 0 yields the correlation coefficient of
    integrated auto- and crosscorrelation functions.
    The correlation coefficient between a zero signal with any other
    signal is defined as 0.

    corrcoeff_{1,2} = crossf_{1,2} / sqrt(autof_1 * autof_2)

    **Args**:
        time: 1 dim numpy.ndarray; times corresponding to signal
        crossf: 3 dim numpy.ndarray; crosscorrelation functions,
                1st axis first unit, 2nd axis second unit, 3rd axis times
        integration_window: float; window (in units of time) integrated on
                            each side of the zero-lag bin

    **Return**:
        cc: 2 dim numpy.ndarray; correlation coefficient between two units
    '''
    N = len(crossf)
    cc = np.zeros(np.shape(crossf)[:-1])
    tbin = abs(time[1] - time[0])
    # number of bins integrated on each side of the central (zero-lag) bin
    lim = int(integration_window / tbin)
    # index of the zero-lag bin in the lag axis
    if len(time)%2 == 0:
        mid = int(len(time)/2-1)
    else:
        mid = int(np.floor(len(time)/2.))
    for i in range(N):
        # integrated autocorrelation of unit i around zero lag
        ai = np.sum(crossf[i, i][mid - lim:mid + lim + 1])
        # baseline offset estimated from the left tail of the function
        offset_autoi = np.mean(crossf[i,i][:mid-1])
        for j in range(N):
            cij = np.sum(crossf[i, j][mid - lim:mid + lim + 1])
            offset_cross = np.mean(crossf[i,j][:mid-1])
            aj = np.sum(crossf[j, j][mid - lim:mid + lim + 1])
            offset_autoj = np.mean(crossf[j,j][:mid-1])
            if ai > 0. and aj > 0.:
                # offset-corrected correlation coefficient
                cc[i, j] = (cij-offset_cross) / np.sqrt((ai-offset_autoi) * (aj-offset_autoj))
            else:
                # zero/negative integrated power: correlation defined as 0
                cc[i, j] = 0.
    return cc
def coherence(freq, power, freq_cross, cross):
    '''
    Calculate frequency resolved complex coherence for given power- and
    crossspectra.

    coherence_{1,2} = crossspec_{1,2} / sqrt(powerspec_1 * powerspec_2)

    **Args**:
        freq: 1 dim numpy.ndarray; frequencies of the power spectra
        power: 2 dim numpy.ndarray; power spectra, 1st axis units,
               2nd axis frequencies
        freq_cross: 1 dim numpy.ndarray; frequencies of the cross spectra
        cross: 3 dim numpy.ndarray; cross spectra, 1st axis units,
               2nd axis units, 3rd axis frequencies

    **Return**:
        (freq, coh): tuple
        freq: 1 dim numpy.ndarray; frequencies
        coh: 3 dim numpy.ndarray; coherences, 1st axis units,
             2nd axis units, 3rd axis frequencies

    **Examples**:
        ---
    '''
    # both spectra must be defined on the same frequency grid
    assert(min(freq) == min(freq_cross))
    assert(max(freq) == max(freq_cross))
    df = freq[1]-freq[0]
    df_cross = freq_cross[1]-freq_cross[0]
    assert(df == df_cross)
    if len(np.shape(cross)) > 1:
        N = len(power)
        coh = np.zeros_like(cross)
        for i in range(N):
            # broadcast: power[i] * power has shape (N, n_freq), so one
            # division normalizes all pairs (i, j) at once
            coh[i] = cross[i] / np.sqrt(power[i] * power)
        assert(len(freq) == len(coh[0, 0]))
    else:
        coh = cross / power
    return freq, coh
def cv(data, units=False):
    '''
    Calculate coefficient of variation for data.  Mean and standard
    deviation are computed across time.

    CV = sigma / mu

    **Args**:
        data: numpy.ndarray; 1st axis unit, 2nd axis time
        units: bool; average CV across units

    **Return**:
        if units=False: numpy.ndarray; series of unit CVs
        if units=True: float; mean CV across units

    **Examples**:
        >>> cv(np.array([[1,2,3,4,5,6],[11,2,3,3,4,5]]))
        Out[1]: np.array([ 0.48795004, 0.63887656])
        >>> cv(np.array([[1,2,3,4,5,6],[11,2,3,3,4,5]]),units=True)
        Out[1]: 0.56341330073710316
    '''
    # per-unit time average and time standard deviation
    # (equivalent to mean(data, time=True) and variance(data, time=True))
    mu = np.mean(data, axis=1)
    sigma = np.sqrt(np.var(data, axis=1))
    unit_cvs = sigma / mu
    if units is True:
        return np.mean(unit_cvs)
    return unit_cvs
def fano(data, units=False):
    '''
    Calculate fano factor for data.  Mean and variance are computed
    across time.

    FF = sigma^2 / mu

    **Args**:
        data: numpy.ndarray; 1st axis unit, 2nd axis time
        units: bool; average FF across units

    **Return**:
        if units=False: numpy.ndarray; series of unit FFs
        if units=True: float; mean FF across units

    **Examples**:
        >>> fano(np.array([[1,2,3,4,5,6],[11,2,3,3,4,5]]))
        Out[1]: np.array([0.83333333, 1.9047619])
        >>> fano(np.array([[1,2,3,4,5,6],[11,2,3,3,4,5]]),units=True)
        Out[1]: 1.3690476190476191
    '''
    # per-unit time average and time variance
    # (equivalent to mean(data, time=True) and variance(data, time=True))
    mu = np.mean(data, axis=1)
    sigma_sq = np.var(data, axis=1)
    unit_ffs = sigma_sq / mu
    if units is True:
        return np.mean(unit_ffs)
    return unit_ffs
| |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import fnmatch
import glob
import optparse
import os
import posixpath
import shutil
import stat
import sys
import time
import zipfile
if sys.version_info < (2, 6, 0):
sys.stderr.write("python 2.6 or later is required run this script\n")
sys.exit(1)
def IncludeFiles(filters, files):
  """Filter files based on inclusion lists

  Return a list of files which match any of the Unix shell-style wildcards
  provided, or return all the files if no filter is provided."""
  if not filters:
    return files
  matched = set()
  for pattern in filters:
    matched.update(fnmatch.filter(files, pattern))
  # preserve the original ordering of `files`
  return [name for name in files if name in matched]
def ExcludeFiles(filters, files):
  """Filter files based on exclusions lists

  Return a list of files which do not match any of the Unix shell-style
  wildcards provided, or return all the files if no filter is provided."""
  if not filters:
    return files
  excluded = set()
  for pattern in filters:
    excluded.update(fnmatch.filter(files, pattern))
  # preserve the original ordering of `files`
  return [name for name in files if name not in excluded]
def CopyPath(options, src, dst):
  """CopyPath from src to dst

  Copy a fully specified src to a fully specified dst.  If src and dst are
  both files, the dst file is removed first to prevent error.  If an include
  or exclude list is provided, the source path is first matched against those
  filters; non-matching sources are silently skipped."""
  # Honor the include/exclude filters before doing any work.
  if options.includes:
    if not IncludeFiles(options.includes, [src]):
      return

  if options.excludes:
    if not ExcludeFiles(options.excludes, [src]):
      return

  if options.verbose:
    print 'cp %s %s' % (src, dst)

  # If the source is a single file, copy it individually
  if os.path.isfile(src):
    # We can not copy over a directory with a file.
    if os.path.exists(dst):
      if not os.path.isfile(dst):
        msg = "cp: cannot overwrite non-file '%s' with file." % dst
        raise OSError(msg)
      # If the destination exists as a file, remove it before copying to avoid
      # 'readonly' issues.
      os.remove(dst)

    # Now copy to the non-existent fully qualified target
    shutil.copy(src, dst)
    return

  # Otherwise it's a directory, ignore it unless allowed
  if os.path.isdir(src):
    if not options.recursive:
      print "cp: omitting directory '%s'" % src
      return

    # We can not copy over a file with a directory.
    if os.path.exists(dst):
      if not os.path.isdir(dst):
        msg = "cp: cannot overwrite non-directory '%s' with directory." % dst
        raise OSError(msg)
    else:
      # if it didn't exist, create the directory
      os.makedirs(dst)

    # Now copy all members recursively (filters are re-applied per member)
    for filename in os.listdir(src):
      srcfile = os.path.join(src, filename)
      dstfile = os.path.join(dst, filename)
      CopyPath(options, srcfile, dstfile)
  return
def Copy(args):
  """A Unix cp style copy.

  Copies multiple sources to a single destination using the normal cp
  semantics.  In addition, it supports inclusion and exclusion filters which
  allow the copy to skip certain types of files.

  Raises OSError if a source pattern matches nothing."""
  # typo fixed in the usage string: 'souces' -> 'sources'
  parser = optparse.OptionParser(usage='usage: cp [Options] sources... dest')
  parser.add_option(
      '-R', '-r', '--recursive', dest='recursive', action='store_true',
      default=False,
      help='copy directories recursively.')
  parser.add_option(
      '-v', '--verbose', dest='verbose', action='store_true',
      default=False,
      help='verbose output.')
  parser.add_option(
      '--include', dest='includes', action='append', default=[],
      help='include files matching this expression.')
  parser.add_option(
      '--exclude', dest='excludes', action='append', default=[],
      help='exclude files matching this expression.')
  options, files = parser.parse_args(args)
  if len(files) < 2:
    parser.error('ERROR: expecting SOURCE(s) and DEST.')

  srcs = files[:-1]
  dst = files[-1]

  src_list = []
  for src in srcs:
    # Expand the glob; an empty expansion means the source does not exist.
    # (Renamed from `files`, which shadowed the argument list above, and
    # dropped the redundant `if files:` that always followed the raise.)
    matches = glob.glob(src)
    if not matches:
      raise OSError('cp: no such file or directory: ' + src)
    src_list.extend(matches)

  for src in src_list:
    # If the destination is a directory, then append the basename of the src
    # to the destination.
    if os.path.isdir(dst):
      CopyPath(options, src, os.path.join(dst, os.path.basename(src)))
    else:
      CopyPath(options, src, dst)
def Mkdir(args):
  """A Unix style mkdir

  Creates each DIRECTORY argument; with -p an already existing directory is
  not an error.  Returns 0 on success, raises OSError on failure."""
  parser = optparse.OptionParser(usage='usage: mkdir [Options] DIRECTORY...')
  parser.add_option(
      '-p', '--parents', dest='parents', action='store_true',
      default=False,
      help='ignore existing parents, create parents as needed.')
  parser.add_option(
      '-v', '--verbose', dest='verbose', action='store_true',
      default=False,
      help='verbose output.')
  options, dsts = parser.parse_args(args)
  if len(dsts) < 1:
    parser.error('ERROR: expecting DIRECTORY...')

  for dst in dsts:
    if options.verbose:
      print 'mkdir ' + dst
    try:
      os.makedirs(dst)
    except OSError:
      # makedirs failed: distinguish "already exists" from a real failure
      if os.path.isdir(dst):
        if options.parents:
          # -p: an existing directory is fine, move on
          continue
        raise OSError('mkdir: Already exsists: ' + dst)
      else:
        raise OSError('mkdir: Failed to create: ' + dst)
  return 0
def MovePath(options, src, dst):
"""MovePath from src to dst
Moves the src to the dst much like the Unix style mv command, except it
only handles one source at a time. Because of possible temporary failures
do to locks (such as anti-virus software on Windows), the function will retry
up to five times."""
# if the destination is not an existing directory, then overwrite it
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
# If the destination exists, the remove it
if os.path.exists(dst):
if options.force:
Remove(['-vfr', dst])
if os.path.exists(dst):
raise OSError('mv: FAILED TO REMOVE ' + dst)
else:
raise OSError('mv: already exists ' + dst)
for _ in range(5):
try:
os.rename(src, dst)
return
except OSError as error:
print 'Failed on %s with %s, retrying' % (src, error)
time.sleep(5)
print 'Gave up.'
raise OSError('mv: ' + error)
def Move(args):
  """A Unix style mv.

  Moves each SOURCE to DEST, delegating the per-file work (including retry
  on transient failures) to MovePath.  Returns 0 on success."""
  parser = optparse.OptionParser(usage='usage: mv [Options] souces... dest')
  parser.add_option(
      '-v', '--verbose', dest='verbose', action='store_true',
      default=False,
      help='verbose output.')
  parser.add_option(
      '-f', '--force', dest='force', action='store_true',
      default=False,
      help='force, do not error it files already exist.')
  options, files = parser.parse_args(args)
  if len(files) < 2:
    parser.error('ERROR: expecting SOURCE... and DEST.')

  # all leading arguments are sources; the last one is the destination
  srcs = files[:-1]
  dst = files[-1]

  if options.verbose:
    print 'mv %s %s' % (' '.join(srcs), dst)

  for src in srcs:
    MovePath(options, src, dst)
  return 0
def Remove(args):
"""A Unix style rm.
Removes the list of paths. Because of possible temporary failures do to locks
(such as anti-virus software on Windows), the function will retry up to five
times."""
parser = optparse.OptionParser(usage='usage: rm [Options] PATHS...')
parser.add_option(
'-R', '-r', '--recursive', dest='recursive', action='store_true',
default=False,
help='remove directories recursively.')
parser.add_option(
'-v', '--verbose', dest='verbose', action='store_true',
default=False,
help='verbose output.')
parser.add_option(
'-f', '--force', dest='force', action='store_true',
default=False,
help='force, do not error it files does not exist.')
options, files = parser.parse_args(args)
if len(files) < 1:
parser.error('ERROR: expecting FILE...')
try:
for pattern in files:
dst_files = glob.glob(pattern)
# Ignore non existing files when using force
if len(dst_files) == 0 and options.force:
print "rm: Skipping " + pattern
continue
elif len(dst_files) == 0:
raise OSError('rm: no such file or directory: ' + pattern)
for dst in dst_files:
if options.verbose:
print 'rm ' + dst
if os.path.isfile(dst) or os.path.islink(dst):
for i in range(5):
try:
# Check every time, since it may have been deleted after the
# previous failed attempt.
if os.path.isfile(dst) or os.path.islink(dst):
os.remove(dst)
break
except OSError as error:
if i == 5:
print 'Gave up.'
raise OSError('rm: ' + str(error))
print 'Failed remove with %s, retrying' % error
time.sleep(5)
if options.recursive:
for i in range(5):
try:
if os.path.isdir(dst):
shutil.rmtree(dst)
break
except OSError as error:
if i == 5:
print 'Gave up.'
raise OSError('rm: ' + str(error))
print 'Failed rmtree with %s, retrying' % error
time.sleep(5)
except OSError as error:
print error
return 0
def MakeZipPath(os_path, isdir, iswindows):
  """Changes a path into zipfile format.

  # doctest doesn't seem to honor r'' strings, so the backslashes need to be
  # escaped.
  >>> MakeZipPath(r'C:\\users\\foobar\\blah', False, True)
  'users/foobar/blah'
  >>> MakeZipPath('/tmp/tmpfoobar/something', False, False)
  'tmp/tmpfoobar/something'
  >>> MakeZipPath('./somefile.txt', False, False)
  'somefile.txt'
  >>> MakeZipPath('somedir', True, False)
  'somedir/'
  >>> MakeZipPath('../dir/filename.txt', False, False)
  '../dir/filename.txt'
  >>> MakeZipPath('dir/../filename.txt', False, False)
  'filename.txt'
  """
  zip_path = os_path
  if iswindows:
    import ntpath
    # zipfile paths are always posix-style; drop the drive letter and turn
    # backslashes into forward slashes.
    zip_path = ntpath.splitdrive(os_path)[1].replace('\\', '/')
  # zip archive members never carry a leading slash
  if zip_path.startswith('/'):
    zip_path = zip_path[1:]
  zip_path = posixpath.normpath(zip_path)
  # zipfile also always appends a slash to a directory name.
  return zip_path + '/' if isdir else zip_path
def OSMakeZipPath(os_path):
  """Convert a native path to zip format using the current OS conventions."""
  is_directory = os.path.isdir(os_path)
  return MakeZipPath(os_path, is_directory, sys.platform == 'win32')
def Zip(args):
  """A Unix style zip.

  Compresses the listed files into the named archive.  Existing archive
  entries with the same path are updated in place (by rewriting the whole
  archive); new entries are appended.  Returns 0 on success, 1 when there
  is nothing to do."""
  parser = optparse.OptionParser(usage='usage: zip [Options] zipfile list')
  parser.add_option(
      '-r', dest='recursive', action='store_true',
      default=False,
      help='recurse into directories')
  parser.add_option(
      '-q', dest='quiet', action='store_true',
      default=False,
      help='quiet operation')
  options, files = parser.parse_args(args)
  if len(files) < 2:
    parser.error('ERROR: expecting ZIPFILE and LIST.')

  dest_zip = files[0]
  src_args = files[1:]

  # Expand globs; with -r also walk into directories, keeping each
  # intermediate directory as its own archive entry.
  src_files = []
  for src_arg in src_args:
    globbed_src_args = glob.glob(src_arg)
    if len(globbed_src_args) == 0:
      if not options.quiet:
        print 'zip warning: name not matched: %s' % (src_arg,)

    for src_file in globbed_src_args:
      src_file = os.path.normpath(src_file)
      src_files.append(src_file)
      if options.recursive and os.path.isdir(src_file):
        for root, dirs, files in os.walk(src_file):
          for dirname in dirs:
            src_files.append(os.path.join(root, dirname))
          for filename in files:
            src_files.append(os.path.join(root, filename))

  zip_stream = None
  # zip_data represents a list of the data to be written or appended to the
  # zip_stream. It is a list of tuples:
  # (OS file path, zip path/zip file info, and file data)
  # In all cases one of the |os path| or the |file data| will be None.
  # |os path| is None when there is no OS file to write to the archive (i.e.
  # the file data already existed in the archive). |file data| is None when the
  # file is new (never existed in the archive) or being updated.
  zip_data = []
  new_files_to_add = [OSMakeZipPath(src_file) for src_file in src_files]
  zip_path_to_os_path_dict = dict((new_files_to_add[i], src_files[i])
                                  for i in range(len(src_files)))
  write_mode = 'a'
  try:
    zip_stream = zipfile.ZipFile(dest_zip, 'r')
    files_to_update = set(new_files_to_add).intersection(
        set(zip_stream.namelist()))
    if files_to_update:
      # As far as I can tell, there is no way to update a zip entry using
      # zipfile; the best you can do is rewrite the archive.
      # Iterate through the zipfile to maintain file order.
      write_mode = 'w'
      for zip_path in zip_stream.namelist():
        if zip_path in files_to_update:
          os_path = zip_path_to_os_path_dict[zip_path]
          zip_data.append((os_path, zip_path, None))
          new_files_to_add.remove(zip_path)
        else:
          # untouched entry: carry its bytes and metadata over verbatim
          file_bytes = zip_stream.read(zip_path)
          file_info = zip_stream.getinfo(zip_path)
          zip_data.append((None, file_info, file_bytes))
  except IOError:
    # archive does not exist yet: everything is a new addition
    pass
  finally:
    if zip_stream:
      zip_stream.close()

  for zip_path in new_files_to_add:
    zip_data.append((zip_path_to_os_path_dict[zip_path], zip_path, None))

  if not zip_data:
    print 'zip error: Nothing to do! (%s)' % (dest_zip,)
    return 1

  try:
    zip_stream = zipfile.ZipFile(dest_zip, write_mode, zipfile.ZIP_DEFLATED)
    for os_path, file_info_or_zip_path, file_bytes in zip_data:
      if isinstance(file_info_or_zip_path, zipfile.ZipInfo):
        zip_path = file_info_or_zip_path.filename
      else:
        zip_path = file_info_or_zip_path

      if os_path:
        st = os.stat(os_path)
        if stat.S_ISDIR(st.st_mode):
          # Python 2.6 on the buildbots doesn't support writing directories to
          # zip files. This was resolved in a later version of Python 2.6.
          # We'll work around it by writing an empty file with the correct
          # path. (This is basically what later versions do anyway.)
          zip_info = zipfile.ZipInfo()
          zip_info.filename = zip_path
          zip_info.date_time = time.localtime(st.st_mtime)[0:6]
          zip_info.compress_type = zip_stream.compression
          zip_info.flag_bits = 0x00
          # store the Unix mode bits in the upper half of external_attr
          zip_info.external_attr = (st[0] & 0xFFFF) << 16L
          zip_info.CRC = 0
          zip_info.compress_size = 0
          zip_info.file_size = 0
          zip_stream.writestr(zip_info, '')
        else:
          zip_stream.write(os_path, zip_path)
      else:
        zip_stream.writestr(file_info_or_zip_path, file_bytes)

      if not options.quiet:
        if zip_path in new_files_to_add:
          operation = 'adding'
        else:
          operation = 'updating'
        zip_info = zip_stream.getinfo(zip_path)
        if (zip_info.compress_type == zipfile.ZIP_STORED or
            zip_info.file_size == 0):
          print ' %s: %s (stored 0%%)' % (operation, zip_path)
        elif zip_info.compress_type == zipfile.ZIP_DEFLATED:
          print ' %s: %s (deflated %d%%)' % (operation, zip_path,
              100 - zip_info.compress_size * 100 / zip_info.file_size)
  finally:
    zip_stream.close()

  return 0
# Dispatch table mapping the sub-command name (argv[0]) to its implementation.
FuncMap = {
  'cp': Copy,
  'mkdir': Mkdir,
  'mv': Move,
  'rm': Remove,
  'zip': Zip,
}


def main(args):
  """Dispatch to the sub-command named by args[0].

  args is argv without the script name; the first element selects the
  command, the rest are forwarded to it.  Returns the command's exit
  status, or 1 for a missing/unknown command."""
  if not args:
    print 'No command specified'
    print 'Available commands: %s' % ' '.join(FuncMap)
    return 1
  func = FuncMap.get(args[0])
  if not func:
    print 'Do not recognize command: ' + args[0]
    print 'Available commands: %s' % ' '.join(FuncMap)
    return 1
  return func(args[1:])

if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
| |
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Tests the RSA discovery provider
:author: Scott Lewis
"""
# Standard library
import json
import threading
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try:
# Try to import modules
from multiprocessing import Process, Queue
# IronPython fails when creating a queue
Queue()
except ImportError:
# Some interpreters don't have support for multiprocessing
raise unittest.SkipTest("Interpreter doesn't support multiprocessing")
try:
import queue
except ImportError:
import Queue as queue
import pelix
from pelix.rsa import (
SERVICE_REMOTE_SERVICE_ADMIN,
ECF_ENDPOINT_CONTAINERID_NAMESPACE,
)
from pelix.rsa.endpointdescription import EndpointDescription
from pelix.framework import create_framework
from pelix.ipopo.constants import use_ipopo
# RSA
import pelix.rsa as rsa
from pelix.rsa.topologymanagers import TopologyManager
from pelix.rsa.providers.discovery import (
SERVICE_ENDPOINT_ADVERTISER,
EndpointEvent,
)
# Local utilities
from tests.utilities import WrappedProcess
# Host and root path used by the etcd-based discovery tests
TEST_ETCD_HOSTNAME = "localhost"
TEST_ETCD_TOPPATH = "/etcddiscovery.tests"

# LDAP-style filter matching every endpoint that carries an ECF container id
ENDPOINT_LISTENER_SCOPE = "({0}=*)".format(ECF_ENDPOINT_CONTAINERID_NAMESPACE)

# ------------------------------------------------------------------------------

__version_info__ = (1, 0, 1)
__version__ = ".".join(str(x) for x in __version_info__)

# ------------------------------------------------------------------------------
def start_framework_for_advertise(state_queue):
    """
    Starts a Pelix framework to advertise (via etcd) a helloimpl_xmlrpc
    remote service instance. The tests can/will then discover this
    service advertisement and test the EndpointEventListener notification

    Protocol over state_queue: puts "ready" once the service is exported,
    then blocks until the parent sends None, then stops the framework.
    Any exception is reported back as an "Error: ..." string so the
    parent never hangs waiting for "ready".

    :param state_queue: Queue to communicate status and terminate
    """
    try:
        # Start the framework
        framework = create_framework(
            [
                "pelix.ipopo.core",
                "pelix.rsa.remoteserviceadmin",  # RSA implementation
                "pelix.http.basic",  # httpservice
                # xmlrpc distribution provider (opt)
                "pelix.rsa.providers.distribution.xmlrpc",
                # etcd discovery provider (opt)
                "pelix.rsa.providers.discovery.discovery_etcd",
                "pelix.rsa.topologymanagers.basic",
                "samples.rsa.helloimpl_xmlrpc",
            ],
            {
                "ecf.xmlrpc.server.hostname": "localhost",
                "etcd.hostname": TEST_ETCD_HOSTNAME,
                "etcd.toppath": TEST_ETCD_TOPPATH,
            },
        )
        framework.start()
        context = framework.get_bundle_context()
        # Start an HTTP server, required by XML-RPC
        with use_ipopo(context) as ipopo:
            ipopo.instantiate(
                "pelix.http.service.basic.factory",
                "http-server",
                {"pelix.http.address": "localhost", "pelix.http.port": 0},
            )
        # Named rsa_svc (not "rsa") to avoid shadowing the pelix.rsa
        # module imported at the top of this file.
        rsa_svc = context.get_service(
            context.get_service_reference("pelix.rsa.remoteserviceadmin", None)
        )
        # export the hello remote service via rsa
        # with the BasicTopologyManager, this will result
        # in publish via etcd
        rsa_svc.export_service(
            context.get_service_reference(
                "org.eclipse.ecf.examples.hello.IHello"
            ),
            {
                "service.exported.interfaces": "*",
                "service.exported.configs": "ecf.xmlrpc.server",
            },
        )
        # Send that we are now ready
        state_queue.put("ready")
        # Busy-wait until the parent has consumed the "ready" message
        while True:
            if state_queue.empty():
                break
        # Block until the parent sends the None sentinel
        while True:
            state = state_queue.get()
            if state is None:
                break
        # stop the framework gracefully
        framework.stop()
    except Exception as ex:
        # Report the failure instead of dying silently in the child process
        state_queue.put("Error: {0}".format(ex))
class EtcdDiscoveryListenerTest(unittest.TestCase):
    """
    Discovery-side tests: an external process advertises a hello remote
    service via etcd; the locally registered endpoint event listener must
    receive the corresponding ADDED/REMOVED EndpointEvents.
    """

    def setUp(self):
        """
        Starts a framework in separate process to advertise a helloimpl
        remote service. Then starts a local framework to register the
        TestEndpointEventListener
        """
        print(
            "EtcdDiscoveryListenerTest etcd_hostname={0},toppath={1}".format(
                TEST_ETCD_HOSTNAME, TEST_ETCD_TOPPATH
            )
        )
        # start external framework that publishes remote service
        self.status_queue = Queue()
        self.publisher_process = WrappedProcess(
            target=start_framework_for_advertise, args=[self.status_queue]
        )
        self.publisher_process.start()
        # BUGFIX: Queue.get's first positional argument is 'block', not
        # 'timeout' -- get(10) would block forever on a hung publisher.
        # get(timeout=10) raises queue.Empty after 10s instead.
        state = self.status_queue.get(timeout=10)
        self.assertEqual(state, "ready")
        # start a local framework
        self.framework = create_framework(
            [
                "pelix.ipopo.core",
                "pelix.rsa.remoteserviceadmin",  # RSA implementation
                "tests.rsa.endpoint_event_listener",
                "pelix.rsa.providers.discovery.discovery_etcd",
            ],
            {
                "etcd.hostname": TEST_ETCD_HOSTNAME,
                "etcd.toppath": TEST_ETCD_TOPPATH,
            },
        )
        self.framework.start()
        # Start the framework and return TestEndpointEventListener
        context = self.framework.get_bundle_context()
        # Start an HTTP server, required by XML-RPC
        with use_ipopo(context) as ipopo:
            # create endpoint event listener
            self.listener = ipopo.instantiate(
                "etcd-test-endpoint-event-listener-factory",
                "etcd-test-endpoint-event-listener",
                {
                    TopologyManager.ENDPOINT_LISTENER_SCOPE: ENDPOINT_LISTENER_SCOPE
                },
            )

    def tearDown(self):
        """
        Cleans up external publishing framework for next test
        """
        # None is the shutdown sentinel for start_framework_for_advertise
        self.status_queue.put(None)
        self.publisher_process.join(1)
        self.status_queue.close()
        self.status_queue = None
        # BUGFIX: clear the attribute actually created in setUp (the old
        # code set self.publisher, a name that was never defined).
        self.publisher_process = None
        # Stop the framework
        self.framework.stop()
        pelix.framework.FrameworkFactory.delete_framework()
        self.framework = None

    def test_etcd_discover(self):
        """The listener must receive an EndpointEvent for the published
        hello service with a well-formed EndpointDescription."""
        test_done_event = threading.Event()

        def test_handler_1(endpoint_event, matched_filter):
            self.assertTrue(matched_filter, ENDPOINT_LISTENER_SCOPE)
            self.assertIsNotNone(endpoint_event, "endpoint_event is None")
            self.assertTrue(isinstance(endpoint_event, EndpointEvent))
            ee_type = endpoint_event.get_type()
            self.assertTrue(
                ee_type == EndpointEvent.ADDED
                or ee_type == EndpointEvent.REMOVED
            )
            ee_ed = endpoint_event.get_endpoint_description()
            self.assertTrue(isinstance(ee_ed, EndpointDescription))
            self.assertIsNotNone(
                ee_ed.get_id(), "endpoint_description id is None"
            )
            self.assertIsNotNone(
                ee_ed.get_framework_uuid(),
                "endpoint_description framework uuid is None",
            )
            interfaces = ee_ed.get_interfaces()
            # test that service interfaces is not None and is of type list
            self.assertIsNotNone(interfaces)
            self.assertTrue(isinstance(interfaces, type([])))
            self.assertTrue(
                "org.eclipse.ecf.examples.hello.IHello" in interfaces
            )
            # set the test_done_event, so tester thread will continue
            test_done_event.set()

        # set the handler to the test code above
        self.listener.set_handler(test_handler_1)
        # wait as much as 50 seconds to complete
        test_done_event.wait(50)

    def test_etcd_discover_remove(self):
        """After discovery, shutting the publisher down must produce a
        REMOVED event carrying the same endpoint description data."""
        test_done_event = threading.Event()

        def test_handler_2(endpoint_event, matched_filter):
            if endpoint_event.get_type() == EndpointEvent.ADDED:
                # send shutdown to trigger the removal
                self.status_queue.put(None)
            elif endpoint_event.get_type() == EndpointEvent.REMOVED:
                # do tests
                self.assertTrue(matched_filter, ENDPOINT_LISTENER_SCOPE)
                self.assertIsNotNone(endpoint_event, "endpoint_event is None")
                self.assertTrue(isinstance(endpoint_event, EndpointEvent))
                ee_ed = endpoint_event.get_endpoint_description()
                self.assertTrue(isinstance(ee_ed, EndpointDescription))
                self.assertIsNotNone(
                    ee_ed.get_id(), "endpoint_description id is None"
                )
                self.assertIsNotNone(
                    ee_ed.get_framework_uuid(),
                    "endpoint_description framework uuid is None",
                )
                interfaces = ee_ed.get_interfaces()
                # test that service interfaces is not None and is of type list
                self.assertIsNotNone(interfaces)
                self.assertTrue(isinstance(interfaces, type([])))
                self.assertTrue(
                    "org.eclipse.ecf.examples.hello.IHello" in interfaces
                )
                # finally set the test_done_event, so tester thread will
                # continue
                test_done_event.set()

        # set the handler to the test code above
        self.listener.set_handler(test_handler_2)
        # wait as much as 60 seconds to complete
        test_done_event.wait(60)
class EtcdDiscoveryPublishTest(unittest.TestCase):
    """
    Publish-side tests: exercises the etcd endpoint advertiser directly
    (advertise/unadvertise, session keys, stored endpoint content).

    NOTE(review): these tests talk to a live etcd server at
    TEST_ETCD_HOSTNAME; they assume one is reachable.
    """

    def setUp(self):
        """
        Prepares a framework with RSA, the XML-RPC distribution provider
        and the etcd discovery provider, plus the HTTP server XML-RPC
        requires. Service/registration handles are reset to None so
        tearDown only cleans up what a test actually created.
        """
        # Create the framework
        self.framework = pelix.framework.create_framework(
            [
                "pelix.ipopo.core",
                "pelix.shell.core",
                "pelix.http.basic",
                "pelix.rsa.remoteserviceadmin",
                "pelix.rsa.providers.distribution.xmlrpc",
                "pelix.rsa.providers.discovery.discovery_etcd",
            ],
            {
                "ecf.xmlrpc.server.hostname": "localhost",
                "etcd.hostname": TEST_ETCD_HOSTNAME,
                "etcd.toppath": TEST_ETCD_TOPPATH,
            },
        )
        self.framework.start()
        context = self.framework.get_bundle_context()
        # Start an HTTP server, required by XML-RPC
        with use_ipopo(context) as ipopo:
            ipopo.instantiate(
                "pelix.http.service.basic.factory",
                "http-server",
                {"pelix.http.address": "localhost", "pelix.http.port": 0},
            )
        self.advertiser = None
        self.rsa = None
        self.svc_reg = None
        self.export_reg = None
        self.eel_reg = None

    def tearDown(self):
        """
        Cleans up for next test: unregisters/closes whatever the test
        created (in reverse dependency order), ungets held services, then
        deletes the framework.
        """
        if self.eel_reg:
            self.eel_reg.unregister()
            self.eel_reg = None
        if self.svc_reg:
            self.svc_reg.unregister()
            self.svc_reg = None
        if self.export_reg:
            self.export_reg.close()
            self.export_reg = None
        if self.advertiser:
            self._unget_service(self._get_discovery_advertiser_sr())
            self.advertiser = None
        if self.rsa:
            self._unget_service(self._get_rsa_sr())
            self.rsa = None
        # Stop the framework
        pelix.framework.FrameworkFactory.delete_framework()
        self.framework = None

    def _get_discovery_advertiser_sr(self):
        """Returns the service reference of the endpoint advertiser."""
        return self.framework.get_bundle_context().get_service_reference(
            SERVICE_ENDPOINT_ADVERTISER
        )

    def _get_rsa_sr(self):
        """Returns the service reference of the RemoteServiceAdmin."""
        return self.framework.get_bundle_context().get_service_reference(
            SERVICE_REMOTE_SERVICE_ADMIN
        )

    def _get_service(self, sr):
        """Resolves a service reference to its service object."""
        return self.framework.get_bundle_context().get_service(sr)

    def _unget_service(self, sr):
        """Releases a previously obtained service reference."""
        self.framework.get_bundle_context().unget_service(sr)

    def _get_advertiser(self):
        """Gets (and caches, for tearDown) the endpoint advertiser."""
        self.advertiser = self._get_service(self._get_discovery_advertiser_sr())
        return self.advertiser

    def _get_rsa(self):
        """Gets (and caches, for tearDown) the RemoteServiceAdmin."""
        self.rsa = self._get_service(self._get_rsa_sr())
        return self.rsa

    def _register_svc(self):
        """Registers a trivial local service to be exported by a test."""
        spec = "test.svc"
        svc = object()
        return self.framework.get_bundle_context().register_service(
            spec, svc, {}
        )

    def _export_svc(self):
        """Registers and RSA-exports the test service; caches both the
        registration and the export registration for tearDown."""
        self.svc_reg = self._register_svc()
        self.export_reg = self._get_rsa().export_service(
            self.svc_reg.get_reference(),
            {
                rsa.SERVICE_EXPORTED_INTERFACES: "*",
                rsa.SERVICE_EXPORTED_CONFIGS: "ecf.xmlrpc.server",
            },
        )[0]
        return self.export_reg

    def test_get_discovery_advertiser(self):
        """The etcd advertiser service must be registered and resolvable."""
        disc_adv_sr = self._get_discovery_advertiser_sr()
        self.assertIsNotNone(disc_adv_sr, "advertiser ref is null")
        self.advertiser = self._get_service(disc_adv_sr)
        self.assertIsNotNone(self.advertiser, "advertiser svc is null")

    def test_none_advertised(self):
        """A fresh advertiser must report no advertised endpoints."""
        adv = self._get_advertiser()
        eps = adv.get_advertised_endpoints()
        self.assertDictEqual(
            eps, {}, "advertised endpoints not empty eps={0}".format(eps)
        )

    def test_etcd_session(self):
        """The advertiser must have established an etcd session id."""
        self.assertIsNotNone(
            self._get_advertiser()._sessionid, "etcd._sessionid is null"
        )

    def test_etcd_client(self):
        """The advertiser must have created its etcd client."""
        self.assertIsNotNone(
            self._get_advertiser()._client, "etcd._client is null"
        )

    def test_etcd_remote_exists(self):
        """The advertiser's session path must exist on the etcd server
        (client.get raises if the key is absent)."""
        adv = self._get_advertiser()
        adv._client.get(adv._get_session_path())

    def test_etcd_advertise(self):
        """advertise_endpoint must create the endpoint key under the
        session path; unadvertise_endpoint must remove it again."""
        adv = self._get_advertiser()
        export_reg = self._export_svc()
        ed = export_reg.get_description()
        ed_id = ed.get_id()
        ep_adv = adv.advertise_endpoint(ed)
        self.assertTrue(ep_adv, "advertise_endpoint failed")
        ep_key = adv._get_session_path() + "/" + ed_id
        # test for existence of ep id key
        adv._client.get(ep_key)
        # get advertised endpoints
        eps = adv.get_advertised_endpoints()
        # should be of length 1
        self.assertTrue(len(eps) == 1, "length of eps is not equal 1")
        # now unadvertise
        adv.unadvertise_endpoint(ed_id)
        try:
            adv._client.get(ep_key)
            self.fail(
                "endpoint={0} still advertised after being removed".format(
                    ed_id
                )
            )
        except Exception:  # exception expected
            pass
        eps = adv.get_advertised_endpoints()
        self.assertTrue(
            len(eps) == 0,
            "length of eps should be 0 and is {0}".format(len(eps)),
        )

    def test_etcd_advertise_content(self):
        """The JSON stored in etcd must round-trip back to exactly the
        advertiser's own encoding of the endpoint description."""
        adv = self._get_advertiser()
        export_reg = self._export_svc()
        ed = export_reg.get_description()
        ed_id = ed.get_id()
        # get encoded version of endpoint description (dict)
        encoded_ep = adv._encode_description(ed)
        # advertise it
        adv.advertise_endpoint(ed)
        # get the string directly via http and key
        ed_val_str = list(
            adv._client.get(adv._get_session_path() + "/" + ed_id).get_subtree()
        )[0].value
        # decode the string into json object (dict)
        val_encoded = json.loads(ed_val_str)
        # compare the original dict with the one returned
        self.assertDictEqual(
            encoded_ep, val_encoded, "encoded endpoints not equal"
        )
        # also check a couple of fields
        self.assertListEqual(
            encoded_ep["properties"],
            val_encoded["properties"],
            "encoded_ed_props and val_encoded_props are not equal",
        )
        # now unadvertise
        adv.unadvertise_endpoint(ed_id)
| |
import logging, threading, sys, os, time, subprocess, string, tempfile, re, traceback, shutil
from galaxy import util, model
from galaxy.model import mapping
from galaxy.model.orm import lazyload
from galaxy.datatypes.tabular import *
from galaxy.datatypes.interval import *
from galaxy.datatypes import metadata
import pkg_resources
pkg_resources.require( "PasteDeploy" )
from paste.deploy.converters import asbool
from Queue import Queue, Empty
# Module-level logger
log = logging.getLogger( __name__ )
# States for running a job. These are NOT the same as data states
# (wait: inputs not ready yet; ready: dispatchable; input_error /
# input_deleted: rejected because of its inputs; deleted /
# admin_deleted: cancelled while queued -- see JobQueue.__monitor_step)
JOB_WAIT, JOB_ERROR, JOB_INPUT_ERROR, JOB_INPUT_DELETED, JOB_OK, JOB_READY, JOB_DELETED, JOB_ADMIN_DELETED = 'wait', 'error', 'input_error', 'input_deleted', 'ok', 'ready', 'deleted', 'admin_deleted'
class JobManager( object ):
    """
    Top-level facade over job execution.

    When job running is enabled, owns the dispatcher plus the start and
    stop queues; otherwise both queue attributes share a single inert
    NoopQueue placeholder.

    TODO: Currently the app accesses "job_queue" and "job_stop_queue"
    directly. This should be decoupled.
    """
    def __init__( self, app ):
        self.app = app
        running_enabled = self.app.config.get_bool( "enable_job_running", True )
        if not running_enabled:
            # Job running disabled: both queues are the same no-op object
            self.job_queue = self.job_stop_queue = NoopQueue()
        else:
            # The dispatcher launches the underlying job runners
            self.dispatcher = DefaultJobDispatcher( app )
            # Queues for starting and stopping jobs
            self.job_queue = JobQueue( app, self.dispatcher )
            self.job_stop_queue = JobStopQueue( app, self.dispatcher )
    def shutdown( self ):
        # Shut the start queue down first, then the stop queue
        for worker_queue in ( self.job_queue, self.job_stop_queue ):
            worker_queue.shutdown()
class Sleeper( object ):
    """
    Interruptable sleep helper.

    sleep() blocks for up to the given number of seconds; wake() (called
    from a different thread) releases any current sleeper early.
    """
    def __init__( self ):
        self.condition = threading.Condition()
    def sleep( self, seconds ):
        # Using the condition as a context manager acquires the lock on
        # entry and releases it on exit -- identical to the explicit
        # acquire()/wait()/release() sequence.
        with self.condition:
            self.condition.wait( seconds )
    def wake( self ):
        with self.condition:
            self.condition.notify()
class JobQueue( object ):
    """
    Job manager, waits for jobs to be runnable and then dispatches to
    a JobRunner.

    Jobs arrive either through put() (in-memory queue) or are pulled from
    the database when 'track_jobs_in_database' is enabled. A background
    monitor thread repeatedly checks waiting jobs and dispatches the
    ready ones, optionally through a pluggable scheduling-policy queue.
    """
    # Sentinel placed on the in-memory queue to stop the monitor thread
    STOP_SIGNAL = object()
    def __init__( self, app, dispatcher ):
        """Start the job manager"""
        self.app = app
        # Should we read jobs from the database, or use an in memory queue
        self.track_jobs_in_database = app.config.get_bool( 'track_jobs_in_database', False )
        # Check if any special scheduling policy should be used. If not, default is FIFO.
        sched_policy = app.config.get('job_scheduler_policy', 'FIFO')
        # Parse the scheduler policy string ("module.path:ClassName"). The
        # policy class implements a special queue; ready-to-run jobs are
        # inserted into this queue
        if sched_policy != 'FIFO' :
            try :
                self.use_policy = True
                if ":" in sched_policy :
                    modname , policy_class = sched_policy.split(":")
                    modfields = modname.split(".")
                    module = __import__(modname)
                    # __import__ returns the top-level package; walk down
                    # to the actual submodule
                    for mod in modfields[1:] : module = getattr( module, mod)
                    # instantiate the policy class
                    self.squeue = getattr( module , policy_class )(self.app)
                else :
                    self.use_policy = False
                    log.info("Scheduler policy not defined as expected, defaulting to FIFO")
            except AttributeError: # try may throw AttributeError
                self.use_policy = False
                log.exception("Error while loading scheduler policy class, defaulting to FIFO")
        else :
            self.use_policy = False
        log.info("job scheduler policy is %s" %sched_policy)
        # Keep track of the pid that started the job manager, only it
        # has valid threads
        self.parent_pid = os.getpid()
        # Contains new jobs. Note this is not used if track_jobs_in_database is True
        self.queue = Queue()
        # Contains jobs that are waiting (only use from monitor thread)
        ## This and new_jobs[] are closest to a "Job Queue"
        self.waiting = []
        # Helper for interruptable sleep
        self.sleeper = Sleeper()
        self.running = True
        self.dispatcher = dispatcher
        self.monitor_thread = threading.Thread( target=self.__monitor )
        self.monitor_thread.start()
        log.info( "job manager started" )
        if app.config.get_bool( 'enable_job_recovery', True ):
            self.__check_jobs_at_startup()
    def __check_jobs_at_startup( self ):
        """
        Checks all jobs that are in the 'new', 'queued' or 'running' state in
        the database and requeues or cleans up as necessary. Only run as the
        job manager starts.
        """
        model = self.app.model
        for job in model.Job.filter( model.Job.c.state==model.Job.states.NEW ).all():
            log.debug( "no runner: %s is still in new state, adding to the jobs queue" %job.id )
            self.queue.put( ( job.id, job.tool_id ) )
        for job in model.Job.filter( (model.Job.c.state == model.Job.states.RUNNING) | (model.Job.c.state == model.Job.states.QUEUED) ).all():
            if job.job_runner_name is None:
                log.debug( "no runner: %s is still in queued state, adding to the jobs queue" %job.id )
                self.queue.put( ( job.id, job.tool_id ) )
            else:
                # Job was already handed to a runner; let that runner
                # re-attach to it
                job_wrapper = JobWrapper( job, self.app.toolbox.tools_by_id[ job.tool_id ], self )
                self.dispatcher.recover( job, job_wrapper )
    def __monitor( self ):
        """
        Continually iterate the waiting jobs, checking is each is ready to
        run and dispatching if so.
        """
        # HACK: Delay until after forking, we need a way to do post fork notification!!!
        time.sleep( 10 )
        while self.running:
            try:
                self.__monitor_step()
            except Exception:
                # Keep the monitor loop alive; the failure is logged with
                # its traceback (was a bare except, which would also
                # swallow SystemExit)
                log.exception( "Exception in monitor_step" )
            # Sleep
            self.sleeper.sleep( 1 )
    def __monitor_step( self ):
        """
        Called repeatedly by `monitor` to process waiting jobs. Gets any new
        jobs (either from the database or from its own queue), then iterates
        over all new and waiting jobs to check the state of the jobs each
        depends on. If the job has dependencies that have not finished, it
        it goes to the waiting queue. If the job has dependencies with errors,
        it is marked as having errors and removed from the queue. Otherwise,
        the job is dispatched.
        """
        # Get an orm session
        session = mapping.Session()
        # Pull all new jobs from the queue at once
        new_jobs = []
        if self.track_jobs_in_database:
            for j in session.query( model.Job ).options( lazyload( "external_output_metadata" ), lazyload( "parameters" ) ).filter( model.Job.c.state == model.Job.states.NEW ).all():
                job = JobWrapper( j, self.app.toolbox.tools_by_id[ j.tool_id ], self )
                new_jobs.append( job )
        else:
            try:
                while 1:
                    message = self.queue.get_nowait()
                    if message is self.STOP_SIGNAL:
                        return
                    # Unpack the message
                    job_id, tool_id = message
                    # Create a job wrapper from it
                    job_entity = session.query( model.Job ).get( job_id )
                    job = JobWrapper( job_entity, self.app.toolbox.tools_by_id[ tool_id ], self )
                    # Append to watch queue
                    new_jobs.append( job )
            except Empty:
                pass
        # Iterate over new and waiting jobs and look for any that are
        # ready to run
        new_waiting = []
        for job in ( new_jobs + self.waiting ):
            try:
                # Clear the session for each job so we get fresh states for
                # job and all datasets
                session.clear()
                # Get the real job entity corresponding to the wrapper (if we
                # are tracking in the database this is probably cached in
                # the session from the origianl query above)
                job_entity = session.query( model.Job ).get( job.job_id )
                # Check the job's dependencies, requeue if they're not done
                job_state = self.__check_if_ready_to_run( job, job_entity )
                if job_state == JOB_WAIT:
                    if not self.track_jobs_in_database:
                        new_waiting.append( job )
                elif job_state == JOB_ERROR:
                    log.info( "job %d ended with an error" % job.job_id )
                elif job_state == JOB_INPUT_ERROR:
                    log.info( "job %d unable to run: one or more inputs in error state" % job.job_id )
                elif job_state == JOB_INPUT_DELETED:
                    log.info( "job %d unable to run: one or more inputs deleted" % job.job_id )
                elif job_state == JOB_READY:
                    # If special queuing is enabled, put the ready jobs in the special queue
                    if self.use_policy :
                        self.squeue.put( job )
                        log.debug( "job %d put in policy queue" % job.job_id )
                    else: # or dispatch the job directly
                        self.dispatcher.put( job )
                        log.debug( "job %d dispatched" % job.job_id )
                elif job_state == JOB_DELETED:
                    msg = "job %d deleted by user while still queued" % job.job_id
                    job.info = msg
                    log.debug( msg )
                elif job_state == JOB_ADMIN_DELETED:
                    job.fail( job_entity.info )
                    log.info( "job %d deleted by admin while still queued" % job.job_id )
                else:
                    msg = "unknown job state '%s' for job %d" % ( job_state, job.job_id )
                    job.info = msg
                    log.error( msg )
            except Exception as e:
                job.info = "failure running job %d: %s" % ( job.job_id, str( e ) )
                log.exception( "failure running job %d" % job.job_id )
        # Update the waiting list
        self.waiting = new_waiting
        # If special (e.g. fair) scheduling is enabled, dispatch all jobs
        # currently in the special queue
        if self.use_policy :
            while 1:
                try:
                    sjob = self.squeue.get()
                    self.dispatcher.put( sjob )
                    log.debug( "job %d dispatched" % sjob.job_id )
                except Empty:
                    # squeue is empty, so stop dispatching
                    break
                except Exception as e: # if something else breaks while dispatching
                    # BUGFIX: fail the job being dispatched (sjob) -- the old
                    # code called job.fail(), hitting the leftover loop
                    # variable from the waiting-jobs loop above
                    sjob.fail( "failure running job %d: %s" % ( sjob.job_id, str( e ) ) )
                    log.exception( "failure running job %d" % sjob.job_id )
        # Done with the session
        mapping.Session.remove()
    def __check_if_ready_to_run( self, job_wrapper, job ):
        """
        Check if a job is ready to run by verifying that each of its input
        datasets is ready (specifically in the OK state). If any input dataset
        has an error, fail the job and return JOB_INPUT_ERROR. If any input
        dataset is deleted, fail the job and return JOB_INPUT_DELETED. If all
        input datasets are in OK state, return JOB_READY indicating that the
        job can be dispatched. Otherwise, return JOB_WAIT indicating that input
        datasets are still being prepared.
        """
        if job.state == model.Job.states.DELETED:
            return JOB_DELETED
        elif job.state == model.Job.states.ERROR:
            return JOB_ADMIN_DELETED
        for dataset_assoc in job.input_datasets:
            idata = dataset_assoc.dataset
            if not idata:
                continue
            # don't run jobs for which the input dataset was deleted
            if idata.deleted:
                job_wrapper.fail( "input data %d (file: %s) was deleted before the job started" % ( idata.hid, idata.file_name ) )
                return JOB_INPUT_DELETED
            # an error in the input data causes us to bail immediately
            elif idata.state == idata.states.ERROR:
                job_wrapper.fail( "input data %d is in error state" % ( idata.hid ) )
                return JOB_INPUT_ERROR
            elif idata.state != idata.states.OK:
                # need to requeue
                return JOB_WAIT
        return JOB_READY
    def put( self, job_id, tool ):
        """Add a job to the queue (by job identifier)"""
        if not self.track_jobs_in_database:
            self.queue.put( ( job_id, tool.id ) )
            self.sleeper.wake()
    def shutdown( self ):
        """Attempts to gracefully shut down the worker thread"""
        if self.parent_pid != os.getpid():
            # We're not the real job queue, do nothing
            return
        else:
            log.info( "sending stop signal to worker thread" )
            self.running = False
            if not self.track_jobs_in_database:
                self.queue.put( self.STOP_SIGNAL )
            # Wake the monitor so it notices self.running immediately
            self.sleeper.wake()
            log.info( "job queue stopped" )
            self.dispatcher.shutdown()
class JobWrapper( object ):
"""
Wraps a 'model.Job' with convience methods for running processes and
state management.
"""
def __init__(self, job, tool, queue ):
self.job_id = job.id
# This is immutable, we cache it for the scheduling policy to use if needed
self.session_id = job.session_id
self.tool = tool
self.queue = queue
self.app = queue.app
self.extra_filenames = []
self.command_line = None
self.galaxy_lib_dir = None
# With job outputs in the working directory, we need the working
# directory to be set before prepare is run, or else premature deletion
# and job recovery fail.
self.working_directory = \
os.path.join( self.app.config.job_working_directory, str( self.job_id ) )
self.output_paths = None
self.external_output_metadata = metadata.JobExternalOutputMetadataWrapper( job ) #wrapper holding the info required to restore and clean up from files used for setting metadata externally
def get_param_dict( self ):
"""
Restore the dictionary of parameters from the database.
"""
job = model.Job.get( self.job_id )
param_dict = dict( [ ( p.name, p.value ) for p in job.parameters ] )
param_dict = self.tool.params_from_strings( param_dict, self.app )
return param_dict
def prepare( self ):
"""
Prepare the job to run by creating the working directory and the
config files.
"""
mapping.context.current.clear() #this prevents the metadata reverting that has been seen in conjunction with the PBS job runner
if not os.path.exists( self.working_directory ):
os.mkdir( self.working_directory )
# Restore parameters from the database
job = model.Job.get( self.job_id )
incoming = dict( [ ( p.name, p.value ) for p in job.parameters ] )
incoming = self.tool.params_from_strings( incoming, self.app )
# Do any validation that could not be done at job creation
self.tool.handle_unvalidated_param_values( incoming, self.app )
# Restore input / output data lists
inp_data = dict( [ ( da.name, da.dataset ) for da in job.input_datasets ] )
out_data = dict( [ ( da.name, da.dataset ) for da in job.output_datasets ] )
# These can be passed on the command line if wanted as $userId $userEmail
if job.history.user: # check for anonymous user!
userId = '%d' % job.history.user.id
userEmail = str(job.history.user.email)
else:
userId = 'Anonymous'
userEmail = 'Anonymous'
incoming['userId'] = userId
incoming['userEmail'] = userEmail
# Build params, done before hook so hook can use
param_dict = self.tool.build_param_dict( incoming, inp_data, out_data, self.get_output_fnames(), self.working_directory )
# Certain tools require tasks to be completed prior to job execution
# ( this used to be performed in the "exec_before_job" hook, but hooks are deprecated ).
if self.tool.tool_type is not None:
out_data = self.tool.exec_before_job( self.queue.app, inp_data, out_data, param_dict )
# Run the before queue ("exec_before_job") hook
self.tool.call_hook( 'exec_before_job', self.queue.app, inp_data=inp_data,
out_data=out_data, tool=self.tool, param_dict=incoming)
mapping.context.current.flush()
# Build any required config files
config_filenames = self.tool.build_config_files( param_dict, self.working_directory )
# FIXME: Build the param file (might return None, DEPRECATED)
param_filename = self.tool.build_param_file( param_dict, self.working_directory )
# Build the job's command line
self.command_line = self.tool.build_command_line( param_dict )
# FIXME: for now, tools get Galaxy's lib dir in their path
if self.command_line and self.command_line.startswith( 'python' ):
self.galaxy_lib_dir = os.path.abspath( "lib" ) # cwd = galaxy root
# We need command_line persisted to the db in order for Galaxy to re-queue the job
# if the server was stopped and restarted before the job finished
job.command_line = self.command_line
job.flush()
# Return list of all extra files
extra_filenames = config_filenames
if param_filename is not None:
extra_filenames.append( param_filename )
self.param_dict = param_dict
self.extra_filenames = extra_filenames
return extra_filenames
def fail( self, message, exception=False ):
"""
Indicate job failure by setting state and message on all output
datasets.
"""
job = model.Job.get( self.job_id )
job.refresh()
# if the job was deleted, don't fail it
if not job.state == model.Job.states.DELETED:
# Check if the failure is due to an exception
if exception:
# Save the traceback immediately in case we generate another
# below
job.traceback = traceback.format_exc()
# Get the exception and let the tool attempt to generate
# a better message
etype, evalue, tb = sys.exc_info()
m = self.tool.handle_job_failure_exception( evalue )
if m:
message = m
if self.app.config.outputs_to_working_directory:
for dataset_path in self.get_output_fnames():
try:
shutil.move( dataset_path.false_path, dataset_path.real_path )
log.debug( "fail(): Moved %s to %s" % ( dataset_path.false_path, dataset_path.real_path ) )
except ( IOError, OSError ), e:
log.error( "fail(): Missing output file in working directory: %s" % e )
for dataset_assoc in job.output_datasets:
dataset = dataset_assoc.dataset
dataset.refresh()
dataset.state = dataset.states.ERROR
dataset.blurb = 'tool error'
dataset.info = message
dataset.set_size()
dataset.flush()
job.state = model.Job.states.ERROR
job.command_line = self.command_line
job.info = message
job.flush()
# If the job was deleted, just clean up
self.cleanup()
def change_state( self, state, info = False ):
job = model.Job.get( self.job_id )
job.refresh()
for dataset_assoc in job.output_datasets:
dataset = dataset_assoc.dataset
dataset.refresh()
dataset.state = state
if info:
dataset.info = info
dataset.flush()
if info:
job.info = info
job.state = state
job.flush()
def get_state( self ):
job = model.Job.get( self.job_id )
job.refresh()
return job.state
def set_runner( self, runner_url, external_id ):
job = model.Job.get( self.job_id )
job.refresh()
job.job_runner_name = runner_url
job.job_runner_external_id = external_id
job.flush()
def finish( self, stdout, stderr ):
"""
Called to indicate that the associated command has been run. Updates
the output datasets based on stderr and stdout from the command, and
the contents of the output files.
"""
# default post job setup
mapping.context.current.clear()
job = model.Job.get( self.job_id )
# if the job was deleted, don't finish it
if job.state == job.states.DELETED:
self.cleanup()
return
elif job.state == job.states.ERROR:
# Job was deleted by an administrator
self.fail( job.info )
return
if stderr:
job.state = "error"
else:
job.state = 'ok'
if self.app.config.outputs_to_working_directory:
for dataset_path in self.get_output_fnames():
try:
shutil.move( dataset_path.false_path, dataset_path.real_path )
log.debug( "finish(): Moved %s to %s" % ( dataset_path.false_path, dataset_path.real_path ) )
except ( IOError, OSError ):
self.fail( "Job %s's output dataset(s) could not be read" % job.id )
return
for dataset_assoc in job.output_datasets:
#should this also be checking library associations? - can a library item be added from a history before the job has ended? - lets not allow this to occur
for dataset in dataset_assoc.dataset.dataset.history_associations: #need to update all associated output hdas, i.e. history was shared with job running
dataset.blurb = 'done'
dataset.peek = 'no peek'
dataset.info = stdout + stderr
dataset.set_size()
if stderr:
dataset.blurb = "error"
elif dataset.has_data():
# Only set metadata values if they are missing...
dataset.set_meta( overwrite = False )
is_multi_byte = self.tool.is_multi_byte
if is_multi_byte:
dataset.set_multi_byte_peek()
else:
dataset.set_peek()
else:
dataset.blurb = "empty"
dataset.flush()
if stderr:
dataset_assoc.dataset.dataset.state = model.Dataset.states.ERROR
else:
dataset_assoc.dataset.dataset.state = model.Dataset.states.OK
dataset_assoc.dataset.dataset.flush()
# Save stdout and stderr
if len( stdout ) > 32768:
log.error( "stdout for job %d is greater than 32K, only first part will be logged to database" % job.id )
job.stdout = stdout[:32768]
if len( stderr ) > 32768:
log.error( "stderr for job %d is greater than 32K, only first part will be logged to database" % job.id )
job.stderr = stderr[:32768]
# custom post process setup
inp_data = dict( [ ( da.name, da.dataset ) for da in job.input_datasets ] )
out_data = dict( [ ( da.name, da.dataset ) for da in job.output_datasets ] )
param_dict = dict( [ ( p.name, p.value ) for p in job.parameters ] ) # why not re-use self.param_dict here? ##dunno...probably should, this causes tools.parameters.basic.UnvalidatedValue to be used in following methods instead of validated and transformed values during i.e. running workflows
param_dict = self.tool.params_from_strings( param_dict, self.app )
# Check for and move associated_files
self.tool.collect_associated_files(out_data, self.working_directory)
# Create generated output children and primary datasets and add to param_dict
collected_datasets = {'children':self.tool.collect_child_datasets(out_data),'primary':self.tool.collect_primary_datasets(out_data)}
param_dict.update({'__collected_datasets__':collected_datasets})
# Certain tools require tasks to be completed after job execution
# ( this used to be performed in the "exec_after_process" hook, but hooks are deprecated ).
if self.tool.tool_type is not None:
self.tool.exec_after_process( self.queue.app, inp_data, out_data, param_dict )
# Call 'exec_after_process' hook
self.tool.call_hook( 'exec_after_process', self.queue.app, inp_data=inp_data,
out_data=out_data, param_dict=param_dict,
tool=self.tool, stdout=stdout, stderr=stderr )
# TODO
# validate output datasets
job.command_line = self.command_line
mapping.context.current.flush()
log.debug( 'job %d ended' % self.job_id )
self.cleanup()
def cleanup( self ):
# remove temporary files
try:
for fname in self.extra_filenames:
os.remove( fname )
if self.working_directory is not None:
shutil.rmtree( self.working_directory )
if self.app.config.set_metadata_externally:
self.external_output_metadata.cleanup_external_metadata()
except:
log.exception( "Unable to cleanup job %d" % self.job_id )
def get_command_line( self ):
return self.command_line
def get_session_id( self ):
return self.session_id
def get_input_fnames( self ):
job = model.Job.get( self.job_id )
filenames = []
for da in job.input_datasets: #da is JobToInputDatasetAssociation object
if da.dataset:
filenames.append( da.dataset.file_name )
#we will need to stage in metadata file names also
#TODO: would be better to only stage in metadata files that are actually needed (found in command line, referenced in config files, etc.)
for key, value in da.dataset.metadata.items():
if isinstance( value, model.MetadataFile ):
filenames.append( value.file_name )
return filenames
def get_output_fnames( self ):
if self.output_paths is not None:
return self.output_paths
class DatasetPath( object ):
def __init__( self, real_path, false_path = None ):
self.real_path = real_path
self.false_path = false_path
def __str__( self ):
if false_path is None:
return self.real_path
else:
return self.false_path
job = model.Job.get( self.job_id )
if self.app.config.outputs_to_working_directory:
self.output_paths = []
for name, data in [ ( da.name, da.dataset.dataset ) for da in job.output_datasets ]:
false_path = os.path.abspath( os.path.join( self.working_directory, "galaxy_dataset_%d.dat" % data.id ) )
self.output_paths.append( DatasetPath( data.file_name, false_path ) )
else:
self.output_paths = [ DatasetPath( da.dataset.file_name ) for da in job.output_datasets ]
return self.output_paths
def check_output_sizes( self ):
sizes = []
output_paths = self.get_output_fnames()
for outfile in [ str( o ) for o in output_paths ]:
sizes.append( ( outfile, os.stat( outfile ).st_size ) )
return sizes
    def setup_external_metadata( self, exec_dir = None, tmp_dir = None, dataset_files_path = None, **kwds ):
        """Prepare external (out-of-process) metadata generation for every
        output dataset of this job, delegating to
        ``self.external_output_metadata.setup_external_metadata``.

        Defaults: tmp_dir falls back to the app's new_file_path and
        dataset_files_path to the model's Dataset.file_path.
        """
        if tmp_dir is None:
            # this dir should be relative to the exec_dir
            tmp_dir = self.app.config.new_file_path
        if dataset_files_path is None:
            dataset_files_path = self.app.model.Dataset.file_path
        job = model.Job.get( self.job_id )
        return self.external_output_metadata.setup_external_metadata( [ output_dataset_assoc.dataset for output_dataset_assoc in job.output_datasets ], exec_dir = exec_dir, tmp_dir = tmp_dir, dataset_files_path = dataset_files_path, **kwds )
class DefaultJobDispatcher( object ):
    """Routes jobs to the configured job runner plugins (local/pbs/sge).

    The runner for a job is chosen from the prefix of its
    ``job_runner`` URL (the part before the first ``:``).
    """
    def __init__( self, app ):
        self.app = app
        # maps runner name ("local", "pbs", "sge") -> runner instance
        self.job_runners = {}
        # the local runner is always started; others come from config
        start_job_runners = ["local"]
        if app.config.start_job_runners is not None:
            start_job_runners.extend( app.config.start_job_runners.split(",") )
        # Imports are done lazily here so that unused runners (and their
        # third-party dependencies) are never loaded.
        for runner_name in start_job_runners:
            if runner_name == "local":
                import runners.local
                self.job_runners[runner_name] = runners.local.LocalJobRunner( app )
            elif runner_name == "pbs":
                import runners.pbs
                self.job_runners[runner_name] = runners.pbs.PBSJobRunner( app )
            elif runner_name == "sge":
                import runners.sge
                self.job_runners[runner_name] = runners.sge.SGEJobRunner( app )
            else:
                log.error( "Unable to start unknown job runner: %s" %runner_name )
    def put( self, job_wrapper ):
        """Queue a wrapped job on the runner named by its tool's job_runner URL."""
        runner_name = ( job_wrapper.tool.job_runner.split(":", 1) )[0]
        log.debug( "dispatching job %d to %s runner" %( job_wrapper.job_id, runner_name ) )
        self.job_runners[runner_name].put( job_wrapper )
    def stop( self, job ):
        """Ask the runner that owns `job` to stop it."""
        runner_name = ( job.job_runner_name.split(":", 1) )[0]
        log.debug( "stopping job %d in %s runner" %( job.id, runner_name ) )
        self.job_runners[runner_name].stop_job( job )
    def recover( self, job, job_wrapper ):
        """Ask the owning runner to recover a job after a restart."""
        runner_name = ( job.job_runner_name.split(":", 1) )[0]
        log.debug( "recovering job %d in %s runner" %( job.id, runner_name ) )
        self.job_runners[runner_name].recover( job, job_wrapper )
    def shutdown( self ):
        """Shut down every started runner."""
        for runner in self.job_runners.itervalues():
            runner.shutdown()
class JobStopQueue( object ):
    """
    A queue for jobs which need to be terminated prematurely.

    A background thread drains the queue and, for each job id, marks the
    job DELETED (or ERROR when an error message is supplied) and asks the
    dispatcher to stop it.
    """
    # sentinel object placed on the queue to terminate the monitor thread
    STOP_SIGNAL = object()
    def __init__( self, app, dispatcher ):
        self.app = app
        self.dispatcher = dispatcher
        # Keep track of the pid that started the job manager, only it
        # has valid threads
        self.parent_pid = os.getpid()
        # Contains new jobs. Note this is not used if track_jobs_in_database is True
        self.queue = Queue()
        # Contains jobs that are waiting (only use from monitor thread)
        self.waiting = []
        # Helper for interruptable sleep
        self.sleeper = Sleeper()
        self.running = True
        self.monitor_thread = threading.Thread( target=self.monitor )
        self.monitor_thread.start()
        log.info( "job stopper started" )
    def monitor( self ):
        """
        Continually iterate the waiting jobs, stop any that are found.
        """
        # HACK: Delay until after forking, we need a way to do post fork notification!!!
        time.sleep( 10 )
        while self.running:
            try:
                self.monitor_step()
            # deliberately broad: the monitor thread must never die
            except:
                log.exception( "Exception in monitor_step" )
            # Sleep
            self.sleeper.sleep( 1 )
    def monitor_step( self ):
        """
        Called repeatedly by `monitor` to stop jobs.
        """
        # Pull all new jobs from the queue at once
        jobs = []
        try:
            while 1:
                ( job_id, error_msg ) = self.queue.get_nowait()
                if job_id is self.STOP_SIGNAL:
                    return
                # Append to watch queue
                jobs.append( ( job_id, error_msg ) )
        except Empty:
            pass
        for job_id, error_msg in jobs:
            job = model.Job.get( job_id )
            job.refresh()
            # if desired, error the job so we can inform the user.
            if error_msg is not None:
                job.state = job.states.ERROR
                job.info = error_msg
            else:
                job.state = job.states.DELETED
            job.flush()
            # if job is in JobQueue or FooJobRunner's put method,
            # job_runner_name will be unset and the job will be dequeued due to
            # state change above
            if job.job_runner_name is not None:
                # tell the dispatcher to stop the job
                self.dispatcher.stop( job )
    def put( self, job_id, error_msg=None ):
        """Request that `job_id` be stopped; `error_msg` marks it as errored."""
        self.queue.put( ( job_id, error_msg ) )
    def shutdown( self ):
        """Attempts to gracefully shut down the worker thread"""
        if self.parent_pid != os.getpid():
            # We're not the real job queue, do nothing
            return
        else:
            log.info( "sending stop signal to worker thread" )
            self.running = False
            self.queue.put( ( self.STOP_SIGNAL, None ) )
            self.sleeper.wake()
            log.info( "job stopper stopped" )
class NoopQueue( object ):
    """
    Implements the JobQueue / JobStopQueue interface but does nothing
    """
    def put( self, *args ):
        """Accept and silently discard a submission."""
        return
    def shutdown( self ):
        """Nothing to shut down."""
        return
| |
######################################################################
#
# Copyright (C) 2013
# Associated Universities, Inc. Washington DC, USA,
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Library General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
# License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 675 Massachusetts Ave, Cambridge, MA 02139, USA.
#
# Correspondence concerning VLA Pipelines should be addressed as follows:
# Please register and submit helpdesk tickets via: https://help.nrao.edu
# Postal address:
# National Radio Astronomy Observatory
# VLA Pipeline Support Office
# PO Box O
# Socorro, NM, USA
#
######################################################################
# DO TEST GAIN CALIBRATIONS TO SEE IF MORE FLAGGING IS NEEDED AND TO
# ESTABLISH SHORT AND LONG SOLINTS
# (Needs some work to automate; note also that plotcal holds onto
# testgaincal.g in the table cache unless it has been exited using
# the gui, so only plot the final versions)
# --- Setup: pick reference antenna(s) and initialize the first test ---
# NOTE: this is a flat CASA pipeline script (Python 2 `print` statements);
# logprint/runtiming/RefAntHeuristics/int_time etc. are globals provided by
# the surrounding pipeline environment.
logprint ("Starting EVLA_pipe_testgains.py", logfileout='logs/testgains.log')
time_list=runtiming('testgains', 'start')
# QA2 verdict starts optimistic and is downgraded at the end of the script
QA2_testgains='Pass'
print ""
print "Finding a reference antenna for gain calibrations"
print ""
# refantspw is set but not used below -- presumably kept for consistency
# with sibling pipeline scripts; confirm before removing.
refantspw=''
refantfield=calibrator_field_select_string
# NB: would use ms_active below instead of calibrators.ms when selection
# to exclude flagged data is implemented
findrefant=RefAntHeuristics(vis='calibrators.ms',field=refantfield,geometry=True,flagging=True)
RefAntOutput=findrefant.calculate()
# join the four best-ranked antennas into a comma-separated refant list
refAnt=str(RefAntOutput[0])+','+str(RefAntOutput[1])+','+str(RefAntOutput[2])+','+str(RefAntOutput[3])
logprint ("The pipeline will use antenna(s) "+refAnt+" as the reference", logfileout='logs/testgains.log')
logprint ("Doing test gain calibration", logfileout='logs/testgains.log')
# First determine short solint for gain calibrator, and see if it is
# shorter or longer than gain_solint1 (determined on BPd cals)
# Start with solint='int'
syscommand='rm -rf testgaincal.g'
os.system(syscommand)
# start from one integration time; escalate below if too many solutions flag
soltime=int_time
solint='int'
tst_gcal_spw=''
combtime='scan'
# --- Escalating solint search ---
# Try solint = int, then 3*int, then 10*int, then per-scan ('inf'); keep the
# shortest solint whose flagged-solution fraction is acceptable (<= 5%).
# Each improvement replaces testgaincal.g so the final table matches the
# chosen solint.
# NOTE(review): the logprint calls in this section write to
# logs/testBPdcals.log while the rest of this script logs to
# logs/testgains.log -- looks like a copy-paste from the BPd script; confirm
# the intended log file.
flaggedSolnResult1=testgains('calibrators.ms','testgaincal.g',tst_gcal_spw,calibrator_scan_select_string,solint,refAnt,minBL_for_cal,combtime)
logprint("For solint = "+solint+" fraction of flagged solutions = "+str(flaggedSolnResult1['all']['fraction']), logfileout='logs/testBPdcals.log')
logprint("Median fraction of flagged solutions per antenna = "+str(flaggedSolnResult1['antmedian']['fraction']), logfileout='logs/testBPdcals.log')
# treat an empty solution set as fully flagged (fraction = 1.0)
if (flaggedSolnResult1['all']['total'] > 0):
    fracFlaggedSolns1=flaggedSolnResult1['antmedian']['fraction']
else:
    fracFlaggedSolns1=1.0
shortsol2=soltime
if (fracFlaggedSolns1 > 0.05):
    soltime=3.0*int_time
    solint=str(soltime)+'s'
    flaggedSolnResult3=testgains('calibrators.ms','testgaincal3.g',tst_gcal_spw,calibrator_scan_select_string,solint,refAnt,minBL_for_cal,combtime)
    logprint("For solint = "+solint+" fraction of flagged solutions = "+str(flaggedSolnResult3['all']['fraction']), logfileout='logs/testBPdcals.log')
    logprint("Median fraction of flagged solutions per antenna = "+str(flaggedSolnResult3['antmedian']['fraction']), logfileout='logs/testBPdcals.log')
    if (flaggedSolnResult3['all']['total'] > 0):
        fracFlaggedSolns3=flaggedSolnResult3['antmedian']['fraction']
    else:
        fracFlaggedSolns3=1.0
    # adopt the longer solint only if it actually flags fewer solutions
    if (fracFlaggedSolns3 < fracFlaggedSolns1):
        shortsol2=soltime
        syscommand='rm -rf testgaincal.g'
        os.system(syscommand)
        syscommand='mv testgaincal3.g testgaincal.g'
        os.system(syscommand)
    if (fracFlaggedSolns3 > 0.05):
        soltime=10.0*int_time
        solint=str(soltime)+'s'
        flaggedSolnResult10=testgains('calibrators.ms','testgaincal10.g',tst_gcal_spw,calibrator_scan_select_string,solint,refAnt,minBL_for_cal,combtime)
        logprint("For solint = "+solint+" fraction of flagged solutions = "+str(flaggedSolnResult10['all']['fraction']), logfileout='logs/testBPdcals.log')
        logprint("Median fraction of flagged solutions per antenna = "+str(flaggedSolnResult10['antmedian']['fraction']), logfileout='logs/testBPdcals.log')
        if (flaggedSolnResult10['all']['total'] > 0):
            fracFlaggedSolns10=flaggedSolnResult10['antmedian']['fraction']
        else:
            fracFlaggedSolns10=1.0
        if (fracFlaggedSolns10 < fracFlaggedSolns3):
            shortsol2=soltime
            syscommand='rm -rf testgaincal.g'
            os.system(syscommand)
            syscommand='mv testgaincal10.g testgaincal.g'
            os.system(syscommand)
        if (fracFlaggedSolns10 > 0.05):
            # last resort: one solution per scan (solint='inf', no combine)
            solint='inf'
            combtime=''
            flaggedSolnResultScan=testgains('calibrators.ms','testgaincalscan.g',tst_gcal_spw,calibrator_scan_select_string,solint,refAnt,minBL_for_cal,combtime)
            logprint("For solint = "+solint+" fraction of flagged solutions = "+str(flaggedSolnResultScan['all']['fraction']), logfileout='logs/testBPdcals.log')
            logprint("Median fraction of flagged solutions per antenna = "+str(flaggedSolnResultScan['antmedian']['fraction']), logfileout='logs/testBPdcals.log')
            if (flaggedSolnResultScan['all']['total'] > 0):
                fracFlaggedSolnsScan=flaggedSolnResultScan['antmedian']['fraction']
            else:
                fracFlaggedSolnsScan=1.0
            if (fracFlaggedSolnsScan < fracFlaggedSolns10):
                # NOTE(review): unlike the earlier branches this assigns
                # `longsolint` (a global), not `soltime` -- confirm intended.
                shortsol2=longsolint
                syscommand='rm -rf testgaincal.g'
                os.system(syscommand)
                syscommand='mv testgaincalscan.g testgaincal.g'
                os.system(syscommand)
                if (fracFlaggedSolnsScan > 0.05):
                    logprint ("Warning, large fraction of flagged solutions, there might be something wrong with your data", logfileout='logs/testBPdcals.log')
# determine max (shortsol1, shortsol2)
short_solint=max(shortsol1,shortsol2)
new_gain_solint1=str(short_solint)+'s'
logprint ("Using short solint = "+new_gain_solint1, logfileout='logs/testBPdcals.log')
# --- Plot solutions and compute the QA2 score ---
# plotcal is driven CASA-style: task parameters are assigned as globals
# after default('plotcal') and then plotcal() is invoked with no arguments.
# (`async=False` makes this Python-2-only; `async` is reserved in Python 3.)
logprint ("Plotting gain solutions", logfileout='logs/testgains.log')
# three antennas per plot page
nplots=int(numAntenna/3)
if ((numAntenna%3)>0):
    nplots = nplots + 1
# read amplitudes/flags from the chosen gain table to scale the amp plots
tb.open('testgaincal.g')
cpar=tb.getcol('CPARAM')
flgs=tb.getcol('FLAG')
tb.close()
amps=np.abs(cpar)
good=np.logical_not(flgs)
# y-axis maximum taken from the unflagged amplitudes
maxamp=np.max(amps[good])
plotmax=maxamp
# amplitude-vs-time pages, 3 antennas per page
for ii in range(nplots):
    filename='testgaincal_amp'+str(ii)+'.png'
    syscommand='rm -rf '+filename
    os.system(syscommand)
    antPlot=str(ii*3)+'~'+str(ii*3+2)
    default('plotcal')
    caltable='testgaincal.g'
    xaxis='time'
    yaxis='amp'
    poln=''
    field=''
    antenna=antPlot
    spw=''
    timerange=''
    subplot=311
    overplot=False
    clearpanel='Auto'
    iteration='antenna'
    plotrange=[0,0,0,plotmax]
    showflags=False
    plotsymbol='o'
    plotcolor='blue'
    markersize=5.0
    fontsize=10.0
    showgui=False
    figfile=filename
    async=False
    plotcal()
# phase-vs-time pages, fixed -180..180 degree range
for ii in range(nplots):
    filename='testgaincal_phase'+str(ii)+'.png'
    syscommand='rm -rf '+filename
    os.system(syscommand)
    antPlot=str(ii*3)+'~'+str(ii*3+2)
    default('plotcal')
    caltable='testgaincal.g'
    xaxis='time'
    yaxis='phase'
    poln=''
    field=''
    antenna=antPlot
    spw=''
    timerange=''
    subplot=311
    overplot=False
    clearpanel='Auto'
    iteration='antenna'
    plotrange=[0,0,-180,180]
    showflags=False
    plotsymbol='o-'
    plotcolor='blue'
    markersize=5.0
    fontsize=10.0
    showgui=False
    figfile=filename
    async=False
    plotcal()
logprint ("Plotting finished", logfileout='logs/testgains.log')
# Calculate fractions of flagged solutions for final QA2
# no solutions at all -> Fail; >10% median flagged per antenna -> Partial;
# otherwise the initial 'Pass' stands.
flaggedGainSolns=getCalFlaggedSoln('testgaincal.g')
if (flaggedGainSolns['all']['total'] == 0):
    QA2_testgains='Fail'
elif (flaggedGainSolns['antmedian']['fraction'] > 0.1):
    QA2_testgains='Partial'
logprint ("QA2 score: "+QA2_testgains, logfileout='logs/testgains.log')
logprint ("Finished EVLA_pipe_testgains.py", logfileout='logs/testgains.log')
time_list=runtiming('testgains', 'end')
pipeline_save()
| |
import cmd
import requests
import json
import os
import re
import time
import datetime as dt
from functools import wraps, partial
import logging
import threading
import pprint
from chatbot.utils import norm
import uuid
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
try:
import readline
except ImportError:
readline = None
logger = logging.getLogger('hr.chatbot.client')
def get_client_id():
    """Return a ``user@host`` identifier for this client.

    Uses the standard library instead of shelling out: the previous
    ``subprocess.check_output(..., shell=True)`` approach spawned a shell
    needlessly and returned ``bytes`` on Python 3, breaking the format call.
    """
    import getpass
    import socket
    user = getpass.getuser()
    host = socket.gethostname()
    return '{}@{}'.format(user, host)
# Server `ret` error codes -> human-readable messages (see Client.ask).
ERRORS = {
    1: 'Wrong Character Name',
    2: 'No Answer',
    3: 'Invalid Session',
    4: 'Invalid Question'
}
# readline history persisted between interactive sessions
HISTFILE = os.path.expanduser('~/.hr/chatbot/client_history')
HISTFILE_SIZE = 1000
class Client(cmd.Cmd, object):
VERSION = 'v2.0'
    def __init__(self, client_key, botname, client_id=None,
                 username=None,
                 response_listener=None,
                 host='localhost', port='8001', test=False,
                 *args, **kwargs):
        """
        client_key: key of the client id for authentication
        client_id: client id (defaults to user@host)
        username: user name (defaults to user@host)
        botname: bot name
        response_listener: the instance that has implemented on_response
        host: the host name of chatbot server
        port: the port of the chatbot server
        test: whether if the session is a test session or not

        Note: the constructor has side effects -- it pings the server and,
        if reachable, opens a session via do_conn().
        """
        super(Client, self).__init__(*args, **kwargs)
        self.user = username or get_client_id()
        self.client_id = client_id or get_client_id()
        self.client_key = client_key
        # fail fast if a listener is supplied but lacks the callback
        if response_listener:
            assert hasattr(response_listener, 'on_response') and \
                callable(response_listener.on_response)
        self.response_listener = response_listener
        self.test = test
        self.marker = 'default'
        self.run_id = ''
        self.prompt = '[%s]: ' % self.user
        self.botname = botname
        self.chatbot_ip = host
        self.chatbot_port = port
        self.chatbot_url = 'http://{}:{}'.format(
            self.chatbot_ip, self.chatbot_port)
        self.root_url = '{}/{}'.format(self.chatbot_url, self.VERSION)
        self.lang = 'en-US'
        self.session = None
        # bookkeeping for the trace/rate commands
        self.last_response = None
        self.last_response_time = None
        self.last_tier_response = None
        # loopback timer state (see process_indicator)
        self.timer = None
        self.timeout = None
        self.weights = None
        if self.ping():
            self.do_conn()
        else:
            self.stdout.write(
                "Chatbot server is not responding. Server url {}\n".format(self.chatbot_url))
        self.ignore_indicator = False
def retry(times):
def wrap(f):
@wraps(f)
def wrap_f(*args, **kwargs):
error = None
for i in range(times):
try:
return f(*args, **kwargs)
except Exception as ex:
logger.exception(ex)
self = args[0]
self.start_session()
error = ex.message
continue
raise Exception(error)
return wrap_f
return wrap
    def start_session(self, new=False):
        """Open (or resume) a chat session with the server.

        new: when True asks the server to refresh, i.e. force a new session.
        Writes status to stdout; on success stores the session id and
        re-applies any previously set tier weights.
        """
        params = {
            "client_id": self.client_id,
            "Auth": self.client_key,
            "botname": self.botname,
            "user": self.user,
            "test": self.test,
            "refresh": new
        }
        response = None
        try:
            response = requests.get(
                '{}/start_session'.format(self.root_url), params=params)
        except Exception as ex:
            # network failure: report and fall through to the None check
            self.stdout.write('{}\n'.format(ex))
        if response is None:
            self.stdout.write(
                "Can't get session\nPlease check the url {}\n".format(self.chatbot_url))
            return
        if response.status_code != 200:
            self.stdout.write("Request error: {}\n".format(response.status_code))
            return
        session = response.json().get('sid')
        if self.session == session:
            self.stdout.write("Resume session {}\n".format(self.session))
        else:
            self.session = session
            self.stdout.write("Init session {}\n".format(self.session))
            # a fresh session loses server-side weights; re-apply ours
            self.set_weights(self.weights)
    @retry(3)
    def ask(self, question, query=False, request_id=None):
        """Send `question` to the /chat endpoint and process the response.

        Retries (via @retry) restart the session on failure.  Raises an
        Exception mapped through ERRORS when the server returns a nonzero
        `ret`.  A '[loopback]' question re-arms the loopback timer so it
        keeps firing.  Returns the decoded JSON response.
        """
        self.cancel_timer()
        params = {
            "question": question.strip(),
            "session": self.session,
            "lang": self.lang,
            "Auth": self.client_key,
            "query": query,
            "marker": self.marker,
            "run_id": self.run_id,
        }
        headers = {
            # correlation id for server-side tracing
            'X-Request-ID': request_id or str(uuid.uuid1())
        }
        r = requests.get('{}/chat'.format(self.root_url), params=params, headers=headers)
        if r.status_code != 200:
            self.stdout.write("Request error: {}\n".format(r.status_code))
        ret = r.json().get('ret')
        if ret != 0:
            self.stdout.write("QA error: error code {}, botname {}, question {}, lang {}\n".format(
                ret, self.botname, question, self.lang))
            raise Exception("QA error: {}({})".format(ERRORS.get(ret, 'Unknown'), ret))
        response = r.json()
        if question == '[loopback]':
            # keep the loopback cycle alive: schedule the same question again
            self.timer = threading.Timer(self.timeout, self.ask, (question, ))
            self.timer.start()
            logger.info("Start {} timer with timeout {}".format(
                question, self.timeout))
        # response processing failures must not lose the response itself
        try:
            self.process_response(response)
        except Exception as ex:
            logger.exception(ex)
        return response
def feedback(self, text, label, lang):
params = {
"Auth": self.client_key,
"text": text,
"label": label,
"session": self.session,
"lang": lang,
}
r = requests.get('{}/feedback'.format(self.root_url), params=params)
if r.status_code != 200:
self.stdout.write("Request error: {}\n".format(r.status_code))
ret = r.json().get('ret')
if ret != 0:
self.stdout.write("Write feedback failed")
else:
self.stdout.write("Write feedback successed")
def list_chatbot(self):
params = {'Auth': self.client_key, 'lang': self.lang, 'session': self.session}
r = requests.get(
'{}/chatbots'.format(self.root_url), params=params)
chatbots = r.json().get('response')
return chatbots
def list_chatbot_names(self):
params = {'Auth': self.client_key, 'lang': self.lang, 'session': self.session}
r = requests.get(
'{}/bot_names'.format(self.root_url), params=params)
names = r.json().get('response')
return names
    def process_response(self, response):
        """Record and display/dispatch a /chat response.

        Updates last_response bookkeeping, handles any embedded timing
        indicators (e.g. [loopback=...]) unless ignore_indicator is set,
        normalizes the answer text, and either prints it or hands the full
        response to the registered listener on a background thread.
        """
        if response is not None:
            self.last_response = response
            self.last_response_time = dt.datetime.utcnow()
            tier_response = response['default_response']
            if not tier_response:
                return
            answer = tier_response['text']
            if not self.ignore_indicator:
                self.process_indicator(answer)
            # norm() comes from chatbot.utils; presumably strips markup /
            # indicators from the displayed text -- confirm its contract there.
            tier_response['text'] = norm(answer)
            self.last_tier_response = tier_response
            if self.response_listener is None:
                self.stdout.write('{}[by {}]: {}\n'.format(
                    self.botname, tier_response.get('botid'),
                    tier_response.get('text')))
            else:
                # Timer(0, ...) runs the callback on its own thread so a slow
                # listener cannot block the client loop.
                try:
                    threading.Timer(0, self.response_listener.on_response, (self.session, response)).start()
                except Exception as ex:
                    logger.error(ex)
def cancel_timer(self):
if self.timer is not None:
self.timer.cancel()
self.timer = None
logger.info("Timer canceled")
else:
logger.debug("Timer is None")
def preloop(self):
if readline and os.path.exists(HISTFILE):
readline.read_history_file(HISTFILE)
def postloop(self):
if readline:
readline.set_history_length(HISTFILE_SIZE)
readline.write_history_file(HISTFILE)
def emptyline(self):
pass
def default(self, line):
try:
if line:
self.ask(line)
except Exception as ex:
self.stdout.write('{}\n'.format(ex))
def do_list(self, line):
chatbots = []
try:
chatbots = self.list_chatbot()
chatbots = ['{}/{}: weight: {} level: {} dynamic level: {}'.format(
n, c, w, l, d) for n, c, w, l, d in chatbots]
self.stdout.write('\n'.join(chatbots))
self.stdout.write('\n')
except Exception as ex:
self.stdout.write('{}\n'.format(ex))
def help_list(self):
self.stdout.write("List chatbot names\n")
do_l = do_list
help_l = help_list
def do_select(self, line):
try:
names = self.list_chatbot_names()
if line in names:
self.botname = line
self.start_session()
self.stdout.write("Select chatbot {}\n".format(self.botname))
else:
self.stdout.write("No such chatbot {}\n".format(line))
except Exception as ex:
self.stdout.write('{}\n'.format(ex))
return
def help_select(self):
self.stdout.write("Select chatbot\n")
def do_conn(self, line=None):
if line:
try:
self.chatbot_ip, self.chatbot_port = line.split(':')
self.chatbot_url = 'http://{}:{}'.format(
self.chatbot_ip, self.chatbot_port)
self.root_url = '{}/{}'.format(self.chatbot_url, self.VERSION)
except Exception:
self.stdout.write("Wrong conn argument\n")
self.help_conn()
return
self.stdout.write("Connecting {}\n".format(self.chatbot_url))
self.start_session()
def help_conn(self):
s = """
Connect to chatbot server
Syntax: conn [url:port]
For example, conn
conn 127.0.0.1:8001
"""
self.stdout.write(s)
def do_ip(self, line):
self.chatbot_ip = line
self.chatbot_url = 'http://{}:{}'.format(
self.chatbot_ip, self.chatbot_port)
self.root_url = '{}/{}'.format(self.chatbot_url, self.VERSION)
def help_ip(self):
s = """
Set the IP address of chatbot server
Syntax: ip xxx.xxx.xxx.xxx
For example, ip 127.0.0.1
"""
self.stdout.write(s)
def do_port(self, line):
self.chatbot_port = line
self.chatbot_url = 'http://{}:{}'.format(
self.chatbot_ip, self.chatbot_port)
self.root_url = '{}/{}'.format(self.chatbot_url, self.VERSION)
def help_port(self):
s = """
Set the port of chatbot server
Syntax: port xxx
For example, port 8001
"""
self.stdout.write(s)
def do_q(self, line):
self.stdout.write("Bye\n")
return True
def help_q(self):
self.stdout.write("Quit\n")
def do_lang(self, line):
lang = line.strip()
if lang in ['en-US']:
self.lang = lang
self.stdout.write("Set lang to {}\n".format(self.lang))
else:
self.stdout.write(
"Current lang {}. \nSet lang by 'lang <languange code>'\n".format(self.lang))
def help_lang(self):
self.stdout.write("Set language.\n")
def do_c(self, line):
self.reset_session()
def reset_session(self):
try:
params = {
"session": "{}".format(self.session),
'Auth': self.client_key
}
r = requests.get(
'{}/reset_session'.format(self.root_url), params=params)
ret = r.json().get('ret')
response = r.json().get('response')
self.stdout.write(response)
self.stdout.write('\n')
except Exception as ex:
self.stdout.write('{}\n'.format(ex))
def help_c(self):
self.stdout.write("Clean the memory of the dialog\n")
def set_marker(self, marker):
if marker:
self.marker = marker
def set_run_id(self, run_id):
if run_id:
self.run_id = run_id
def do_rw(self, line):
try:
line = "".join(line.split())
if '=' not in line and line != 'reset':
line = ','.join(['{}={}'.format(i, w) for i, w in enumerate(line.split(','))])
params = {
"param": line,
"Auth": self.client_key,
"lang": self.lang,
"session": self.session
}
r = requests.get(
'{}/set_weights'.format(self.root_url), params=params)
ret = r.json().get('ret')
response = r.json().get('response')
self.stdout.write(response)
self.stdout.write('\n')
if not ret:
self.help_rw()
logger.warn("Set weights failed")
else:
logger.warn("Set weights successfully")
except Exception as ex:
self.stdout.write('{}\n'.format(ex))
def get_weights(self):
chatbots = self.list_chatbot()
weights = {c: w for c, w, l, d in chatbots}
return weights
def set_weights(self, weights):
self.weights = weights
if weights is not None:
self.do_rw(weights)
else:
self.do_rw('reset')
def help_rw(self):
s = """
Update the weights of the current chain.
rw w1,w2,w3,...
---------------
Set the weights to tiers accordingly.
For example, rw .2, .4, .5
rw index1=w1,[index2=w2],...
--------------------------
Set the weight of tier with index1 to w1, and set the weight of tier with
index2 to w2. And set 0 weight to other tiers. The index starts from 0.
For example, rw 0=0.1, 2=0.3
rw id1=w1,[id2=w2],...
--------------------------
Set the weight of tier with id=id1 to w1, and set the weight of tier with
id=id2 to w2. And set 0 weight to other tiers.
For example, rw generic=0.1
rw reset
--------
Reset the weight of tiers to their defaults.
"""
self.stdout.write(s)
def ping(self):
try:
r = requests.get('{}/ping'.format(self.root_url))
response = r.json().get('response')
if response == 'pong':
return True
except Exception:
return False
def do_ping(self, line):
if self.ping():
self.stdout.write('pong')
self.stdout.write('\n')
def help_ping(self):
self.stdout.write('Ping the server\n')
def do_trace(self, line):
if self.last_tier_response:
trace = self.last_tier_response.get('trace', None)
if trace:
if isinstance(trace, list):
trace = ['{}: {}: {}'.format(x, y, z) for x, y, z in trace]
trace = '\n'.join(trace)
self.stdout.write(trace)
self.stdout.write('\n')
do_t = do_trace
def help_trace(self):
self.stdout.write('Print the trace of last reponse\n')
help_t = help_trace
def _rate(self, rate):
params = {
"session": self.session,
"rate": rate,
"index": -1,
"Auth": self.client_key
}
r = requests.get('{}/rate'.format(self.root_url), params=params)
ret = r.json().get('ret')
response = r.json().get('response')
return ret, response
def do_gd(self, line):
ret, response = self._rate('good')
if ret:
self.stdout.write("[Thanks for rating]\n")
else:
self.stdout.write("[Rating failed]\n")
def help_gd(self):
self.stdout.write('Rate the last response as GOOD result\n')
def do_bd(self, line):
ret, response = self._rate('bad')
if ret:
self.stdout.write("[Thanks for rating]\n")
else:
self.stdout.write("[Rating failed]\n")
def help_bd(self):
self.stdout.write('Rate the last response as BAD result\n')
def do_dump(self, line):
params = {
"session": self.session,
"Auth": self.client_key
}
r = requests.get(
'{}/session_history'.format(self.root_url), params=params)
if r.status_code == 200:
fname = '{}.csv'.format(self.session)
with open(fname, 'w') as f:
f.write(r.text)
self.stdout.write('Done\n')
self.stdout.write('Dump to {}\n'.format(fname))
else:
self.stdout.write('Failed, error code {}\n'.format(r.status_code))
def help_dump(self):
self.stdout.write('Dump chat history\n')
do_d = do_dump
help_d = help_dump
def do_summary(self, line):
if line:
try:
lookback = int(line)
except Exception as ex:
self.stdout.write('{}\n'.format(ex))
return
else:
lookback = 7
params = {
"Auth": self.client_key,
"lookback": lookback
}
r = requests.get('{}/stats'.format(self.root_url), params=params)
ret = r.json().get('ret')
response = r.json().get('response')
if ret:
self.stdout.write(
'Customers satisfaction degree {customers_satisfaction_degree:.4f}\n'
'Number of records {number_of_records}\n'
'Number of rates {number_of_rates}\n'
'Number of good rates {number_of_good_rates}\n'
'Number of bad rates {number_of_bad_rates}\n'.format(**response))
else:
self.stdout.write('{}\n'.format(response['err_msg']))
def help_summary(self):
self.stdout.write('Report the summary of the chat history\n')
self.stdout.write('Usage: summary [lookback days]\n')
self.stdout.write('lookback days: -1 means all\n')
    def process_indicator(self, reply):
        """Scan `reply` for bracketed indicators and arm the loopback timer.

        '[loopback=0]' cancels any pending timer.  '[name=millis]' sets
        self.timeout (seconds) and schedules ask('[name]') after it; a bare
        '[name]' reuses the last timeout.  Only the final indicator in the
        reply wins.
        NOTE(review): the local name `cmd` shadows the imported `cmd`
        module (harmless here, but confusing); the r'\[.*\]' pattern is
        greedy, so two indicators on one line match as a single span --
        confirm that is intended.
        """
        cmd, timeout = None, None
        for match in re.findall(r'\[.*\]', reply):
            match = match.strip()
            match = match.replace(' ', '')
            if match == '[loopback=0]':
                self.cancel_timer()
                return
            match = match.replace(']', '')
            match = match.replace('[', '')
            if '=' in match:
                cmd, timeout = match.split('=')
                # server sends milliseconds; Timer wants seconds
                self.timeout = float(timeout)/1000
            else:
                cmd = match
            cmd = '[{}]'.format(cmd)
        if self.timeout is not None and cmd is not None:
            self.cancel_timer()
            self.timer = threading.Timer(self.timeout, self.ask, (cmd, ))
            self.timer.start()
            logger.info("Start {} timer with timeout {}".format(
                cmd, self.timeout))
def do_list_sessions(self, line):
params = {
"Auth": self.client_key
}
r = requests.get(
'{}/sessions'.format(self.root_url), params=params)
sessions = r.json().get('response')
if sessions:
self.stdout.write('sessions:\n{}\n'.format('\n'.join(sessions)))
else:
self.stdout.write('no session\n')
do_ls = do_list_sessions
def help_list_sessions(self):
self.stdout.write('List the current sessions\n')
help_ls = help_list_sessions
def do_ns(self, line):
self.start_session(True)
def help_ns(self):
self.stdout.write('Start new session\n')
def do_user(self, line=None):
self.user = line or get_client_id()
self.prompt = '[%s]: ' % self.user
self.start_session()
set_user = do_user
def help_user(self):
s = """
Change user
Syntax: user <user name>
"""
self.stdout.write(s)
@retry(1)
def do_sc(self, line):
try:
for tok in line.split(','):
k, v = tok.split('=')
self.stdout.write('{}={}\n'.format(k, v))
except Exception as ex:
self.stdout.write('Wrong format\n')
self.help_sc()
return
params = {
"Auth": self.client_key,
"context": line,
"session": self.session
}
r = requests.get(
'{}/set_context'.format(self.root_url), params=params)
response = r.json().get('response')
self.stdout.write(response)
self.stdout.write('\n')
set_context = do_sc
def help_sc(self):
s = """
Set chatbot context
Syntax: sc key=value,key2=value2,...
"""
self.stdout.write(s)
@retry(1)
def do_rc(self, line):
params = {
"Auth": self.client_key,
"keys": line,
"session": self.session
}
r = requests.get(
'{}/remove_context'.format(self.root_url), params=params)
response = r.json().get('response')
self.stdout.write(response)
self.stdout.write('\n')
def help_rc(self):
s = """
Remove chatbot context
Syntax: rc key,key2,key3,...
"""
self.stdout.write(s)
remove_context = do_rc
def get_context(self):
if not self.session:
self.start_session()
params = {
"Auth": self.client_key,
"session": self.session,
"lang": self.lang,
}
response = requests.get(
'{}/get_context'.format(self.root_url), params=params)
return response.json().get('response')
@retry(1)
def do_gc(self, line=None):
response = self.get_context()
self.stdout.write(pprint.pformat(response))
self.stdout.write('\n')
def help_gc(self):
self.stdout.write('Get chatbot context\n')
def do_said(self, line):
if not self.session:
self.start_session()
params = {
"Auth": self.client_key,
"session": self.session,
"message": line
}
r = requests.get(
'{}/said'.format(self.root_url), params=params)
ret = r.json().get('ret')
response = r.json().get('response')
self.stdout.write(response)
self.stdout.write('\n')
def help_said(self):
self.stdout.write('Set the chatbot state as the message was said\n')
def set_config(self, **kwargs):
params = {
"Auth": self.client_key,
}
params.update(kwargs)
r = requests.get(
'{}/update_config'.format(self.root_url), params=params)
ret = r.json().get('ret')
response = r.json().get('response')
logger.info(response)
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    # auth key comes from the environment; the literal is only a dev default
    HR_CHATBOT_AUTHKEY = os.environ.get('HR_CHATBOT_AUTHKEY', 'AAAAB3NzaC')
    if len(sys.argv)>1:
        # first CLI argument selects the bot by name
        client = Client(HR_CHATBOT_AUTHKEY, botname=sys.argv[1])
    else:
        # NOTE(review): Client.__init__ has no default for the required
        # `botname` parameter, so this branch raises TypeError -- confirm
        # the intended default bot name.
        client = Client(HR_CHATBOT_AUTHKEY)
    client.cmdloop()
| |
from app import synapseCRM
from flask import render_template, Markup, flash, request, redirect, url_for, make_response, Response
import csv
from app.model import Database, Hash, DataSetter
from app.lib import Signup, Login, Contacts, Dashboard, Analysis,Support, Marketing
# Cookie-name constants used by the route handlers below.
# (Removed the `global keyId` / `global EID` statements: `global` at module
# scope is a no-op -- these are plain module-level constants either way.)
keyId = '_USER_'  # cookie holding the user's RFID / user key
EID = '_E_'       # cookie checked together with keyId by VerifyRfid
NAME = '_NAME_'   # cookie holding the display name shown in templates
# ----- INDEX ------------------------------------------------------
@synapseCRM.route('/', methods=['GET', 'POST'])
def index():
    """Landing page: dashboard when the RFID cookie pair validates,
    login-form handling on POST, otherwise the login page."""
    # VerifyRfid checks the user-key / EID cookie pair against the database
    if Database().VerifyRfid(DataSetter.getCookie(keyId), DataSetter.getCookie(EID)) == True:
        return render_template('dashboard.html', user=DataSetter.getCookie(NAME), dashboard = Dashboard())
    elif request.method == "POST":
        # credential check is delegated to the Login helper
        return Login.auth_login(request.form['email'], request.form['password'], Dashboard())
    else:
        return render_template('login.html', msg="Sign in to start your session", user=DataSetter.getCookie(NAME))
@synapseCRM.route('/dashboard', methods=['GET', 'POST'])
def dashboard():
    """Render the dashboard when the session cookies validate, else login."""
    # VerifyRfid checks the user-key / EID cookie pair against the database
    if Database().VerifyRfid(DataSetter.getCookie(keyId), DataSetter.getCookie(EID)) == True:
        return render_template('dashboard.html', user=DataSetter.getCookie(NAME), dashboard = Dashboard())
    else:
        return render_template('login.html')
@synapseCRM.route('/marketing', methods=['GET', 'POST'])
def marketing():
    """Marketing page; unauthenticated visitors get the login page."""
    if Database().VerifyRfid(DataSetter.getCookie(keyId), DataSetter.getCookie(EID)) != True:
        return render_template('login.html')
    return render_template('marketing.html', user=DataSetter.getCookie(NAME))
@synapseCRM.route('/marketing/<gets>', methods=['GET'])
def marketing_api(gets):
    """JSON API behind the marketing page; redirects to login when unauthenticated."""
    if Database().VerifyRfid(DataSetter.getCookie(keyId), DataSetter.getCookie(EID)) != True:
        return redirect(url_for('login'))
    payload = Marketing().result(gets)
    return Response(response=payload, status=200, mimetype="application/json")
@synapseCRM.route('/main_analysis', methods=['GET'])
def main_analysis():
    """Analysis landing page; unauthenticated visitors get the login page."""
    if Database().VerifyRfid(DataSetter.getCookie(keyId), DataSetter.getCookie(EID)) != True:
        return render_template('login.html')
    return render_template('analysis.html', user=DataSetter.getCookie(NAME))
@synapseCRM.route('/analysis/<gets>', methods=['GET'])
def analysis(gets):
    """Analysis endpoint: 'main' renders the page, anything else returns JSON."""
    if Database().VerifyRfid(DataSetter.getCookie(keyId), DataSetter.getCookie(EID)) != True:
        return redirect(url_for('login'))
    engine = Analysis()
    if gets == "main":
        return render_template('analysis.html')
    return Response(response=engine.result(gets), status=200, mimetype="application/json")
@synapseCRM.route('/testdash')
def testdash():
    """Developer route: render the dashboard template without any auth check."""
    # Fix: removed a stray dead `pass` statement that preceded the return.
    return render_template('dashboard.html')
@synapseCRM.route('/checkcookie')
def cookiecheck():
    """Debug route: report which user the current RFID cookie belongs to.

    Fixes: removed a stray dead `pass`; corrected the "THis" typo in the
    reply text.
    """
    if Database().VerifyRfid(DataSetter.getCookie(keyId), DataSetter.getCookie(EID)) == True:
        rfid = DataSetter.getCookie(keyId)
        return "This is the RFID Of the current user :" + rfid + " AND Username is : " + Database().get_username(rfid)
    else:
        return "Cookie not set for any user, set it by logging in"
# ------- LOGIN AND SIGNUP -----------------------------------------
@synapseCRM.route('/signup', methods=['GET', 'POST'])
def signup():
    """Account signup: validate and persist the form on POST, else show the form.

    Fix: removed a stray trailing semicolon and the redundant one-use locals.
    """
    if request.method != "POST":
        return render_template('signup.html')
    status = Signup().ValidatePush(
        username=request.form['username'],
        email=request.form['email'],
        password=request.form['password'],
        companyName=request.form['companyName'],
        # 'bussinessType' spelling kept: the HTML form posts this field name.
        bussinessType=request.form['bussinessType'],
        category=request.form['category'])
    if str(status) == '1':
        return render_template('signup_successful.html')
    return render_template('signup.html', msg="Please check again.")
@synapseCRM.route('/logout', methods=['GET'])
def logout():
    """Log out by clearing the user cookie and returning the login page."""
    return DataSetter.delCookie('login.html', keyId)
@synapseCRM.route('/login', methods=['GET','POST'])
def login():
    """Log a user in on POST; otherwise render the login form.

    Bug fix: the POST branch computed the authentication result but never
    returned it, so Flask raised "view did not return a response".
    """
    # TODO: if a user cookie is already present, redirect to the dashboard.
    if DataSetter.getCookie(keyId):
        pass  # return redirect(url_for('/'))
    if request.method == "POST":
        dashboard = Dashboard()
        user = request.form['email']
        pwd = request.form['password']
        # auth_user status: 1 is success, 0 wrong password, 0.2 invalid user
        return Login().auth_user(user, pwd, dashboard)
    else:
        return render_template('login.html')
# ---------- CONTACTS -------------------------------------------------------
@synapseCRM.route('/contacts')
def contacts():
    """List all contacts for an authenticated user."""
    if Database().VerifyRfid(DataSetter.getCookie(keyId), DataSetter.getCookie(EID)) != True:
        return render_template('login.html')
    rows = Contacts().get_contacts()
    return render_template('contacts.html', rows=rows, user=DataSetter.getCookie(NAME))
@synapseCRM.route('/contact/add')
def add_contact():
    """Render the add-contact form for an authenticated user."""
    if Database().VerifyRfid(DataSetter.getCookie(keyId), DataSetter.getCookie(EID)) != True:
        return render_template('login.html')
    return render_template('addcontact.html', user=DataSetter.getCookie(NAME))
@synapseCRM.route('/contact/edit/<int:cid>')
def edit_contact(cid):
    """Render the edit form pre-filled with contact `cid`."""
    if Database().VerifyRfid(DataSetter.getCookie(keyId), DataSetter.getCookie(EID)) != True:
        return render_template('login.html')
    return render_template('editcontact.html', contact=Contacts().get_contact(cid), user=DataSetter.getCookie(NAME))
# ---- SALES AND SUPPORT ----------------------
@synapseCRM.route('/sales')
def sales():
    """Sales page: same contact rows as /contacts, rendered in the sales view."""
    if Database().VerifyRfid(DataSetter.getCookie(keyId), DataSetter.getCookie(EID)) != True:
        return render_template('login.html')
    rows = Contacts().get_contacts()
    return render_template('sales.html', rows=rows, user=DataSetter.getCookie(NAME))
@synapseCRM.route('/support')
def support():
    """List all support cases for an authenticated user."""
    if Database().VerifyRfid(DataSetter.getCookie(keyId), DataSetter.getCookie(EID)) != True:
        return render_template('login.html')
    rows = Support().get_cases()
    return render_template('support.html', rows=rows, user=DataSetter.getCookie(NAME))
@synapseCRM.route('/case/<int:caseid>')
def case(caseid):
    """Show the detail page for support case `caseid`."""
    if Database().VerifyRfid(DataSetter.getCookie(keyId), DataSetter.getCookie(EID)) != True:
        return render_template('login.html')
    return render_template("casedetails.html", case=Support().get_case(caseid), user=DataSetter.getCookie(NAME))
@synapseCRM.route('/case/add')
def add_case():
    """Render the add-case form for an authenticated user."""
    if Database().VerifyRfid(DataSetter.getCookie(keyId), DataSetter.getCookie(EID)) != True:
        return render_template('login.html')
    return render_template('addcase.html', user=DataSetter.getCookie(NAME))
@synapseCRM.route('/case/edit/<int:caseid>')
def edit_case(caseid):
    """Render the edit form pre-filled with support case `caseid`."""
    if Database().VerifyRfid(DataSetter.getCookie(keyId), DataSetter.getCookie(EID)) != True:
        return render_template('login.html')
    return render_template('editcase.html', case=Support().get_case(caseid), user=DataSetter.getCookie(NAME))
#----------------------------------------------------------
@synapseCRM.route('/ajax',methods = ['POST'])
def ajax():
    """AJAX dispatcher for contact and support-case CRUD.

    The 'flag' form field selects the operation; returns '1' on success
    and '0' for unknown flags or unauthenticated callers.
    """
    if Database().VerifyRfid(DataSetter.getCookie(keyId), DataSetter.getCookie(EID)) != True:
        return '0'
    form = request.form
    flag = form['flag']
    if flag == 'add_contact':
        Contacts().add_contact(form['first_name'], form['last_name'], form['city'],
                               form['state'], form['country'], form['occupation'], form['phone'])
        return '1'
    if flag == 'edit_contact':
        Contacts().edit_contact(form['cid'], form['first_name'], form['last_name'], form['city'],
                                form['state'], form['country'], form['occupation'], form['phone'])
        return '1'
    if flag == 'delete_contact':
        Contacts().delete_contact(form['cid'])
        return '1'
    # ----------------------------------
    if flag == 'add_case':
        Support().add_case(form['customerid'], form['name'], form['product'], form['info'])
        return '1'
    if flag == 'edit_case':
        Support().edit_case(form['caseid'], form['customerid'], form['name'], form['product'], form['info'])
        return '1'
    if flag == 'delete_case':
        Support().delete_case(form['caseid'])
        return '1'
    return '0'
# ------ TESTING STUFF --------------------------------------------------------
@synapseCRM.route('/testsetup', methods = ['GET', 'POST']) # Setup the Database During Setup
def db_setup():
    """One-time database setup endpoint used during installation.

    Bug fix: a POST whose flag was not '1' previously fell off the end of
    the view and returned None (which Flask turns into a 500 error); it now
    returns '0' to signal failure, matching the ajax view's convention.
    """
    if request.method == 'POST':
        flag = request.form['flag']
        db = request.form['db']
        if flag == '1':
            status = Database(db=db).initial_setup()
            return str(status)
        return '0'
    else:
        return render_template('setuptest.html')
@synapseCRM.route('/customer')
def customerportal():
    """Customer-facing portal page (no RFID cookie check)."""
    return render_template('customer.html', user="customer")
@synapseCRM.errorhandler(404)
def page_not_found(error):
    """Render the custom 404 page with the proper status code."""
    return render_template('notfound.html'), 404
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The InverseGamma distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
__all__ = [
"InverseGamma",
"InverseGammaWithSoftplusConcentrationRate",
]
class InverseGamma(distribution.Distribution):
  """InverseGamma distribution.
  The `InverseGamma` distribution is defined over positive real numbers using
  parameters `concentration` (aka "alpha") and `rate` (aka "beta").
  #### Mathematical Details
  The probability density function (pdf) is,
  ```none
  pdf(x; alpha, beta, x > 0) = x**(-alpha - 1) exp(-beta / x) / Z
  Z = Gamma(alpha) beta**-alpha
  ```
  where:
  * `concentration = alpha`,
  * `rate = beta`,
  * `Z` is the normalizing constant, and,
  * `Gamma` is the [gamma function](
    https://en.wikipedia.org/wiki/Gamma_function).
  The cumulative density function (cdf) is,
  ```none
  cdf(x; alpha, beta, x > 0) = GammaInc(alpha, beta / x) / Gamma(alpha)
  ```
  where `GammaInc` is the [upper incomplete Gamma function](
  https://en.wikipedia.org/wiki/Incomplete_gamma_function).
  The parameters can be intuited via their relationship to mean and stddev,
  ```none
  concentration = alpha = (mean / stddev)**2
  rate = beta = mean / stddev**2
  ```
  Distribution parameters are automatically broadcast in all functions; see
  examples for details.
  WARNING: This distribution may draw 0-valued samples for small concentration
  values. See note in `tf.random_gamma` docstring.
  #### Examples
  ```python
  dist = InverseGamma(concentration=3.0, rate=2.0)
  dist2 = InverseGamma(concentration=[3.0, 4.0], rate=[2.0, 3.0])
  ```
  """
  def __init__(self,
               concentration,
               rate,
               validate_args=False,
               allow_nan_stats=True,
               name="InverseGamma"):
    """Construct InverseGamma with `concentration` and `rate` parameters.
    The parameters `concentration` and `rate` must be shaped in a way that
    supports broadcasting (e.g. `concentration + rate` is a valid operation).
    Args:
      concentration: Floating point tensor, the concentration params of the
        distribution(s). Must contain only positive values.
      rate: Floating point tensor, the inverse scale params of the
        distribution(s). Must contain only positive values.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.
    Raises:
      TypeError: if `concentration` and `rate` are different dtypes.
    """
    parameters = locals()
    with ops.name_scope(name, values=[concentration, rate]) as ns:
      # Positivity assertions are only wired into the graph when
      # validate_args is set; otherwise the dependency list is empty.
      with ops.control_dependencies([
          check_ops.assert_positive(concentration),
          check_ops.assert_positive(rate),
      ] if validate_args else []):
        self._concentration = array_ops.identity(
            concentration, name="concentration")
        self._rate = array_ops.identity(rate, name="rate")
        contrib_tensor_util.assert_same_float_dtype(
            [self._concentration, self._rate])
    super(InverseGamma, self).__init__(
        dtype=self._concentration.dtype,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        is_continuous=True,
        reparameterization_type=distribution.NOT_REPARAMETERIZED,
        parameters=parameters,
        graph_parents=[self._concentration,
                       self._rate],
        name=ns)
  @staticmethod
  def _param_shapes(sample_shape):
    # Both parameters take the same shape as the requested sample shape.
    return dict(
        zip(("concentration", "rate"), ([ops.convert_to_tensor(
            sample_shape, dtype=dtypes.int32)] * 2)))
  @property
  def concentration(self):
    """Concentration parameter."""
    return self._concentration
  @property
  def rate(self):
    """Rate parameter."""
    return self._rate
  def _batch_shape_tensor(self):
    # Batch shape is the (dynamic) broadcast of the two parameter shapes.
    return array_ops.broadcast_dynamic_shape(
        array_ops.shape(self.concentration),
        array_ops.shape(self.rate))
  def _batch_shape(self):
    # Static counterpart of _batch_shape_tensor.
    return array_ops.broadcast_static_shape(
        self.concentration.get_shape(),
        self.rate.get_shape())
  def _event_shape_tensor(self):
    # Scalar event shape: each draw is a single value.
    return constant_op.constant([], dtype=dtypes.int32)
  def _event_shape(self):
    return tensor_shape.scalar()
  @distribution_util.AppendDocstring(
      """Note: See `tf.random_gamma` docstring for sampling details and
      caveats.""")
  def _sample_n(self, n, seed=None):
    # If X ~ Gamma(concentration, rate) then 1/X ~ InverseGamma, so we
    # draw Gamma samples and take the reciprocal.
    return 1. / random_ops.random_gamma(
        shape=[n],
        alpha=self.concentration,
        beta=self.rate,
        dtype=self.dtype,
        seed=seed)
  def _log_prob(self, x):
    return self._log_unnormalized_prob(x) - self._log_normalization()
  def _prob(self, x):
    return math_ops.exp(self._log_prob(x))
  def _log_cdf(self, x):
    return math_ops.log(self._cdf(x))
  def _cdf(self, x):
    x = self._maybe_assert_valid_sample(x)
    # Note that igammac returns the upper regularized incomplete gamma
    # function Q(a, x), which is what we want for the CDF.
    return math_ops.igammac(self.concentration, self.rate / x)
  def _log_unnormalized_prob(self, x):
    # log of x**(-alpha - 1) * exp(-beta / x), i.e. the pdf without Z.
    x = self._maybe_assert_valid_sample(x)
    return -(1. + self.concentration) * math_ops.log(x) - self.rate / x
  def _log_normalization(self):
    # log Z = lgamma(alpha) - alpha * log(beta).
    return (math_ops.lgamma(self.concentration)
            - self.concentration * math_ops.log(self.rate))
  def _entropy(self):
    return (self.concentration
            + math_ops.log(self.rate)
            + math_ops.lgamma(self.concentration)
            - ((1. + self.concentration) *
               math_ops.digamma(self.concentration)))
  @distribution_util.AppendDocstring(
      """The mean of an inverse gamma distribution is
      `rate / (concentration - 1)`, when `concentration > 1`, and `NaN`
      otherwise. If `self.allow_nan_stats` is `False`, an exception will be
      raised rather than returning `NaN`""")
  def _mean(self):
    mean = self.rate / (self.concentration - 1.)
    if self.allow_nan_stats:
      # Undefined batch members (concentration <= 1) are replaced by NaN.
      nan = array_ops.fill(
          self.batch_shape_tensor(),
          np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
          name="nan")
      return array_ops.where(self.concentration > 1., mean, nan)
    else:
      return control_flow_ops.with_dependencies([
          check_ops.assert_less(
              array_ops.ones([], self.dtype), self.concentration,
              message="mean undefined when any concentration <= 1"),
      ], mean)
  @distribution_util.AppendDocstring(
      """Variance for inverse gamma is defined only for `concentration > 2`. If
      `self.allow_nan_stats` is `False`, an exception will be raised rather
      than returning `NaN`.""")
  def _variance(self):
    var = (math_ops.square(self.rate)
           / math_ops.square(self.concentration - 1.)
           / (self.concentration - 2.))
    if self.allow_nan_stats:
      # Undefined batch members (concentration <= 2) are replaced by NaN.
      nan = array_ops.fill(
          self.batch_shape_tensor(),
          np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
          name="nan")
      return array_ops.where(self.concentration > 2., var, nan)
    else:
      return control_flow_ops.with_dependencies([
          check_ops.assert_less(
              constant_op.constant(2., dtype=self.dtype),
              self.concentration,
              message="variance undefined when any concentration <= 2"),
      ], var)
  @distribution_util.AppendDocstring(
      """The mode of an inverse gamma distribution is `rate / (concentration +
      1)`.""")
  def _mode(self):
    return self.rate / (1. + self.concentration)
  def _maybe_assert_valid_sample(self, x):
    # The dtype check always runs; the positivity assertion is only
    # attached to the graph when validate_args is set.
    contrib_tensor_util.assert_same_float_dtype(
        tensors=[x], dtype=self.dtype)
    if not self.validate_args:
      return x
    return control_flow_ops.with_dependencies([
        check_ops.assert_positive(x),
    ], x)
class InverseGammaWithSoftplusConcentrationRate(InverseGamma):
  """`InverseGamma` with softplus of `concentration` and `rate`."""
  def __init__(self,
               concentration,
               rate,
               validate_args=False,
               allow_nan_stats=True,
               name="InverseGammaWithSoftplusConcentrationRate"):
    # Softplus maps the unconstrained inputs to positive values before
    # they reach the base class's positivity requirements.
    parameters = locals()
    with ops.name_scope(name, values=[concentration, rate]) as ns:
      super(InverseGammaWithSoftplusConcentrationRate, self).__init__(
          concentration=nn.softplus(concentration,
                                    name="softplus_concentration"),
          rate=nn.softplus(rate, name="softplus_rate"),
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=ns)
    # Record the caller's (pre-softplus) parameters, overriding what the
    # base constructor stored.
    self._parameters = parameters
| |
#
# See the LICENSE file
#
import datetime
import struct
import cartridge
import material
#
# CartridgeManager is used to create, encrypt and decrypt Stratasys cartridge
#
# Typical structure on the EEPROM
# offset : len
# 0x00 : 0x08 - Canister serial number (double) (part of the key, written *on* the canister as S/N)
# 0x08 : 0x08 - Material type (double)
# 0x10 : 0x14 - Manufacturing lot (string)
# 0x24 : 0x02 - Version? (must be 1)
# 0x28 : 0x08 - Manufacturing date (date yymmddhhmmss)
# 0x30 : 0x08 - Use date (date yymmddhhmmss)
# 0x38 : 0x08 - Initial material quantity (double)
# 0x40 : 0x02 - Plain content CRC (uint16)
# 0x46 : 0x02 - Crypted content CRC (uint16)
# 0x48 : 0x08 - Key (unencrypted, 8 bytes)
# 0x50 : 0x02 - Key CRC (unencrypted, uint16)
# 0x58 : 0x08 - Current material quantity (double)
# 0x60 : 0x02 - Current material quantity crypted CRC (unencrypted, uint16)
# 0x62 : 0x02 - Current material quantity CRC (unencrypted, uint16)
# ~~~~~~~~~~~~~
# 14 0x00: 0x48 - crypted/plaintext (start, len)
# 15 0x58: 0x10 - unknown, looks like DEX IV, but why?
# 16 0x48: 0x10 - ^
class Manager:
    """Create, encrypt and decrypt Stratasys cartridge EEPROM images.

    Combines a crypto backend (encrypt/decrypt) with a checksum backend to
    turn cartridge objects into encrypted EEPROM byte blobs and back; the
    byte layout is documented in the module-level comment above.
    NOTE(review): this module uses Python 2 constructs (`buffer`,
    `str.decode("hex")`) and will not run unmodified on Python 3.
    """
    def __init__(self, crypto, checksum):
        # crypto: provides encrypt(key, data) / decrypt(key, data)
        # checksum: provides checksum(data) -> uint16
        self.crypto = crypto
        self.checksum = checksum
    #
    # Encode a cartridge object into a data that can be burn onto a cartridge
    #
    def encode(self, machine_number, eeprom_uid, cartridge):
        """Pack then encrypt `cartridge` for the given machine/EEPROM pair."""
        cartridge_packed = self.pack(cartridge)
        cartridge_crypted = self.encrypt(machine_number, eeprom_uid, cartridge_packed)
        return cartridge_crypted
    #
    # Decode a eeprom to a cartridge object
    #
    def decode(self, machine_number, eeprom_uid, cartridge_crypted):
        """Decrypt then unpack an EEPROM image into a cartridge object."""
        cartridge_packed = self.decrypt(machine_number, eeprom_uid, cartridge_crypted)
        cartridge = self.unpack(cartridge_packed)
        return cartridge
    #
    # Pack a cartridge into a format suitable to be encrypted then burn
    # onto the cartridge EEPROM
    #
    def pack(self, cartridge):
        """Serialize `cartridge` into a 0x71-byte plaintext EEPROM buffer.

        Also computes the plaintext, key and current-quantity checksums at
        the offsets given in the module header.
        """
        eeprom = bytearray(0x71)
        # serial number
        struct.pack_into("<d", eeprom, 0x0, cartridge.serial_number)
        # material id
        struct.pack_into("<d", eeprom, 0x08, material.get_id_from_name(cartridge.material_name))
        # manufacturing lot
        struct.pack_into("<20s", eeprom, 0x10, cartridge.manufacturing_lot)
        # version (not sure)
        struct.pack_into("<H", eeprom, 0x24, cartridge.version)
        # manufacturing date (year is stored with a 1900 offset)
        struct.pack_into("<HBBBBH", eeprom, 0x28,
            cartridge.manufacturing_date.year - 1900,
            cartridge.manufacturing_date.month,
            cartridge.manufacturing_date.day,
            cartridge.manufacturing_date.hour,
            cartridge.manufacturing_date.minute,
            cartridge.manufacturing_date.second)
        # last use date
        struct.pack_into("<HBBBBH", eeprom, 0x30,
            cartridge.use_date.year - 1900,
            cartridge.use_date.month,
            cartridge.use_date.day,
            cartridge.use_date.hour,
            cartridge.use_date.minute,
            cartridge.use_date.second)
        struct.pack_into("<d", eeprom, 0x38, cartridge.initial_material_quantity)
        # plaintext checksum
        struct.pack_into("<H", eeprom, 0x40, self.checksum.checksum(eeprom[0x00:0x40]))
        # key (key_fragment is a hex string; Python 2 str.decode("hex"))
        struct.pack_into("<8s", eeprom, 0x48, str(cartridge.key_fragment.decode("hex")))
        # key checksum
        struct.pack_into("<H", eeprom, 0x50, self.checksum.checksum(eeprom[0x48:0x50]))
        # current material quantity
        struct.pack_into("<H", eeprom, 0x62, self.checksum.checksum(eeprom[0x58:0x60])) if False else None
        struct.pack_into("<d", eeprom, 0x58, cartridge.current_material_quantity)
        # Checksum current material quantity
        struct.pack_into("<H", eeprom, 0x62, self.checksum.checksum(eeprom[0x58:0x60]))
        # signature (not sure, not used)
        struct.pack_into("<9s", eeprom, 0x68, cartridge.signature)
        return eeprom
    #
    # Unpack a decrypted cartridge into a catridge object
    #
    def unpack(self, cartridge_packed):
        """Deserialize a decrypted EEPROM buffer into a cartridge object.

        Raises Exception if the plaintext or current-quantity checksums do
        not match.
        """
        # Validating plaintext checksum
        if self.checksum.checksum(cartridge_packed[0x00:0x40]) != struct.unpack("<H", str(cartridge_packed[0x40:0x42]))[0]:
            raise Exception("invalid content checksum: should have " + hex(struct.unpack("<H", str(cartridge_packed[0x40:0x42]))[0]) + " but have " + hex(self.checksum.checksum(cartridge_packed[0x00:0x40])))
        # Validating current material quantity checksum
        if self.checksum.checksum(cartridge_packed[0x58:0x60]) != struct.unpack("<H", str(cartridge_packed[0x62:0x64]))[0]:
            raise Exception("invalid current material quantity checksum")
        # buffer() is Python 2 only: a zero-copy read-only view for unpack_from.
        cartridge_packed = buffer(cartridge_packed)
        # Serial number
        serial_number = struct.unpack_from("<d", cartridge_packed, 0x0)[0]
        # Material
        material_name = material.get_name_from_id(int(struct.unpack_from("<d", cartridge_packed, 0x08)[0]))
        # Manufacturing lot (NUL-padded string; keep only the part before the first NUL)
        manufacturing_lot = struct.unpack_from("<20s", cartridge_packed, 0x10)[0].split('\x00')[0]
        # Manufacturing datetime
        (mfg_datetime_year,
            mfg_datetime_month,
            mfg_datetime_day,
            mfg_datetime_hour,
            mfg_datetime_minute,
            mfg_datetime_second) = struct.unpack_from("<HBBBBH", cartridge_packed, 0x28)
        mfg_datetime = datetime.datetime(mfg_datetime_year + 1900,
                mfg_datetime_month,
                mfg_datetime_day,
                mfg_datetime_hour,
                mfg_datetime_minute,
                mfg_datetime_second)
        # Last use datetime
        (use_datetime_year,
            use_datetime_month,
            use_datetime_day,
            use_datetime_hour,
            use_datetime_minute,
            use_datetime_second) = struct.unpack_from("<HBBBBH", cartridge_packed, 0x30)
        use_datetime = datetime.datetime(use_datetime_year + 1900,
                use_datetime_month,
                use_datetime_day,
                use_datetime_hour,
                use_datetime_minute,
                use_datetime_second)
        # Initial material quantity
        initial_material_quantity = struct.unpack_from("<d", cartridge_packed, 0x38)[0]
        # Version
        version = struct.unpack_from("<H", cartridge_packed, 0x24)[0]
        # Key fragment (stored raw; re-encoded as a hex string)
        key_fragment = str(struct.unpack_from("<8s", cartridge_packed, 0x48)[0]).encode("hex")
        # Current material quantity
        current_material_quantity = struct.unpack_from("<d", cartridge_packed, 0x58)[0]
        # Signature
        signature = struct.unpack_from("<9s", cartridge_packed, 0x68)[0]
        return cartridge.Cartridge(serial_number, material_name, manufacturing_lot, mfg_datetime, use_datetime, initial_material_quantity, current_material_quantity, key_fragment, version, signature)
    #
    # Encrypt a packed cartridge into a crypted cartridge
    #
    def encrypt(self, machine_number, eeprom_uid, cartridge_packed):
        """Encrypt a packed buffer in place and store the crypted checksums.

        NOTE(review): `cartridge_crypted` aliases `cartridge_packed` (no
        copy), so the input buffer is mutated.
        """
        cartridge_crypted = cartridge_packed
        # Validate key fragment checksum
        # TODO
        # Build the key
        key = self.build_key(cartridge_packed[0x48:0x50], machine_number, eeprom_uid)
        # Encrypt content
        struct.pack_into("<64s", cartridge_crypted, 0x00, str(self.crypto.encrypt(key, cartridge_packed[0x00:0x40])))
        # Checksum crypted content
        struct.pack_into("<H", cartridge_crypted, 0x46, self.checksum.checksum(cartridge_packed[0x00:0x40]))
        # Encrypt current material quantity
        struct.pack_into("<8s", cartridge_crypted, 0x58, str(self.crypto.encrypt(key, cartridge_packed[0x58:0x60])))
        # Checksum crypted current material quantity
        struct.pack_into("<H", cartridge_crypted, 0x60, self.checksum.checksum(cartridge_packed[0x58:0x60]))
        return cartridge_crypted
    #
    # Decrypt a crypted cartridge into a packed cartridge
    #
    def decrypt(self, machine_number, eeprom_uid, cartridge_crypted):
        """Decrypt a crypted buffer in place after verifying crypted checksums.

        NOTE(review): `cartridge_packed` aliases `cartridge_crypted` (no
        copy), so the input buffer is mutated.
        """
        cartridge_packed = cartridge_crypted
        # Validate key fragment checksum
        # TODO
        # Build the key
        key = self.build_key(cartridge_crypted[0x48:0x50], machine_number, eeprom_uid)
        # Validate crypted content checksum
        if self.checksum.checksum(cartridge_crypted[0x00:0x40]) != struct.unpack("<H", str(cartridge_crypted[0x46:0x48]))[0]:
            raise Exception("invalid crypted content checksum")
        # Decrypt content
        cartridge_packed[0x00:0x40] = self.crypto.decrypt(key, cartridge_crypted[0x00:0x40])
        # Validate crypted current material quantity checksum
        if self.checksum.checksum(cartridge_crypted[0x58:0x60]) != struct.unpack("<H", str(cartridge_crypted[0x60:0x62]))[0]:
            raise Exception("invalid current material quantity checksum")
        # Decrypt current material quantity
        cartridge_packed[0x58:0x60] = self.crypto.decrypt(key, cartridge_crypted[0x58:0x60])
        return cartridge_packed
    #
    # Build a key used to encrypt/decrypt a cartridge
    #
    def build_key(self, cartridge_key, machine_number, eeprom_uid):
        """Derive the 16-byte crypto key.

        Each key byte is the bitwise complement of a byte picked (in a fixed
        scrambled order) from the cartridge key fragment, the machine number
        and the EEPROM uid; the latter two arrive as hex strings.
        """
        machine_number = bytearray(machine_number.decode("hex"))
        eeprom_uid = bytearray(eeprom_uid.decode("hex"))
        key = bytearray(16)
        key[0] = ~cartridge_key[0] & 0xff
        key[1] = ~cartridge_key[2] & 0xff
        key[2] = ~eeprom_uid[5] & 0xff
        key[3] = ~cartridge_key[6] & 0xff
        key[4] = ~machine_number[0] & 0xff
        key[5] = ~machine_number[2] & 0xff
        key[6] = ~eeprom_uid[1] & 0xff
        key[7] = ~machine_number[6] & 0xff
        key[8] = ~machine_number[7] & 0xff
        key[9] = ~eeprom_uid[6] & 0xff
        key[10] = ~machine_number[3] & 0xff
        key[11] = ~machine_number[1] & 0xff
        key[12] = ~cartridge_key[7] & 0xff
        key[13] = ~eeprom_uid[2] & 0xff
        key[14] = ~cartridge_key[3] & 0xff
        key[15] = ~cartridge_key[1] & 0xff
        return key
| |
import copy
import functools
import io
import sys
from steel.common import args, meta, data
__all__ = ['Field', 'FullyDecoded', 'Condition']
class Trigger:
    """Descriptor holding a registry of callback functions for a Field event.

    Used as a decorator at class-definition time to register callbacks;
    accessed on a Field instance it yields a BoundTrigger curried with that
    instance.
    """
    def __init__(self):
        # Per-field-instance BoundTrigger cache.
        self.cache = {}
        self.functions = set()
    def __call__(self, func):
        # Used as a decorator.  Bug fix: return the function so the
        # decorated class attribute keeps referring to it (previously the
        # implicit None return clobbered the name).
        self.functions.add(func)
        return func
    def __get__(self, instance, owner):
        # Bug fix: the original tested `owner is None`, which is never true
        # for attribute access, so class-level access fell through to the
        # bound path with instance=None.  Standard descriptor behavior is
        # to test the instance.
        if instance is None:
            return self
        if instance not in self.cache:
            self.cache[instance] = BoundTrigger(instance, self.functions)
        return self.cache[instance]
class BoundTrigger:
    """A Trigger bound to a specific field.

    Every registered callback is curried with the field as its first
    argument, so apply(*args) invokes each as func(field, *args).
    """
    def __init__(self, field, functions):
        self.field = field
        self.functions = set(functools.partial(func, field) for func in functions)
    def __iter__(self):
        return iter(self.functions)
    def __call__(self, func):
        # Used as a decorator.  Bug fix: bind the field exactly like
        # __init__ does (the raw function was previously added, so apply()
        # would have called it without the field argument), and return the
        # function so the decorated name is preserved.
        self.functions.add(functools.partial(func, self.field))
        return func
    def apply(self, *args, **kwargs):
        # Called from within the appropriate code: fan out to every
        # registered callback with the bound field prepended.
        for func in self.functions:
            func(*args, **kwargs)
class Field(metaclass=meta.DeclarativeFieldMetaclass):
    """Base descriptor for a single structure field.

    `size`, `offset`, `choices` and `default` are declarative Arguments;
    the metaclass presumably collects them into `self.arguments` (used in
    __init__).  `after_encode`/`after_decode` are trigger registries whose
    callbacks run when a value is set or read.
    """
    size = args.Argument(resolve_field=True)
    offset = args.Argument(default=None, resolve_field=True)
    choices = args.Argument(default=())
    default = args.Argument(default=args.NotProvided)
    after_encode = Trigger()
    after_decode = Trigger()
    def getter(self, func):
        # For compatibility with typical property usage
        self._getters.append(func)
        return self
    def setter(self, func):
        # For compatibility with typical property usage
        self._setters.append(func)
        return self
    # Registered on the after_encode trigger: keeps a linked size field in
    # sync with the encoded payload length.  The class attribute ends up
    # bound to whatever Trigger.__call__ returns.
    @after_encode
    def update_size(self, obj, value):
        if isinstance(self.size, Field):
            setattr(obj, self.size.name, len(value))
    def __init__(self, label='', **kwargs):
        """Create a field; keyword arguments must match declared Arguments."""
        self.label = label
        self._parent = None
        self.instance = None
        self._getters = []
        self._setters = []
        # Pull each declared Argument from kwargs, falling back to its
        # default; unknown or missing arguments raise TypeError.
        for name, arg in self.arguments.items():
            if name in kwargs:
                value = kwargs.pop(name)
            elif arg.has_default:
                value = arg.default
            else:
                raise TypeError("The %s argument is required for %s fields" % (arg.name, self.__class__.__name__))
            setattr(self, name, value)
        if kwargs:
            raise TypeError("%s is not a valid argument for %s fields" % (list(kwargs.keys())[0], self.__class__.__name__))
        # Once the base values are all in place, arguments can be initialized properly
        for name, arg in self.arguments.items():
            if hasattr(self, name):
                value = getattr(self, name)
            else:
                value = None
            setattr(self, name, arg.initialize(self, value))
    def resolve(self, instance):
        # Walk up the parent chain (if any) before reading this field's
        # value off the instance.
        if self._parent is not None:
            instance = self._parent.resolve(instance)
        return getattr(instance, self.name)
    def read(self, obj):
        # If the size can be determined easily, read
        # that number of bytes and return it directly.
        if self.size is not None:
            return obj.read(self.size)
        # Otherwise, the field needs to supply its own
        # technique for determining how much data to read.
        raise NotImplementedError()
    def write(self, obj, value):
        # By default, this doesn't do much, but individual
        # fields can/should override it if necessary
        obj.write(value)
    def set_name(self, name):
        # Called when the field is bound to a class attribute; derives a
        # human-readable label from the attribute name if none was given.
        self.name = name
        label = self.label or name.replace('_', ' ')
        self.label = label.title()
    def attach_to_class(self, cls):
        cls._fields[self.name] = self
    def validate(self, obj, value):
        # This should raise a ValueError if the value is invalid
        # It should simply return without an error if it's valid
        with self.for_instance(obj):
            # First, make sure the value can be encoded
            self.encode(value)
            # Then make sure it's a valid option, if applicable
            if self.choices and value not in set(v for v, desc in self.choices):
                raise ValueError("%r is not a valid choice" % value)
    def _extract(self, instance):
        # Read raw bytes for this field from the instance's data stream;
        # a FullyDecoded escape carries both bytes and decoded value.
        with self.for_instance(instance):
            try:
                return self.read(instance), None
            except FullyDecoded as obj:
                return obj.bytes, obj.value
    def read_value(self, file):
        # Read and decode in one step; fields that decode during read()
        # signal it via FullyDecoded.
        try:
            bytes = self.read(file)
            value = self.decode(bytes)
            return bytes, value
        except FullyDecoded as obj:
            return obj.bytes, obj.value
    def for_instance(self, instance):
        # Context manager that temporarily binds this field to an instance.
        return meta.AttributeInstance(self, instance)
    def __get__(self, instance, owner):
        if not instance:
            return self
        # Customizes the field for this particular instance
        # Use field instead of self for the rest of the method
        with self.for_instance(instance):
            try:
                value = instance._extract(self)
            except IOError:
                if self.default is not args.NotProvided:
                    return self.default
                raise AttributeError("Attribute %r has no data" % self.name)
            if self.name not in instance.__dict__:
                # NOTE(review): _extract returns a (bytes, value) pair which
                # is passed whole to decode(); confirm decode() expects that.
                value = self.decode(value)
                self.after_decode.apply(instance, value)
                for getter in self._getters:
                    value = getter(instance, value)
                instance.__dict__[self.name] = value
        return instance.__dict__[self.name]
    def __set__(self, instance, value):
        # Run user setters, then cache both the decoded value and its
        # encoded raw bytes, firing after_encode callbacks.
        for setter in self._setters:
            value = setter(instance, value)
        with self.for_instance(instance):
            instance.__dict__[self.name] = value
            instance._raw_values[self.name] = self.encode(value)
            self.after_encode.apply(instance, value)
    def __repr__(self):
        return '<%s: %s>' % (self.name, type(self).__name__)
    def __hash__(self):
        # Identity hash: __eq__ below returns Condition objects rather than
        # booleans, so value-based hashing is not possible.
        return id(self)
    def __eq__(self, other):
        return Condition(self, other, lambda a, b: a == b)
    def __ne__(self, other):
        return Condition(self, other, lambda a, b: a != b)
class FullyDecoded(Exception):
    """Control-flow exception: a field decoded its value while reading.

    Carries both the raw bytes consumed and the decoded value.
    """
    def __init__(self, bytes, value):
        self.bytes, self.value = bytes, value
class Condition:
    """A deferred comparison used to conditionally include fields.

    Instances are produced by Field.__eq__/__ne__ and used as context
    managers inside a structure class body: fields declared within the
    `with` block are captured (via data.field_stack) and are only read
    from an instance when the comparison holds at access time.
    """
    def __init__(self, a, b, compare):
        # a/b: the operands (fields or plain values); compare: a two-argument
        # predicate applied to the resolved operands.
        self.a = a
        self.b = b
        self.compare = compare
    def __enter__(self):
        # Hack to add the condition to the class without
        # having to explicitly give it a (useless) name
        frame = sys._getframe(1)
        locals = frame.f_locals
        locals[self.get_available_name(locals.keys())] = self
        # This has to come after the frame hack, so that the condition gets
        # placed in the outer namespace, not in the inner 'with' block
        data.field_stack.append([])
        # Return it anyway, just to check if someone does try to give it a name
        return self
    def __exit__(self, exception_type, exception, traceback):
        # Fields declared inside the with block were pushed onto the stack.
        self.fields = data.field_stack.pop()
        # Don't suppress the exception, if any
        return False
    def get_available_name(self, locals):
        # Find an unused auto-generated name in the enclosing namespace.
        i = 0
        while True:
            name = '_condition_%s' % i
            if name not in locals:
                return name
            i += 1
    def set_name(self, name):
        # The only name a condition should ever get is the auto-generated
        # one from __enter__; an explicit "as" binding is rejected.
        if hasattr(self, 'name'):
            raise TypeError('Field conditions must not use the "as" form')
        self.name = name
    def attach_to_class(self, cls):
        cls._fields[self.name] = self
    def for_instance(self, instance):
        # Return a copy whose operands are themselves bound to the instance.
        if instance is None:
            return self
        field = copy.copy(self)
        if hasattr(field.a, 'for_instance'):
            field.a = field.a.for_instance(instance)
        if hasattr(field.b, 'for_instance'):
            field.b = field.b.for_instance(instance)
        return field
    def __get__(self, instance, owner):
        if not instance:
            return self
        if self.name in instance.__dict__:
            # This condition has already been processed, so don't try getting it again
            return None
        # Customizes the field for this particular instance
        # Use field instead of self for the rest of the method
        with self.for_instance(instance):
            # Resolve either operand against the instance when it supports it.
            a = self.a
            if hasattr(a, 'resolve'):
                a = a.resolve(instance)
            b = self.b
            if hasattr(b, 'resolve'):
                b = b.resolve(instance)
            if self.compare(a, b):
                # The comparison succeeded, so the fields should be processed
                raw_bytes = b''
                for f in self.fields:
                    with f.for_instance(instance):
                        bytes, value = f.read_value(instance)
                        raw_bytes += bytes
                        instance.__dict__[f.name] = value
                        f.after_decode.apply(instance, value)
                instance._raw_values[self.name] = raw_bytes
        return None
    def __set__(self, instance, value):
        instance.__dict__[self.name] = value
        instance._raw_values[self.name] = b''
| |
from __future__ import absolute_import
from functools import partial
from zerver.lib.actions import check_send_message
from zerver.lib.response import json_success
from zerver.decorator import api_key_only_webhook_view, REQ, has_request_variables
from zerver.lib.webhooks.git import get_push_commits_event_message, EMPTY_SHA,\
get_remove_branch_event_message, get_pull_request_event_message,\
SUBJECT_WITH_PR_INFO_TEMPLATE
from zerver.models import Client, UserProfile
from django.http import HttpRequest, HttpResponse
from six import text_type
from typing import Dict, Any, Iterable, Optional
class UnknownEventType(Exception):
    """Raised when a GitLab webhook event has no registered handler."""
    pass
def get_push_event_body(payload):
    # type: (Dict[str, Any]) -> text_type
    """Dispatch a push payload: branch removal vs. a normal push."""
    branch_removed = payload.get('after') == EMPTY_SHA
    if branch_removed:
        return get_remove_branch_event_body(payload)
    return get_normal_push_event_body(payload)
def get_normal_push_event_body(payload):
    # type: (Dict[str, Any]) -> text_type
    """Build the chat message for a regular branch push."""
    compare_url = u'{}/compare/{}...{}'.format(
        get_repository_homepage(payload),
        payload['before'],
        payload['after'],
    )
    commits = []
    for commit in payload.get('commits'):
        commits.append({
            'sha': commit.get('id'),
            'message': commit.get('message'),
            'url': commit.get('url'),
        })
    return get_push_commits_event_message(
        get_user_name(payload),
        compare_url,
        get_branch_name(payload),
        commits,
    )
def get_remove_branch_event_body(payload):
    # type: (Dict[str, Any]) -> text_type
    """Build the chat message for a branch deletion push."""
    user = get_user_name(payload)
    branch = get_branch_name(payload)
    return get_remove_branch_event_message(user, branch)
def get_tag_push_event_body(payload):
    # type: (Dict[str, Any]) -> text_type
    """Build the chat message for a tag push or removal."""
    # A checkout SHA is only present when the tag was pushed.
    if payload.get('checkout_sha'):
        verb = "pushed"
    else:
        verb = "removed"
    return u"{} {} {} tag.".format(
        get_user_name(payload), verb, get_tag_name(payload))
def get_issue_created_event_body(payload):
    # type: (Dict[str, Any]) -> text_type
    """Message for a newly opened issue, noting the assignee if any."""
    body = get_issue_event_body(payload, "created")
    assignee = payload.get('assignee', {}).get('name')
    if not assignee:
        return body
    # Replace the trailing period with the assignee note.
    return u"{} (assigned to {}).".format(body[:-1], assignee)
def get_issue_event_body(payload, action):
    # type: (Dict[str, Any], text_type) -> text_type
    """Generic "<user> <action> [Issue #N](url)." message.

    Consistency fix: use a u"" literal like every other message builder
    in this module, so the declared text_type return type holds on
    Python 2 even when no argument forces unicode promotion.
    """
    return u"{} {} [Issue #{}]({}).".format(
        get_issue_user_name(payload),
        action,
        get_object_iid(payload),
        get_object_url(payload)
    )
def get_merge_request_updated_event_body(payload):
    # type: (Dict[str, Any]) -> text_type
    """MR update: new commits vs. metadata-only edits."""
    attrs = payload.get('object_attributes')
    if attrs.get('oldrev'):
        # oldrev present => the source branch received new commits.
        return get_merge_request_event_body(payload, "added commit(s) to")
    return get_merge_request_open_or_updated_body(payload, "updated")
def get_merge_request_event_body(payload, action):
    # type: (Dict[str, Any], text_type) -> text_type
    """Short merge-request message (no branch/description details)."""
    attrs = payload.get('object_attributes')
    return get_pull_request_event_message(
        get_issue_user_name(payload),
        action,
        attrs.get('url'),
        type='MR',
    )
def get_merge_request_open_or_updated_body(payload, action):
    # type: (Dict[str, Any], text_type) -> text_type
    """Detailed MR message including branches, description and assignee."""
    attrs = payload.get('object_attributes')
    assignee = payload.get('assignee', {}).get('username')
    return get_pull_request_event_message(
        get_issue_user_name(payload),
        action,
        attrs.get('url'),
        attrs.get('source_branch'),
        attrs.get('target_branch'),
        attrs.get('description'),
        assignee,
        type='MR',
    )
def get_commented_commit_event_body(payload):
    # type: (Dict[str, Any]) -> text_type
    """Message for a comment left on a commit."""
    note_url = payload.get('object_attributes').get('url')
    commit_url = payload.get('commit').get('url')
    return u"{} added [comment]({}) to [Commit]({}).".format(
        get_issue_user_name(payload), note_url, commit_url)
def get_commented_merge_request_event_body(payload):
    # type: (Dict[str, Any]) -> text_type
    """Message for a comment left on a merge request."""
    mr_iid = payload.get('merge_request').get('iid')
    return u"{} added [comment]({}) to [Merge Request #{}]({}/merge_requests/{}).".format(
        get_issue_user_name(payload),
        payload.get('object_attributes').get('url'),
        mr_iid,
        payload.get('project').get('web_url'),
        mr_iid,
    )
def get_commented_issue_event_body(payload):
    # type: (Dict[str, Any]) -> text_type
    """Message for a comment left on an issue."""
    issue_iid = payload.get('issue').get('iid')
    return u"{} added [comment]({}) to [Issue #{}]({}/issues/{}).".format(
        get_issue_user_name(payload),
        payload.get('object_attributes').get('url'),
        issue_iid,
        payload.get('project').get('web_url'),
        issue_iid,
    )
def get_commented_snippet_event_body(payload):
    # type: (Dict[str, Any]) -> text_type
    """Message for a comment left on a snippet."""
    snippet_id = payload.get('snippet').get('id')
    return u"{} added [comment]({}) to [Snippet #{}]({}/snippets/{}).".format(
        get_issue_user_name(payload),
        payload.get('object_attributes').get('url'),
        snippet_id,
        payload.get('project').get('web_url'),
        snippet_id,
    )
def get_wiki_page_event_body(payload, action):
    # type: (Dict[str, Any], text_type) -> text_type
    """Message for a wiki page being created or updated."""
    attrs = payload.get('object_attributes')
    return u"{} {} [Wiki Page \"{}\"]({}).".format(
        get_issue_user_name(payload),
        action,
        attrs.get('title'),
        attrs.get('url'),
    )
def get_build_hook_event_body(payload):
    # type: (Dict[str, Any]) -> text_type
    """Message describing a CI build status change."""
    status = payload.get('build_status')
    known_verbs = {'created': 'was created', 'running': 'started'}
    action = known_verbs.get(status, 'changed status to {}'.format(status))
    return u"Build {} from {} stage {}.".format(
        payload.get('build_name'),
        payload.get('build_stage'),
        action,
    )
def get_pipeline_event_body(payload):
    # type: (Dict[str, Any]) -> text_type
    """Message describing a pipeline status change plus its builds."""
    status = payload.get('object_attributes').get('status')
    if status == 'pending':
        action = 'was created'
    elif status == 'running':
        action = 'started'
    else:
        action = 'changed status to {}'.format(status)
    build_lines = [
        u"* {} - {}".format(build.get('name'), build.get('status'))
        for build in payload.get('builds')
    ]
    return u"Pipeline {} with build(s):\n{}.".format(
        action, u"\n".join(build_lines))
def get_repo_name(payload):
    # type: (Dict[str, Any]) -> text_type
    """Project name from the payload."""
    project = payload['project']
    return project['name']
def get_user_name(payload):
    # type: (Dict[str, Any]) -> text_type
    """User name from push-style payloads (top-level 'user_name' key)."""
    name = payload['user_name']
    return name
def get_issue_user_name(payload):
    # type: (Dict[str, Any]) -> text_type
    """User name from issue/MR/note payloads (nested 'user' object)."""
    user = payload['user']
    return user['name']
def get_repository_homepage(payload):
    # type: (Dict[str, Any]) -> text_type
    """Repository homepage URL from the payload."""
    repository = payload['repository']
    return repository['homepage']
def get_branch_name(payload):
    # type: (Dict[str, Any]) -> text_type
    """Branch name with the 'refs/heads/' prefix dropped."""
    ref = payload['ref']
    return ref.replace('refs/heads/', '')
def get_tag_name(payload):
    # type: (Dict[str, Any]) -> text_type
    """Tag name with the 'refs/tags/' prefix dropped."""
    ref = payload['ref']
    return ref.replace('refs/tags/', '')
def get_object_iid(payload):
    # type: (Dict[str, Any]) -> text_type
    """Internal id (per-project counter) of the issue/MR."""
    attrs = payload['object_attributes']
    return attrs['iid']
def get_object_url(payload):
    # type: (Dict[str, Any]) -> text_type
    """Web URL of the issue/MR the event refers to."""
    attrs = payload['object_attributes']
    return attrs['url']
#: Maps the (possibly action-qualified) X-Gitlab-Event value to the
#: function rendering its chat message; keys are built by get_event().
EVENT_FUNCTION_MAPPER = {
    'Push Hook': get_push_event_body,
    'Tag Push Hook': get_tag_push_event_body,
    'Issue Hook open': get_issue_created_event_body,
    'Issue Hook close': partial(get_issue_event_body, action='closed'),
    'Issue Hook reopen': partial(get_issue_event_body, action='reopened'),
    'Issue Hook update': partial(get_issue_event_body, action='updated'),
    'Note Hook Commit': get_commented_commit_event_body,
    'Note Hook MergeRequest': get_commented_merge_request_event_body,
    'Note Hook Issue': get_commented_issue_event_body,
    'Note Hook Snippet': get_commented_snippet_event_body,
    'Merge Request Hook open': partial(get_merge_request_open_or_updated_body, action='created'),
    'Merge Request Hook update': get_merge_request_updated_event_body,
    'Merge Request Hook merge': partial(get_merge_request_event_body, action='merged'),
    'Merge Request Hook close': partial(get_merge_request_event_body, action='closed'),
    'Wiki Page Hook create': partial(get_wiki_page_event_body, action='created'),
    'Wiki Page Hook update': partial(get_wiki_page_event_body, action='updated'),
    'Build Hook': get_build_hook_event_body,
    'Pipeline Hook': get_pipeline_event_body,
}
@api_key_only_webhook_view("Gitlab")
@has_request_variables
def api_gitlab_webhook(request, user_profile, client,
                       stream=REQ(default='gitlab'),
                       payload=REQ(argument_type='body')):
    # type: (HttpRequest, UserProfile, Client, text_type, Dict[str, Any]) -> HttpResponse
    """GitLab webhook entry point: route the event to its message builder
    and post the result to the configured stream.

    NOTE: has_request_variables introspects the REQ(...) defaults to pull
    values out of the request, so the signature doubles as configuration —
    do not rename/reorder these parameters casually.
    """
    event = get_event(request, payload)
    body = get_body_based_on_event(event)(payload)
    subject = get_subject_based_on_event(event, payload)
    check_send_message(user_profile, client, 'stream', [stream], subject, body)
    return json_success()
def get_body_based_on_event(event):
    # type: (str) -> Any
    """Look up the message-builder function registered for *event*."""
    handler = EVENT_FUNCTION_MAPPER[event]
    return handler
def get_subject_based_on_event(event, payload):
    # type: (str, Dict[str, Any]) -> text_type
    """Choose the stream topic for the given event type."""
    if event == 'Push Hook':
        return u"{} / {}".format(get_repo_name(payload), get_branch_name(payload))
    if event == 'Build Hook':
        repo = payload.get('repository').get('name')
        return u"{} / {}".format(repo, get_branch_name(payload))
    if event == 'Pipeline Hook':
        ref = payload.get('object_attributes').get('ref')
        return u"{} / {}".format(
            get_repo_name(payload), ref.replace('refs/heads/', ''))
    if event.startswith('Merge Request Hook'):
        attrs = payload.get('object_attributes')
        return SUBJECT_WITH_PR_INFO_TEMPLATE.format(
            repo=get_repo_name(payload),
            type='MR',
            id=attrs.get('iid'),
            title=attrs.get('title'),
        )
    # Everything else just uses the project name.
    return get_repo_name(payload)
def get_event(request, payload):
    # type: (HttpRequest, Dict[str, Any]) -> str
    """Derive the handler key from the X-Gitlab-Event header.

    Several hook types are further qualified by a payload attribute so
    that e.g. 'Issue Hook open' and 'Issue Hook close' dispatch to
    different message builders.

    Fixes: collapses four duplicated qualification branches into two, and
    tests membership against the dict directly instead of building a
    throwaway list from its keys.

    Raises UnknownEventType for events with no registered handler.
    """
    event = request.META['HTTP_X_GITLAB_EVENT']
    if event == 'Note Hook':
        # Comments are qualified by what was commented on.
        suffix = payload.get('object_attributes').get('noteable_type')
        event = "{} {}".format(event, suffix)
    elif event in ('Issue Hook', 'Merge Request Hook', 'Wiki Page Hook'):
        # These hooks are qualified by the action performed.
        suffix = payload.get('object_attributes').get('action')
        event = "{} {}".format(event, suffix)
    if event in EVENT_FUNCTION_MAPPER:
        return event
    raise UnknownEventType(u'Event {} is unknown and cannot be handled'.format(event))
| |
# -*- coding: utf-8 -*-
"""
profiling.__main__
~~~~~~~~~~~~~~~~~~
The command-line interface to profile a script or view profiling results.
.. sourcecode:: console
$ python -m profiling --help
"""
from __future__ import absolute_import
from collections import OrderedDict
from datetime import datetime
import errno
import importlib
import os
try:
    import cPickle as pickle
except ImportError:
    import pickle
import signal
import socket
from stat import S_ISREG, S_ISSOCK
import sys
import threading
import traceback
import click
from six import PY2, exec_
from .profiler import Profiler
from .remote import INTERVAL, PICKLE_PROTOCOL, recv_stats
from .remote.background import BackgroundProfiler
from .remote.select import SelectProfilingServer
from .viewer import StatisticsViewer
__all__ = ['main', 'profile', 'view']
@click.group()
def main():
    """Root command group for the profiling CLI."""
    pass
def get_title(src_name, src_type=None):
    """Normalizes a source name as a string to be used for viewer's title."""
    if src_type == 'tcp':
        host, port = src_name
        return '{0}:{1}'.format(host, port)
    return os.path.basename(src_name)
def make_viewer(mono=False):
    """Makes a :class:`profiling.viewer.StatisticsViewer` with common options.
    """
    ui = StatisticsViewer()
    ui.use_vim_command_map()
    ui.use_game_command_map()
    main_loop = ui.loop()
    if mono:
        # A single color means monochrome to the urwid screen.
        main_loop.screen.set_terminal_properties(1)
    return (ui, main_loop)
def spawn_thread(func, *args, **kwargs):
    """Spawns a daemon thread. The thread executes the given function by the
    given arguments.
    """
    worker = threading.Thread(target=func, args=args, kwargs=kwargs)
    worker.daemon = True
    worker.start()
    return worker
def noop(x):
    """Just returns the first argument (identity function).

    A plain ``def`` instead of a lambda assignment (PEP 8 E731) so the
    function carries a proper __name__ for logs and tracebacks.
    """
    return x
# custom parameter types
class Script(click.File):
    """A parameter type for Python script."""

    def __init__(self):
        # Scripts are opened in binary mode; compile() accepts bytes.
        super(Script, self).__init__('rb')

    def convert(self, value, param, ctx):
        """Compile the script and prepare a __main__-like namespace."""
        with super(Script, self).convert(value, param, ctx) as f:
            filename = f.name
            code = compile(f.read(), filename, 'exec')
        globals_ = {'__file__': filename,
                    '__name__': '__main__',
                    '__package__': None}
        return (filename, code, globals_)

    def get_metavar(self, param):
        return 'PYTHON'
class Timer(click.ParamType):
    """A parameter type to choose profiling timer."""

    #: timer name -> (timer module name, timer class name);
    #: None denotes the default deterministic timer.
    timers = OrderedDict([
        (None, ('.timers', 'Timer')),
        ('thread', ('.timers.thread', 'ThreadTimer')),
        ('yappi', ('.timers.thread', 'YappiTimer')),
        ('greenlet', ('.timers.greenlet', 'GreenletTimer')),
    ])

    def import_timer_class(self, name):
        """Resolve a timer name to its class, importing the module lazily."""
        if name not in self.timers:
            raise ValueError('No such timer: {0}'.format(name))
        module_name, class_name = self.timers[name]
        module = importlib.import_module(module_name, __package__)
        return getattr(module, class_name)

    def convert(self, value, param, ctx):
        if value == 'default':
            value = None
        return self.import_timer_class(value)()

    def get_metavar(self, param):
        return 'TIMER'
class Address(click.ParamType):
    """A parameter type for IP address."""

    def convert(self, value, param, ctx):
        """Split 'HOST:PORT' into a (host, int(port)) tuple."""
        host, _, port = value.partition(':')
        return (host, int(port))

    def get_metavar(self, param):
        return 'HOST:PORT'
class ViewerSource(click.ParamType):
    """A parameter type for :class:`profiling.viewer.StatisticsViewer` source.
    """

    def convert(self, value, param, ctx):
        # Classify the user-supplied source as one of:
        #   'dump' - a regular dump file, 'sock' - a unix socket file,
        #   'tcp'  - a HOST:PORT address.
        src_type = False
        try:
            mode = os.stat(value).st_mode
        except OSError:
            # Not an existing path; try to parse it as HOST:PORT.
            try:
                src_name = Address().convert(value, param, ctx)
            except ValueError:
                pass
            else:
                src_type = 'tcp'
        else:
            src_name = value
            if S_ISSOCK(mode):
                src_type = 'sock'
            elif S_ISREG(mode):
                src_type = 'dump'
        if not src_type:
            raise ValueError('A dump file or a socket addr required.')
        return (src_type, src_name)

    def get_metavar(self, param):
        return 'SOURCE'
class SignalNumber(click.IntRange):
    """A parameter type for signal number."""

    def __init__(self):
        # Signal numbers fit into a single byte.
        super(SignalNumber, self).__init__(0, 255)

    def get_metavar(self, param):
        return 'SIGNO'
# common parameters
class Params(object):
    """A reusable, extensible bundle of decorators (click parameters)."""

    def __init__(self, params):
        self.params = params

    def __call__(self, f):
        """Apply every stored decorator to *f*, in list order."""
        for decorate in self.params:
            f = decorate(f)
        return f

    def extend(self, params):
        """Return a new bundle with *params* appended to this one's."""
        return type(self)(self.params + params)
# Parameter bundles shared by the sub-commands below.
profiler_params = Params([
    click.argument('script', type=Script()),
    click.option('-t', '--timer', type=Timer(),
                 help='Choose CPU time measurer.'),
    click.option('--pickle-protocol', type=int, default=PICKLE_PROTOCOL,
                 help='Pickle protocol to dump profiling result.'),
])
# Live/remote profiling additionally needs a result-update interval.
live_profiler_params = profiler_params.extend([
    click.option('-i', '--interval', type=float, default=INTERVAL,
                 help='How often update the profiling result.'),
])
viewer_params = Params([
    click.option('--mono', is_flag=True, help='Disable coloring.'),
])
# sub-commands
@main.command()
@profiler_params
@click.option('-d', '--dump', 'dump_filename',
              type=click.Path(writable=True),
              help='Profiling result dump filename.')
@viewer_params
def profile(script, timer, pickle_protocol, dump_filename, mono):
    """Profile a Python script."""
    filename, code, globals_ = script
    # Make the profiled script see itself as the program being run.
    sys.argv[:] = [filename]
    # start profiling.
    frame = sys._getframe()
    profiler = Profiler(timer, top_frame=frame, top_code=code)
    profiler.start()
    # exec the script.
    try:
        exec_(code, globals_)
    except:
        # don't profile print_exc().
        profiler.stop()
        traceback.print_exc()
    else:
        profiler.stop()
    if PY2:
        # in Python 2, exec's cpu time is duplicated with actual cpu time.
        stat = profiler.stats.get_child(frame.f_code)
        stat.remove_child(exec_.func_code)
    if dump_filename is None:
        # show the result using a viewer.
        viewer, loop = make_viewer(mono)
        viewer.set_stats(profiler.stats, get_title(filename))
        try:
            loop.run()
        except KeyboardInterrupt:
            pass
    else:
        # save the result.
        stats = profiler.result()
        # BUG FIX: pickle emits binary data, so the dump file must be
        # opened in binary mode ('wb'); text mode breaks on Python 3.
        with open(dump_filename, 'wb') as f:
            pickle.dump(stats, f, pickle_protocol)
        click.echo('To view statistics:')
        click.echo(' $ python -m profiling view ', nl=False)
        click.secho(dump_filename, underline=True)
@main.command('live-profile')
@live_profiler_params
@viewer_params
def live_profile(script, timer, interval, pickle_protocol, mono):
    """Profile a Python script continuously."""
    filename, code, globals_ = script
    sys.argv[:] = [filename]
    # The child runs the script under a profiling server; the parent
    # attaches a viewer over the socket pair.
    parent_sock, child_sock = socket.socketpair()
    pid = os.fork()
    # BUG FIX: removed a leftover debug `sys.stderr.write('hello')` which,
    # being placed after fork(), ran in BOTH processes and corrupted the
    # parent's terminal output.
    if pid == 0:
        # child
        devnull = os.open(os.devnull, os.O_RDWR)
        for f in [sys.stdin, sys.stdout, sys.stderr]:
            os.dup2(devnull, f.fileno())
        frame = sys._getframe()
        profiler = BackgroundProfiler(timer, frame, code)
        profiler.prepare()
        server_args = (noop, interval, pickle_protocol)
        server = SelectProfilingServer(None, profiler, *server_args)
        server.clients.add(child_sock)
        spawn_thread(server.connected, child_sock)
        try:
            exec_(code, globals_)
        finally:
            child_sock.close()
    else:
        # parent
        viewer, loop = make_viewer(mono)
        title = get_title(filename)
        client = ProfilingClient(viewer, loop.event_loop, parent_sock, title)
        client.start()
        try:
            loop.run()
        except KeyboardInterrupt:
            pass
        finally:
            parent_sock.close()
            # Stop the profiled child when the viewer exits.
            os.kill(pid, signal.SIGINT)
@main.command('remote-profile')
@live_profiler_params
@click.option('-b', '--bind', 'addr', type=Address(), default='127.0.0.1:8912',
              help='IP address to serve profiling results.')
@click.option('--start-signo', type=SignalNumber(), default=signal.SIGUSR1)
@click.option('--stop-signo', type=SignalNumber(), default=signal.SIGUSR2)
@click.option('-v', '--verbose', is_flag=True,
              help='Print profiling server logs.')
def remote_profile(script, timer, interval, pickle_protocol,
                   addr, start_signo, stop_signo, verbose):
    """Launch a server to profile continuously. The default address is
    127.0.0.1:8912.
    """
    filename, code, globals_ = script
    sys.argv[:] = [filename]
    # create listener.
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # SO_REUSEADDR allows quick rebinding after a previous run's socket
    # lingers in TIME_WAIT.
    listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listener.bind(addr)
    listener.listen(1)
    # be verbose or quiet.
    if verbose:
        log = lambda x: click.echo(click.style(' > ', fg='cyan') + x)
        bound_addr = listener.getsockname()
        log('Listening on {0}:{1} for profiling...'.format(*bound_addr))
    else:
        log = noop
    # start profiling server.
    frame = sys._getframe()
    profiler = BackgroundProfiler(timer, frame, code, start_signo, stop_signo)
    profiler.prepare()
    server_args = (log, interval, pickle_protocol)
    server = SelectProfilingServer(listener, profiler, *server_args)
    # The server runs on a daemon thread so it dies with the script.
    spawn_thread(server.serve_forever)
    # exec the script.
    try:
        exec_(code, globals_)
    except KeyboardInterrupt:
        pass
@main.command()
@click.argument('src', type=ViewerSource())
@viewer_params
def view(src, mono):
    """Inspect statistics by TUI view."""
    src_type, src_name = src
    title = get_title(src_name, src_type)
    viewer, loop = make_viewer(mono)
    if src_type == 'dump':
        # BUG FIX: dump files are written by pickle, so they must be read
        # in binary mode ('rb'); text mode breaks pickle.load on Python 3.
        with open(src_name, 'rb') as f:
            stats = pickle.load(f)
        time = datetime.fromtimestamp(os.path.getmtime(src_name))
        viewer.set_stats(stats, title, time)
    elif src_type in ('tcp', 'sock'):
        family = {'tcp': socket.AF_INET, 'sock': socket.AF_UNIX}[src_type]
        client = FailoverProfilingClient(
            viewer, loop.event_loop, src_name, family, title=title)
        client.start()
    try:
        loop.run()
    except KeyboardInterrupt:
        pass
# profiling clients for urwid
class ProfilingClient(object):
    """A client of profiling server which is running behind the `Urwid`_ event
    loop.

    .. _Urwid: http://urwid.org/
    """

    def __init__(self, viewer, event_loop, sock, title=None):
        self.viewer = viewer
        self.event_loop = event_loop
        self.sock = sock
        self.title = title

    def start(self):
        """Activate the viewer and watch the socket for incoming stats."""
        self.viewer.activate()
        self.event_loop.watch_file(self.sock.fileno(), self.handle)

    def handle(self):
        """Receive one stats payload; mark the viewer inactive on error."""
        self.viewer.activate()
        try:
            stats = recv_stats(self.sock)
        except socket.error as exc:
            self.erred(exc.errno)
        else:
            self.set_stats(stats)

    def erred(self, errno):
        self.viewer.inactivate()

    def set_stats(self, stats):
        self.viewer.set_stats(stats, self.title, datetime.now())
class FailoverProfilingClient(ProfilingClient):
    """A profiling client but it tries to reconnect constantly."""

    failover_interval = 1
    # Resolved once at class creation because disconnect()'s parameter
    # (kept for interface compatibility) shadows the errno module.
    _ECONNREFUSED = errno.ECONNREFUSED

    def __init__(self, viewer, event_loop, addr=None, family=socket.AF_INET,
                 title=None):
        self.addr = addr
        self.family = family
        base = super(FailoverProfilingClient, self)
        base.__init__(viewer, event_loop, None, title)

    def connect(self):
        """Attempt a non-blocking connect and register the socket watcher.

        BUG FIX: uses symbolic errno constants instead of Linux-specific
        numeric literals (115/2) — e.g. EINPROGRESS is 115 on Linux but
        36 on macOS, so the literals were not portable.
        """
        err = self.sock.connect_ex(self.addr)
        if err == 0:
            # connected immediately.
            pass
        elif err == errno.EINPROGRESS:
            # will be connected.
            pass
        elif err == errno.ENOENT:
            # no such socket file; retry once it may exist.
            self.create_connection(self.failover_interval)
            return
        else:
            raise ValueError('Unexpected socket errno: {0}'.format(err))
        self.event_loop.watch_file(self.sock.fileno(), self.handle)

    def disconnect(self, errno):
        self.event_loop.remove_watch_file(self.sock.fileno())
        self.sock.close()
        # try to reconnect; back off only when the server refused us.
        self.create_connection(
            self.failover_interval if errno == self._ECONNREFUSED else 0)

    def create_connection(self, delay=0):
        self.sock = socket.socket(self.family)
        self.sock.setblocking(0)
        self.event_loop.alarm(delay, self.connect)

    def start(self):
        self.create_connection()

    def erred(self, errno):
        super(FailoverProfilingClient, self).erred(errno)
        self.disconnect(errno)
# Allow direct execution (`python -m profiling`) to invoke the CLI.
if __name__ == '__main__':
    main()
| |
"""
Custom exception classes.
These vary in use case from "we needed a specific data structure layout in
exceptions used for message-passing" to simply "we needed to express an error
condition in a way easily told apart from other, truly unexpected errors".
"""
from traceback import format_exception
from pprint import pformat
from .util import six
from .util import encode_output
class CollectionNotFound(Exception):
    """Raised when a requested collection cannot be located."""

    def __init__(self, name, start):
        self.name = name    # name that was looked up
        self.start = start  # where the lookup started from
class Failure(Exception):
    """
    Raised (usually via a subclass) when a command execution fails.

    "Failure" may mean the command ran and the shell reported an unusual
    result (typically a non-zero exit code), or something else entirely,
    such as a ``sudo`` command aborted after a rejected password.

    Two attributes allow introspection of the problem:

    * ``result``: a `.Result` instance describing the command and, if it
      ran to completion, how it exited.
    * ``reason``: ``None`` when the command finished; otherwise an
      exception instance, e.g. a `WatcherError` from a `.StreamWatcher`.

    This class is only rarely raised directly; `.Runner.run` (and
    wrappers like `.Context.sudo`) normally raise a specific subclass
    such as `UnexpectedExit` or `AuthFailure`.
    """

    def __init__(self, result, reason=None):
        self.result = result
        self.reason = reason
def _tail(stream):
# TODO: make configurable
# TODO: preserve alternate line endings? Mehhhh
tail = "\n\n" + "\n".join(stream.splitlines()[-10:])
# NOTE: no trailing \n preservation; easier for below display if normalized
return tail
class UnexpectedExit(Failure):
    """
    A shell command ran to completion but exited with an unexpected exit code.

    Its string representation displays the following:

    - Command executed;
    - Exit code;
    - The last 10 lines of stdout, if it was hidden;
    - The last 10 lines of stderr, if it was hidden and non-empty (e.g.
      pty=False; when pty=True, stderr never happens.)
    """

    def __str__(self):
        # Streams the user already saw are not repeated in the message.
        already_printed = ' already printed'
        if 'stdout' not in self.result.hide:
            stdout = already_printed
        else:
            stdout = encode_output(
                _tail(self.result.stdout),
                self.result.encoding,
            )
        if self.result.pty:
            # PTY-backed runs merge stderr into stdout.
            stderr = " n/a (PTYs have no stderr)"
        else:
            if 'stderr' not in self.result.hide:
                stderr = already_printed
            else:
                stderr = encode_output(
                    _tail(self.result.stderr),
                    self.result.encoding,
                )
        command = self.result.command
        exited = self.result.exited
        template = """Encountered a bad command exit code!
Command: {!r}
Exit code: {}
Stdout:{}
Stderr:{}
"""
        return template.format(command, exited, stdout, stderr)

    def __repr__(self):
        # TODO: expand?
        template = "<{}: cmd={!r} exited={}>"
        return template.format(
            self.__class__.__name__,
            self.result.command,
            self.result.exited,
        )
class AuthFailure(Failure):
    """
    An authentication failure, e.g. due to an incorrect ``sudo`` password.

    .. note::
        `.Result` objects attached to these exceptions typically lack exit
        code information, since the command was never fully executed - the
        exception was raised instead.
    """

    def __init__(self, result, prompt):
        self.result = result
        self.prompt = prompt

    def __str__(self):
        template = "The password submitted to prompt {!r} was rejected."
        return template.format(self.prompt)
class ParseError(Exception):
    """
    An error arising from the parsing of command-line flags/arguments.

    Ambiguous input, invalid task names, invalid flags, etc.
    """

    def __init__(self, msg, context=None):
        super(ParseError, self).__init__(msg)
        # The parse context active when the error occurred, if any.
        self.context = context
class Exit(Exception):
    """
    Simple stand-in for SystemExit that lets us gracefully exit.

    Removes lots of scattered sys.exit calls, improves testability.
    """

    def __init__(self, code=0):
        # Process exit code to report; 0 means success.
        self.code = code
class PlatformError(Exception):
    """
    Raised when an illegal operation occurs for the current platform.

    E.g. Windows users trying to use functionality requiring the ``pty``
    module. Typically used to present a clearer error message to the user.
    """
    pass
class AmbiguousEnvVar(Exception):
    """Raised when loading env var config keys has an ambiguous target."""
    pass
class UncastableEnvVar(Exception):
    """
    Raised on attempted env var loads whose default values are too rich.

    E.g. trying to stuff ``MY_VAR="foo"`` into ``{'my_var': ['uh', 'oh']}``
    makes no sense until/if some sort of transform option is implemented.
    """
    pass
class UnknownFileType(Exception):
    """A config file of an unknown type was specified and cannot be loaded."""
    pass
def _printable_kwargs(kwargs):
    """
    Return print-friendly version of a thread-related ``kwargs`` dict.

    Extra care is taken with ``args`` members which are very long iterables -
    those need truncating to be useful.
    """
    printable = {}
    for key, value in six.iteritems(kwargs):
        if key != 'args':
            printable[key] = value
            continue
        truncated = []
        for arg in value:
            if hasattr(arg, '__len__') and len(arg) > 10:
                note = "<... remainder truncated during error display ...>"
                truncated.append(arg[:10] + [note])
            else:
                truncated.append(arg)
        printable[key] = truncated
    return printable
class ThreadException(Exception):
    """
    One or more exceptions were raised within background threads.

    The real underlying exceptions are stored in the `exceptions` attribute;
    see its documentation for data structure details.

    .. note::
        Threads which did not encounter an exception, do not contribute to
        this exception object and thus are not present inside `exceptions`.
    """

    #: A tuple of `ExceptionWrappers <invoke.util.ExceptionWrapper>` containing
    #: the initial thread constructor kwargs (because `threading.Thread`
    #: subclasses should always be called with kwargs) and the caught exception
    #: for that thread as seen by `sys.exc_info` (so: type, value, traceback).
    #:
    #: .. note::
    #:     The ordering of this attribute is not well-defined.
    #:
    #: .. note::
    #:     Thread kwargs which appear to be very long (e.g. IO
    #:     buffers) will be truncated when printed, to avoid huge
    #:     unreadable error display.
    exceptions = tuple()

    def __init__(self, exceptions):
        # Snapshot into a tuple so later mutation of the input is harmless.
        self.exceptions = tuple(exceptions)

    def __str__(self):
        details = []
        for x in self.exceptions:
            # Build useful display
            detail = "Thread args: {}\n\n{}"
            details.append(detail.format(
                pformat(_printable_kwargs(x.kwargs)),
                "\n".join(format_exception(x.type, x.value, x.traceback)),
            ))
        args = (
            len(self.exceptions),
            ", ".join(x.type.__name__ for x in self.exceptions),
            "\n\n".join(details),
        )
        return """
Saw {} exceptions within threads ({}):
{}
""".format(*args)
class WatcherError(Exception):
    """
    Generic parent exception class for `.StreamWatcher`-related errors.

    Typically one of these indicates a `.StreamWatcher` noticed something
    anomalous in an output stream, such as an authentication response
    failure.

    `.Runner` catches these and attaches them to `.Failure` exceptions so
    they can be referenced by intermediate code and/or act as extra info
    for end users.
    """
    pass
class ResponseNotAccepted(WatcherError):
    """
    A responder/watcher class noticed a 'bad' response to its submission.

    Mostly used by `.FailingResponder` and subclasses, e.g. "oh dear I
    autosubmitted a sudo password and it was incorrect."
    """
    pass
| |
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
This script can be used to make the output from
apache::thrift::profile_print_info() more human-readable.
It translates each executable file name and address into the corresponding
source file name, line number, and function name. By default, it also
demangles C++ symbol names.
"""
import optparse
import os
import re
import subprocess
import sys
class AddressInfo(object):
    """
    A class to store information about a particular address in an object file.
    """

    def __init__(self, obj_file, address):
        self.objectFile = obj_file
        self.address = address
        # Filled in later by translate_file_addresses().
        self.sourceFile = None
        self.sourceLine = None
        # BUG FIX: this was misspelled 'funtion'; the rest of the script
        # reads/writes 'function', so the attribute was never initialized
        # and Entry.write() could raise AttributeError whenever addr2line
        # was skipped (e.g. missing object file).
        self.function = None
# Global cache mapping object filename -> {address string -> AddressInfo}.
g_addrs_by_filename = {}
def get_address(filename, address):
    """
    Retrieve an AddressInfo object for the specified object file and address.

    Keeps a global list of AddressInfo objects. Two calls to get_address()
    with the same filename and address will always return the same
    AddressInfo object.
    """
    per_file = g_addrs_by_filename.setdefault(filename, {})
    info = per_file.get(address)
    if info is None:
        info = AddressInfo(filename, address)
        per_file[address] = info
    return info
def translate_file_addresses(filename, addresses, options):
    """
    Use addr2line to look up information for the specified addresses.

    All of the addresses must belong to the same object file.
    """
    # Do nothing if we can't find the file
    if not os.path.isfile(filename):
        return
    args = ['addr2line']
    if options.printFunctions:
        # -f makes addr2line print the function name on a line of its own
        # before each file:line answer.
        args.append('-f')
    args.extend(['-e', filename])
    proc = subprocess.Popen(args, stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    # NOTE(review): addresses are written and answers read one at a time on
    # blocking pipes; this relies on addr2line answering line-by-line and on
    # Python 2 str-based pipes (no bytes/str conversion) — confirm before
    # porting to Python 3 or large batches.
    for address in addresses:
        assert address.objectFile == filename
        proc.stdin.write(address.address + '\n')
        if options.printFunctions:
            function = proc.stdout.readline()
            function = function.strip()
            if not function:
                raise Exception('unexpected EOF from addr2line')
            address.function = function
        file_and_line = proc.stdout.readline()
        file_and_line = file_and_line.strip()
        if not file_and_line:
            raise Exception('unexpected EOF from addr2line')
        # addr2line output is of the form <file>:<line>.
        idx = file_and_line.rfind(':')
        if idx < 0:
            msg = 'expected file and line number from addr2line; got %r' % \
                (file_and_line,)
            msg += '\nfile=%r, address=%r' % (filename, address.address)
            raise Exception(msg)
        address.sourceFile = file_and_line[:idx]
        address.sourceLine = file_and_line[idx+1:]
    # Close stdin / drain remaining output before reaping the child.
    (remaining_out, cmd_err) = proc.communicate()
    retcode = proc.wait()
    if retcode != 0:
        raise subprocess.CalledProcessError(retcode, args)
def lookup_addresses(options):
    """
    Resolve source-file information for every AddressInfo recorded so far
    in the global registry, invoking addr2line once per object file via
    translate_file_addresses().
    """
    # No `global` needed for a read; also avoid shadowing the builtin `file`.
    for obj_file, by_address in g_addrs_by_filename.items():
        translate_file_addresses(obj_file, by_address.values(), options)
class Entry(object):
    """
    One entry of the thrift profile output: a header line plus the
    backtrace (a list of AddressInfo objects) collected under it.
    """
    def __init__(self, header):
        self.header = header
        self.bt = []
    def addFrame(self, filename, address):
        # libc may render a frame as "<filename>(<function>+<offset>)";
        # keep only the path portion before the last '('.
        path = filename.rsplit('(', 1)[0]
        self.bt.append(get_address(path, address))
    def write(self, f, options):
        f.write(self.header)
        f.write('\n')
        for n, frame in enumerate(self.bt):
            f.write(' #%-2d %s:%s\n' % (n, frame.sourceFile,
                                        frame.sourceLine))
            if options.printFunctions:
                f.write(' %s\n' % (frame.function,) if frame.function
                        else ' ??\n')
def process_file(in_file, out_file, options):
    """
    Read thrift profile output from the specified input file, and print
    prettier information on the output file.
    """
    #
    # A naive approach would be to read the input line by line,
    # and each time we come to a filename and address, pass it to addr2line
    # and print the resulting information. Unfortunately, addr2line can be
    # quite slow, especially with large executables.
    #
    # This approach is much faster. We read in all of the input, storing
    # the addresses in each file that need to be resolved. We then call
    # addr2line just once for each file. This is much faster than calling
    # addr2line once per address.
    #
    # Input line shapes recognized below:
    #   T_VIRTUAL_CALL: <N> calls on <type>:
    #   T_GENERIC_PROTOCOL: <N> calls to <type1> with a <type2>:
    #   #<frame> <file>(<symbol>) [0x<addr>]
    virt_call_regex = re.compile(r'^\s*T_VIRTUAL_CALL: (\d+) calls on (.*):$')
    gen_prot_regex = re.compile(
        r'^\s*T_GENERIC_PROTOCOL: (\d+) calls to (.*) with a (.*):$')
    bt_regex = re.compile(r'^\s*#(\d+)\s*(.*) \[(0x[0-9A-Za-z]+)\]$')
    # Parse all of the input, and store it as Entry objects
    entries = []
    # Entry currently accumulating backtrace frames (None before the first
    # header line is seen).
    current_entry = None
    while True:
        line = in_file.readline()
        if not line:
            break
        # Skip blank lines and the overall section banner.
        if line == '\n' or line.startswith('Thrift virtual call info:'):
            continue
        virt_call_match = virt_call_regex.match(line)
        if virt_call_match:
            num_calls = int(virt_call_match.group(1))
            type_name = virt_call_match.group(2)
            if options.cxxfilt:
                # Type names reported by typeid() are internal names.
                # By default, c++filt doesn't demangle internal type names.
                # (Some versions of c++filt have a "-t" option to enable this.
                # Other versions don't have this argument, but demangle type
                # names passed as an argument, but not on stdin.)
                #
                # If the output is being filtered through c++filt, prepend
                # "_Z" to the type name to make it look like an external name.
                type_name = '_Z' + type_name
            header = 'T_VIRTUAL_CALL: %d calls on "%s"' % \
                (num_calls, type_name)
            if current_entry is not None:
                entries.append(current_entry)
            current_entry = Entry(header)
            continue
        gen_prot_match = gen_prot_regex.match(line)
        if gen_prot_match:
            num_calls = int(gen_prot_match.group(1))
            type_name1 = gen_prot_match.group(2)
            type_name2 = gen_prot_match.group(3)
            if options.cxxfilt:
                # Same c++filt workaround as above: make names look external.
                type_name1 = '_Z' + type_name1
                type_name2 = '_Z' + type_name2
            header = 'T_GENERIC_PROTOCOL: %d calls to "%s" with a "%s"' % \
                (num_calls, type_name1, type_name2)
            if current_entry is not None:
                entries.append(current_entry)
            current_entry = Entry(header)
            continue
        bt_match = bt_regex.match(line)
        if bt_match:
            if current_entry is None:
                raise Exception('found backtrace frame before entry header')
            # frame_num (group 1) is parsed but unused: frame order is
            # implied by the order the lines appear in the input.
            frame_num = int(bt_match.group(1))
            filename = bt_match.group(2)
            address = bt_match.group(3)
            current_entry.addFrame(filename, address)
            continue
        raise Exception('unexpected line in input: %r' % (line,))
    # Add the last entry we were processing to the list
    if current_entry is not None:
        entries.append(current_entry)
        current_entry = None
    # Look up all of the addresses
    lookup_addresses(options)
    # Print out the entries, now that the information has been translated
    for entry in entries:
        entry.write(out_file, options)
        out_file.write('\n')
def start_cppfilt():
    """
    Pipe this script's subsequent stdout through c++filt.

    Forks: the CHILD continues normal processing with its stdout redirected
    into a pipe, while the PARENT replaces itself (execvp, never returns)
    with c++filt reading that pipe on stdin and writing demangled output to
    the original stdout.
    """
    (read_pipe, write_pipe) = os.pipe()
    # Fork. Run c++filt in the parent process,
    # and then continue normal processing in the child.
    pid = os.fork()
    if pid == 0:
        # child: all further stdout goes into the pipe; resume the caller.
        os.dup2(write_pipe, sys.stdout.fileno())
        os.close(read_pipe)
        os.close(write_pipe)
        return
    else:
        # parent: read the pipe on stdin and become c++filt.
        os.dup2(read_pipe, sys.stdin.fileno())
        os.close(read_pipe)
        os.close(write_pipe)
        cmd = ['c++filt']
        # execvp only returns on failure (it raises OSError in that case).
        os.execvp(cmd[0], cmd)
def main(argv):
    """
    Entry point: parse options, then pretty-print thrift profile output
    from the given file (or stdin) onto stdout.

    Returns a process exit status (0 on success, 1 on usage error).
    """
    parser = optparse.OptionParser(usage='%prog [options] [<file>]')
    parser.add_option('--no-functions', help='Don\'t print function names',
                      dest='printFunctions', action='store_false',
                      default=True)
    parser.add_option('--no-demangle',
                      help='Don\'t demangle C++ symbol names',
                      dest='cxxfilt', action='store_false',
                      default=True)
    (options, args) = parser.parse_args(argv[1:])
    num_args = len(args)
    if num_args == 0:
        in_file = sys.stdin
    elif num_args == 1:
        # BUG FIX: open the first positional argument (args[0]), not
        # argv[1] -- argv[1] may be an option flag such as --no-demangle.
        in_file = open(args[0], 'r')
    else:
        parser.print_usage(sys.stderr)
        # `print >> sys.stderr` is Python-2-only syntax; write() works on
        # both Python 2 and 3.
        sys.stderr.write('trailing arguments: %s\n' % (' '.join(args[1:]),))
        return 1
    if options.cxxfilt:
        start_cppfilt()
    process_file(in_file, sys.stdout, options)
    return 0
if __name__ == '__main__':
    rc = main(sys.argv)
    sys.exit(rc)
| |
#
# The MIT License (MIT)
#
# Copyright (c) 2018 Rafid Khalid Al-Humaimidi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
"""
Django settings for HadithHouseApi project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import socket
import sys
from enum import Enum
import time
from django.utils.deprecation import MiddlewareMixin
from HadithHouseApi.server_settings import get_db_settings, get_debug, \
get_allowed_hosts
# Flags describing how manage.py was invoked; several settings below
# (database selection, log file names) depend on them.
test_mode = 'test' in sys.argv
collectstatic_mode = 'collectstatic' in sys.argv
migrate_mode = 'migrate' in sys.argv
# When True, the site runs self-contained (local sqlite DB, offline auth).
OFFLINE_MODE = False
class JavaScriptFrameworkMode(Enum):
    """
    Specifies the JS framework to use. This is temporary while I try different
    JS frameworks to replace AngularJS.
    """
    ANGULARJS = 0
    REACTJS = 1
    ANGULAR = 2
    def __int__(self):
        # Allow int(mode) so callers can compare the mode numerically.
        return self.value
# The framework currently in use.
JS_FRAMEWORK_MODE = JavaScriptFrameworkMode.ANGULARJS
def is_test_mode():
    """
    Determines whether the application is currently running in test mode,
    i.e. python manage.py test.
    :return: True or False.
    """
    return test_mode
def is_offline_mode():
    """
    Determines whether the application is running in offline mode. In this
    mode, the website uses locally fetched JS libraries and uses a sqlite
    local database.
    :return: True or False.
    """
    return OFFLINE_MODE
def is_collectstatic_mode():
    """
    Determines whether the code is being executed during a call to
    "python manage.py collectstatic".
    :return: True or False.
    """
    return collectstatic_mode
def is_migrate_mode():
    """
    Determines whether the code is being executed during a call to
    "python manage.py migrate".
    :return: True or False.
    """
    return migrate_mode
if is_test_mode() or is_collectstatic_mode():
    # We are running in test mode or collecting static files. Hence, avoid using
    # the real log directory to avoid breaking Jenkins build, as there is no
    # log directory on Jenkins.
    import tempfile
    def get_log_dir():
        # Fall back to the system temp directory in CI-like environments.
        return tempfile.gettempdir()
else:
    from HadithHouseApi.server_settings import get_log_dir
# Project root (two levels above this settings file).
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is hard-coded in the repository, so it is not
# actually secret; it should be loaded from the environment or from
# server_settings like the other deployment-specific values.
SECRET_KEY = '(8rs1@c-&_9z(8ur%ydax^gf-p5)58y%94huyaa2&p1b-%1uwj'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = get_debug()
# NOTE(review): this assigns the DEBUG *boolean*, not a log level name --
# presumably unused as a real level; verify before relying on it.
DJANGO_LOG_LEVEL = DEBUG
ALLOWED_HOSTS = get_allowed_hosts()
# Host FQDNs used by get_environment() to classify the current machine.
PRODUCTION_HOSTS = (
    'www.hadithhouse.net',
)
DEVELOPMENT_HOSTS = (
    'dev.hadithhouse.net',
)
def get_environment():
    """
    Classify the current machine as 'production', 'development' or 'local'
    based on its fully-qualified domain name.
    """
    fqdn = socket.getfqdn().lower()
    if fqdn in PRODUCTION_HOSTS:
        return 'production'
    if fqdn in DEVELOPMENT_HOSTS:
        return 'development'
    return 'local'
SERVER_EMAIL = 'noreply@hadithhouse.net'
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'django_filters',
    'corsheaders',
    'hadiths',
)
MIDDLEWARE = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    # CorsMiddleware must run before CommonMiddleware so CORS headers are
    # added to responses.
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'HadithHouseApi.urls'
APPEND_SLASH = False
WSGI_APPLICATION = 'HadithHouseApi.wsgi.application'
# Only origins matching the regex below may make cross-origin requests.
CORS_ORIGIN_ALLOW_ALL = False
# BUG FIX: use a raw string for the regex. In a normal string '\w' and '\.'
# are invalid escape sequences: a DeprecationWarning on modern Python that
# is slated to become an error.
CORS_ORIGIN_REGEX_WHITELIST = [
    r'^(https?://)?(\w+\.)?hadithhouse\.net(:(8080|8000))?/?(.+)?$'
]
# Recipients of error emails (see the mail_admins handler in LOGGING).
ADMINS = (
    ('Rafid Al-Humaimidi', 'admin@hadithhouse.net'),
)
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
# Tests and offline development use throwaway local sqlite files; real
# deployments load their database settings from server_settings.
if is_test_mode():
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': os.path.join(BASE_DIR, 'HadithHouse-Test.db'),
        }
    }
elif is_offline_mode():
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': os.path.join(BASE_DIR, 'HadithHouse-OfflineMode.db'),
        }
    }
else:
    DATABASES = get_db_settings()
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            # insert your TEMPLATE_DIRS here
            os.path.join(BASE_DIR, 'templates'),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                # Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
                # list if you haven't customized them:
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.debug',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
            ],
            # Template debug info follows the deployment-level DEBUG flag.
            'debug': get_debug()
        },
    },
]
if is_test_mode() or is_collectstatic_mode() or is_migrate_mode():
    # To separate log files generated during test/db-migration/etc from real
    # log files, we use different names. In addition to the benefit of
    # isolation, the main benefit of this is to avoid permission-related
    # errors caused by trying to access log files with the same
    # name but owned by a different account. For example, during deployment,
    # a log file named 'django.log' would be produced and owned by 'deployer'.
    # Now when apache2 starts and try to access the same file, a permission
    # error is generated. Thus, it is essential to name the files produced by
    # these two scenarios differently.
    mode = 'test' if is_test_mode() \
        else 'migrate' if is_migrate_mode() \
        else 'collectstatic' if is_collectstatic_mode() \
        else 'other'
    # Millisecond timestamp keeps repeated runs from clashing with each other.
    timestamp = str(int(time.time() * 1000))
    django_log_filename = os.path.join(
        get_log_dir(), 'django.%s.%s.log' % (mode, timestamp))
    django_requests_log_filename = os.path.join(
        get_log_dir(), 'django.%s.%s.request.log' % (mode, timestamp))
    django_db_backends_log_file = os.path.join(
        get_log_dir(), 'django.%s.%s.db.backends.log' % (mode, timestamp))
else:
    django_log_filename = os.path.join(
        get_log_dir(), 'django.log')
    django_requests_log_filename = os.path.join(
        get_log_dir(), 'django.request.log')
    django_db_backends_log_file = os.path.join(
        get_log_dir(), 'django.db.backends.log')
# Daily-rotating file logging (30 days kept): one file for general Django
# messages, one for request errors (also emailed to ADMINS), and one for
# database backend output.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
        },
        'simple': {
            'format': '%(levelname)s %(asctime)s %(message)s'
        },
    },
    'filters': {
        'require_debug_true': {
            '()': 'django.utils.log.RequireDebugTrue',
        },
    },
    'handlers': {
        'django_log_file': {
            'level': 'DEBUG',
            'class': 'logging.handlers.TimedRotatingFileHandler',
            'filename': django_log_filename,
            'when': 'D',
            'interval': 1,
            'backupCount': 30,
            'utc': True,
            'formatter': 'simple',
            'encoding': 'utf-8'
        },
        'django_requests_log_file': {
            'level': 'DEBUG',
            'class': 'logging.handlers.TimedRotatingFileHandler',
            'filename': django_requests_log_filename,
            'when': 'D',
            'interval': 1,
            'backupCount': 30,
            'utc': True,
            'formatter': 'simple',
            'encoding': 'utf-8'
        },
        'django_db_backends_log_file': {
            'level': 'DEBUG',
            'class': 'logging.handlers.TimedRotatingFileHandler',
            'filename': django_db_backends_log_file,
            'when': 'D',
            'interval': 1,
            'backupCount': 30,
            'utc': True,
            'formatter': 'simple',
            'encoding': 'utf-8'
        },
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django': {
            'handlers': ['django_log_file'],
            'propagate': True,
        },
        'django.request': {
            'handlers': ['django_requests_log_file', 'mail_admins'],
            'level': 'ERROR',
            'propagate': False,
        },
        'django.db.backends': {
            'level': 'DEBUG',
            'handlers': ['django_db_backends_log_file'],
            'propagate': False,
        },
    }
}
# Django REST Framework defaults: filtering, pagination (10 per page),
# Facebook-based authentication (per the class names in hadiths.auth), and
# a project-specific exception handler.
REST_FRAMEWORK = {
    'DEFAULT_FILTER_BACKENDS': (
        'django_filters.rest_framework.backends.DjangoFilterBackend',),
    'DEFAULT_PAGINATION_CLASS': 'hadiths.pagination.DefaultPagination',
    'PAGE_SIZE': 10,
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'hadiths.auth.FacebookAuthentication',
    ),
    'EXCEPTION_HANDLER': 'HadithHouseApi.exception_handler.hadithhouse_exception_handler'
}
if OFFLINE_MODE:
    # Offline development swaps in the offline auth class.
    REST_FRAMEWORK['DEFAULT_AUTHENTICATION_CLASSES'] = (
        'hadiths.auth.FacebookOfflineAuthentication',)
| |
"""Python Enumerations"""
import sys as _sys
__all__ = ['Enum', 'IntEnum', 'unique']
# Backport (enum34-style) version, not the Python version.
version = 1, 1, 3
# Interpreter version as a float, e.g. 2.7 or 3.6; used throughout for
# version-specific behaviour.
# NOTE(review): float('3.10') == 3.1, so this scheme misorders
# Python >= 3.10 -- confirm the supported interpreter range.
pyver = float('%s.%s' % _sys.version_info[:2])
try:
    any
except NameError:
    # Python 2.4 compatibility: any() was added in 2.5.
    def any(iterable):
        for element in iterable:
            if element:
                return True
        return False
try:
    from collections import OrderedDict
except ImportError:
    # Python < 2.7: fall back to a plain dict for _member_map_.
    OrderedDict = None
try:
    basestring
except NameError:
    # In Python 2 basestring is the ancestor of both str and unicode
    # in Python 3 it's just str, but was missing in 3.1
    basestring = str
try:
    unicode
except NameError:
    # In Python 3 unicode no longer exists (it's just str)
    unicode = str
class _RouteClassAttributeToGetattr(object):
    """Route attribute access on a class to __getattr__.
    This is a descriptor, used to define attributes that act differently when
    accessed through an instance and through a class.  Instance access remains
    normal, but access to an attribute through a class will be routed to the
    class's __getattr__ method; this is done by raising AttributeError.
    """
    def __init__(self, fget=None):
        # fget: callable computing the instance-level value (property-like).
        self.fget = fget
    def __get__(self, instance, ownerclass=None):
        if instance is None:
            # Class-level access: raising here defers to the metaclass's
            # __getattr__ (EnumMeta.__getattr__).
            raise AttributeError()
        return self.fget(instance)
    def __set__(self, instance, value):
        raise AttributeError("can't set attribute")
    def __delete__(self, instance):
        raise AttributeError("can't delete attribute")
def _is_descriptor(obj):
"""Returns True if obj is a descriptor, False otherwise."""
return (
hasattr(obj, '__get__') or
hasattr(obj, '__set__') or
hasattr(obj, '__delete__'))
def _is_dunder(name):
"""Returns True if a __dunder__ name, False otherwise."""
return (name[:2] == name[-2:] == '__' and
name[2:3] != '_' and
name[-3:-2] != '_' and
len(name) > 4)
def _is_sunder(name):
"""Returns True if a _sunder_ name, False otherwise."""
return (name[0] == name[-1] == '_' and
name[1:2] != '_' and
name[-2:-1] != '_' and
len(name) > 2)
def _make_class_unpicklable(cls):
"""Make the given class un-picklable."""
def _break_on_call_reduce(self, protocol=None):
raise TypeError('%r cannot be pickled' % self)
cls.__reduce_ex__ = _break_on_call_reduce
cls.__module__ = '<unknown>'
class _EnumDict(dict):
    """Track enum member order and ensure member names are not reused.
    EnumMeta will use the names found in self._member_names as the
    enumeration member names.
    """
    def __init__(self):
        super(_EnumDict, self).__init__()
        # Names assigned in definition order; consumed by EnumMeta.__new__.
        self._member_names = []
    def __setitem__(self, key, value):
        """Changes anything not dundered or not a descriptor.
        If a descriptor is added with the same name as an enum member, the name
        is removed from _member_names (this may leave a hole in the numerical
        sequence of values).
        If an enum member name is used twice, an error is raised; duplicate
        values are not checked for.
        Single underscore (sunder) names are reserved.
        Note: in 3.x __order__ is simply discarded as a not necessary piece
        leftover from 2.x
        """
        if pyver >= 3.0 and key == '__order__':
            return
        if _is_sunder(key):
            raise ValueError('_names_ are reserved for future Enum use')
        elif _is_dunder(key):
            # Dunders (including methods) pass straight through.
            pass
        elif key in self._member_names:
            # descriptor overwriting an enum?
            raise TypeError('Attempted to reuse key: %r' % key)
        elif not _is_descriptor(value):
            if key in self:
                # enum overwriting a descriptor?
                raise TypeError('Key already defined as: %r' % self[key])
            self._member_names.append(key)
        super(_EnumDict, self).__setitem__(key, value)
# Dummy value for Enum as EnumMeta explicitly checks for it, but of course
# until EnumMeta finishes running the first time the Enum class doesn't
# exist. This is also why there are checks in EnumMeta like
# `if Enum is not None`.
Enum = None
class EnumMeta(type):
    """Metaclass for Enum"""
    @classmethod
    def __prepare__(metacls, cls, bases):
        # Use _EnumDict as the class namespace so member definition order
        # and name reuse are tracked (Python 3 only honours __prepare__).
        return _EnumDict()
    def __new__(metacls, cls, bases, classdict):
        # an Enum class is final once enumeration items have been defined; it
        # cannot be mixed with other types (int, float, etc.) if it has an
        # inherited __new__ unless a new __new__ is defined (or the resulting
        # class will fail).
        # On Python 2, __prepare__ is not called, so replay a plain dict
        # through _EnumDict to get the same bookkeeping.
        if type(classdict) is dict:
            original_dict = classdict
            classdict = _EnumDict()
            for k, v in original_dict.items():
                classdict[k] = v
        member_type, first_enum = metacls._get_mixins_(bases)
        __new__, save_new, use_args = metacls._find_new_(classdict, member_type,
                                                         first_enum)
        # save enum items into separate mapping so they don't get baked into
        # the new class
        members = dict((k, classdict[k]) for k in classdict._member_names)
        for name in classdict._member_names:
            del classdict[name]
        # py2 support for definition order
        __order__ = classdict.get('__order__')
        if __order__ is None:
            if pyver < 3.0:
                # Without __order__ on py2, fall back to sorting by value
                # (or by name when the values aren't orderable).
                try:
                    __order__ = [name for (name, value) in sorted(members.items(), key=lambda item: item[1])]
                except TypeError:
                    __order__ = [name for name in sorted(members.keys())]
            else:
                __order__ = classdict._member_names
        else:
            del classdict['__order__']
            if pyver < 3.0:
                __order__ = __order__.replace(',', ' ').split()
            # Names missing from a user-supplied __order__ are aliases.
            aliases = [name for name in members if name not in __order__]
            __order__ += aliases
        # check for illegal enum names (any others?)
        invalid_names = set(members) & set(['mro'])
        if invalid_names:
            raise ValueError('Invalid enum member name(s): %s' % (
                ', '.join(invalid_names), ))
        # save attributes from super classes so we know if we can take
        # the shortcut of storing members in the class dict
        base_attributes = set([a for b in bases for a in b.__dict__])
        # create our new Enum type
        enum_class = super(EnumMeta, metacls).__new__(metacls, cls, bases, classdict)
        enum_class._member_names_ = []  # names in definition order
        if OrderedDict is not None:
            enum_class._member_map_ = OrderedDict()
        else:
            enum_class._member_map_ = {}  # name->value map
        enum_class._member_type_ = member_type
        # Reverse value->name map for hashable values.
        enum_class._value2member_map_ = {}
        # instantiate them, checking for duplicates as we go
        # we instantiate first instead of checking for duplicates first in case
        # a custom __new__ is doing something funky with the values -- such as
        # auto-numbering ;)
        if __new__ is None:
            __new__ = enum_class.__new__
        for member_name in __order__:
            value = members[member_name]
            if not isinstance(value, tuple):
                args = (value, )
            else:
                args = value
            if member_type is tuple:  # special case for tuple enums
                args = (args, )       # wrap it one more time
            if not use_args or not args:
                enum_member = __new__(enum_class)
                if not hasattr(enum_member, '_value_'):
                    enum_member._value_ = value
            else:
                enum_member = __new__(enum_class, *args)
                if not hasattr(enum_member, '_value_'):
                    enum_member._value_ = member_type(*args)
            value = enum_member._value_
            enum_member._name_ = member_name
            enum_member.__objclass__ = enum_class
            enum_member.__init__(*args)
            # If another member with the same value was already defined, the
            # new member becomes an alias to the existing one.
            for name, canonical_member in enum_class._member_map_.items():
                if canonical_member.value == enum_member._value_:
                    enum_member = canonical_member
                    break
            else:
                # Aliases don't appear in member names (only in __members__).
                enum_class._member_names_.append(member_name)
            # performance boost for any member that would not shadow
            # a DynamicClassAttribute (aka _RouteClassAttributeToGetattr)
            if member_name not in base_attributes:
                setattr(enum_class, member_name, enum_member)
            # now add to _member_map_
            enum_class._member_map_[member_name] = enum_member
            try:
                # This may fail if value is not hashable. We can't add the value
                # to the map, and by-value lookups for this value will be
                # linear.
                enum_class._value2member_map_[value] = enum_member
            except TypeError:
                pass
        # If a custom type is mixed into the Enum, and it does not know how
        # to pickle itself, pickle.dumps will succeed but pickle.loads will
        # fail.  Rather than have the error show up later and possibly far
        # from the source, sabotage the pickle protocol for this class so
        # that pickle.dumps also fails.
        #
        # However, if the new class implements its own __reduce_ex__, do not
        # sabotage -- it's on them to make sure it works correctly.  We use
        # __reduce_ex__ instead of any of the others as it is preferred by
        # pickle over __reduce__, and it handles all pickle protocols.
        unpicklable = False
        if '__reduce_ex__' not in classdict:
            if member_type is not object:
                methods = ('__getnewargs_ex__', '__getnewargs__',
                           '__reduce_ex__', '__reduce__')
                if not any(m in member_type.__dict__ for m in methods):
                    _make_class_unpicklable(enum_class)
                    unpicklable = True
        # double check that repr and friends are not the mixin's or various
        # things break (such as pickle)
        for name in ('__repr__', '__str__', '__format__', '__reduce_ex__'):
            class_method = getattr(enum_class, name)
            obj_method = getattr(member_type, name, None)
            enum_method = getattr(first_enum, name, None)
            if name not in classdict and class_method is not enum_method:
                if name == '__reduce_ex__' and unpicklable:
                    continue
                setattr(enum_class, name, enum_method)
        # method resolution and int's are not playing nice
        # Python's less than 2.6 use __cmp__
        if pyver < 2.6:
            if issubclass(enum_class, int):
                setattr(enum_class, '__cmp__', getattr(int, '__cmp__'))
        elif pyver < 3.0:
            if issubclass(enum_class, int):
                for method in (
                        '__le__',
                        '__lt__',
                        '__gt__',
                        '__ge__',
                        '__eq__',
                        '__ne__',
                        '__hash__',
                        ):
                    setattr(enum_class, method, getattr(int, method))
        # replace any other __new__ with our own (as long as Enum is not None,
        # anyway) -- again, this is to support pickle
        if Enum is not None:
            # if the user defined their own __new__, save it before it gets
            # clobbered in case they subclass later
            if save_new:
                setattr(enum_class, '__member_new__', enum_class.__dict__['__new__'])
            setattr(enum_class, '__new__', Enum.__dict__['__new__'])
        return enum_class
    def __bool__(cls):
        """
        classes/types should always be True.
        """
        return True
    def __call__(cls, value, names=None, module=None, type=None, start=1):
        """Either returns an existing member, or creates a new enum class.
        This method is used both when an enum class is given a value to match
        to an enumeration member (i.e. Color(3)) and for the functional API
        (i.e. Color = Enum('Color', names='red green blue')).
        When used for the functional API: `module`, if set, will be stored in
        the new class' __module__ attribute; `type`, if set, will be mixed in
        as the first base class.
        Note: if `module` is not set this routine will attempt to discover the
        calling module by walking the frame stack; if this is unsuccessful
        the resulting class will not be pickleable.
        """
        if names is None:  # simple value lookup
            return cls.__new__(cls, value)
        # otherwise, functional API: we're creating a new Enum type
        return cls._create_(value, names, module=module, type=type, start=start)
    def __contains__(cls, member):
        # Only actual member objects (not raw values) count as contained.
        return isinstance(member, cls) and member.name in cls._member_map_
    def __delattr__(cls, attr):
        # nicer error message when someone tries to delete an attribute
        # (see issue19025).
        if attr in cls._member_map_:
            raise AttributeError(
                "%s: cannot delete Enum member." % cls.__name__)
        super(EnumMeta, cls).__delattr__(attr)
    def __dir__(self):
        # NOTE(review): parameter is named `self` here while the other
        # methods use `cls`; it is still the enum class.
        return (['__class__', '__doc__', '__members__', '__module__'] +
                self._member_names_)
    @property
    def __members__(cls):
        """Returns a mapping of member name->value.
        This mapping lists all enum members, including aliases. Note that this
        is a copy of the internal mapping.
        """
        return cls._member_map_.copy()
    def __getattr__(cls, name):
        """Return the enum member matching `name`
        We use __getattr__ instead of descriptors or inserting into the enum
        class' __dict__ in order to support `name` and `value` being both
        properties for enum members (which live in the class' __dict__) and
        enum members themselves.
        """
        if _is_dunder(name):
            raise AttributeError(name)
        try:
            return cls._member_map_[name]
        except KeyError:
            raise AttributeError(name)
    def __getitem__(cls, name):
        # Color['red'] -- raises KeyError for unknown names.
        return cls._member_map_[name]
    def __iter__(cls):
        # Iterate canonical members (aliases excluded) in definition order.
        return (cls._member_map_[name] for name in cls._member_names_)
    def __reversed__(cls):
        return (cls._member_map_[name] for name in reversed(cls._member_names_))
    def __len__(cls):
        # Number of canonical members (aliases excluded).
        return len(cls._member_names_)
    # Python 2 spelling of __bool__.
    __nonzero__ = __bool__
    def __repr__(cls):
        return "<enum %r>" % cls.__name__
    def __setattr__(cls, name, value):
        """Block attempts to reassign Enum members.
        A simple assignment to the class namespace only changes one of the
        several possible ways to get an Enum member from the Enum class,
        resulting in an inconsistent Enumeration.
        """
        member_map = cls.__dict__.get('_member_map_', {})
        if name in member_map:
            raise AttributeError('Cannot reassign members.')
        super(EnumMeta, cls).__setattr__(name, value)
    def _create_(cls, class_name, names=None, module=None, type=None, start=1):
        """Convenience method to create a new Enum class.
        `names` can be:
        * A string containing member names, separated either with spaces or
          commas. Values are auto-numbered from 1.
        * An iterable of member names. Values are auto-numbered from 1.
        * An iterable of (member name, value) pairs.
        * A mapping of member name -> value.
        """
        if pyver < 3.0:
            # if class_name is unicode, attempt a conversion to ASCII
            if isinstance(class_name, unicode):
                try:
                    class_name = class_name.encode('ascii')
                except UnicodeEncodeError:
                    raise TypeError('%r is not representable in ASCII' % class_name)
        metacls = cls.__class__
        if type is None:
            bases = (cls, )
        else:
            bases = (type, cls)
        classdict = metacls.__prepare__(class_name, bases)
        __order__ = []
        # special processing needed for names?
        if isinstance(names, basestring):
            names = names.replace(',', ' ').split()
        if isinstance(names, (tuple, list)) and isinstance(names[0], basestring):
            names = [(e, i+start) for (i, e) in enumerate(names)]
        # Here, names is either an iterable of (name, value) or a mapping.
        item = None  # in case names is empty
        for item in names:
            if isinstance(item, basestring):
                member_name, member_value = item, names[item]
            else:
                member_name, member_value = item
            classdict[member_name] = member_value
            __order__.append(member_name)
        # only set __order__ in classdict if name/value was not from a mapping
        if not isinstance(item, basestring):
            classdict['__order__'] = ' '.join(__order__)
        enum_class = metacls.__new__(metacls, class_name, bases, classdict)
        # TODO: replace the frame hack if a blessed way to know the calling
        # module is ever developed
        if module is None:
            try:
                module = _sys._getframe(2).f_globals['__name__']
            except (AttributeError, ValueError):
                pass
        if module is None:
            _make_class_unpicklable(enum_class)
        else:
            enum_class.__module__ = module
        return enum_class
    @staticmethod
    def _get_mixins_(bases):
        """Returns the type for creating enum members, and the first inherited
        enum class.
        bases: the tuple of bases that was given to __new__
        """
        if not bases or Enum is None:
            return object, Enum
        # double check that we are not subclassing a class with existing
        # enumeration members; while we're at it, see if any other data
        # type has been mixed in so we can use the correct __new__
        member_type = first_enum = None
        for base in bases:
            if (base is not Enum and
                    issubclass(base, Enum) and
                    base._member_names_):
                raise TypeError("Cannot extend enumerations")
        # base is now the last base in bases
        if not issubclass(base, Enum):
            raise TypeError("new enumerations must be created as "
                    "`ClassName([mixin_type,] enum_type)`")
        # get correct mix-in type (either mix-in type of Enum subclass, or
        # first base if last base is Enum)
        if not issubclass(bases[0], Enum):
            member_type = bases[0]  # first data type
            first_enum = bases[-1]  # enum type
        else:
            for base in bases[0].__mro__:
                # most common: (IntEnum, int, Enum, object)
                # possible:    (<Enum 'AutoIntEnum'>, <Enum 'IntEnum'>,
                #               <class 'int'>, <Enum 'Enum'>,
                #               <class 'object'>)
                if issubclass(base, Enum):
                    if first_enum is None:
                        first_enum = base
                else:
                    if member_type is None:
                        member_type = base
        return member_type, first_enum
    if pyver < 3.0:
        @staticmethod
        def _find_new_(classdict, member_type, first_enum):
            """Returns the __new__ to be used for creating the enum members.
            classdict: the class dictionary given to __new__
            member_type: the data type whose __new__ will be used by default
            first_enum: enumeration to check for an overriding __new__
            """
            # now find the correct __new__, checking to see of one was defined
            # by the user; also check earlier enum classes in case a __new__ was
            # saved as __member_new__
            __new__ = classdict.get('__new__', None)
            if __new__:
                return None, True, True  # __new__, save_new, use_args
            N__new__ = getattr(None, '__new__')
            O__new__ = getattr(object, '__new__')
            if Enum is None:
                E__new__ = N__new__
            else:
                E__new__ = Enum.__dict__['__new__']
            # check all possibles for __member_new__ before falling back to
            # __new__
            for method in ('__member_new__', '__new__'):
                for possible in (member_type, first_enum):
                    try:
                        target = possible.__dict__[method]
                    except (AttributeError, KeyError):
                        target = getattr(possible, method, None)
                    if target not in [
                            None,
                            N__new__,
                            O__new__,
                            E__new__,
                            ]:
                        if method == '__member_new__':
                            classdict['__new__'] = target
                            return None, False, True
                        if isinstance(target, staticmethod):
                            target = target.__get__(member_type)
                        __new__ = target
                        break
                if __new__ is not None:
                    break
            else:
                __new__ = object.__new__
            # if a non-object.__new__ is used then whatever value/tuple was
            # assigned to the enum member name will be passed to __new__ and to the
            # new enum member's __init__
            if __new__ is object.__new__:
                use_args = False
            else:
                use_args = True
            return __new__, False, use_args
    else:
        @staticmethod
        def _find_new_(classdict, member_type, first_enum):
            """Returns the __new__ to be used for creating the enum members.
            classdict: the class dictionary given to __new__
            member_type: the data type whose __new__ will be used by default
            first_enum: enumeration to check for an overriding __new__
            """
            # now find the correct __new__, checking to see of one was defined
            # by the user; also check earlier enum classes in case a __new__ was
            # saved as __member_new__
            __new__ = classdict.get('__new__', None)
            # should __new__ be saved as __member_new__ later?
            save_new = __new__ is not None
            if __new__ is None:
                # check all possibles for __member_new__ before falling back to
                # __new__
                for method in ('__member_new__', '__new__'):
                    for possible in (member_type, first_enum):
                        target = getattr(possible, method, None)
                        if target not in (
                                None,
                                None.__new__,
                                object.__new__,
                                Enum.__new__,
                                ):
                            __new__ = target
                            break
                    if __new__ is not None:
                        break
                else:
                    __new__ = object.__new__
            # if a non-object.__new__ is used then whatever value/tuple was
            # assigned to the enum member name will be passed to __new__ and to the
            # new enum member's __init__
            if __new__ is object.__new__:
                use_args = False
            else:
                use_args = True
            return __new__, save_new, use_args
########################################################
# In order to support Python 2 and 3 with a single
# codebase we have to create the Enum methods separately
# and then use the `type(name, bases, dict)` method to
# create the class.
########################################################
temp_enum_dict = {}
temp_enum_dict['__doc__'] = "Generic enumeration.\n\n Derive from this class to define new enumerations.\n\n"

def __new__(cls, value):
    """Return the existing member whose value is *value*.

    Members are all created during class construction; this is only
    reached through the metaclass' __call__ (e.g. ``Color(3)``) and by
    pickle.
    """
    if type(value) is cls:
        # Support idempotent lookups such as Color(Color.red).
        value = value.value
    try:
        # Fast path: hashable values are indexed in the reverse map.
        found = cls._value2member_map_.get(value)
    except TypeError:
        # Unhashable value: fall back to an O(n) scan of the members.
        for candidate in cls._member_map_.values():
            if candidate.value == value:
                return candidate
    else:
        if found is not None:
            return found
    raise ValueError("%s is not a valid %s" % (value, cls.__name__))
temp_enum_dict['__new__'] = __new__
del __new__
def __repr__(self):
    """Unambiguous form, e.g. ``<Color.red: 1>``."""
    cls_name = self.__class__.__name__
    return "<%s.%s: %r>" % (cls_name, self._name_, self._value_)
temp_enum_dict['__repr__'] = __repr__
del __repr__

def __str__(self):
    """Readable form, e.g. ``Color.red``."""
    cls_name = self.__class__.__name__
    return "%s.%s" % (cls_name, self._name_)
temp_enum_dict['__str__'] = __str__
del __str__
if pyver >= 3.0:
    def __dir__(self):
        """Expose basics plus any behavior added by subclasses."""
        added_behavior = []
        for klass in self.__class__.mro():
            for attr in klass.__dict__:
                # skip dunder/private names and the members themselves
                if attr[0] != '_' and attr not in self._member_map_:
                    added_behavior.append(attr)
        return ['__class__', '__doc__', '__module__', ] + added_behavior
    temp_enum_dict['__dir__'] = __dir__
    del __dir__

def __format__(self, format_spec):
    """Format a member.

    A pure Enum formats as its str(); a mixed-in enum (e.g. IntEnum)
    delegates to the mixed-in type so format specs behave naturally
    instead of showing the member name.
    """
    if self._member_type_ is object:
        # pure Enum branch
        cls, val = str, str(self)
    else:
        # mix-in branch
        cls, val = self._member_type_, self.value
    return cls.__format__(val, format_spec)
temp_enum_dict['__format__'] = __format__
del __format__
####################################
# Python's less than 2.6 use __cmp__
if pyver < 2.6:
    def __cmp__(self, other):
        # Members are only meaningfully comparable for (in)equality:
        # identity decides equality, anything else compares "less".
        # NOTE(review): the original body ended with an unreachable
        # ``raise TypeError(...)`` after ``return NotImplemented``; the
        # dead statement has been removed -- behavior is unchanged.
        if type(other) is self.__class__:
            if self is other:
                return 0
            return -1
        return NotImplemented
    temp_enum_dict['__cmp__'] = __cmp__
    del __cmp__
else:
    # Rich comparisons: ordering between enum members is explicitly
    # disallowed (equality is handled separately by __eq__/__ne__).
    def __le__(self, other):
        raise TypeError("unorderable types: %s() <= %s()" % (self.__class__.__name__, other.__class__.__name__))
    temp_enum_dict['__le__'] = __le__
    del __le__

    def __lt__(self, other):
        raise TypeError("unorderable types: %s() < %s()" % (self.__class__.__name__, other.__class__.__name__))
    temp_enum_dict['__lt__'] = __lt__
    del __lt__

    def __ge__(self, other):
        raise TypeError("unorderable types: %s() >= %s()" % (self.__class__.__name__, other.__class__.__name__))
    temp_enum_dict['__ge__'] = __ge__
    del __ge__

    def __gt__(self, other):
        raise TypeError("unorderable types: %s() > %s()" % (self.__class__.__name__, other.__class__.__name__))
    temp_enum_dict['__gt__'] = __gt__
    del __gt__
def __eq__(self, other):
    """Members of the same enum compare equal only by identity."""
    if type(other) is not self.__class__:
        return NotImplemented
    return self is other
temp_enum_dict['__eq__'] = __eq__
del __eq__

def __ne__(self, other):
    """Inverse of __eq__, still restricted to the same enum class."""
    if type(other) is not self.__class__:
        return NotImplemented
    return self is not other
temp_enum_dict['__ne__'] = __ne__
del __ne__

def __hash__(self):
    """Hash by member name so members are usable as dict/set keys."""
    return hash(self._name_)
temp_enum_dict['__hash__'] = __hash__
del __hash__

def __reduce_ex__(self, proto):
    """Pickle members by (class, value); unpickling re-runs the lookup."""
    return self.__class__, (self._value_, )
temp_enum_dict['__reduce_ex__'] = __reduce_ex__
del __reduce_ex__
# _RouteClassAttributeToGetattr is used to provide access to the `name`
# and `value` properties of enum members while keeping some measure of
# protection from modification, while still allowing for an enumeration
# to have members named `name` and `value`.  This works because enumeration
# members are not set directly on the enum class -- __getattr__ is
# used to look them up.
@_RouteClassAttributeToGetattr
def name(self):
    # Read-only view of the member's name (backing attribute _name_).
    return self._name_
temp_enum_dict['name'] = name
del name

@_RouteClassAttributeToGetattr
def value(self):
    # Read-only view of the member's value (backing attribute _value_).
    return self._value_
temp_enum_dict['value'] = value
del value
@classmethod
def _convert(cls, name, module, filter, source=None):
    """
    Create a new Enum subclass that replaces a collection of global constants
    """
    # Harvest every name in *source* (default: the target module) that
    # passes *filter*, build an Enum called *name* from them, and export
    # the new class plus its members back into the module's globals.
    # __reduce_ex__ is replaced so unpickling works on previous Python
    # versions.
    module_globals = vars(_sys.modules[module])
    if source:
        source = vars(source)
    else:
        source = module_globals
    # explicit loop (rather than a dict comprehension) keeps pre-2.7
    # compatibility, which this file otherwise supports
    members = {}
    for const_name, const_value in source.items():
        if filter(const_name):
            members[const_name] = const_value
    cls = cls(name, members, module=module)
    cls.__reduce_ex__ = _reduce_ex_by_name
    module_globals.update(cls.__members__)
    module_globals[name] = cls
    return cls
temp_enum_dict['_convert'] = _convert
del _convert
# Invoke the metaclass directly: the Python 2/3-portable spelling of
# ``class Enum(object, metaclass=EnumMeta)``.
Enum = EnumMeta('Enum', (object, ), temp_enum_dict)
del temp_enum_dict
# Enum has now been created
###########################
class IntEnum(int, Enum):
    """Enum where members are also (and must be) ints"""


def _reduce_ex_by_name(self, proto):
    # Pickle members by bare name; installed as __reduce_ex__ by
    # _convert so converted constants unpickle as module attributes.
    return self.name
def unique(enumeration):
    """Class decorator that ensures only unique members exist in an enumeration."""
    # An alias is any mapping key that differs from its member's canonical
    # name; collect them all so the error message lists every offender.
    duplicates = [
        (alias, member.name)
        for alias, member in enumeration.__members__.items()
        if alias != member.name
    ]
    if duplicates:
        duplicate_names = ', '.join(
            "%s -> %s" % pair for pair in duplicates
        )
        raise ValueError('duplicate names found in %r: %s' %
                         (enumeration, duplicate_names)
                         )
    return enumeration
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Module for connecting to XBMC """
import cherrypy
import htpc
import base64
import socket
import struct
from urllib2 import quote
from jsonrpclib import Server
from sqlobject import SQLObject, SQLObjectNotFound
from sqlobject.col import StringCol, IntCol
from htpc.proxy import get_image
import logging
from cherrypy.lib.auth2 import require
class XbmcServers(SQLObject):
    """ SQLObject class for xbmc_servers table """
    name = StringCol()                    # display name for the server
    host = StringCol()                    # hostname or IP, no scheme
    port = IntCol()                       # JSON-RPC HTTP port (XBMC default 8080)
    username = StringCol(default=None)    # optional HTTP auth user
    password = StringCol(default=None)    # optional HTTP auth password
    mac = StringCol(default=None)         # MAC address used for wake-on-LAN
class Xbmc(object):
    """CherryPy controller that proxies an XBMC JSON-RPC server.

    Registers its settings pages with htpc.MODULES, persists server
    definitions via XbmcServers, and exposes library/player/system calls
    as JSON endpoints.  Written for Python 2 (``except Exception, e``,
    ``urllib2``).
    """

    def __init__(self):
        """ Add module to list of modules on load and set required settings """
        self.logger = logging.getLogger('modules.xbmc')
        XbmcServers.createTable(ifNotExists=True)
        # Settings page: module-level options
        htpc.MODULES.append({
            'name': 'XBMC',
            'id': 'xbmc',
            'fields': [
                {'type': 'bool',
                 'label': 'Enable',
                 'name': 'xbmc_enable'},
                {'type': 'text',
                 'label': 'Menu name',
                 'name': 'xbmc_name'},
                {'type': 'bool',
                 'label': 'Enable PVR',
                 'name': 'xbmc_enable_pvr'},
                {'type': 'bool',
                 'label': 'Hide watched',
                 'name': 'xbmc_hide_watched'}
            ]})
        # Settings page: per-server connection details
        htpc.MODULES.append({
            'name': 'XBMC Servers',
            'id': 'xbmc_update_server',
            'action': htpc.WEBDIR + 'xbmc/setserver',
            'test': htpc.WEBDIR + 'xbmc/ping',
            'fields': [
                {'type': 'select',
                 'label': 'Server',
                 'name': 'xbmc_server_id',
                 'options': [
                     {'name': 'New', 'value': 0}
                 ]},
                {'type': 'text',
                 'label': 'Name',
                 'name': 'xbmc_server_name'},
                {'type': 'text',
                 'label': 'IP / Host',
                 'placeholder': 'localhost',
                 'name': 'xbmc_server_host'},
                {'type': 'text',
                 'label': 'Port',
                 'placeholder': '8080',
                 'name': 'xbmc_server_port'},
                {'type': 'text',
                 'label': 'Username',
                 'name': 'xbmc_server_username'},
                {'type': 'password',
                 'label': 'Password',
                 'name': 'xbmc_server_password'},
                {'type': 'text',
                 'label': 'Mac addr.',
                 'name': 'xbmc_server_mac'}
            ]})
        # restore whichever server was selected last time
        server = htpc.settings.get('xbmc_current_server', 0)
        self.changeserver(server)

    @cherrypy.expose()
    @require()
    def index(self):
        """ Generate page from template """
        return htpc.LOOKUP.get_template('xbmc.html').render(scriptname='xbmc')

    @cherrypy.expose()
    @require()
    def webinterface(self):
        """ Generate page from template """
        # redirect straight to the XBMC web UI, including auth in the URL
        raise cherrypy.HTTPRedirect(self.url('', True))

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def ping(self, xbmc_server_host='', xbmc_server_port='',
             xbmc_server_username='', xbmc_server_password='', **kwargs):
        """ Tests settings, returns MAC address on success and null on fail """
        self.logger.debug("Testing XBMC connectivity")
        try:
            url = xbmc_server_host + ':' + xbmc_server_port
            if xbmc_server_username and xbmc_server_password:
                url = xbmc_server_username + ':' + xbmc_server_password + '@' + url
            xbmc = Server('http://' + url + '/jsonrpc')
            self.logger.debug("Trying to contact xbmc via %s" % url)
            return xbmc.XBMC.GetInfoLabels(labels=["Network.MacAddress"])
        except Exception, e:
            self.logger.exception(e)
            # falls through returning None, serialized as JSON null
            self.logger.error("Unable to contact XBMC via %s", url)

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def getserver(self, id=None):
        if id:
            """ Get XBMC server info """
            try:
                server = XbmcServers.selectBy(id=id).getOne()
                # serialize every column of the row as a plain dict
                return dict((c, getattr(server, c)) for c in server.sqlmeta.columns)
            except SQLObjectNotFound:
                return
        """ Get a list of all servers and the current server """
        servers = []
        for s in XbmcServers.select():
            servers.append({'id': s.id, 'name': s.name})
        if len(servers) < 1:
            return
        try:
            current = self.current.name
        except AttributeError:
            # self.current is None when no server is configured
            current = None
        return {'current': current, 'servers': servers}

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def setserver(self, xbmc_server_id, xbmc_server_name, xbmc_server_host, xbmc_server_port,
                  xbmc_server_username=None, xbmc_server_password=None, xbmc_server_mac=None):
        """ Create a server if id=0, else update a server """
        # returns 1 on success, 0 on failure (consumed by the settings UI)
        if xbmc_server_id == "0":
            self.logger.debug("Creating XBMC-Server in database")
            try:
                server = XbmcServers(name=xbmc_server_name,
                                     host=xbmc_server_host,
                                     port=int(xbmc_server_port),
                                     username=xbmc_server_username,
                                     password=xbmc_server_password,
                                     mac=xbmc_server_mac)
                # make the newly created server the active one
                self.changeserver(server.id)
                return 1
            except Exception, e:
                self.logger.debug("Exception: " + str(e))
                self.logger.error("Unable to create XBMC-Server in database")
                return 0
        else:
            self.logger.debug("Updating XBMC-Server " + xbmc_server_name + " in database")
            try:
                server = XbmcServers.selectBy(id=xbmc_server_id).getOne()
                server.name = xbmc_server_name
                server.host = xbmc_server_host
                server.port = int(xbmc_server_port)
                server.username = xbmc_server_username
                server.password = xbmc_server_password
                server.mac = xbmc_server_mac
                return 1
            except SQLObjectNotFound, e:
                # NOTE(review): `e` is unused, and `server.name` here is only
                # bound if the lookup succeeded -- on SQLObjectNotFound this
                # log line would raise NameError; confirm intended.
                self.logger.error("Unable to update XBMC-Server " + server.name + " in database")
                return 0

    @cherrypy.expose()
    @require()
    def delserver(self, id):
        """ Delete a server """
        self.logger.debug("Deleting server " + str(id))
        XbmcServers.delete(id)
        # fall back to the default server selection
        self.changeserver()
        return

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def changeserver(self, id=0):
        # Select the active server row; on bad id fall back to the first
        # available server, and clear the selection if none exist.
        try:
            self.current = XbmcServers.selectBy(id=id).getOne()
            htpc.settings.set('xbmc_current_server', str(id))
            self.logger.info("Selecting XBMC server: %s", id)
            return "success"
        except SQLObjectNotFound:
            try:
                self.current = XbmcServers.select(limit=1).getOne()
                self.logger.error("Invalid server. Selecting first Available.")
                return "success"
            except SQLObjectNotFound:
                self.current = None
                self.logger.warning("No configured XBMC-Servers.")
                return "No valid servers"

    @cherrypy.expose()
    @require()
    def GetThumb(self, thumb=None, h=None, w=None, o=100):
        """ Parse thumb to get the url and send to htpc.proxy.get_image """
        # default artwork when the item has no thumbnail
        url = self.url('/images/DefaultVideo.png')
        if thumb:
            url = self.url('/image/' + quote(thumb))
        self.logger.debug("Trying to fetch image via %s", url)
        return get_image(url, h, w, o, self.auth())

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def GetMovies(self, start=0, end=0, sortmethod='title', sortorder='ascending', hidewatched=0, filter=''):
        """ Get a list of all movies """
        self.logger.debug("Fetching Movies")
        try:
            xbmc = Server(self.url('/jsonrpc', True))
            sort = {'order': sortorder, 'method': sortmethod, 'ignorearticle': True}
            properties = ['title', 'year', 'plot', 'thumbnail', 'file', 'fanart', 'studio', 'trailer',
                          'imdbnumber', 'genre', 'rating', 'playcount']
            limits = {'start': int(start), 'end': int(end)}
            # title substring match; optionally AND-ed with unwatched-only
            filter = {'field': 'title', 'operator': 'contains', 'value': filter}
            if hidewatched == "1":
                filter = {"and": [filter, {'field': 'playcount', 'operator': 'is', 'value': '0'}]}
            return xbmc.VideoLibrary.GetMovies(sort=sort, properties=properties, limits=limits, filter=filter)
        except Exception, e:
            self.logger.exception(e)
            self.logger.error("Unable to fetch movies!")
            return

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def GetShows(self, start=0, end=0, sortmethod='title', sortorder='ascending', hidewatched=0, filter=''):
        """ Get a list of all the TV Shows """
        self.logger.debug("Fetching TV Shows")
        try:
            xbmc = Server(self.url('/jsonrpc', True))
            sort = {'order': sortorder, 'method': sortmethod, 'ignorearticle': True}
            properties = ['title', 'year', 'plot', 'thumbnail', 'playcount']
            limits = {'start': int(start), 'end': int(end)}
            filter = {'field': 'title', 'operator': 'contains', 'value': filter}
            if hidewatched == "1":
                filter = {"and": [filter, {'field': 'playcount', 'operator': 'is', 'value': '0'}]}
            shows = xbmc.VideoLibrary.GetTVShows(sort=sort, properties=properties, limits=limits, filter=filter)
            return shows
        except Exception, e:
            self.logger.exception(e)
            self.logger.error("Unable to fetch TV Shows")
            return

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def GetEpisodes(self, start=0, end=0, sortmethod='episode', sortorder='ascending', tvshowid=None, hidewatched=False, filter=''):
        """ Get information about a single TV Show """
        # NOTE(review): hidewatched defaults to False here but is compared
        # against the string "1" below (siblings default to 0) -- callers
        # must pass the string "1" for the unwatched filter to engage.
        self.logger.debug("Loading information for TVID %s", str(tvshowid))
        try:
            xbmc = Server(self.url('/jsonrpc', True))
            sort = {'order': sortorder, 'method': sortmethod, 'ignorearticle': True}
            properties = ['episode', 'season', 'thumbnail', 'plot', 'file', 'playcount']
            limits = {'start': int(start), 'end': int(end)}
            filter = {'field': 'title', 'operator': 'contains', 'value': filter}
            if hidewatched == "1":
                filter = {"and": [filter, {'field': 'playcount', 'operator': 'is', 'value': '0'}]}
            episodes = xbmc.VideoLibrary.GetEpisodes(sort=sort, tvshowid=int(tvshowid), properties=properties, limits=limits, filter=filter)
            return episodes
        except:
            return

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def GetArtists(self, start=0, end=0, sortmethod='artist', sortorder='ascending', filter=''):
        """ Get a list of all artists """
        self.logger.debug("Fetching all artists in the music database")
        try:
            xbmc = Server(self.url('/jsonrpc', True))
            sort = {'order': sortorder, 'method': sortmethod, 'ignorearticle': True}
            properties = ['thumbnail', 'fanart']
            limits = {'start': int(start), 'end': int(end)}
            filter = {'field': 'artist', 'operator': 'contains', 'value': filter}
            return xbmc.AudioLibrary.GetArtists(properties=properties, limits=limits, sort=sort, filter=filter)
        except Exception, e:
            self.logger.exception(e)
            self.logger.error("Unable to fetch artists!")
            return

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def GetAlbums(self, start=0, end=0, sortmethod='label', sortorder='ascending', artistid=None, filter=''):
        """ Get a list of all albums for artist """
        self.logger.debug("Loading all albums for ARTISTID %s", str(artistid))
        try:
            xbmc = Server(self.url('/jsonrpc', True))
            sort = {'order': sortorder, 'method': sortmethod, 'ignorearticle': True}
            properties = ['title', 'artist', 'year', 'thumbnail']
            limits = {'start': int(start), 'end': int(end)}
            if artistid:
                # exact browse within one artist
                filter = {'artistid': int(artistid)}
            else:
                # free-text search across album and artist names
                filter = {'or': [{'field': 'album', 'operator': 'contains', 'value': filter},
                                 {'field': 'artist', 'operator': 'contains', 'value': filter}]}
            return xbmc.AudioLibrary.GetAlbums(properties=properties, limits=limits, sort=sort, filter=filter)
        except Exception, e:
            self.logger.debug("Exception: %s", str(e))
            self.logger.error("Unable to fetch albums!")
            return

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def GetSongs(self, start=0, end=0, sortmethod='title', sortorder='ascending', albumid=None, artistid=None, filter='', *args, **kwargs):
        """ Get a list of all songs """
        self.logger.debug("Fetching all artists in the music database")
        try:
            xbmc = Server(self.url('/jsonrpc', True))
            sort = {'order': sortorder, 'method': sortmethod, 'ignorearticle': True}
            properties = ['artist', 'artistid', 'album', 'albumid', 'duration', 'year', 'thumbnail']
            limits = {'start': int(start), 'end': int(end)}
            # album/artist browse wins over free-text search
            if albumid and filter == '':
                filter = {'albumid': int(albumid)}
            elif artistid and filter == '':
                filter = {'artistid': int(artistid)}
            else:
                filter = {'or': [{'field': 'album', 'operator': 'contains', 'value': filter},
                                 {'field': 'artist', 'operator': 'contains', 'value': filter},
                                 {'field': 'title', 'operator': 'contains', 'value': filter}]}
            return xbmc.AudioLibrary.GetSongs(properties=properties, limits=limits, sort=sort, filter=filter)
        except Exception, e:
            self.logger.exception(e)
            self.logger.error("Unable to fetch artists!")
            return

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def GetChannelGroups(self, type='tv'):
        """ Get PVR channel list from xbmc """
        self.logger.debug("Loading XBMC PVC channel list.")
        try:
            xbmc = Server(self.url('/jsonrpc', True))
            return xbmc.PVR.GetChannelGroups(channeltype=type)
        except Exception, e:
            self.logger.exception(e)
            self.logger.error("Unable to fetch channelgroups!")
            return

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def GetChannels(self, type='tv', group=2):
        """ Get PVR channel list from xbmc """
        self.logger.debug("Loading XBMC PVC channel list.")
        try:
            xbmc = Server(self.url('/jsonrpc', True))
            return xbmc.PVR.GetChannels(channelgroupid=int(group), properties=['thumbnail'])
        except Exception, e:
            self.logger.exception(e)
            self.logger.error("Unable to fetch channels!")
            return

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def PlayItem(self, item=None, type=None):
        """ Play a file in XBMC """
        # dispatch on item type; unknown types are treated as a file path
        self.logger.debug("Playing '%s' of the type %s", item, type)
        xbmc = Server(self.url('/jsonrpc', True))
        if type == 'movie':
            return xbmc.Player.Open(item={'movieid': int(item)}, options={'resume': True})
        elif type == 'episode':
            return xbmc.Player.Open(item={'episodeid': int(item)}, options={'resume': True})
        elif type == 'channel':
            return xbmc.Player.Open(item={'channelid': int(item)})
        elif type == 'artist':
            return xbmc.Player.Open(item={'artistid': int(item)})
        elif type == 'album':
            return xbmc.Player.Open(item={'albumid': int(item)})
        elif type == 'song':
            return xbmc.Player.Open(item={'songid': int(item)})
        else:
            return xbmc.Player.Open(item={'file': item})

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def QueueItem(self, item, type):
        """ Queue a file in XBMC """
        # playlistid 1 is the video playlist, 0 the audio playlist
        self.logger.debug("Enqueueing '%s' of the type %s", item, type)
        xbmc = Server(self.url('/jsonrpc', True))
        if type == 'movie':
            return xbmc.Playlist.Add(playlistid=1, item={'movieid': int(item)})
        elif type == 'episode':
            return xbmc.Playlist.Add(playlistid=1, item={'episodeid': int(item)})
        elif type == 'channel':
            return xbmc.Playlist.Add(playlistid=1, item={'channelid': int(item)})
        elif type == 'artist':
            return xbmc.Playlist.Add(playlistid=0, item={'artistid': int(item)})
        elif type == 'album':
            return xbmc.Playlist.Add(playlistid=0, item={'albumid': int(item)})
        elif type == 'song':
            return xbmc.Playlist.Add(playlistid=0, item={'songid': int(item)})

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def RemoveItem(self, item, playlistid=0):
        """ Remove a file from the playlist """
        self.logger.debug("Removing '%s' from the playlist", item)
        xbmc = Server(self.url('/jsonrpc', True))
        return xbmc.Playlist.Remove(playlistid=playlistid, position=int(item))

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def PlaylistMove(self, position1, position2, playlistid=0):
        """ Swap files in playlist """
        # XBMC only offers pairwise Swap, so walk the item one step at a
        # time from position1 towards position2.
        playlistid = int(playlistid)
        position1 = int(position1)
        position2 = int(position2)
        i = 1 if position1 < position2 else -1
        xbmc = Server(self.url('/jsonrpc', True))
        while(position1 != position2):
            xbmc.Playlist.Swap(playlistid=playlistid, position1=position1, position2=position1 + i)
            position1 += i
        # NOTE(review): position1 has been mutated to equal position2 by the
        # loop above, so this message reports the destination twice rather
        # than the original position -- confirm intended.
        return "Moved from " + str(position1) + " to " + str(position2)

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def Playlist(self, type='audio'):
        """ Get a playlist from XBMC """
        self.logger.debug("Loading Playlist of type %s", type)
        xbmc = Server(self.url('/jsonrpc', True))
        if type == 'video':
            return xbmc.Playlist.GetItems(playlistid=1, properties=['year', 'showtitle', 'season', 'episode', 'runtime'])
        return xbmc.Playlist.GetItems(playlistid=0, properties=['artist', 'title', 'album', 'duration'])

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def NowPlaying(self):
        """ Get information about current playing item """
        self.logger.debug("Fetching currently playing information")
        try:
            xbmc = Server(self.url('/jsonrpc', True))
            # IndexError here means nothing is playing (handled below)
            player = xbmc.Player.GetActivePlayers()[0]
            playerid = player['playerid']
            # request different property sets for video vs audio playback
            if player['type'] == 'video':
                playerprop = ['speed', 'position', 'time', 'totaltime',
                              'percentage', 'subtitleenabled', 'currentsubtitle',
                              'subtitles', 'currentaudiostream', 'audiostreams']
                itemprop = ['thumbnail', 'showtitle', 'season', 'episode', 'year', 'fanart']
            elif player['type'] == 'audio':
                playerprop = ['speed', 'position', 'time', 'totaltime', 'percentage']
                itemprop = ['thumbnail', 'title', 'artist', 'album', 'year', 'fanart']
            app = xbmc.Application.GetProperties(properties=['muted', 'volume'])
            player = xbmc.Player.GetProperties(playerid=playerid, properties=playerprop)
            item = xbmc.Player.GetItem(playerid=playerid, properties=itemprop)
            return {'playerInfo': player, 'itemInfo': item, 'app': app}
        except IndexError:
            self.logger.debug("Nothing current playing.")
            return
        except Exception, e:
            self.logger.exception(e)
            self.logger.error("Unable to fetch currently playing information!")
            return

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def ControlPlayer(self, action, value=''):
        """ Various commands to control XBMC Player """
        self.logger.debug("Sending control to XBMC %s", action)
        try:
            xbmc = Server(self.url('/jsonrpc', True))
            if action == 'seek':
                player = xbmc.Player.GetActivePlayers()[0]
                return xbmc.Player.Seek(playerid=player[u'playerid'], value=float(value))
            elif action == 'jump':
                player = xbmc.Player.GetActivePlayers()[0]
                return xbmc.Player.GoTo(playerid=player[u'playerid'], to=int(value))
            elif action == 'party':
                return xbmc.Player.Open(item={'partymode': 'audio'})
            elif action == 'getsub':
                # try the Frodo-era addon first, then the Gotham dialog
                try:
                    #Frodo
                    return xbmc.Addons.ExecuteAddon(addonid='script.xbmc.subtitles')
                except:
                    pass
                try:
                    #Gotham
                    return xbmc.GUI.ActivateWindow(window='subtitlesearch')
                except:
                    pass
            elif action == 'volume':
                return xbmc.Application.SetVolume(volume=int(value))
            else:
                # any other action is passed straight through to XBMC
                return xbmc.Input.ExecuteAction(action=action)
        except Exception, e:
            self.logger.exception(e)
            self.logger.error("Unable to control XBMC with action: %s", action)
            return 'error'

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def SendText(self, text):
        """ Send text to XBMC """
        self.logger.debug("Sending text to XBMC: %s", text)
        xbmc = Server(self.url('/jsonrpc', True))
        return xbmc.Input.SendText(text=text)

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def Subtitles(self, subtitle='off'):
        """ Change the subtitles """
        self.logger.debug("Changing subtitles to %s", subtitle)
        try:
            xbmc = Server(self.url('/jsonrpc', True))
            playerid = xbmc.Player.GetActivePlayers()[0][u'playerid']
            try:
                # numeric value selects a subtitle stream index
                subtitle = int(subtitle)
                xbmc.Player.SetSubtitle(playerid=playerid, subtitle=subtitle, enable=True)
                return "success"
            except ValueError:
                # non-numeric (e.g. 'off') disables subtitles
                xbmc.Player.SetSubtitle(playerid=playerid, subtitle='off')
                return "Disabling subtitles."
        except Exception, e:
            self.logger.exception(e)
            self.logger.error("Unable to set subtitle to specified value %s", subtitle)
            return

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def Audio(self, audio):
        """ Change the audio stream """
        # NOTE(review): "Chaning" is a typo in the original log message;
        # left untouched because it is a runtime string.
        self.logger.debug("Chaning audio stream to %s", audio)
        try:
            xbmc = Server(self.url('/jsonrpc', True))
            playerid = xbmc.Player.GetActivePlayers()[0][u'playerid']
            return xbmc.Player.SetAudioStream(playerid=playerid, stream=int(audio))
        except Exception, e:
            self.logger.exception(e)
            self.logger.error("Unable to change audio stream to specified value %s", audio)
            return

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def System(self, action=''):
        """ Various system commands """
        # unknown actions fall through and return None (JSON null)
        xbmc = Server(self.url('/jsonrpc', True))
        if action == 'Shutdown':
            self.logger.info("Shutting down XBMC")
            xbmc.System.Shutdown()
            return 'Shutting down XBMC.'
        elif action == 'Suspend':
            self.logger.info("Suspending XBMC")
            xbmc.System.Suspend()
            return 'Suspending XBMC.'
        elif action == 'Reboot':
            self.logger.info("Rebooting XBMC")
            xbmc.System.Reboot()
            return 'Rebooting XBMC.'

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def Wake(self):
        """ Send WakeOnLan package """
        self.logger.info("Waking up XBMC-System")
        try:
            # build the magic packet: 6x 0xFF followed by the MAC 16 times
            addr_byte = self.current.mac.split(':')
            hw_addr = struct.pack('BBBBBB',
                                  int(addr_byte[0], 16),
                                  int(addr_byte[1], 16),
                                  int(addr_byte[2], 16),
                                  int(addr_byte[3], 16),
                                  int(addr_byte[4], 16),
                                  int(addr_byte[5], 16))
            msg = '\xff' * 6 + hw_addr * 16
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
            # UDP broadcast on the discard port
            s.sendto(msg, ("255.255.255.255", 9))
            self.logger.info("WOL package sent to %s", self.current.mac)
            return "WOL package sent"
        except Exception, e:
            self.logger.exception(e)
            self.logger.error("Unable to send WOL packet")
            return "Unable to send WOL packet"

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def Notify(self, text):
        """ Create popup in XBMC """
        self.logger.debug("Sending notification to XBMC: %s", text)
        xbmc = Server(self.url('/jsonrpc', True))
        image = 'https://raw.github.com/styxit/HTPC-Manager/master/interfaces/default/img/xbmc-logo.png'
        return xbmc.GUI.ShowNotification(title='HTPC manager', message=text, image=image)

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def GetRecentMovies(self, limit=5):
        """ Get a list of recently added movies """
        self.logger.debug("Fetching recently added movies")
        try:
            xbmc = Server(self.url('/jsonrpc', True))
            properties = ['title', 'year', 'runtime', 'plot', 'thumbnail', 'file',
                          'fanart', 'trailer', 'imdbnumber', 'studio', 'genre', 'rating']
            limits = {'start': 0, 'end': int(limit)}
            return xbmc.VideoLibrary.GetRecentlyAddedMovies(properties=properties, limits=limits)
        except Exception, e:
            self.logger.exception(e)
            self.logger.error("Unable to fetch recently added movies!")
            return

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def GetRecentShows(self, limit=5):
        """ Get a list of recently added TV Shows """
        self.logger.debug("Fetching recently added TV Shows")
        try:
            xbmc = Server(self.url('/jsonrpc', True))
            properties = ['showtitle', 'season', 'episode', 'title', 'runtime',
                          'thumbnail', 'plot', 'fanart', 'file']
            limits = {'start': 0, 'end': int(limit)}
            return xbmc.VideoLibrary.GetRecentlyAddedEpisodes(properties=properties, limits=limits)
        except Exception, e:
            self.logger.exception(e)
            self.logger.error("Unable to fetch recently added TV Shows")
            return

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def GetRecentAlbums(self, limit=5):
        """ Get a list of recently added music """
        self.logger.debug("Fetching recently added Music")
        try:
            xbmc = Server(self.url('/jsonrpc', True))
            properties = ['artist', 'albumlabel', 'year', 'description', 'thumbnail']
            limits = {'start': 0, 'end': int(limit)}
            return xbmc.AudioLibrary.GetRecentlyAddedAlbums(properties=properties, limits=limits)
        except Exception, e:
            self.logger.exception(e)
            self.logger.error("Unable to fetch recently added Music!")
            return

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def Library(self, do='scan', lib='video'):
        # trigger a clean or scan of the video/audio library
        xbmc = Server(self.url('/jsonrpc', True))
        if lib == 'video':
            if do == 'clean':
                return xbmc.VideoLibrary.Clean()
            else:
                return xbmc.VideoLibrary.Scan()
        else:
            if do == 'clean':
                return xbmc.AudioLibrary.Clean()
            else:
                return xbmc.AudioLibrary.Scan()

    def url(self, path='', auth=False):
        """ Generate a URL for the RPC based on XBMC settings """
        # auth=True embeds user:password@ credentials in the URL
        self.logger.debug("Generate URL to call XBMC")
        url = self.current.host + ':' + str(self.current.port) + path
        if auth and self.current.username and self.current.password:
            url = self.current.username + ':' + self.current.password + '@' + url
        self.logger.debug("URL: http://%s", url)
        return 'http://' + url

    def auth(self):
        """ Generate a base64 HTTP auth string based on settings """
        # returns None when no credentials are configured
        self.logger.debug("Generating authentication string")
        if self.current.username and self.current.password:
            return base64.encodestring('%s:%s' % (self.current.username, self.current.password)).strip('\n')
| |
#!/usr/bin/python
'''
A script to wrap ffmpeg for restreaming a live stream
If it can't pick up the main source then it should show colour bars
Things to look for in output to confirm stream running:
Duration: N/A, start: 0.020000, bitrate: 786 kb/s
Stream #0:0: Video: h264 (Baseline), yuv420p, 640x360 [SAR 1:1 DAR 16:9], 655 kb/s, 25 tbr, 1k tbn, 50 tbc
Stream #0:1: Audio: aac, 44100 Hz, stereo, fltp, 131 kb/s
Stream mapping:
Stream #0:0 -> #0:0 (copy)
Stream #0:1 -> #0:1 (copy)
Press [q] to stop, [?] for help
'''
import sys
import os
from subprocess import PIPE, Popen
from threading import Thread, activeCount, enumerate
import time
import datetime
import logging
# Log everything to both encoder.log and the console
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(message)s',
                    handlers=[logging.FileHandler("encoder.log"),
                              logging.StreamHandler()])
# Seconds to wait between source-probe attempts
CHECK_WAIT = 15
FFMPEG = "/usr/local/bin/ffmpeg"
FFPROBE = "/usr/local/bin/ffprobe"
# Amount of time to wait before timing out a probe
PROBE_TIMEOUT = 20
RTMP_SRC = "rtmp://cp30129.live.edgefcs.net/live/videoops-videoops@50541"
RTMP_DEST = "rtmp://wowsyd.sinclairmediatech.com/live/canberra"
# Marker line ffmpeg prints once the stream is running
STREAM_RUNNING = "Press [q] to stop"
# Fallback command: generate SMPTE colour bars locally and push them out
FFSMPTE = FFMPEG + " -re -f lavfi -i smptebars -s 640x360 -g 25 -c:v libx264 \
-b:v 500k -an -f flv " + RTMP_DEST
# Main command: copy (no transcode) the source stream to the destination
FFCMD = FFMPEG + " -i " + RTMP_SRC + \
    " -map 0 -c:v copy -c:a copy -f flv " + RTMP_DEST
try:
    from Queue import Queue, Empty
except ImportError:
    from queue import Queue, Empty  # python 3.x
logging.info("Starting")
# close_fds is only safe/needed for Popen on POSIX platforms
ON_POSIX = 'posix' in sys.builtin_module_names
def process_line(std, q):
    """Read *std* in small fixed-size chunks and put each complete line on *q*.

    The stream is consumed 10 bytes at a time so ffmpeg/ffprobe status
    output is surfaced promptly; an unterminated trailing line is flushed
    when the stream ends.

    NOTE(review): the original collected complete lines into ``tmpLines``
    but only ever queued ``split[0]``, silently dropping any additional
    lines that arrived within a single chunk.  Every complete line is now
    queued.
    """
    partial_line = ""
    while True:
        data = std.read(10)
        # zero-length read means the stream has closed
        if len(data) == 0:
            # flush whatever partial line remains before exiting
            if partial_line:
                q.put(partial_line)
            break
        partial_line += data
        # emit every fully terminated line; keep the unterminated tail
        while "\n" in partial_line:
            line, partial_line = partial_line.split("\n", 1)
            q.put(line)
def enqueue_output(stdout, stderr, queue):
    """Pump lines from the first pipe into *queue*, then close that pipe.

    NOTE(review): callers pass the process *stderr* as the first argument
    (ffmpeg logs there); only that first pipe is drained here.
    """
    process_line(stdout, queue)
    stdout.close()
def clear():
    """Clear the terminal screen (Windows ``cls``, otherwise ``clear``)."""
    command = 'cls' if os.name == 'nt' else 'clear'
    os.system(command)
def probe(stream):
    """Run ffprobe against *stream* and report whether it carries video.

    Returns True when ffprobe reports a video stream; False when ffprobe
    prints an error, exits with a non-zero status, or PROBE_TIMEOUT
    seconds elapse without a verdict (the probe process is then killed).
    """
    probeq = Queue()
    logging.info("Checking " + stream)
    probeproc = run(probeq, FFPROBE + " " + stream, "probethread")
    probe_timeout = datetime.datetime.now() + datetime.timedelta(0, PROBE_TIMEOUT)
    # Read from the queue until we see a verdict, the process dies, or we
    # hit the timeout.
    while True:
        try:
            line = probeq.get_nowait()
            # Possible error responses:
            #   Server error: Failed to play stream
            #   Input/output error
            if "Stream #0:0: Video" in line:
                logging.info("Found stream " + stream)
                logging.info(line)
                return True
            elif "error" in line:
                return False
            else:
                # BUGFIX: poll() returns None while the process is alive;
                # the original compared None > 0, which raises TypeError
                # on Python 3.  Guard against None before comparing.
                returncode = probeproc.poll()
                if returncode is not None and returncode > 0:
                    return False
        except Empty:
            if datetime.datetime.now() > probe_timeout:
                logging.error("Probe timeout time!")
                probeproc.terminate()
                return False
            # BUGFIX: the original spun in a tight busy-wait here; sleep
            # briefly while ffprobe is still starting up.
            time.sleep(0.1)
def run(q, ffcmd, thread_name):
    """Launch *ffcmd* in a shell and drain its stderr into *q*.

    A daemon thread named *thread_name* reads the process output so the
    pipe never fills up and stalls ffmpeg.  Returns the Popen handle.
    """
    logging.info(thread_name + " :Running " + ffcmd)
    proc = Popen(ffcmd, shell=True, stdout=PIPE, stderr=PIPE, stdin=PIPE,
                 bufsize=1, close_fds=ON_POSIX)
    # ffmpeg/ffprobe write status lines to stderr, hence stderr first.
    reader = Thread(target=enqueue_output, name=thread_name,
                    args=(proc.stderr, proc.stdout, q))
    reader.daemon = True  # thread dies with the program
    reader.start()
    return proc
# --- supervisor state ------------------------------------------------------
colorbars = None        # Popen handle of the colour-bar fallback encoder
colorq = Queue()        # output lines from the colour-bar process
colorbarson = False     # True while the fallback encoder is running
mainq = Queue()         # output lines from the main encoder
'''
This is a workaround for startups where it can't read the stream and needs to put colorbars up
Otherwise it can hang on starting up the stream
TODO: Terminate the ffmpeg process if no valid response is received in a specific time frame
'''
# Block at startup until the source feed is reachable, showing colour bars
# in the meantime.
while (not probe(RTMP_SRC)):
    if not colorbarson:
        colorbars = run(colorq, FFSMPTE, "colorbars")
        colorbarson = True
    time.sleep(CHECK_WAIT)
mainencode = run(mainq, FFCMD, "mainencode")
mainencode_failedstarts = 0
'''
Need a timeout for is the stream stops returning data
Sample ffprobe with debug for this is (-loglevel debug) and just hangs:
[rtmp @ 0x7fad39c121c0] Handshaking...
[rtmp @ 0x7fad39c121c0] Type answer 3
[rtmp @ 0x7fad39c121c0] Server version 4.5.2.1
[rtmp @ 0x7fad39c121c0] Proto = rtmp, path = /live/videoops-videoops@50541, app = live, fname = videoops-videoops@50541
[rtmp @ 0x7fad39c121c0] Server bandwidth = 1250000
[rtmp @ 0x7fad39c121c0] Client bandwidth = 1250000
[rtmp @ 0x7fad39c121c0] New incoming chunk size = 4096
[rtmp @ 0x7fad39c121c0] Creating stream...
[rtmp @ 0x7fad39c121c0] Sending play command for 'videoops-videoops@50541'
[rtmp @ 0x7fad39c121c0] New incoming chunk size = 4096
'''
# Main supervision loop: restart/fall back when the encoder dies, otherwise
# scan its output for the "started" marker.
while (True):
    # Check if process has died and restart
    if (not mainencode.poll() == None):
        logging.error("Main stream not available. Exit code: " + str(mainencode.poll()))
        mainencode_failedstarts += 1
        # Blocks here on probe which is not good if the probe hangs
        feed_available = probe(RTMP_SRC)
        '''
        If stream not running then put up colour bars in one process
        Wait for x seconds
        Check if stream running (use ffprobe), if it is then kill the colour bar process and start main encode
        else do nothing
        '''
        # Only run colorbars if main feed not available and colorbars not running
        if (not feed_available and not colorbarson):
            logging.info("Starting color bars")
            colorbars = run(colorq, FFSMPTE, "colorbars")
            colorbarson = True
        # If the feed is now available and colorbars is running then
        # terminate colorbars and start the main encode
        elif (feed_available and colorbarson):
            logging.info("Terminating colorbars")
            colorbars.terminate()
            print colorbars.poll()
            colorbarson = False
            mainencode = run(mainq, FFCMD, "mainencode")
        elif (feed_available and not colorbarson):
            mainencode = run(mainq, FFCMD, "mainencode")
        # NOTE: `enumerate` here is threading.enumerate (imported at top),
        # shadowing the builtin; it lists the live reader threads.
        print enumerate()
    else:  # encoder alive: drain any pending output lines
        try:
            while (not mainq.empty()):
                line = mainq.get_nowait()  # or q.get(timeout=.1)
                if (STREAM_RUNNING in line):
                    logging.info("Stream has started " + RTMP_DEST)
                    mainencode_failedstarts = 0
                logging.info(line)
        except Empty:
            pass
        except KeyboardInterrupt:
            mainencode.terminate()
            raise
    time.sleep(CHECK_WAIT)
# NOTE(review): unreachable — the while True above never breaks.
sys.exit()
| |
# GUI Application automation and testing library
# Copyright (C) 2006-2018 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Base class for all wrappers in all backends"""
from __future__ import unicode_literals
from __future__ import print_function
import abc
import locale
import re
import sys
import six
try:
from PIL import ImageGrab
except ImportError:
ImageGrab = None
from time import sleep
from .actionlogger import ActionLogger
from .mouse import _get_cursor_pos
from .timings import TimeoutError
from .timings import Timings
from .timings import wait_until
#=========================================================================
def remove_non_alphanumeric_symbols(s):
    """Make text usable for attribute name"""
    # Replace every non-word character (regex \W) with an underscore.
    non_word = re.compile(r"\W")
    return non_word.sub("_", s)
#=========================================================================
class InvalidElement(RuntimeError):

    """Raised when an invalid element is passed"""
#=========================================================================
class ElementNotEnabled(RuntimeError):

    """Raised when an element is not enabled"""
#=========================================================================
class ElementNotVisible(RuntimeError):

    """Raised when an element is not visible"""
#=========================================================================
class ElementNotActive(RuntimeError):

    """Raised when an element is not active"""
#=========================================================================
@six.add_metaclass(abc.ABCMeta)
class BaseMeta(abc.ABCMeta):

    """Abstract metaclass for Wrapper objects"""

    @staticmethod
    def find_wrapper(element):
        """Abstract static method to find an appropriate wrapper"""
        # Concrete backend metaclasses must map an ElementInfo to the
        # wrapper class that should handle it.
        raise NotImplementedError()
#=========================================================================
@six.add_metaclass(BaseMeta)
class BaseWrapper(object):

    """
    Abstract wrapper for elements.

    All other wrappers are derived from this.
    """

    # Properties required for _MetaWrapper class
    friendlyclassname = None  # human-friendly name; None -> fall back to class_name()
    windowclasses = []        # native window class names this wrapper handles

    # Properties that describe type of the element
    can_be_label = False      # whether this control can act as a label for another
    has_title = True          # whether the control is expected to carry a title
#------------------------------------------------------------
def __new__(cls, element_info, active_backend):
    # Delegate instance creation to the shared factory so subclass and
    # direct BaseWrapper construction take the same code path.
    return BaseWrapper._create_wrapper(cls, element_info, BaseWrapper)
#------------------------------------------------------------
@staticmethod
def _create_wrapper(cls_spec, element_info, myself):
    """Create a wrapper object according to the specified element info

    When the caller asked for a concrete subclass explicitly, honour it
    and skip the meta-class lookup, so users can force a wrapper type.
    """
    if cls_spec is not myself:
        target_class = cls_spec
    else:
        # Let the backend's meta class pick the matching wrapper.
        target_class = cls_spec.find_wrapper(element_info)
    wrapper = object.__new__(target_class)
    wrapper.__init__(element_info)
    return wrapper
#------------------------------------------------------------
def __init__(self, element_info, active_backend):
    """
    Initialize the element

    * **element_info** is instance of int or one of ElementInfo childs
    """
    # Guard clause: refuse to wrap a null element up front.
    if not element_info:
        raise RuntimeError('NULL pointer was used to initialize BaseWrapper')
    self.backend = active_backend
    self._element_info = element_info
    self.handle = self._element_info.handle
    self._as_parameter_ = self.handle
    self.ref = None
    self.appdata = None
    self._cache = {}
    self.actions = ActionLogger()
def by(self, **criteria):
    """
    Create WindowSpecification for search in descendants by criteria

    Current wrapper object is used as a parent while searching in the subtree.
    """
    from .base_application import WindowSpecification
    # We are usually looking for a control, not a top-level window, so
    # default top_level_only off unless the caller said otherwise.
    criteria.setdefault('top_level_only', False)
    criteria['backend'] = self.backend.name
    criteria['parent'] = self.element_info
    return WindowSpecification(criteria)
def __repr_texts(self):
    """Internal common method to be called from __str__ and __repr__"""
    module = self.__class__.__module__
    # Keep only the last component of a dotted module path.
    type_name = module[module.rfind('.') + 1:] + "." + self.__class__.__name__

    title = self.window_text()
    class_name = self.friendly_class_name()
    if six.PY2:
        # Python 2: encode unicode titles so printing cannot blow up.
        # Some frameworks override sys.stdout without an encoding
        # attribute (Tee Stream) and some users replace sys.stdout with a
        # file descriptor whose encoding is None; fall back to the
        # locale's preferred encoding in those cases.
        if getattr(sys.stdout, 'encoding', None) is not None:
            title = title.encode(sys.stdout.encoding, errors='backslashreplace')
        else:
            title = title.encode(locale.getpreferredencoding(), errors='backslashreplace')
    return type_name, title, class_name
def __repr__(self):
    """Representation of the wrapper object

    Reports the dotted type name, the control title (or empty string),
    the friendly class name, and a unique ID derived from a backend
    specific hash.  Title and class name are hints for building a window
    specification; the ID helps distinguish runtime objects while
    debugging.
    """
    type_name, title, class_name = self.__repr_texts()
    # Python 2 gets a bytes result to match the encoded title.
    template = b"<{0} - '{1}', {2}, {3}>" if six.PY2 else "<{0} - '{1}', {2}, {3}>"
    return template.format(type_name, title, class_name, self.__hash__())
def __str__(self):
    """Pretty print representation of the wrapper object

    Reports the dotted type name, the control title (or empty string)
    and the friendly class name.  Title and class name can be used as
    hints to prepare a window specification to access the control.
    """
    type_name, title, class_name = self.__repr_texts()
    # Python 2 gets a bytes result to match the encoded title.
    template = b"{0} - '{1}', {2}" if six.PY2 else "{0} - '{1}', {2}"
    return template.format(type_name, title, class_name)
#------------------------------------------------------------
@property
def writable_props(self):
    """
    Build the list of the default properties to be written.

    Derived classes may override or extend this list depending
    on how much control they need.
    """
    return [
        'class_name',
        'friendly_class_name',
        'texts',
        'control_id',
        'rectangle',
        'is_visible',
        'is_enabled',
        'control_count',
    ]
#------------------------------------------------------------
@property
def _needs_image_prop(self):
    """Specify whether we need to grab an image of ourselves

    when asked for properties.  Plain controls carry no image; derived
    wrappers flip this when a screenshot is a meaningful property.
    """
    return False
#------------------------------------------------------------
@property
def element_info(self):
    """Read-only property to get **ElementInfo** object"""
    return self._element_info
#------------------------------------------------------------
def from_point(self, x, y):
    """Get wrapper object for element at specified screen coordinates (x, y)"""
    info = self.backend.element_info_class.from_point(x, y)
    return self.backend.generic_wrapper_class(info)
#------------------------------------------------------------
def top_from_point(self, x, y):
    """Get wrapper object for top level element at specified screen coordinates (x, y)"""
    info = self.backend.element_info_class.top_from_point(x, y)
    return self.backend.generic_wrapper_class(info)
#------------------------------------------------------------
def get_active(self):
    """Get wrapper object for active element"""
    active_info = self.backend.element_info_class.get_active()
    return self.backend.generic_wrapper_class(active_info)
#------------------------------------------------------------
def friendly_class_name(self):
    """
    Return the friendly class name for the control

    This differs from the class of the control in some cases.
    class_name() is the actual 'Registered' element class of the control
    while friendly_class_name() is hopefully something that will make
    more sense to the user.

    For example Checkboxes are implemented as Buttons - so the class
    of a CheckBox is "Button" - but the friendly class is "CheckBox"
    """
    # Lazily fall back to the raw class name on first use and memoize it.
    if self.friendlyclassname is None:
        self.friendlyclassname = self.class_name()
    return self.friendlyclassname
#------------------------------------------------------------
def class_name(self):
    """Return the class name of the element"""
    return self.element_info.class_name
#------------------------------------------------------------
def window_text(self):
    """
    Window text of the element

    Quite a few controls have other text that is visible, for example
    Edit controls usually have an empty string for window_text but still
    have text displayed in the edit window.
    """
    return self.element_info.rich_text
#------------------------------------------------------------
def control_id(self):
    """
    Return the ID of the element

    Only controls have a valid ID - dialogs usually have no ID assigned.

    The ID usually identifies the control in the window - but there can
    be duplicate ID's for example labels in a dialog may have duplicate
    ID's.
    """
    return self.element_info.control_id
#------------------------------------------------------------
def is_visible(self):
    """
    Whether the element is visible or not

    Checks that both the top level parent (probably dialog) that
    owns this element and the element itself are both visible.

    If you want to wait for an element to become visible (or wait
    for it to become hidden) use ``BaseWrapper.wait_visible()`` or
    ``BaseWrapper.wait_not_visible()``.

    If you want to raise an exception immediately if an element is
    not visible then you can use the ``BaseWrapper.verify_visible()``.
    ``BaseWrapper.verify_actionable()`` raises if the element is not both
    visible and enabled.
    """
    # NOTE(review): the parent-visibility half of the check is disabled.
    return self.element_info.visible  #and self.top_level_parent().element_info.visible
#------------------------------------------------------------
def is_enabled(self):
    """
    Whether the element is enabled or not

    Checks that both the top level parent (probably dialog) that
    owns this element and the element itself are both enabled.

    If you want to wait for an element to become enabled (or wait
    for it to become disabled) use ``BaseWrapper.wait_enabled()`` or
    ``BaseWrapper.wait_not_enabled()``.

    If you want to raise an exception immediately if an element is
    not enabled then you can use the ``BaseWrapper.verify_enabled()``.
    ``BaseWrapper.verify_actionable()`` raises if the element is not both
    visible and enabled.
    """
    # NOTE(review): the parent-enabled half of the check is disabled.
    return self.element_info.enabled  #and self.top_level_parent().element_info.enabled
# ------------------------------------------------------------
def is_active(self):
    """
    Whether the element is active or not

    Checks that both the top level parent (probably dialog) that
    owns this element and the element itself are both active.

    If you want to wait for an element to become active (or wait
    for it to become not active) use ``BaseWrapper.wait_active()`` or
    ``BaseWrapper.wait_not_active()``.

    If you want to raise an exception immediately if an element is
    not active then you can use the ``BaseWrapper.verify_active()``.
    """
    return self.element_info.active
# -----------------------------------------------------------
def was_maximized(self):
    """Indicate whether the window was maximized before minimizing or not

    Abstract here; concrete backends implement it.
    """
    raise NotImplementedError
#------------------------------------------------------------
def rectangle(self):
    """
    Return the rectangle of element

    The rectangle() is the rectangle of the element on the screen.
    Coordinates are given from the top left of the screen.

    This method returns a RECT structure, Which has attributes - top,
    left, right, bottom. and has methods width() and height().
    See win32structures.RECT for more information.
    """
    return self.element_info.rectangle
#------------------------------------------------------------
def client_to_screen(self, client_point):
    """Maps point from client to screen coordinates"""
    # Read element_info.rectangle directly instead of self.rectangle():
    # derived wrappers may override rectangle() (tree-view / list-view
    # items) and we want the element's own rectangle here.
    rect = self.element_info.rectangle
    screen_x = client_point[0] + rect.left
    screen_y = client_point[1] + rect.top
    return (screen_x, screen_y)
#-----------------------------------------------------------
def process_id(self):
    """Return the ID of process that owns this window"""
    return self.element_info.process_id
#-----------------------------------------------------------
def is_dialog(self):
    """Return True if the control is a top level window"""
    # An element without a parent cannot be a dialog; otherwise it is a
    # dialog exactly when it is its own top level parent.
    return bool(self.parent()) and self == self.top_level_parent()
#-----------------------------------------------------------
def parent(self):
    """
    Return the parent of this element

    Note that the parent of a control is not necessarily a dialog or
    other main window. A group box may be the parent of some radio
    buttons for example.

    To get the main (or top level) window then use
    BaseWrapper.top_level_parent().
    """
    parent_elem = self.element_info.parent
    if not parent_elem:
        return None
    return self.backend.generic_wrapper_class(parent_elem)
#-----------------------------------------------------------
def root(self):
    """Return wrapper for root element (desktop)"""
    desktop_info = self.backend.element_info_class()
    return self.backend.generic_wrapper_class(desktop_info)
#-----------------------------------------------------------
def top_level_parent(self):
    """
    Return the top level window of this control

    The TopLevel parent is different from the parent in that the parent
    is the element that owns this element - but it may not be a dialog/main
    window. For example most Comboboxes have an Edit. The ComboBox is the
    parent of the Edit control.

    This will always return a valid window element (if the control has
    no top level parent then the control itself is returned - as it is
    a top level window already!)
    """
    # Cache the result: walking to the top of the tree can be expensive.
    if "top_level_parent" not in self._cache:
        top_wrapper = self.backend.generic_wrapper_class(self.element_info.top_level_parent)
        self._cache["top_level_parent"] = top_wrapper
    return self._cache["top_level_parent"]
#-----------------------------------------------------------
def texts(self):
    """
    Return the text for each item of this control

    It is a list of strings for the control. It is frequently overridden
    to extract all strings from a control with multiple items.

    It is always a list with one or more strings:

      * The first element is the window text of the control
      * Subsequent elements contain the text of any items of the
        control (e.g. items in a listbox/combobox, tabs in a tabcontrol)
    """
    return [self.window_text()]
#-----------------------------------------------------------
def children(self, **kwargs):
    """
    Return the children of this element as a list

    It returns a list of BaseWrapper (or subclass) instances.
    An empty list is returned if there are no children.
    """
    wrap = self.backend.generic_wrapper_class
    return [wrap(info) for info in self.element_info.children(**kwargs)]
#-----------------------------------------------------------
def iter_children(self, **kwargs):
    """
    Iterate over the children of this element

    It returns a generator of BaseWrapper (or subclass) instances.
    """
    wrap = self.backend.generic_wrapper_class
    for info in self.element_info.iter_children(**kwargs):
        yield wrap(info)
#-----------------------------------------------------------
def descendants(self, **kwargs):
    """
    Return the descendants of this element as a list

    It returns a list of BaseWrapper (or subclass) instances.
    An empty list is returned if there are no descendants.
    """
    wrap = self.backend.generic_wrapper_class
    return [wrap(info) for info in self.element_info.descendants(**kwargs)]
#-----------------------------------------------------------
def iter_descendants(self, **kwargs):
    """
    Iterate over the descendants of this element

    It returns a generator of BaseWrapper (or subclass) instances.
    """
    wrap = self.backend.generic_wrapper_class
    for info in self.element_info.iter_descendants(**kwargs):
        yield wrap(info)
#-----------------------------------------------------------
def control_count(self):
    """Return the number of children of this control"""
    # Restrict the child search to this control's own process.
    kids = self.element_info.children(process=self.process_id())
    return len(kids)
#-----------------------------------------------------------
def capture_as_image(self, rect=None):
    """
    Return a PIL image of the control.

    See PIL documentation to know what you can do with the resulting
    image.
    """
    control_rectangle = self.rectangle()
    # A zero-area control cannot be captured.
    if not (control_rectangle.width() and control_rectangle.height()):
        return None

    # PIL is optional so check first
    if not ImageGrab:
        print("PIL does not seem to be installed. "
              "PIL is required for capture_as_image")
        self.actions.log("PIL does not seem to be installed. "
                         "PIL is required for capture_as_image")
        return None

    # The caller may override the captured area.
    if rect:
        control_rectangle = rect

    # get the control rectangle in a way that PIL likes it
    box = (control_rectangle.left,
           control_rectangle.top,
           control_rectangle.right,
           control_rectangle.bottom)

    # TODO: maybe check the number of monitors on Linux
    # grab the image and get raw data as a string
    return ImageGrab.grab(box)
#-----------------------------------------------------------
def get_properties(self):
    """Return the properties of the control as a dictionary."""
    # Each writable property name doubles as a method name on self.
    props = {name: getattr(self, name)() for name in self.writable_props}
    # Optionally attach a screenshot when the wrapper asks for one.
    if self._needs_image_prop:
        props["image"] = self.capture_as_image()
    return props
#-----------------------------------------------------------
def draw_outline(
        self,
        colour='green',
        thickness=2,
        fill=None,
        rect=None):
    """
    Draw an outline around the window.

    * **colour** can be either an integer or one of 'red', 'green', 'blue'
      (default 'green')
    * **thickness** thickness of rectangle (default 2)
    * **fill** how to fill in the rectangle (default BS_NULL)
    * **rect** the coordinates of the rectangle to draw (defaults to
      the rectangle of the control)

    Abstract here; concrete backends implement the drawing.
    """
    raise NotImplementedError()
#-----------------------------------------------------------
def is_child(self, parent):
    """
    Return True if this element is a child of 'parent'.

    An element is a child of another element when it is a direct
    descendant of the other element (the parent element is in the chain
    of parent elements for the child element).
    """
    # Restrict the search to children sharing this element's class name.
    siblings = parent.children(class_name=self.class_name())
    return self in siblings
# ------------------------------------------------------------
def __hash__(self):
    """Return a unique hash value based on the element's handle"""
    # Delegate to the backend-specific ElementInfo hash.
    return self.element_info.__hash__()
#-----------------------------------------------------------
def __eq__(self, other):
    """Return True if 2 BaseWrapper's describe 1 actual element"""
    # Compare against another wrapper's element when possible, otherwise
    # assume *other* is already an ElementInfo-like object.
    other_info = getattr(other, "element_info", other)
    return self.element_info == other_info
#-----------------------------------------------------------
def __ne__(self, other):
    """Return False if the elements described by 2 BaseWrapper's are different"""
    # Defined for Python 2 compatibility; simply negate equality.
    return not self.__eq__(other)
#-----------------------------------------------------------
def verify_actionable(self):
    """
    Verify that the element is both visible and enabled

    Raise either ElementNotEnabled or ElementNotVisible if not
    enabled or visible respectively.
    """
    # Let the UI settle before checking, so transient states don't fail.
    self.wait_for_idle()
    self.verify_visible()
    self.verify_enabled()
#-----------------------------------------------------------
def verify_enabled(self):
    """
    Verify that the element is enabled

    Raises ElementNotEnabled when is_enabled() reports False.
    """
    if self.is_enabled():
        return
    raise ElementNotEnabled()
#-----------------------------------------------------------
def verify_visible(self):
    """
    Verify that the element is visible

    Raises ElementNotVisible when is_visible() reports False.
    """
    if self.is_visible():
        return
    raise ElementNotVisible()
# -----------------------------------------------------------
def verify_active(self):
    """
    Verify that the element is active

    Raises ElementNotActive when is_active() reports False.
    """
    if self.is_active():
        return
    raise ElementNotActive()
#-----------------------------------------------------------
def click_input(
        self,
        button = "left",
        coords = (None, None),
        button_down = True,
        button_up = True,
        double = False,
        wheel_dist = 0,
        use_log = True,
        pressed = "",
        absolute = False,
        key_down = True,
        key_up = True,
        fast_move = False):
    """Click at the specified coordinates

    * **button** The mouse button to click. One of 'left', 'right',
      'middle' or 'x' (Default: 'left', 'move' is a special case)
    * **coords** The coordinates to click at.(Default: the center of the control)
    * **double** Whether to perform a double click or not (Default: False)
    * **wheel_dist** The distance to move the mouse wheel (default: 0)

    NOTES:
       This is different from click method in that it requires the control
       to be visible on the screen but performs a more realistic 'click'
       simulation.

       This method is also vulnerable if the mouse is moved by the user
       as that could easily move the mouse off the control before the
       click_input has finished.

    Abstract here; concrete backends implement the actual input.
    """
    raise NotImplementedError()
#-----------------------------------------------------------
def double_click_input(self, button ="left", coords = (None, None)):
    """Double click at the specified coordinates"""
    self.click_input(button=button, coords=coords, double=True)
#-----------------------------------------------------------
def right_click_input(self, coords = (None, None)):
    """Right click at the specified coords"""
    # Convenience shortcut for a single right-button click.
    self.click_input(coords=coords, button='right')
#-----------------------------------------------------------
def press_mouse_input(
        self,
        button = "left",
        coords = (None, None),
        pressed = "",
        absolute = True,
        key_down = True,
        key_up = True
):
    """Press a mouse button using SendInput"""
    # A press is a click with the release half suppressed.
    self.click_input(
        button=button,
        coords=coords,
        button_down=True,
        button_up=False,
        pressed=pressed,
        absolute=absolute,
        key_down=key_down,
        key_up=key_up
    )
#-----------------------------------------------------------
def release_mouse_input(
        self,
        button = "left",
        coords = (None, None),
        pressed = "",
        absolute = True,
        key_down = True,
        key_up = True
):
    """Release the mouse button"""
    # A release is a click with the press half suppressed.
    self.click_input(
        button=button,
        coords=coords,
        button_down=False,
        button_up=True,
        pressed=pressed,
        absolute=absolute,
        key_down=key_down,
        key_up=key_up
    )
#-----------------------------------------------------------
def move_mouse_input(self, coords=(0, 0), pressed="", absolute=True, duration=0.0):
    """Move the mouse

    * **coords** target point; client coordinates unless **absolute**
    * **pressed** key held down during the move
    * **duration** seconds to spread the move over (0.0 = instant jump)
    """
    if not absolute:
        self.actions.log('Moving mouse to relative (client) coordinates ' + str(coords).replace('\n', ', '))
        coords = self.client_to_screen(coords)  # make coords absolute
    if not isinstance(duration, float):
        raise TypeError("duration must be float (in seconds)")
    minimum_duration = 0.05
    if duration >= minimum_duration:
        # Animate the move: step one pixel along the longer axis at a
        # time, capped so each sleep is at least minimum_duration.
        x_start, y_start = _get_cursor_pos()
        delta_x = coords[0] - x_start
        delta_y = coords[1] - y_start
        max_delta = max(abs(delta_x), abs(delta_y))
        num_steps = max_delta
        sleep_amount = duration / max(num_steps, 1)
        if sleep_amount < minimum_duration:
            num_steps = int(num_steps * sleep_amount / minimum_duration)
            sleep_amount = minimum_duration
        # Per-step increments (floats; truncated to int per step below).
        delta_x /= max(num_steps, 1)
        delta_y /= max(num_steps, 1)
        for step in range(num_steps):
            self.click_input(button='move',
                             coords=(x_start + int(delta_x * step), y_start + int(delta_y * step)),
                             absolute=True, pressed=pressed, fast_move=True)
            sleep(sleep_amount)
    # Final move lands exactly on the requested coordinates.
    self.click_input(button='move', coords=coords, absolute=True, pressed=pressed)
    self.wait_for_idle()
    return self
# -----------------------------------------------------------
def _calc_click_coords(self):
    """A helper that tries to get click coordinates of the control

    The calculated coordinates are absolute and returned as
    a tuple with x and y values.
    """
    mid = self.rectangle().mid_point()
    return (mid.x, mid.y)
# -----------------------------------------------------------
def drag_mouse_input(self,
                     dst=(0, 0),
                     src=None,
                     button="left",
                     pressed="",
                     absolute=True,
                     duration=0.0):
    """Click on **src**, drag it and drop on **dst**

    * **dst** is a destination wrapper object or just coordinates.
    * **src** is a source wrapper object or coordinates.
      If **src** is None the self is used as a source object.
    * **button** is a mouse button to hold during the drag.
      It can be "left", "right", "middle" or "x"
    * **pressed** is a key on the keyboard to press during the drag.
    * **absolute** specifies whether to use absolute coordinates
      for the mouse pointer locations

    Abstract here; concrete backends implement the actual drag.
    """
    raise NotImplementedError()
#-----------------------------------------------------------
def wheel_mouse_input(self, coords = (None, None), wheel_dist = 1, pressed =""):
    """Do mouse wheel"""
    # Wheel movement is modelled as a 'wheel' click with a distance.
    self.click_input(button='wheel', coords=coords,
                     wheel_dist=wheel_dist, pressed=pressed)
    return self
#-----------------------------------------------------------
def wait_for_idle(self):
    """Backend specific function to wait for idle state of a thread or a window"""
    pass  # do nothing by default
    # TODO: implement wait_for_idle for backend="uia"
#-----------------------------------------------------------
def type_keys(
        self,
        keys,
        pause = None,
        with_spaces = False,
        with_tabs = False,
        with_newlines = False,
        turn_off_numlock = True,
        set_foreground = True,
        vk_packet = True):
    """
    Type keys to the element using keyboard.send_keys

    This uses the re-written keyboard_ python module where you can
    find documentation on what to use for the **keys**.

    .. _keyboard: pywinauto.keyboard.html

    Abstract here; concrete backends implement the actual typing.
    """
    raise NotImplementedError()
#-----------------------------------------------------------
def set_focus(self):
    """Set the focus to this element"""
    # No-op in the base class; backends override with real focusing.
    pass
# -----------------------------------------------------------
def wait_visible(self, timeout, retry_interval):
"""
Wait until control is visible.
:param timeout: Raise an :func:`pywinauto.timings.TimeoutError` if the window
is not visible after this number of seconds.
Default: :py:attr:`pywinauto.timings.Timings.window_find_timeout`.
:param retry_interval: How long to sleep between each retry.
Default: :py:attr:`pywinauto.timings.Timings.window_find_retry`.
"""
if timeout is None:
timeout = Timings.window_find_timeout
if retry_interval is None:
retry_interval = Timings.window_find_retry
try:
wait_until(timeout, retry_interval, self.is_visible)
return self
except TimeoutError as e:
raise e
# -----------------------------------------------------------
def wait_not_visible(self, timeout, retry_interval):
"""
Wait until control is not visible.
:param timeout: Raise an :func:`pywinauto.timings.TimeoutError` if the window
is still visible after this number of seconds.
Default: :py:attr:`pywinauto.timings.Timings.window_find_timeout`.
:param retry_interval: How long to sleep between each retry.
Default: :py:attr:`pywinauto.timings.Timings.window_find_retry`.
"""
if timeout is None:
timeout = Timings.window_find_timeout
if retry_interval is None:
retry_interval = Timings.window_find_retry
try:
wait_until(timeout, retry_interval, self.is_visible, False)
except TimeoutError as e:
raise e
# -----------------------------------------------------------
def wait_enabled(self, timeout, retry_interval):
"""
Wait until control is enabled.
:param timeout: Raise an :func:`pywinauto.timings.TimeoutError` if the window
is not enabled after this number of seconds.
Default: :py:attr:`pywinauto.timings.Timings.window_find_timeout`.
:param retry_interval: How long to sleep between each retry.
Default: :py:attr:`pywinauto.timings.Timings.window_find_retry`.
"""
if timeout is None:
timeout = Timings.window_find_timeout
if retry_interval is None:
retry_interval = Timings.window_find_retry
try:
wait_until(timeout, retry_interval, self.is_enabled)
return self
except TimeoutError as e:
raise e
# -----------------------------------------------------------
def wait_not_enabled(self, timeout, retry_interval):
"""
Wait until control is not enabled.
:param timeout: Raise an :func:`pywinauto.timings.TimeoutError` if the window
is still enabled after this number of seconds.
Default: :py:attr:`pywinauto.timings.Timings.window_find_timeout`.
:param retry_interval: How long to sleep between each retry.
Default: :py:attr:`pywinauto.timings.Timings.window_find_retry`.
"""
if timeout is None:
timeout = Timings.window_find_timeout
if retry_interval is None:
retry_interval = Timings.window_find_retry
try:
wait_until(timeout, retry_interval, self.is_enabled, False)
except TimeoutError as e:
raise e
# -----------------------------------------------------------
def wait_active(self, timeout, retry_interval):
"""
Wait until control is active.
:param timeout: Raise an :func:`pywinauto.timings.TimeoutError` if the window
is not active after this number of seconds.
Default: :py:attr:`pywinauto.timings.Timings.window_find_timeout`.
:param retry_interval: How long to sleep between each retry.
Default: :py:attr:`pywinauto.timings.Timings.window_find_retry`.
"""
if timeout is None:
timeout = Timings.window_find_timeout
if retry_interval is None:
retry_interval = Timings.window_find_retry
try:
wait_until(timeout, retry_interval, self.is_active)
return self
except TimeoutError as e:
raise e
# -----------------------------------------------------------
def wait_not_active(self, timeout, retry_interval):
"""
:param timeout: Raise an :func:`pywinauto.timings.TimeoutError` if the window
is still active after this number of seconds.
Default: :py:attr:`pywinauto.timings.Timings.window_find_timeout`.
:param retry_interval: How long to sleep between each retry.
Default: :py:attr:`pywinauto.timings.Timings.window_find_retry`.
"""
if timeout is None:
timeout = Timings.window_find_timeout
if retry_interval is None:
retry_interval = Timings.window_find_retry
try:
wait_until(timeout, retry_interval, self.is_active, False)
except TimeoutError as e:
raise e
#====================================================================
| |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CauliflowerVest glue code module."""
import logging
import os
import plistlib
from cauliflowervest.client import util
from cauliflowervest.client.mac import apfs
from cauliflowervest.client.mac import corestorage
# User-facing status messages displayed after an encryption attempt.
ENCRYPTION_SUCCESS_MESSAGE = (
    'Encryption enabled and passphrase escrowed successfully.\n\n'
    'Your computer will now reboot to start encryption!')
ENCRYPTION_FAILED_MESSAGE = ('Encryption was not enabled. Please try again.')
ESCROW_FAILED_MESSAGE = (
    'Encryption was enabled, but escrowing the recovery passphrase failed.\n\n'
    'Please reboot, manually disable FileVault in '
    'System Preferences -> Security & Privacy, '
    'wait for decryption to complete, reboot again, and run CauliflowerVest again.\n')
# Absolute paths of the macOS system binaries this module shells out to.
FDESETUP_PATH = '/usr/bin/fdesetup'
DISKUTIL = '/usr/sbin/diskutil'
DEFAULTS = '/usr/bin/defaults'
class Error(Exception):
  """Root of the module's exception hierarchy."""


class InputError(Error):
  """Raised when user-supplied input is invalid."""


class OptionError(Error):
  """Raised when an option/precondition check fails."""
class FileVaultTool(object):
  """Abstract base class for FileVault tools.

  Child classes should implement the _GetCommand and _GetStdin methods, as well
  as defining values for the attribute list below.

  Attributes:
    NAME: The name of the tool.
    PATH: The filesystem path where the tool's binary is located.
    RETURN_AUTH_FAIL: The int return code used to indicate an authentication
        failure when invoking the tool.
    OUTPUT_PLIST_TOKEN_KEY: The key in the output plist dict for the recovery
        token.
  """

  def __init__(self, username, password):
    self._username = username
    self._password = password

  def EnableEncryption(self):
    """Enable full disk encryption.

    Returns:
      A 2-tuple containing the encrypted volume's UUID, and the recovery token.
    """
    try:
      output = util.GetPlistFromExec(
          self._GetCommand(), stdin=self._GetStdin())
    except util.ExecError as e:
      # _HandleFailure always raises, so execution never continues past here.
      self._HandleFailure(e)
    return self._HandleResult(output)

  def _GetCommand(self):
    """Return the sequence of args used to invoke the tool in a subprocess."""
    raise NotImplementedError

  def _GetStdin(self):
    """Returns the str data to be passed to the tool subprocess on stdin."""
    raise NotImplementedError

  def _HandleFailure(self, e):
    """Translate an ExecError from the tool into a module-level exception."""
    if e.returncode == self.RETURN_AUTH_FAIL:
      raise InputError(
          'Authentication problem with local account. Drive NOT encrypted.')
    if e.returncode != 0:
      logging.error('%s failed with stderr:\n%s', self.NAME, e.stderr)
      raise Error(
          'Problem running %s (exit status = %d)' % (self.NAME, e.returncode))
    # Zero exit status but plist parsing failed: log traceback for debugging.
    logging.exception('Problem running %s.', self.NAME)
    raise Error('Problem running %s' % self.NAME, e)

  def _HandleResult(self, result_plist):
    """Parse the (plist) output of a FileVault tool."""
    token = result_plist.get(self.OUTPUT_PLIST_TOKEN_KEY)
    if not token:
      raise Error('Could not get recovery token!')
    uuid = result_plist.get('LVUUID', None)
    if not uuid:
      raise Error('Could not get volume UUID!')
    return uuid, token
class FullDiskEncryptionSetup(FileVaultTool):
  """The Full Disk Encryption Setup (fdesetup) FileVault tool."""

  NAME = 'fdesetup'
  PATH = '/usr/bin/fdesetup'
  RETURN_AUTH_FAIL = 11
  OUTPUT_PLIST_TOKEN_KEY = 'RecoveryKey'

  def _GetCommand(self):
    """Build the sudo + fdesetup argv used to enable FileVault."""
    if not os.path.exists(FDESETUP_PATH):
      raise Error('unsupported OS X version (10.7 (Lion) and below)')
    return ('sudo', '-k', '-S', FDESETUP_PATH, 'enable', '-user',
            self._username, '-outputplist', '-inputplist')

  def _GetStdin(self):
    """Build the stdin payload for the sudo/fdesetup pipeline."""
    # fdesetup either reads the password directly from a tty, or from stdin
    # when passed the '-inputplist' flag.
    payload = plistlib.writePlistToString({
        'Username': self._username,
        'Password': self._password,
    })
    if os.getuid() == 0:
      # Already root: sudo will not prompt, so only the plist is needed.
      return payload
    # Non-root: sudo consumes the first line (the password + newline),
    # then fdesetup reads the plist that follows.
    return '%s\n%s' % (self._password, payload)
class APFSDiskEncryptionSetup(FullDiskEncryptionSetup):
  """fdesetup-based FileVault tool for APFS-formatted volumes."""
  def _HandleResult(self, result_plist):
    """Parse the (plist) output of a FileVault tool."""
    recovery_token = result_plist.get(self.OUTPUT_PLIST_TOKEN_KEY)
    if not recovery_token:
      raise Error('Could not get recovery token!')
    # APFS output has no LVUUID; the HardwareUUID is only checked for presence.
    hardware_uuid = result_plist.get('HardwareUUID', None)  # sanity check
    if not hardware_uuid:
      raise Error('Could not get Hardware UUID!')
    # The volume UUID is looked up separately via the apfs helper module.
    volume_uuid = apfs.APFSStorage().GetPrimaryVolumeUUID()
    if not volume_uuid:
      raise Error('Could not get Volume UUID!')
    return volume_uuid, recovery_token
def UpdateEscrowPassphrase(username, password):
  """Change the FileVault2 recovery key.

  Under CoreStorage, the current recovery key could be used as the password to
  fdesetup, but this does not work with APFS. Since the user password works in
  both cases and we already require it for sudo, we simply default to that.

  Args:
    username: the name of the FileVault user.
    password: the user password.
  Returns:
    The new recovery key.
  """
  argv = ('sudo', '-k', '-S', FDESETUP_PATH, 'changerecovery', '-personal',
          '-outputplist', '-inputplist')
  payload = plistlib.writePlistToString({
      'Username': username,
      'Password': password,
  })
  # When not already root, sudo consumes the first stdin line as the password.
  if os.getuid() != 0:
    payload = '%s\n%s' % (password, payload)
  try:
    output = util.GetPlistFromExec(argv, stdin=payload)
  except util.ExecError as e:
    logging.error(e.stderr)
    raise Error(e.message)
  return output.get('RecoveryKey')
def ApplyEncryption(fvclient, username, password):
  """Turn encryption on."""
  # Supply entropy to the system before asking it to generate key material.
  try:
    util.SupplyEntropy(util.RetrieveEntropy())
  except util.EntropyError as e:
    raise Error('Entropy operations failed: %s' % str(e))
  # Use "fdesetup" on Mac OS 10.8+ (Mountain Lion); pick the APFS-aware
  # variant when the root filesystem is APFS.
  logging.debug('Using fdesetup to enable FileVault')
  if GetFilesystemType() == 'apfs':
    tool = APFSDiskEncryptionSetup(username, password)
  else:
    tool = FullDiskEncryptionSetup(username, password)
  volume_uuid, recovery_token = tool.EnableEncryption()
  fvclient.SetOwner(username)
  return volume_uuid, recovery_token
def CheckEncryptionPreconditions():
  """Raise OptionError when FileVault cannot be enabled safely."""
  # FileVault2 depends on the presence of a Recovery Partition to
  # enable encryption.
  if not GetStorage().GetRecoveryPartition():
    raise OptionError('Recovery partition must exist.')
  # We can't get a recovery passphrase if a keychain is in place.
  if os.path.exists('/Library/Keychains/FileVaultMaster.keychain'):
    raise OptionError(
        'This tool cannot operate with a FileVaultMaster keychain in place.')
def GetFilesystemType():
  """Return the root volume's filesystem type as reported by diskutil.

  Falls back to 'hfs' when the diskutil invocation fails.
  """
  try:
    info = util.GetPlistFromExec((DISKUTIL, 'info', '-plist', '/'))
  except util.ExecError:
    logging.error('FilesystemType lookup failed. Defaulting to hfs')
    return 'hfs'
  return info.get('FilesystemType', None)
def GetStorage():
  """Return the module to handle this file system on this machine.

  Returns:
    APFSStorage or CoreStorage object.
  """
  if GetFilesystemType() == 'apfs':
    return apfs.APFSStorage()
  # CoreStorage handles 'hfs' and also serves as the default for any
  # unrecognized filesystem type.
  return corestorage.CoreStorage()
| |
## @package convnet_benchmarks
# Module caffe2.python.convnet_benchmarks
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
"""
Benchmark for common convnets.
Speed on Titan X, with 10 warmup steps and 10 main steps and with different
versions of cudnn, are as follows (time reported below is per-batch time,
forward / forward+backward):
CuDNN V3 CuDNN v4
AlexNet 32.5 / 108.0 27.4 / 90.1
OverFeat 113.0 / 342.3 91.7 / 276.5
Inception 134.5 / 485.8 125.7 / 450.6
VGG (batch 64) 200.8 / 650.0 164.1 / 551.7
Speed on Inception with varied batch sizes and CuDNN v4 is as follows:
Batch Size Speed per batch Speed per image
16 22.8 / 72.7 1.43 / 4.54
32 38.0 / 127.5 1.19 / 3.98
64 67.2 / 233.6 1.05 / 3.65
128 125.7 / 450.6 0.98 / 3.52
Speed on Tesla M40, which 10 warmup steps and 10 main steps and with cudnn
v4, is as follows:
AlexNet 68.4 / 218.1
OverFeat 210.5 / 630.3
Inception 300.2 / 1122.2
VGG (batch 64) 405.8 / 1327.7
(Note that these numbers involve a "full" backprop, i.e. the gradient
with respect to the input image is also computed.)
To get the numbers, simply run:
for MODEL in AlexNet OverFeat Inception; do
PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
--batch_size 128 --model $MODEL --forward_only True
done
for MODEL in AlexNet OverFeat Inception; do
PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
--batch_size 128 --model $MODEL
done
PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
--batch_size 64 --model VGGA --forward_only True
PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
--batch_size 64 --model VGGA
for BS in 16 32 64 128; do
PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
--batch_size $BS --model Inception --forward_only True
PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
--batch_size $BS --model Inception
done
Note that VGG needs to be run at batch 64 due to memory limit on the backward
pass.
"""
import argparse
from caffe2.python import brew, cnn, workspace
from caffe2.python.model_helper import ModelHelper
from caffe2.python.models import resnet
import numpy as np
def MLP(order, cudnn_ws, mkl):
    """Build a deep fully-connected benchmark net.

    20 layers of 3 parallel 256-wide FC columns, summed and projected to
    1000 classes. Returns (model, input feature size).
    """
    model = ModelHelper(name="benchmark")
    hidden_dim = 256
    num_layers = 20
    num_columns = 3
    for layer in range(num_layers):
        for col in range(num_columns):
            src = "data" if layer == 0 else "fc_{}_{}".format(layer, col)
            dst = "fc_{}_{}".format(layer + 1, col)
            brew.fc(
                model, src, dst, dim_in=hidden_dim, dim_out=hidden_dim,
                weight_init=('XavierFill', {}),
                bias_init=('XavierFill', {}))
    column_tips = ["fc_{}_{}".format(num_layers, col)
                   for col in range(num_columns)]
    brew.sum(model, column_tips, ["sum"])
    brew.fc(model, "sum", "last", dim_in=hidden_dim, dim_out=1000,
            weight_init=('XavierFill', {}), bias_init=('XavierFill', {}))
    xent = model.LabelCrossEntropy(["last", "label"], "xent")
    # MKL builds skip the loss (forward-backward unsupported there).
    if not mkl:
        model.AveragedLoss(xent, "loss")
    return model, hidden_dim
def ResNet50(order, cudnn_ws, mkl):
    """Build ResNet-50 for benchmarking.

    Returns:
        (model, input_size) where input_size is the spatial input resolution.
    """
    my_arg_scope = {'order': order, 'use_cudnn': True,
                    'cudnn_exhaustive_search': True,
                    'ws_nbytes_limit': str(cudnn_ws)}
    # Bug fix: the model was previously mis-named "alexnet" (copy-paste from
    # AlexNet above); the model name is what appears in the benchmark output.
    model = ModelHelper(name="resnet50", arg_scope=my_arg_scope)
    resnet.create_resnet50(model, "data", 3, 1000, is_test=True,
                           final_avg_kernel=14)
    return model, 448
def AlexNet(order, cudnn_ws, mkl):
    """Build AlexNet for benchmarking. Returns (model, input size 224)."""
    my_arg_scope = {'order': order, 'use_cudnn': True,
                    'cudnn_exhaustive_search': True,
                    'ws_nbytes_limit': str(cudnn_ws)}
    model = ModelHelper(name="alexnet", arg_scope=my_arg_scope)
    # Convolutional trunk: conv/relu pairs with interleaved max-pooling.
    conv1 = brew.conv(model, "data", "conv1", 3, 64, 11,
                      ('XavierFill', {}), ('ConstantFill', {}),
                      stride=4, pad=2)
    relu1 = brew.relu(model, conv1, "conv1")
    pool1 = brew.max_pool(model, relu1, "pool1", kernel=3, stride=2)
    conv2 = brew.conv(model, pool1, "conv2", 64, 192, 5,
                      ('XavierFill', {}), ('ConstantFill', {}), pad=2)
    relu2 = brew.relu(model, conv2, "conv2")
    pool2 = brew.max_pool(model, relu2, "pool2", kernel=3, stride=2)
    conv3 = brew.conv(model, pool2, "conv3", 192, 384, 3,
                      ('XavierFill', {}), ('ConstantFill', {}), pad=1)
    relu3 = brew.relu(model, conv3, "conv3")
    conv4 = brew.conv(model, relu3, "conv4", 384, 256, 3,
                      ('XavierFill', {}), ('ConstantFill', {}), pad=1)
    relu4 = brew.relu(model, conv4, "conv4")
    conv5 = brew.conv(model, relu4, "conv5", 256, 256, 3,
                      ('XavierFill', {}), ('ConstantFill', {}), pad=1)
    relu5 = brew.relu(model, conv5, "conv5")
    pool5 = brew.max_pool(model, relu5, "pool5", kernel=3, stride=2)
    # Fully-connected classifier head.
    fc6 = brew.fc(model, pool5, "fc6", 256 * 6 * 6, 4096,
                  ('XavierFill', {}), ('ConstantFill', {}))
    relu6 = brew.relu(model, fc6, "fc6")
    fc7 = brew.fc(model, relu6, "fc7", 4096, 4096,
                  ('XavierFill', {}), ('ConstantFill', {}))
    relu7 = brew.relu(model, fc7, "fc7")
    fc8 = brew.fc(model, relu7, "fc8", 4096, 1000,
                  ('XavierFill', {}), ('ConstantFill', {}))
    pred = brew.softmax(model, fc8, "pred")
    xent = model.LabelCrossEntropy([pred, "label"], "xent")
    # MKL builds skip the loss (forward-backward unsupported there).
    if not mkl:
        model.AveragedLoss(xent, "loss")
    return model, 224
def OverFeat(order, cudnn_ws, mkl):
    """Build OverFeat for benchmarking. Returns (model, input size 231)."""
    my_arg_scope = {'order': order, 'use_cudnn': True,
                    'cudnn_exhaustive_search': True,
                    'ws_nbytes_limit': str(cudnn_ws)}
    model = ModelHelper(name='overfeat', arg_scope=my_arg_scope)
    # Convolutional trunk.
    conv1 = brew.conv(model, "data", "conv1", 3, 96, 11,
                      ('XavierFill', {}), ('ConstantFill', {}), stride=4)
    relu1 = brew.relu(model, conv1, "conv1")
    pool1 = brew.max_pool(model, relu1, "pool1", kernel=2, stride=2)
    conv2 = brew.conv(model, pool1, "conv2", 96, 256, 5,
                      ('XavierFill', {}), ('ConstantFill', {}))
    relu2 = brew.relu(model, conv2, "conv2")
    pool2 = brew.max_pool(model, relu2, "pool2", kernel=2, stride=2)
    conv3 = brew.conv(model, pool2, "conv3", 256, 512, 3,
                      ('XavierFill', {}), ('ConstantFill', {}), pad=1)
    relu3 = brew.relu(model, conv3, "conv3")
    conv4 = brew.conv(model, relu3, "conv4", 512, 1024, 3,
                      ('XavierFill', {}), ('ConstantFill', {}), pad=1)
    relu4 = brew.relu(model, conv4, "conv4")
    conv5 = brew.conv(model, relu4, "conv5", 1024, 1024, 3,
                      ('XavierFill', {}), ('ConstantFill', {}), pad=1)
    relu5 = brew.relu(model, conv5, "conv5")
    pool5 = brew.max_pool(model, relu5, "pool5", kernel=2, stride=2)
    # Fully-connected classifier head.
    fc6 = brew.fc(model, pool5, "fc6", 1024 * 6 * 6, 3072,
                  ('XavierFill', {}), ('ConstantFill', {}))
    relu6 = brew.relu(model, fc6, "fc6")
    fc7 = brew.fc(model, relu6, "fc7", 3072, 4096,
                  ('XavierFill', {}), ('ConstantFill', {}))
    relu7 = brew.relu(model, fc7, "fc7")
    fc8 = brew.fc(model, relu7, "fc8", 4096, 1000,
                  ('XavierFill', {}), ('ConstantFill', {}))
    pred = brew.softmax(model, fc8, "pred")
    xent = model.LabelCrossEntropy([pred, "label"], "xent")
    # MKL builds skip the loss (forward-backward unsupported there).
    if not mkl:
        model.AveragedLoss(xent, "loss")
    return model, 231
def VGGA(order, cudnn_ws, mkl):
    """Build VGG-A (VGG-11) for benchmarking. Returns (model, input size 231)."""
    my_arg_scope = {'order': order, 'use_cudnn': True,
                    'cudnn_exhaustive_search': True,
                    'ws_nbytes_limit': str(cudnn_ws)}
    model = ModelHelper(name='vgg-a', arg_scope=my_arg_scope)
    # Convolutional trunk: 3x3 convs with pad=1, interleaved 2x2 max-pools.
    conv1 = brew.conv(model, "data", "conv1", 3, 64, 3,
                      ('XavierFill', {}), ('ConstantFill', {}), pad=1)
    relu1 = brew.relu(model, conv1, "conv1")
    pool1 = brew.max_pool(model, relu1, "pool1", kernel=2, stride=2)
    conv2 = brew.conv(model, pool1, "conv2", 64, 128, 3,
                      ('XavierFill', {}), ('ConstantFill', {}), pad=1)
    relu2 = brew.relu(model, conv2, "conv2")
    pool2 = brew.max_pool(model, relu2, "pool2", kernel=2, stride=2)
    conv3 = brew.conv(model, pool2, "conv3", 128, 256, 3,
                      ('XavierFill', {}), ('ConstantFill', {}), pad=1)
    relu3 = brew.relu(model, conv3, "conv3")
    conv4 = brew.conv(model, relu3, "conv4", 256, 256, 3,
                      ('XavierFill', {}), ('ConstantFill', {}), pad=1)
    relu4 = brew.relu(model, conv4, "conv4")
    pool4 = brew.max_pool(model, relu4, "pool4", kernel=2, stride=2)
    conv5 = brew.conv(model, pool4, "conv5", 256, 512, 3,
                      ('XavierFill', {}), ('ConstantFill', {}), pad=1)
    relu5 = brew.relu(model, conv5, "conv5")
    conv6 = brew.conv(model, relu5, "conv6", 512, 512, 3,
                      ('XavierFill', {}), ('ConstantFill', {}), pad=1)
    relu6 = brew.relu(model, conv6, "conv6")
    pool6 = brew.max_pool(model, relu6, "pool6", kernel=2, stride=2)
    conv7 = brew.conv(model, pool6, "conv7", 512, 512, 3,
                      ('XavierFill', {}), ('ConstantFill', {}), pad=1)
    relu7 = brew.relu(model, conv7, "conv7")
    conv8 = brew.conv(model, relu7, "conv8", 512, 512, 3,
                      ('XavierFill', {}), ('ConstantFill', {}), pad=1)
    relu8 = brew.relu(model, conv8, "conv8")
    pool8 = brew.max_pool(model, relu8, "pool8", kernel=2, stride=2)
    # Fully-connected classifier head (roman-numeral layer names ix/x/xi).
    fcix = brew.fc(model, pool8, "fcix", 512 * 7 * 7, 4096,
                   ('XavierFill', {}), ('ConstantFill', {}))
    reluix = brew.relu(model, fcix, "fcix")
    fcx = brew.fc(model, reluix, "fcx", 4096, 4096,
                  ('XavierFill', {}), ('ConstantFill', {}))
    relux = brew.relu(model, fcx, "fcx")
    fcxi = brew.fc(model, relux, "fcxi", 4096, 1000,
                   ('XavierFill', {}), ('ConstantFill', {}))
    pred = brew.softmax(model, fcxi, "pred")
    xent = model.LabelCrossEntropy([pred, "label"], "xent")
    # MKL builds skip the loss (forward-backward unsupported there).
    if not mkl:
        model.AveragedLoss(xent, "loss")
    return model, 231
def _InceptionModule(
    model, input_blob, input_depth, output_name, conv1_depth, conv3_depths,
    conv5_depths, pool_depth
):
    """Append one GoogLeNet-style Inception module to the model.

    Four parallel paths (1x1 conv; 1x1+3x3 convs; 1x1+5x5 convs;
    3x3 max-pool + 1x1 projection) concatenated into ``output_name``.
    """
    # path 1: 1x1 conv
    path1 = brew.conv(model, input_blob, output_name + ":conv1",
                      input_depth, conv1_depth, 1,
                      ('XavierFill', {}), ('ConstantFill', {}))
    path1 = brew.relu(model, path1, path1)
    # path 2: 1x1 conv + 3x3 conv
    reduce3 = brew.conv(model, input_blob, output_name + ":conv3_reduce",
                        input_depth, conv3_depths[0], 1,
                        ('XavierFill', {}), ('ConstantFill', {}))
    reduce3 = brew.relu(model, reduce3, reduce3)
    path2 = brew.conv(model, reduce3, output_name + ":conv3",
                      conv3_depths[0], conv3_depths[1], 3,
                      ('XavierFill', {}), ('ConstantFill', {}), pad=1)
    path2 = brew.relu(model, path2, path2)
    # path 3: 1x1 conv + 5x5 conv
    reduce5 = brew.conv(model, input_blob, output_name + ":conv5_reduce",
                        input_depth, conv5_depths[0], 1,
                        ('XavierFill', {}), ('ConstantFill', {}))
    reduce5 = brew.relu(model, reduce5, reduce5)
    path3 = brew.conv(model, reduce5, output_name + ":conv5",
                      conv5_depths[0], conv5_depths[1], 5,
                      ('XavierFill', {}), ('ConstantFill', {}), pad=2)
    path3 = brew.relu(model, path3, path3)
    # path 4: pool + 1x1 conv
    pooled = brew.max_pool(model, input_blob, output_name + ":pool",
                           kernel=3, stride=1, pad=1)
    path4 = brew.conv(model, pooled, output_name + ":pool_proj",
                      input_depth, pool_depth, 1,
                      ('XavierFill', {}), ('ConstantFill', {}))
    path4 = brew.relu(model, path4, path4)
    return brew.concat(model, [path1, path2, path3, path4], output_name)
def Inception(order, cudnn_ws, mkl):
    """Build GoogLeNet (Inception v1) for benchmarking. Returns (model, 224)."""
    my_arg_scope = {'order': order, 'use_cudnn': True,
                    'cudnn_exhaustive_search': True,
                    'ws_nbytes_limit': str(cudnn_ws)}
    model = ModelHelper(name="inception", arg_scope=my_arg_scope)
    # Stem: 7x7/2 conv -> pool -> 1x1 conv -> 3x3 conv -> pool.
    conv1 = brew.conv(model, "data", "conv1", 3, 64, 7,
                      ('XavierFill', {}), ('ConstantFill', {}),
                      stride=2, pad=3)
    relu1 = brew.relu(model, conv1, "conv1")
    pool1 = brew.max_pool(model, relu1, "pool1", kernel=3, stride=2, pad=1)
    conv2a = brew.conv(model, pool1, "conv2a", 64, 64, 1,
                       ('XavierFill', {}), ('ConstantFill', {}))
    conv2a = brew.relu(model, conv2a, conv2a)
    conv2 = brew.conv(model, conv2a, "conv2", 64, 192, 3,
                      ('XavierFill', {}), ('ConstantFill', {}), pad=1)
    relu2 = brew.relu(model, conv2, "conv2")
    pool2 = brew.max_pool(model, relu2, "pool2", kernel=3, stride=2, pad=1)
    # Inception modules
    inc3 = _InceptionModule(
        model, pool2, 192, "inc3", 64, [96, 128], [16, 32], 32)
    inc4 = _InceptionModule(
        model, inc3, 256, "inc4", 128, [128, 192], [32, 96], 64)
    pool5 = brew.max_pool(model, inc4, "pool5", kernel=3, stride=2, pad=1)
    inc5 = _InceptionModule(
        model, pool5, 480, "inc5", 192, [96, 208], [16, 48], 64)
    inc6 = _InceptionModule(
        model, inc5, 512, "inc6", 160, [112, 224], [24, 64], 64)
    inc7 = _InceptionModule(
        model, inc6, 512, "inc7", 128, [128, 256], [24, 64], 64)
    inc8 = _InceptionModule(
        model, inc7, 512, "inc8", 112, [144, 288], [32, 64], 64)
    inc9 = _InceptionModule(
        model, inc8, 528, "inc9", 256, [160, 320], [32, 128], 128)
    pool9 = brew.max_pool(model, inc9, "pool9", kernel=3, stride=2, pad=1)
    inc10 = _InceptionModule(
        model, pool9, 832, "inc10", 256, [160, 320], [32, 128], 128)
    inc11 = _InceptionModule(
        model, inc10, 832, "inc11", 384, [192, 384], [48, 128], 128)
    pool11 = brew.average_pool(model, inc11, "pool11", kernel=7, stride=1)
    fc = brew.fc(model, pool11, "fc", 1024, 1000,
                 ('XavierFill', {}), ('ConstantFill', {}))
    # It seems that Soumith's benchmark does not have softmax on top
    # for Inception. We will add it anyway so we can have a proper
    # backward pass.
    pred = brew.softmax(model, fc, "pred")
    xent = model.LabelCrossEntropy([pred, "label"], "xent")
    # MKL builds skip the loss (forward-backward unsupported there).
    if not mkl:
        model.AveragedLoss(xent, "loss")
    return model, 224
def AddParameterUpdate(model):
    """ Simple plain SGD update -- not tuned to actually train the models """
    iteration = brew.iter(model, "iter")
    lr = model.LearningRate(
        iteration, "LR", base_lr=-1e-8, policy="step", stepsize=10000,
        gamma=0.999)
    one = model.param_init_net.ConstantFill([], "ONE", shape=[1], value=1.0)
    # In-place step: param <- param * 1 + grad * LR (note base_lr is negative).
    for param in model.params:
        grad = model.param_to_grad[param]
        model.WeightedSum([param, one, grad, lr], param)
def Benchmark(model_gen, arg):
    """Build, initialize and benchmark a single model.

    Args:
        model_gen: a builder callable (order, cudnn_ws, mkl) -> (model, size).
        arg: the parsed argparse namespace from GetArgumentParser().
    """
    model, input_size = model_gen(arg.order, arg.cudnn_ws, arg.mkl)
    model.Proto().type = arg.net_type
    model.Proto().num_workers = arg.num_workers
    # In order to be able to run everything without feeding more stuff, let's
    # add the data and label blobs to the parameter initialization net as well.
    if arg.order == "NCHW":
        input_shape = [arg.batch_size, 3, input_size, input_size]
    else:
        input_shape = [arg.batch_size, input_size, input_size, 3]
    if arg.model == "MLP":
        input_shape = [arg.batch_size, input_size]
    model.param_init_net.GaussianFill(
        [],
        "data",
        shape=input_shape,
        mean=0.0,
        std=1.0
    )
    # MKL doesn't support int, so have to use numpy
    if arg.mkl:
        label = np.random.randint(
            low=0, high=1000, size=(arg.batch_size,)).astype(np.int32)
        workspace.FeedBlob("label", label)
    else:
        model.param_init_net.UniformIntFill(
            [],
            "label",
            shape=[arg.batch_size, ],
            min=0,
            max=999
        )
    if arg.forward_only:
        print('{}: running forward only.'.format(arg.model))
    else:
        if arg.mkl:
            print(
                '==WARNING==\n'
                'forward-backward not supported yet in MKL, so exiting'
            )
        print('{}: running forward-backward.'.format(arg.model))
        model.AddGradientOperators(["loss"])
        AddParameterUpdate(model)
        if arg.order == 'NHWC':
            print(
                '==WARNING==\n'
                'NHWC order with CuDNN may not be supported yet, so I might\n'
                'exit suddenly.'
            )
    if not arg.cpu:
        if arg.mkl:
            model.param_init_net.RunAllOnMKL()
            model.net.RunAllOnMKL()
        else:
            model.param_init_net.RunAllOnGPU()
            model.net.RunAllOnGPU()
    if arg.engine:
        # Blindly prefer the requested engine for every operator.
        for op in model.net.Proto().op:
            op.engine = arg.engine
    if arg.dump_model:
        # Writes out the pbtxt for benchmarks on e.g. Android
        with open(
            "{0}_init_batch_{1}.pbtxt".format(arg.model, arg.batch_size), "w"
        ) as fid:
            fid.write(str(model.param_init_net.Proto()))
        # Bug fix: the predict-net dump previously used "{0}.pbtxt" while
        # still passing batch_size, which format() silently ignored; name the
        # file consistently with the init-net dump above.
        with open(
            "{0}_batch_{1}.pbtxt".format(arg.model, arg.batch_size), "w"
        ) as fid:
            fid.write(str(model.net.Proto()))
    workspace.RunNetOnce(model.param_init_net)
    workspace.CreateNet(model.net)
    workspace.BenchmarkNet(
        model.net.Proto().name, arg.warmup_iterations, arg.iterations,
        arg.layer_wise_benchmark)
def GetArgumentParser():
    """Build the command-line parser for the benchmark driver."""
    parser = argparse.ArgumentParser(description="Caffe2 benchmark.")
    parser.add_argument("--batch_size", type=int, default=128,
                        help="The batch size.")
    parser.add_argument("--model", type=str, help="The model to benchmark.")
    parser.add_argument("--order", type=str, default="NCHW",
                        help="The order to evaluate.")
    parser.add_argument("--cudnn_ws", type=int,
                        help="The cudnn workspace size.")
    parser.add_argument("--iterations", type=int, default=10,
                        help="Number of iterations to run the network.")
    parser.add_argument("--warmup_iterations", type=int, default=10,
                        help="Number of warm-up iterations before benchmarking.")
    parser.add_argument("--forward_only", action='store_true',
                        help="If set, only run the forward pass.")
    parser.add_argument("--layer_wise_benchmark", action='store_true',
                        help="If True, run the layer-wise benchmark as well.")
    parser.add_argument("--cpu", action='store_true',
                        help="If True, run testing on CPU instead of GPU.")
    parser.add_argument("--mkl", action='store_true',
                        help="If True, run testing on CPU-MKL instead of GPU.")
    parser.add_argument("--engine", type=str, default="",
                        help="If set, blindly prefer the given engine(s) for every op.")
    parser.add_argument("--dump_model", action='store_true',
                        help="If True, dump the model prototxts to disk.")
    parser.add_argument("--net_type", type=str, default="simple")
    parser.add_argument("--num_workers", type=int, default=2)
    parser.add_argument("--use-nvtx", default=False, action='store_true')
    parser.add_argument("--htrace_span_log_path", type=str)
    return parser
if __name__ == '__main__':
    # Parse the known benchmark flags; anything unrecognized is forwarded to
    # caffe2's GlobalInit below.
    args, extra_args = GetArgumentParser().parse_known_args()
    if (
        not args.batch_size or not args.model or not args.order
    ):
        GetArgumentParser().print_help()
    else:
        # Initialize the caffe2 runtime, optionally enabling nvtx profiling
        # and htrace span logging.
        workspace.GlobalInit(
            ['caffe2', '--caffe2_log_level=0'] + extra_args +
            (['--caffe2_use_nvtx'] if args.use_nvtx else []) +
            (['--caffe2_htrace_span_log_path=' + args.htrace_span_log_path]
                if args.htrace_span_log_path else []))
        # Dispatch table mapping the --model flag to its builder function.
        model_map = {
            'AlexNet': AlexNet,
            'OverFeat': OverFeat,
            'VGGA': VGGA,
            'Inception': Inception,
            'ResNet50': ResNet50,
            'MLP': MLP,
        }
        Benchmark(model_map[args.model], args)
| |
"""
This app provides the core functionality for tracking user
engagement with content and the Kolibri app.
It stores:
* details of users' interactions with content
* summaries of those interactions
* interactions with the software in general
Eventually, it may also store user feedback on the content and the software.
"""
from __future__ import unicode_literals
from datetime import timedelta
from django.core.exceptions import ObjectDoesNotExist
from django.core.exceptions import ValidationError
from django.core.validators import MaxValueValidator
from django.core.validators import MinValueValidator
from django.db import models
from django.utils import timezone
from morango.models import SyncableModelQuerySet
from morango.models import UUIDField
from .permissions import AnyoneCanWriteAnonymousLogs
from kolibri.core.auth.constants import role_kinds
from kolibri.core.auth.models import AbstractFacilityDataModel
from kolibri.core.auth.models import Facility
from kolibri.core.auth.models import FacilityUser
from kolibri.core.auth.permissions.base import RoleBasedPermissions
from kolibri.core.auth.permissions.general import IsOwn
from kolibri.core.exams.models import Exam
from kolibri.core.fields import DateTimeTzField
from kolibri.core.fields import JSONField
from kolibri.utils.time_utils import local_now
class BaseLogQuerySet(SyncableModelQuerySet):
    """Shared queryset helpers for filtering log models by content."""

    def filter_by_topic(self, topic, content_id_lookup="content_id"):
        """
        Filter a set of logs by content_id, using content_ids from all descendants of specified topic.
        """
        content_ids = topic.get_descendant_content_ids()
        # Bug fix: forward content_id_lookup instead of silently dropping it;
        # previously a custom lookup passed here had no effect.
        return self.filter_by_content_ids(
            content_ids, content_id_lookup=content_id_lookup
        )

    def filter_by_content_ids(self, content_ids, content_id_lookup="content_id"):
        """
        Filter a set of logs by content_id, using content_ids from the provided list or queryset.
        """
        return self.filter(**{content_id_lookup + "__in": content_ids})
def log_permissions(user_field):
    """Compose the standard permissions object for a log model.

    Anonymous writes are allowed, owners may access their own rows, and
    facility admins/coaches get role-based access via ``user_field``.
    """
    id_field = user_field + "_id"
    anonymous_write = AnyoneCanWriteAnonymousLogs(field_name=id_field)
    own = IsOwn(field_name=id_field)
    role_based = RoleBasedPermissions(
        target_field=user_field,
        can_be_created_by=(role_kinds.ADMIN,),
        can_be_read_by=(role_kinds.ADMIN, role_kinds.COACH),
        can_be_updated_by=(role_kinds.ADMIN,),
        can_be_deleted_by=(role_kinds.ADMIN,),
    )
    return anonymous_write | own | role_based
class BaseLogModel(AbstractFacilityDataModel):
    """Abstract base for log models: shared permissions, dataset inference
    and sync-partition calculation."""
    permissions = log_permissions("user")
    class Meta:
        abstract = True
    def infer_dataset(self, *args, **kwargs):
        """Determine which dataset this log row belongs to."""
        if self.user_id:
            # A user is attached: use the user's dataset.
            return self.cached_related_dataset_lookup("user")
        elif self.dataset_id:
            # confirm that there exists a facility with that dataset_id
            try:
                return Facility.objects.get(dataset_id=self.dataset_id).dataset_id
            except Facility.DoesNotExist:
                pass
        # if no user or matching facility, infer dataset from the default facility
        facility = Facility.get_default_facility()
        # NOTE(review): assert is stripped when Python runs with -O; if this
        # check must hold in production, it should raise an explicit exception.
        assert facility, "Before you can save logs, you must have a facility"
        return facility.dataset_id
    objects = BaseLogQuerySet.as_manager()
    def calculate_partition(self):
        """Return the sync partition string: per-user read-write when a user
        is attached, otherwise the dataset-wide anonymous partition."""
        if self.user_id:
            return "{dataset_id}:user-rw:{user_id}".format(
                dataset_id=self.dataset_id, user_id=self.user_id
            )
        else:
            return "{dataset_id}:anonymous".format(dataset_id=self.dataset_id)
class ContentSessionLog(BaseLogModel):
    """
    This model provides a record of interactions with a content item within a single visit to that content page.
    """
    # Morango syncing settings
    morango_model_name = "contentsessionlog"
    # Anonymous sessions are allowed, so the user may be null.
    user = models.ForeignKey(FacilityUser, blank=True, null=True)
    content_id = UUIDField(db_index=True)
    visitor_id = models.UUIDField(blank=True, null=True)
    channel_id = UUIDField()
    start_timestamp = DateTimeTzField()
    end_timestamp = DateTimeTzField(blank=True, null=True)
    time_spent = models.FloatField(
        help_text="(in seconds)", default=0.0, validators=[MinValueValidator(0)]
    )
    progress = models.FloatField(default=0, validators=[MinValueValidator(0)])
    kind = models.CharField(max_length=200)
    # BUG FIX: use the ``dict`` callable instead of a shared ``{}`` literal so
    # each instance gets its own default dict (Django evaluates callables per
    # instance; a literal is shared and can be mutated across rows).
    extra_fields = JSONField(default=dict, blank=True)
    def save(self, *args, **kwargs):
        """Reject negative progress values before persisting.

        Raises ValidationError when progress < 0.
        """
        if self.progress < 0:
            raise ValidationError("Progress out of range (<0)")
        super(ContentSessionLog, self).save(*args, **kwargs)
class ContentSummaryLog(BaseLogModel):
    """
    This model provides an aggregate summary of all recorded interactions a user has had with
    a content item over time.
    """
    # Morango syncing settings
    morango_model_name = "contentsummarylog"
    user = models.ForeignKey(FacilityUser)
    content_id = UUIDField(db_index=True)
    channel_id = UUIDField()
    start_timestamp = DateTimeTzField()
    end_timestamp = DateTimeTzField(blank=True, null=True)
    completion_timestamp = DateTimeTzField(blank=True, null=True)
    time_spent = models.FloatField(
        help_text="(in seconds)", default=0.0, validators=[MinValueValidator(0)]
    )
    # Upper bound 1.01 appears to tolerate slight floating-point overshoot.
    progress = models.FloatField(
        default=0, validators=[MinValueValidator(0), MaxValueValidator(1.01)]
    )
    kind = models.CharField(max_length=200)
    # BUG FIX: callable default so each instance gets its own dict rather than
    # sharing one mutable ``{}`` literal.
    extra_fields = JSONField(default=dict, blank=True)
    def calculate_source_id(self):
        """Use the content_id as the morango source id for this summary."""
        return self.content_id
    def save(self, *args, **kwargs):
        """Validate progress is within [0, 1.01] before persisting.

        Raises ValidationError when out of range.
        """
        if self.progress < 0 or self.progress > 1.01:
            raise ValidationError("Content summary progress out of range (0-1)")
        super(ContentSummaryLog, self).save(*args, **kwargs)
class UserSessionLog(BaseLogModel):
    """
    This model provides a record of a user session in Kolibri.
    """
    # Morango syncing settings
    morango_model_name = "usersessionlog"
    user = models.ForeignKey(FacilityUser)
    channels = models.TextField(blank=True)
    start_timestamp = DateTimeTzField(default=local_now)
    last_interaction_timestamp = DateTimeTzField(null=True, blank=True)
    pages = models.TextField(blank=True)
    @classmethod
    def update_log(cls, user):
        """
        Update the current UserSessionLog for a particular user.
        """
        # Only track sessions for authenticated facility users.
        if user and isinstance(user, FacilityUser):
            try:
                user_session_log = cls.objects.filter(user=user).latest(
                    "last_interaction_timestamp"
                )
            except ObjectDoesNotExist:
                user_session_log = None
            # Start a fresh session log after more than 5 minutes of
            # inactivity; otherwise just bump the interaction timestamp.
            # NOTE(review): assumes last_interaction_timestamp is non-null on
            # the latest log -- a null value here would raise TypeError.
            if not user_session_log or timezone.now() - user_session_log.last_interaction_timestamp > timedelta(
                minutes=5
            ):
                user_session_log = cls(user=user)
            user_session_log.last_interaction_timestamp = local_now()
            user_session_log.save()
class MasteryLog(BaseLogModel):
    """
    This model provides a summary of a user's engagement with an assessment within a mastery level
    """
    # Morango syncing settings
    morango_model_name = "masterylog"
    user = models.ForeignKey(FacilityUser)
    # Every MasteryLog is related to the single summary log for the user/content pair
    summarylog = models.ForeignKey(ContentSummaryLog, related_name="masterylogs")
    # The MasteryLog records the mastery criterion that has been specified for the user.
    # It is recorded here to prevent this changing in the middle of a user's engagement
    # with an assessment.
    # BUG FIX: callable default so each instance gets its own dict rather than
    # sharing one mutable ``{}`` literal.
    mastery_criterion = JSONField(default=dict)
    start_timestamp = DateTimeTzField()
    end_timestamp = DateTimeTzField(blank=True, null=True)
    completion_timestamp = DateTimeTzField(blank=True, null=True)
    # The integer mastery level that this log is tracking.
    mastery_level = models.IntegerField(
        validators=[MinValueValidator(1), MaxValueValidator(10)]
    )
    # Has this mastery level been completed?
    complete = models.BooleanField(default=False)
    def infer_dataset(self, *args, **kwargs):
        """The dataset always follows the owning user."""
        return self.cached_related_dataset_lookup("user")
    def calculate_source_id(self):
        """Morango source id: unique per (summary log, mastery level) pair."""
        return "{summarylog_id}:{mastery_level}".format(
            summarylog_id=self.summarylog_id, mastery_level=self.mastery_level
        )
class BaseAttemptLog(BaseLogModel):
    """
    This is an abstract model that provides a summary of a user's interactions with a particular
    item/question in an assessment/exercise/exam
    """
    # Unique identifier within the relevant assessment for the particular question/item
    # that this attemptlog is a record of an interaction with.
    item = models.CharField(max_length=200)
    start_timestamp = DateTimeTzField()
    end_timestamp = DateTimeTzField()
    completion_timestamp = DateTimeTzField(blank=True, null=True)
    time_spent = models.FloatField(
        help_text="(in seconds)", default=0.0, validators=[MinValueValidator(0)]
    )
    complete = models.BooleanField(default=False)
    # How correct was their answer? In simple cases, just 0 or 1.
    correct = models.FloatField(validators=[MinValueValidator(0), MaxValueValidator(1)])
    hinted = models.BooleanField(default=False)
    # JSON blob that would allow the learner's answer to be rerendered in the frontend interface
    # BUG FIX: callable defaults (dict/list) so every instance gets its own
    # object rather than sharing one mutable literal across instances.
    answer = JSONField(default=dict, null=True, blank=True)
    # A human readable answer that could be rendered directly in coach reports, can be blank.
    simple_answer = models.CharField(max_length=200, blank=True)
    # A JSON Array with a sequence of JSON objects that describe the history of interaction of the user
    # with this assessment item in this attempt.
    interaction_history = JSONField(default=list, blank=True)
    user = models.ForeignKey(FacilityUser, blank=True, null=True)
    error = models.BooleanField(default=False)
    class Meta:
        abstract = True
class AttemptLog(BaseAttemptLog):
    """
    This model provides a summary of a user's interactions with a question in a content node.
    (Think of it like a ContentNodeAttemptLog to distinguish it from ExamAttemptLog and BaseAttemptLog)
    """
    morango_model_name = "attemptlog"
    # Which mastery log was this attemptlog associated with?
    masterylog = models.ForeignKey(
        MasteryLog, related_name="attemptlogs", blank=True, null=True
    )
    # The content session during which this attempt occurred.
    sessionlog = models.ForeignKey(ContentSessionLog, related_name="attemptlogs")
    def infer_dataset(self, *args, **kwargs):
        """The dataset always follows the related session log."""
        return self.cached_related_dataset_lookup("sessionlog")
class ExamLog(BaseLogModel):
    """
    This model provides a summary of a user's interactions with an exam, and serves as
    an aggregation point for individual attempts on questions in that exam.
    """
    morango_model_name = "examlog"
    # Identifies the exam that this is for.
    exam = models.ForeignKey(Exam, related_name="examlogs", blank=False, null=False)
    # Identifies which user this log summarizes interactions for.
    user = models.ForeignKey(FacilityUser)
    # Is this exam open for engagement, or is it closed?
    # Used to end user engagement with an exam when it has been deactivated.
    closed = models.BooleanField(default=False)
    # when was this exam finished?
    completion_timestamp = DateTimeTzField(blank=True, null=True)
    def calculate_source_id(self):
        """Morango source id: unique per (exam, user) pair."""
        return "{exam_id}:{user_id}".format(exam_id=self.exam_id, user_id=self.user_id)
    def calculate_partition(self):
        """Exam logs are partitioned by dataset rather than per-user."""
        return self.dataset_id
class ExamAttemptLog(BaseAttemptLog):
    """
    This model provides a summary of a user's interactions with a question in an exam
    """
    morango_model_name = "examattemptlog"
    # The parent exam log that aggregates this attempt.
    examlog = models.ForeignKey(
        ExamLog, related_name="attemptlogs", blank=False, null=False
    )
    # We have no session logs associated with ExamLogs, so we need to record the channel and content
    # ids here
    content_id = UUIDField()
    def infer_dataset(self, *args, **kwargs):
        """The dataset always follows the parent exam log."""
        return self.cached_related_dataset_lookup("examlog")
    def calculate_partition(self):
        """Partitioned by dataset, matching ExamLog.calculate_partition."""
        return self.dataset_id
| |
"""Orchestrate milestoning simulations."""
__all__ = ['Simulation', 'SimulationError']
import atexit
import logging
import os
import sys
from typing import Optional # noqa: F401. Used for mypy.
import numpy as np
from miles import (BaseTimestepper, ColvarsParser, Configuration, Database, Distribution, Milestones, TrajectoryParser, load_database, load_distributions, version) # noqa: E501
class SimulationError(Exception):
    """Raised when a milestoning simulation is misconfigured or cannot proceed."""
    pass
class Simulation:
    """Contains information for a milestoning simulation.
    The contents of this class must not be changed once a simulation
    is running.
    """
    config_file = None
    catch_signals = None
    setup_reactants_and_products = None
    configuration = None
    database = None  # type: Optional[Database]
    milestones = None  # type: Optional[Milestones]
    collective_variables = None
    reactant_distribution = None  # type: Optional[Distribution]
    def __init__(self, config_file: str,
                 catch_signals: bool = True,
                 setup_reactants_and_products: bool = True) -> None:
        """Initialize simulation class.
        Parameters
        ----------
        config_file : str
            Path to the configuration file to be used for the
            simulation. Environment variables inside the path and
            os-dependent shortcuts for the user's home directory are
            expanded.
        catch_signals : bool, optional
            Whether to set up signal handlers or not. Callers that
            may alter the database should set this option to true.
        setup_reactants_and_products : bool, optional
            Set up reactant and product milestones. Setting this flag
            to false is useful at the early stages when we are about
            to run for the first time in discovery mode.
        """
        print(version.v_gnu)
        # Raise the recursion limit: deeply nested structures may exceed
        # the default during (un)pickling.
        sys.setrecursionlimit(10000)
        self.config_file = expand_user_vars(config_file)
        self.configuration = Configuration()
        self.configuration.parse(self.config_file)
        np.set_printoptions(precision=4)
        self.catch_signals = catch_signals
        if self.catch_signals:
            # This set up is process-specific, so we do it to avoid
            # MPI processes from doing it.
            self._setup_signal_handlers()
        self._setup_logging()
        self._setup_collective_variables()
        self._setup_stateful()
        self.setup_reactants_and_products = setup_reactants_and_products
        if self.setup_reactants_and_products:
            self._setup_reactants()
            self._setup_products()
    def __repr__(self) -> str:
        return ('{}({!r}, catch_signals={!r}, '
                'setup_reactants_and_products={!r})'
                .format(self.__class__.__name__,
                        self.config_file, self.catch_signals,
                        self.setup_reactants_and_products))
    def __getstate__(self):
        # Exclude the stateful pieces; __setstate__ re-creates them via
        # _setup_stateful so pickles stay small and portable.
        state = self.__dict__.copy()
        del state['database']
        del state['milestones']
        del state['reactant_distribution']
        return state
    def __setstate__(self, state):
        self.__dict__.update(state)
        self._setup_stateful()
    def _setup_signal_handlers(self) -> None:
        """Set up signal handlers.
        We enable the necessary signal handlers to ensure the graceful
        termination of a simulation. Upon arrival of one of the
        masked signals, the database is saved and the program
        exits.
        """
        def exit_handler():
            logging.debug('Saving database.')
            try:
                self.database.save()
            except Exception:
                # BUG FIX: narrowed from a bare ``except:`` so that
                # SystemExit/KeyboardInterrupt are not swallowed at shutdown.
                logging.error('Unable to close database.')
            logging.info('Exiting.')
        atexit.register(exit_handler)
    def _setup_reactants(self) -> None:
        """Set up the distribution at the reactant."""
        dist_file = self.configuration.reactant_distribution
        reactant_distributions = load_distributions(dist_file)
        if not reactant_distributions:
            raise SimulationError('Reactant distribution does not contain '
                                  'points from just one milestone.')
        assert len(reactant_distributions) == 1  # XXX
        for milestone in reactant_distributions:
            self.milestones.append_reactant(milestone)
        milestone, distribution = reactant_distributions.popitem()
        self.reactant_distribution = distribution  # XXX Not future-proof.
    def _setup_products(self) -> None:
        """Set up product milestones."""
        if not self.configuration.product_milestone:
            logging.debug('No product milestones are known.')
            return
        milestones = self.milestones
        pair = self.configuration.product_milestone
        product_milestone = milestones.make_from_indices(pair[0], pair[1])
        milestones.append_product(product_milestone)
    def _setup_logging(self) -> None:
        """Initialize the root logger with the right settings."""
        # NOTE: the redundant local ``import logging`` was removed; the
        # module-level import is sufficient.
        logging.basicConfig(stream=sys.stdout,
                            level=self.configuration.logging_level,
                            format='%(levelname)s: %(message)s')
        logger = logging.getLogger()
        logger.name = 'miles'
    def _setup_collective_variables(self) -> None:
        """Initialize space of collective_variables."""
        colvars_file_name = self.configuration.colvars_file
        if not colvars_file_name:
            raise SimulationError('No colvars input file found')
        colvars_parser = ColvarsParser(colvars_file_name)
        self.collective_variables = colvars_parser.collective_variables
    def _setup_stateful(self):
        """Set up the stateful pieces of the simulation.
        """
        # Load database and set up the collection of milestones.
        self.database = load_database(self.configuration.database_file,
                                      self.collective_variables)
        self.milestones = self.database.milestones
        # Set up temporary directory.
        try:
            temp_dir = self.configuration.temp_dir
            os.mkdir(temp_dir)
        except FileExistsError:
            pass
    def make_trajectory_parser(self) -> TrajectoryParser:
        """Create a TrajectoryParser object.
        """
        return TrajectoryParser(self.milestones, self.configuration,
                                self.collective_variables)
    def make_timestepper(self) -> BaseTimestepper:
        """Create a Timestepper object for the current simulation.
        """
        trajectory_parser = self.make_trajectory_parser()
        command = self.configuration.command
        # The MD backend is chosen by inspecting the configured command line.
        if 'namd2' in command:
            from miles import TimestepperNAMD
            timestepper_class = TimestepperNAMD
        elif 'brownian-dynamics' in command:
            from miles import TimestepperBD
            timestepper_class = TimestepperBD
        else:
            raise SimulationError('Unknown MD engine: {!r}'.format(command))
        return timestepper_class(trajectory_parser, self.configuration,
                                 self.collective_variables)
def expand_user_vars(file_name: str) -> str:
    """Expand environment variables and user directories.
    """
    # First resolve ``~``/``~user`` shortcuts, then substitute $VARS.
    home_expanded = os.path.expanduser(file_name)
    return os.path.expandvars(home_expanded)
| |
import sys
import imp
import itertools
import collections.abc
import pathlib
import concurrent.futures
from collections import namedtuple, deque
import requests
import yaml
if sys.version_info >= (3, 5):
from http import HTTPStatus
elif sys.version_info >= (3, 4):
from rekt.httputils import HTTPStatus
else:
raise RuntimeError('Unsupported python version: {}'.format(sys.version_info))
from rekt.httputils import HTTPVerb, ArgsLocation, _ARGS_LOCATION_BY_VERB
from rekt.utils import (_NULL_OBJECT, read_only_dict, camel_case_to_snake_case, load_config,
api_method_name, async_api_method_name)
__all__ = ['load_service']
# Format strings used to name the dynamically generated resource,
# request, response and client classes.
_RESOURCE_NAME_FMT = '{}Resource'
# Field names of the Resource namedtuple built in create_api_definition.
_RESOURCE_ATTRIBUTES = ('name', 'url', 'actions', 'request_classes', 'response_classes')
_REQUEST_NAME_FMT = '{}{}Request'
_RESPONSE_NAME_FMT = '{}{}Response'
_CLIENT_NAME_FMT = '{}Client'
# TODO: make configurable in the client
_ASYNC_WORKER_THREAD_COUNT = 6
class DynamicObject(dict):
    """
    Base class for all response types. It acts like hybrid between a
    . attribute access object and a defaultdict(lambda: None) meaning
    that any .<attributename> that does not exist in the backing
    dictionary will be guaranteed to return None.
    Inspired by and similar to Groovy's Expando object.
    """
    # Recipe for allowing . attribute access on a dictionary from
    # http://stackoverflow.com/questions/4984647/accessing-dict-keys-like-an-attribute-in-python
    def __init__(self, *args, **kwargs):
        super(DynamicObject, self).__init__(*args, **kwargs)
        # Aliasing __dict__ to the dict itself makes attribute access and
        # item access share the same storage.
        self.__dict__ = self
    def __getattr__(self, key):
        # Invoked only for missing attributes; defers to item lookup, which
        # falls back to __missing__ below instead of raising.
        return self[key]
    def __missing__(self, key):
        # dict hook: absent keys yield None rather than KeyError.
        return None
    def __reduce__(self):
        # Pickle via plain-dict state so instances round-trip cleanly.
        return (DynamicObject, (), self.__getstate__())
    def __getstate__(self):
        return dict(self)
    def __setstate__(self, state):
        self.update(state)
class RestClient(object):
    """
    Convenience base class from which the service client classes are
    dynamically derived.
    """
    def __str__(self):
        return type(self).__name__
    def __repr__(self):
        return '<{}>'.format(type(self).__name__)
def create_request_class(api, verb, args, defaults, BaseClass=DynamicObject):
    """
    Build the request class for *api*/*verb* whose constructor accepts only
    the keyword arguments listed in *args*.

    Arguments that have defaults (per *defaults*) are ordered after those
    without, so the generated docstring reads like a normal signature.
    NOTE(review): the default values themselves are never applied to
    constructed requests -- confirm whether that is intentional.
    """
    signature = deque()
    # Modify the parameters of the signature such that those
    # with defaults follow those without
    for arg in args:
        if arg in defaults.keys():
            signature.append(arg)
        else:
            signature.appendleft(arg)
    signature = tuple(signature)
    # BUG FIX: removed a dead loop that built an unused ``default_values``
    # list (flagged unused-local); it had no observable effect.
    def __init__(self, **kwargs):
        # Reject any keyword not declared for this request.
        for key in kwargs:
            if key not in signature:
                raise TypeError('Argument {} not valid for {}'.format(key, self.__class__.__name__))
        BaseClass.__init__(self, kwargs)
    class_name = _REQUEST_NAME_FMT.format(verb.name.title(), api)
    RequestClass = type(class_name, (BaseClass,), {'__init__' : __init__})
    RequestClass.__doc__ = "{}\nParameters:\n {}".format(
        class_name, '\n '.join(signature))
    return RequestClass
def create_response_class(api, verb):
    """Create the DynamicObject subclass used for *api*/*verb* responses."""
    class_name = _RESPONSE_NAME_FMT.format(verb.name.title(), api)
    return type(class_name, (DynamicObject,), {})
def create_api_definition(api, defn, baseurl):
    """Build the Resource namedtuple for one API entry of the YAML config.

    Collects, per supported HTTP verb, the generated request/response
    classes and the set of available actions.
    """
    ResourceClass = namedtuple(_RESOURCE_NAME_FMT.format(api), _RESOURCE_ATTRIBUTES)
    actions = []
    request_classes = {}
    response_classes = {}
    for verb in HTTPVerb:
        verb_defn = defn.get(verb.name)
        if verb_defn is None:
            continue
        # Only keep explicit, non-sentinel default values from the config.
        defaults = {k: v['default'] for k, v in verb_defn.items()
                    if v is not None
                    and isinstance(v, dict)
                    and v.get('default', _NULL_OBJECT) is not _NULL_OBJECT}
        actions.append(verb)
        request_classes[verb] = create_request_class(api, verb, verb_defn.keys(), defaults)
        response_classes[verb] = create_response_class(api, verb)
    return ResourceClass(api, baseurl + defn['url'], actions, request_classes, response_classes)
def create_api_call_func(api, verb):
    """
    From an api definition object create the related api call method
    that will validate the arguments for the api call and then
    dynamically dispatch the request to the appropriate requests module
    convenience method for the specific HTTP verb .
    """
    # Scopes some local context in which we can build
    # request functions with reflection that primed with
    # some static parameters.
    def api_call_func(self, **kwargs):
        # Constructing the request class validates the keyword arguments.
        request = api.request_classes[verb](**kwargs)
        # Drop unset (None) parameters so they are not sent on the wire.
        params = dict([ (k,v) for k,v in request.items() if v is not None ])
        if HTTPVerb.GET == verb:
            raw_response = requests.get(api.url, params=params, **self.reqargs)
        elif HTTPVerb.POST == verb:
            raw_response = requests.post(api.url, data=params, **self.reqargs)
        else:
            raise RuntimeError('{} is not a handled http verb'.format(verb))
        if raw_response.status_code != HTTPStatus.OK:
            raw_response.raise_for_status()
        # The object hook will convert all dictionaries from the json
        # objects in the response to a . attribute access
        ResponseClass = api.response_classes[verb]
        try:
            response = raw_response.json(object_hook=lambda obj: ResponseClass(obj))
        except ValueError:
            # BUG FIX: dropped the unused ``as e`` binding. Non-JSON bodies
            # are exposed under a 'content' key instead.
            response = ResponseClass({'content' : raw_response.content})
        return response
    method_name = api_method_name(verb, api)
    api_call_func.__name__ = method_name
    api_call_func.__doc__ = '{}\n{}'.format(
        method_name, ''.join(api.request_classes[verb].__doc__.splitlines(True)[1:]))
    return api_call_func
def create_async_api_call_func(api, verb):
    """
    Create the asynchronous variant of the API call method for *api*/*verb*.

    The returned function submits the matching synchronous call to the
    client's thread-pool executor and returns a Future.
    """
    def api_call_func(self, **kwargs):
        def _async_call_handler():
            # Resolve the synchronous method on the client at call time.
            sync_method = getattr(self, api_method_name(verb, api))
            return sync_method(**kwargs)
        return self._executor.submit(_async_call_handler)
    method_name = async_api_method_name(verb, api)
    api_call_func.__name__ = method_name
    api_call_func.__doc__ = "{}\nParameters:\n {}".format(
        method_name, '\n '.join(api.request_classes[verb]().keys()))
    return api_call_func
def create_rest_client_class(name, apis, BaseClass=RestClient):
    """
    Generate the api call functions and attach them to the generated
    RestClient subclass with the name <Service>Client.
    """
    # One (api, verb) pair per supported action, in api order.
    pairs = [(api, verb) for api in apis for verb in api.actions]
    api_funcs = [create_api_call_func(api, verb) for api, verb in pairs]
    api_funcs += [create_async_api_call_func(api, verb) for api, verb in pairs]
    namespace = {func.__name__: func for func in api_funcs}
    # Adapted from :
    # http://stackoverflow.com/questions/15247075/how-can-i-dynamically-create-derived-classes-from-a-base-class
    def __init__(self, thread_count=_ASYNC_WORKER_THREAD_COUNT, **reqargs):
        BaseClass.__init__(self)
        setattr(self, 'reqargs', read_only_dict(reqargs))
        self._executor = concurrent.futures.ThreadPoolExecutor(thread_count)
    namespace['__init__'] = __init__
    return type(_CLIENT_NAME_FMT.format(name), (BaseClass,), namespace)
def create_service_module(service_name, apis):
    """
    Dynamically creates a module named defined by the PEP-8 version of
    the string contained in service_name (from the YAML config). This
    module will contain a Client class, a Call Factory, and list of API
    definition objects.
    """
    # BUG FIX: ``imp.new_module`` is deprecated (the ``imp`` module was
    # removed in Python 3.12); ``types.ModuleType`` is the supported
    # equivalent.
    import types
    service_module = types.ModuleType(service_name.lower())
    for api in apis:
        setattr(service_module, api.__class__.__name__, api)
    ClientClass = create_rest_client_class(service_name, apis)
    setattr(service_module, 'resources', tuple(apis))
    setattr(service_module, 'Client', ClientClass)
    # Register so ``import <service>`` works after loading.
    sys.modules[service_name.lower()] = service_module
    return service_module
def load_service(config):
    """
    Load a restful service specified by some YAML config.
    :param config: A mapping with an already-parsed configuration, a
        string path, or a pathlib.Path pointing to the yaml config file.
    :returns: A python module containing a Client class, call factory,
        and the definition of each of the APIs defined by the config.
    :raises TypeError: if *config* is not a Mapping, str or Path.
    """
    # Accept the three supported config forms. (The old docstring referred
    # to a nonexistent ``config_path`` parameter; fixed.)
    if isinstance(config, collections.abc.Mapping):
        service_config = config
    elif isinstance(config, str):
        service_config = load_config(pathlib.Path(config))
    elif isinstance(config, pathlib.Path):
        service_config = load_config(config)
    else:
        raise TypeError('Cannot load config from type: {}'.format(type(config)))
    apis = [create_api_definition(api, defn, service_config['base_url'])
            for api, defn in service_config['apis'].items()]
    return create_service_module(service_config['name'], apis)
| |
# Copyright (c) 2015 Microsoft Corporation
import os
import subprocess
import shutil
import config
import filecmp
import time
def is_windows():
    """Return True when the host operating system is Windows."""
    return 'nt' == os.name
def is_linux():
    """Return True when the host is a Linux system."""
    if os.name != 'posix':
        return False
    return os.uname()[0] == 'Linux'
def is_freebsd():
    """Return True when the host is a FreeBSD system."""
    if os.name != 'posix':
        return False
    return os.uname()[0] == 'FreeBSD'
def is_osx():
    """Return True when the host is a macOS (Darwin) system."""
    if os.name != 'posix':
        return False
    return os.uname()[0] == 'Darwin'
# Goodie for changing the directory
class cd:
    # Context manager: chdir into ``newPath`` on entry, restore the previous
    # working directory on exit (exceptions are not suppressed).
    def __init__(self, newPath):
        self.newPath = newPath
    def __enter__(self):
        # Remember where we were so __exit__ can restore it.
        self.savedPath = os.getcwd()
        os.chdir(self.newPath)
    def __exit__(self, etype, value, traceback):
        os.chdir(self.savedPath)
class setenv:
    """Context manager that temporarily sets environment variable *var* to *val*.

    On exit the previous value is restored, or the variable is removed if
    it did not exist before entry.
    """
    def __init__(self, var, val):
        self.var = var
        self.val = val
        self.old_val = None
    def __enter__(self):
        if self.var in os.environ:
            self.old_val = os.environ[self.var]
        os.environ[self.var] = self.val
    def __exit__(self, etype, value, traceback):
        # Idiom fix: ``is None`` instead of ``== None``; environment values
        # are always strings, so None reliably means "was absent".
        if self.old_val is None:
            # Tolerate the body having already deleted the variable.
            os.environ.pop(self.var, None)
        else:
            os.environ[self.var] = self.old_val
def rmf(path):
    """Remove the file or directory tree at *path*; no-op if it doesn't exist."""
    if not os.path.exists(path):
        return # nothing to be done
    if not os.path.isdir(path):
        # BUG FIX: was ``os.path.remove`` which does not exist and raised
        # AttributeError whenever a plain file had to be deleted.
        os.remove(path)
    else:
        shutil.rmtree(path)
def mk_dir(d):
    """Create directory *d* (including parents) unless it already exists."""
    if os.path.exists(d):
        return
    os.makedirs(d)
def is_z3depot(path):
    """
    Return true if the Z3 main repository is located in the given path.
    The function checks the existence of several files.
    """
    markers = ("README.md", "LICENSE.txt", ".git", "RELEASE_NOTES",
               os.path.join("src", "ast", "ast.cpp"))
    return all(os.path.exists(os.path.join(path, m)) for m in markers)
def find_z3depot():
    """
    Try to find the Z3 main repository along the current path.
    The environment variable Z3DIR is also used.

    Raises Exception when Z3DIR points somewhere invalid or no repository
    can be found walking up from the current directory.
    """
    c = os.getenv("Z3DIR", None)
    # Idiom fix: ``is not None`` instead of ``!= None``.
    if c is not None and c != "":
        if is_z3depot(c):
            return c
        else:
            raise Exception("Environment variable Z3DIR does not seem to contain the correct path to the Z3 repository.\nZ3DIR=%s" % c)
    p = os.getcwd()
    if is_z3depot(p):
        return p
    # Walk up the directory tree looking for a sibling 'z3' checkout.
    while True:
        new_p, _ = os.path.split(p)
        if new_p == p:
            raise Exception("Failed to find path to the Z3 repository, try to set the environment variable Z3DIR")
        c = os.path.join(new_p, 'z3')
        if is_z3depot(c):
            return c
        p = new_p
def gitcheckout(branch):
    """Force-checkout *branch* in the current git working directory."""
    rc = subprocess.call([config.GIT, 'checkout', '-f', branch])
    if rc != 0:
        raise Exception("Failed to checkout branch '%s' at '%s'" % (branch, os.getcwd()))
def gitpull(branch):
    """Pull the latest changes for *branch* from the configured origin."""
    rc = subprocess.call([config.GIT, 'pull', '--commit', config.ORIGIN, branch])
    if rc != 0:
        raise Exception("Failed to pull latest changes from branch '%s' (from github) at '%s'" % (branch, os.getcwd()))
def get_builddir(branch, debug, clang):
    """Return the build directory for the given branch/build flavor."""
    if clang:
        branch = '%s-clang' % branch
    flavor = 'debug' if debug else 'release'
    return os.path.join(config.BUILDDIR, branch, flavor)
def mk_make(branch, debug, dotnet, java, clang, static, VS64, extraflags):
    """Run scripts/mk_make.py to configure a Z3 build with the given options."""
    cmd = ['python', os.path.join('scripts', 'mk_make.py'), '-b', get_builddir(branch, debug, clang) ] + extraflags
    if debug:
        cmd.append('-d')
    if is_windows():
        cmd.append('--parallel=24')
    if VS64 and is_windows():
        cmd.append('-x')
    if dotnet:
        cmd.append('--dotnet')
    if java:
        cmd.append('--java')
    if static:
        cmd.append('--staticlib')
    def _run_configure():
        # Shared by both branches below so the invocation isn't duplicated.
        if subprocess.call(cmd) != 0:
            raise Exception("Failed to execute mk_make\n%s" % cmd)
    if clang:
        # Select clang via the environment only for the configure step.
        with setenv('CXX', 'clang++'), setenv('CC', 'clang'):
            _run_configure()
    else:
        _run_configure()
def make(branch, debug, everything, clang, jobs):
    """Invoke the platform build tool inside the branch's build directory."""
    bdir = get_builddir(branch, debug, clang)
    with cd(bdir):
        # nmake on Windows, parallel GNU make elsewhere.
        cmd = ['nmake'] if is_windows() else ['make', '-j', str(jobs)]
        if everything:
            cmd += ['all', 'test', 'examples']
        if subprocess.call(cmd) != 0:
            raise Exception("Failed to make Z3\n%s\n" % cmd)
def buildz3(branch="master", everything=False, clean=False, debug=True, dotnet=False, java=False, clang=False, static=False, VS64=False, jobs=16, extraflags=None):
    """Check out, update, configure and build Z3 for the given branch.

    BUG FIX: ``extraflags`` used a mutable default (``[]``); replaced with a
    None sentinel (backward compatible for all callers).
    """
    if extraflags is None:
        extraflags = []
    z3dir = find_z3depot()
    with cd(z3dir):
        gitcheckout(branch)
        gitpull(branch)
        bdir = get_builddir(branch, debug, clang)
        if clean:
            rmf(bdir)
        mk_dir(bdir)
        mk_make(branch, debug, dotnet, java, clang, static, VS64, extraflags)
        make(branch, debug, everything, clang, jobs)
def testz3py(branch="master", debug=True, clang=False):
    """Run Z3's Python API regression tests (z3test.py, z3num.py)."""
    p = os.path.join(find_z3depot(), get_builddir(branch, debug, clang))
    with cd(p):
        if subprocess.call([config.PYTHON, 'z3test.py']) != 0:
            raise Exception("Failed to execute Z3 python regression tests 'z3test.py' at '%s'" % p)
        if subprocess.call([config.PYTHON, 'z3num.py']) != 0:
            raise Exception("Failed to execute Z3 python regression tests 'z3num.py' at '%s'" % p)
def testjavaex(branch="master", debug=True, clang=False):
    """Run the compiled JavaExample program from the given build directory."""
    z3dir = find_z3depot()
    bdir = get_builddir(branch, debug, clang)
    p = os.path.join(z3dir, bdir)
    with cd(p):
        print(p)
        # Classpath separator is ';' on Windows and ':' on POSIX systems.
        if is_windows():
            if subprocess.call([config.JAVA, '-cp', 'com.microsoft.z3.jar;.', 'JavaExample']) != 0:
                raise Exception("Failed to execute Java example at '%s'" % p)
        elif is_osx():
            if subprocess.call([config.JAVA, '-cp', 'com.microsoft.z3.jar:.', 'JavaExample']) != 0:
                raise Exception("Failed to execute Java example at '%s'" % p)
        elif is_linux() or is_freebsd():
            # The JVM needs LD_LIBRARY_PATH to locate the native Z3 library.
            with setenv('LD_LIBRARY_PATH', '.'):
                if subprocess.call([config.JAVA, '-cp', 'com.microsoft.z3.jar:.', 'JavaExample']) != 0:
                    raise Exception("Failed to execute Java example at '%s'" % p)
def testz3ex(exe, branch="master", debug=True, clang=False):
    """Run a Z3 example executable *exe* from the given build directory."""
    z3dir = find_z3depot()
    bdir = get_builddir(branch, debug, clang)
    p = os.path.join(z3dir, bdir)
    with cd(p):
        # Put the build directory's binaries on PATH.
        with setenv('PATH', '.'):
            if is_windows() or is_osx():
                if subprocess.call([exe]) != 0:
                    raise Exception("Failed to execute '%s' at '%s'" % (exe, p))
            elif is_linux() or is_freebsd():
                # ELF loaders need LD_LIBRARY_PATH to find libz3.
                with setenv('LD_LIBRARY_PATH', '.'):
                    if subprocess.call([exe]) != 0:
                        raise Exception("Failed to execute '%s' at '%s'" % (exe, p))
# The duration is in seconds. It can be a float such as 0.001
def timeout(func, args=(), kwargs=None, timeout_duration=1.0, default=None):
    """Run ``func(*args, **kwargs)`` in a worker thread, bounded by a timeout.

    Returns the function's result, or *default* if it did not finish within
    *timeout_duration* seconds (the worker keeps running in the background --
    it cannot be forcibly stopped). Exceptions raised by *func* are re-raised
    in the caller.

    BUG FIXES: ``Thread.isAlive()`` was removed in Python 3.9 (renamed
    ``is_alive()``); ``kwargs`` used a mutable default dict.
    """
    import threading

    if kwargs is None:
        kwargs = {}

    class InterruptableThread(threading.Thread):
        def __init__(self):
            threading.Thread.__init__(self)
            self.result = None
            self.exception = None
        def run(self):
            try:
                self.result = func(*args, **kwargs)
            except Exception as ex:
                # Captured so it can be re-raised in the calling thread.
                self.exception = ex
    it = InterruptableThread()
    it.start()
    it.join(timeout_duration)
    if it.exception is not None:
        raise it.exception
    if it.is_alive():
        return default
    else:
        return it.result
def subprocess_killer(args, stdin=None, stdout=None, stderr=None, shell=False, timeout=1.0):
    """Run a subprocess, killing it once it exceeds *timeout* seconds.

    Returns the process's return code (negative signal number if killed).
    NOTE(review): launch failures are swallowed and reported as return code
    0, which callers treat as success -- a bad executable path therefore
    looks like a passing run; confirm this is intentional.
    """
    try:
        p = subprocess.Popen(args, stdin=stdin, stdout=stdout, stderr=stderr, shell=shell)
        start = time.time()
        time.sleep(0.02)
        # Poll until the child exits, killing it once the deadline passes.
        # Idiom fix: ``is None`` instead of ``== None``.
        while p.poll() is None:
            time.sleep(0.1)
            if (time.time() - start) > timeout:
                p.kill()
        return p.returncode
    except Exception as ex:
        print('Exception: %s' % ex)
        return 0
def test_benchmark(z3exe, benchmark, timeout, expected=None):
    """Run *z3exe* on *benchmark* and diff its output against the expected file.

    Returns True on success; raises Exception on missing files, unexpected
    return codes, or output mismatch.
    """
    if not os.path.exists(benchmark):
        raise Exception("Benchmark '%s' does not exist" % benchmark)
    base, ext = os.path.splitext(benchmark)
    if expected is None:
        expected = '%s.expected.out' % base
    if not os.path.exists(expected):
        # BUG FIX: the message previously interpolated ``benchmark`` here,
        # reporting the wrong file name.
        raise Exception("Expected answer file '%s' does not exist" % expected)
    produced = '%s.produced.out' % base
    # with-block guarantees the output file is closed even on error.
    with open(produced, 'w') as producedf:
        errcode = 0
        try:
            errcode = subprocess_killer([z3exe, 'model_validate=true', benchmark], stdout=producedf, stderr=producedf, timeout=timeout)
        except Exception:
            raise Exception("Failed to start Z3: %s" % z3exe)
    # 0/1/105 are the return codes Z3 uses for sat/unsat/unknown-style runs.
    if errcode not in (0, 1, 105):
        raise Exception("Z3 (%s) returned unexpected error code %s for %s" % (z3exe, errcode, benchmark))
    if not filecmp.cmp(expected, produced):
        print("EXPECTED")
        print(open(expected, 'r').read())
        print("======================")
        print("PRODUCED")
        print(open(produced, 'r').read())
        print("======================")
        raise Exception("Z3 (%s) produced unexpected output for %s" % (z3exe, benchmark))
    return True
def test_benchmarks(z3exe, benchdir, ext="smt2", timeout_duration=60.0):
    """Run every benchmark in *benchdir* with extension *ext* through *z3exe*.

    Collects failures and raises a single Exception at the end if any
    benchmark failed or timed out.
    """
    print("Testing benchmarks at %s using %s" % (benchdir, z3exe))
    error = False
    for name in os.listdir(benchdir):
        if not name.endswith(ext):
            continue
        try:
            bench = os.path.join(benchdir, name)
            print("Testing %s" % bench)
            outcome = timeout(test_benchmark,
                              args=(z3exe, bench, timeout_duration),
                              timeout_duration=timeout_duration,
                              default=False)
            if outcome == False:
                raise Exception("Timeout executing benchmark %s using %s" % (bench, z3exe))
        except Exception as ex:
            print("Failed")
            print(ex)
            error = True
    if error:
        raise Exception("Found errors testing benchmarks at %s using %s" % (benchdir, z3exe))
def test_benchmarks_using_latest(benchdir, branch="master", debug=True, clang=False, ext="smt2", timeout_duration=60.0):
    """Run the benchmarks against the z3 binary from the latest local build."""
    z3exe = os.path.join(find_z3depot(), get_builddir(branch, debug, clang), 'z3')
    test_benchmarks(z3exe, benchdir, ext, timeout_duration)
def exec_script(script, timeout):
    """Run *script* with the configured Python; raise on nonzero exit."""
    rc = subprocess_killer([config.PYTHON, script], timeout=timeout)
    if rc != 0:
        raise Exception("Script '%s' returned non-zero exit code" % script)
    return True
def test_pyscripts(z3libdir, scriptdir, ext="py", timeout_duration=60.0):
    """Run every Python script in *scriptdir* against the Z3 build in *z3libdir*."""
    print("Testing scripts at %s using %s" % (scriptdir, z3libdir))
    # Expose the Z3 build to the scripts: native libs on Linux
    # (LD_LIBRARY_PATH) and macOS (DYLD_LIBRARY_PATH), plus PYTHONPATH
    # for the z3 Python bindings.
    with setenv('LD_LIBRARY_PATH', z3libdir):
        with setenv('PYTHONPATH', z3libdir):
            with setenv('DYLD_LIBRARY_PATH', z3libdir):
                print("Testing python scripts at %s using %s" % (scriptdir, z3libdir))
                error = False
                for script in filter(lambda f: f.endswith(ext), os.listdir(scriptdir)):
                    script = os.path.join(scriptdir, script)
                    print("Testing %s" % script)
                    try:
                        # ``timeout`` returns the default (False) when the
                        # script did not finish in time.
                        if timeout(exec_script,
                                   args=[script, timeout_duration],
                                   timeout_duration=timeout_duration,
                                   default=False) == False:
                            raise Exception("Timeout executing script '%s' at '%s' using '%s'" % (script, scriptdir, z3libdir))
                    except Exception as ex:
                        print("Failed")
                        print(ex)
                        error = True
                if error:
                    raise Exception("Found errors testing scripts at '%s' using '%s'" % (scriptdir, z3libdir))
def test_pyscripts_using_latest(scriptdir, branch="master", debug=True, clang=False, ext="py", timeout_duration=60.0):
    """Run the Python regression scripts against the latest build of `branch`."""
    depot = find_z3depot()
    build = get_builddir(branch, debug, clang)
    test_pyscripts(os.path.join(depot, build), scriptdir, ext, timeout_duration)
def exec_cs_compile(args, timeout):
    """Invoke the C# compiler command line `args`, raising on failure.

    `args` is the full compiler argv (csc, flags, source files).
    """
    if subprocess_killer(args, timeout=timeout) != 0:
        # BUG FIX: the original message referenced `file`, a name that is not
        # defined in this function (it leaked from the caller's scope at best,
        # raising NameError at worst). Report the actual command instead.
        raise Exception("Compilation of '%s' failed" % " ".join(args))
    return True
def exec_cs(timeout):
    """Run the compiled C# test executable, raising on a non-zero exit."""
    exit_code = subprocess_killer(config.CSTEMP, timeout=timeout)
    if exit_code != 0:
        raise Exception("Execution of '%s' failed" % (config.CSTEMP))
    return True
def test_cs(z3libdir, csdir, ext="cs", VS64=False, timeout_duration=60.0):
    """Compile and run every C# test in csdir against the Z3 .NET bindings.

    Each test file is compiled together with the shared driver
    (config.CSDRIVER) into config.CSTEMP and then executed. Windows-only:
    uses csc.exe flags, backslash paths and a ";"-separated PATH.
    """
    print("Testing C# at %s using %s" % (csdir, z3libdir))
    error = False
    platform_arg = "/platform:x86"
    if VS64:
        platform_arg = "/platform:x64"
    # The managed assembly must sit next to the produced executable.
    shutil.copyfile("%s/Microsoft.Z3.dll" % z3libdir, "Microsoft.Z3.dll")
    # Extend PATH so the native z3 DLL is found at run time.
    with setenv('PATH', os.getenv("PATH") + ";" + z3libdir):
        for file in filter(lambda f: f.endswith(ext), os.listdir(csdir)):
            # The driver is compiled into every test, not run on its own.
            if file == config.CSDRIVER:
                continue
            file = os.path.join(csdir, file)
            print("Testing %s" % file)
            try:
                # Compile.
                if timeout(exec_cs_compile,
                           args=[[config.CSC, "/nologo",
                                  "/reference:%s\Microsoft.Z3.dll" % z3libdir,
                                  "/out:%s" % (config.CSTEMP),
                                  platform_arg,
                                  "%s\%s" % (csdir, config.CSDRIVER),
                                  file],
                                 timeout_duration],
                           timeout_duration=timeout_duration,
                           default=False) == False:
                    raise Exception("Timeout compiling '%s' at '%s' using '%s'" % (file, csdir, z3libdir))
                # Run.
                if timeout(exec_cs,
                           args=[timeout_duration],
                           timeout_duration=timeout_duration,
                           default=False) == False:
                    raise Exception("Timeout executing '%s' at '%s' using '%s'" % (file, csdir, z3libdir))
            except Exception as ex:
                print("Failed")
                print(ex)
                error = True
    # NOTE(review): if the very first compilation fails, config.CSTEMP may not
    # exist and os.remove will raise -- confirm whether that is acceptable.
    os.remove(config.CSTEMP)
    os.remove("Microsoft.Z3.dll")
    if error:
        raise Exception("Found errors testing C# at '%s' using '%s'" % (csdir, z3libdir))
def test_cs_using_latest(csdir, branch="master", debug=True, clang=False, ext="cs", VS64=False, timeout_duration=60.0):
    """Run the C# regression tests against the latest build of `branch`."""
    depot = find_z3depot()
    build = get_builddir(branch, debug, clang)
    test_cs(os.path.join(depot, build), csdir, ext, VS64, timeout_duration)
# buildz3(java=True, everything=True)
# testjavaex()
# testz3ex('cpp_example', "master", True, True)
# testz3ex('c_example')
# test_benchmarks('/home/leo/projects/z3/build/debug/z3', 'regressions/smt2')
# test_benchmark('/home/leo/projects/z3/build/debug/z3', 'regressions/smt2/bad_patterns.smt2')
# test_benchmarks_using_latest('regressions/smt2')
# test_pyscripts('/home/leo/projects/z3/build/debug', 'regressions/python')
# test_pyscripts_using_latest('regressions/python')
| |
"""Statistics helper."""
from __future__ import annotations
from collections import defaultdict
from collections.abc import Callable, Iterable
import contextlib
import dataclasses
from datetime import datetime, timedelta
from itertools import chain, groupby
import json
import logging
import os
import re
from statistics import mean
from typing import TYPE_CHECKING, Any, Literal
from sqlalchemy import bindparam, func
from sqlalchemy.exc import SQLAlchemyError, StatementError
from sqlalchemy.ext import baked
from sqlalchemy.orm.scoping import scoped_session
from sqlalchemy.sql.expression import literal_column, true
from homeassistant.const import (
PRESSURE_PA,
TEMP_CELSIUS,
VOLUME_CUBIC_FEET,
VOLUME_CUBIC_METERS,
)
from homeassistant.core import Event, HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import entity_registry
from homeassistant.helpers.json import JSONEncoder
from homeassistant.helpers.storage import STORAGE_DIR
import homeassistant.util.dt as dt_util
import homeassistant.util.pressure as pressure_util
import homeassistant.util.temperature as temperature_util
from homeassistant.util.unit_system import UnitSystem
import homeassistant.util.volume as volume_util
from .const import DATA_INSTANCE, DOMAIN, MAX_ROWS_TO_PURGE
from .models import (
StatisticData,
StatisticMetaData,
StatisticResult,
Statistics,
StatisticsMeta,
StatisticsRuns,
StatisticsShortTerm,
process_timestamp,
process_timestamp_to_utc_isoformat,
)
from .util import execute, retryable_database_job, session_scope
if TYPE_CHECKING:
from . import Recorder
# Column sets selected for the two statistics tables. Consumers below rely on
# this exact column order when tuple-unpacking rows.
QUERY_STATISTICS = [
    Statistics.metadata_id,
    Statistics.start,
    Statistics.mean,
    Statistics.min,
    Statistics.max,
    Statistics.last_reset,
    Statistics.state,
    Statistics.sum,
]
QUERY_STATISTICS_SHORT_TERM = [
    StatisticsShortTerm.metadata_id,
    StatisticsShortTerm.start,
    StatisticsShortTerm.mean,
    StatisticsShortTerm.min,
    StatisticsShortTerm.max,
    StatisticsShortTerm.last_reset,
    StatisticsShortTerm.state,
    StatisticsShortTerm.sum,
]
# Aggregates used when rolling 5-minute statistics up into an hourly row.
QUERY_STATISTICS_SUMMARY_MEAN = [
    StatisticsShortTerm.metadata_id,
    func.avg(StatisticsShortTerm.mean),
    func.min(StatisticsShortTerm.min),
    func.max(StatisticsShortTerm.max),
]
# Window-function variant: rownum == 1 picks the newest row per metadata_id.
QUERY_STATISTICS_SUMMARY_SUM = [
    StatisticsShortTerm.metadata_id,
    StatisticsShortTerm.start,
    StatisticsShortTerm.last_reset,
    StatisticsShortTerm.state,
    StatisticsShortTerm.sum,
    func.row_number()
    .over(
        partition_by=StatisticsShortTerm.metadata_id,
        order_by=StatisticsShortTerm.start.desc(),
    )
    .label("rownum"),
]
# Fallback column set for databases without window-function support.
QUERY_STATISTICS_SUMMARY_SUM_LEGACY = [
    StatisticsShortTerm.metadata_id,
    StatisticsShortTerm.last_reset,
    StatisticsShortTerm.state,
    StatisticsShortTerm.sum,
]
QUERY_STATISTIC_META = [
    StatisticsMeta.id,
    StatisticsMeta.statistic_id,
    StatisticsMeta.source,
    StatisticsMeta.unit_of_measurement,
    StatisticsMeta.has_mean,
    StatisticsMeta.has_sum,
    StatisticsMeta.name,
]
QUERY_STATISTIC_META_ID = [
    StatisticsMeta.id,
    StatisticsMeta.statistic_id,
]
# Upper bound on duplicated rows deleted in a single purge run.
MAX_DUPLICATES = 1000000
# hass.data keys for the per-instance baked-query caches (see async_setup).
STATISTICS_BAKERY = "recorder_statistics_bakery"
STATISTICS_META_BAKERY = "recorder_statistics_meta_bakery"
STATISTICS_SHORT_TERM_BAKERY = "recorder_statistics_short_term_bakery"
# Convert pressure and temperature statistics from the native unit used for statistics
# to the units configured by the user
UNIT_CONVERSIONS = {
    PRESSURE_PA: lambda x, units: pressure_util.convert(
        x, PRESSURE_PA, units.pressure_unit
    )
    if x is not None
    else None,
    TEMP_CELSIUS: lambda x, units: temperature_util.convert(
        x, TEMP_CELSIUS, units.temperature_unit
    )
    if x is not None
    else None,
    VOLUME_CUBIC_METERS: lambda x, units: volume_util.convert(
        x, VOLUME_CUBIC_METERS, _configured_unit(VOLUME_CUBIC_METERS, units)
    )
    if x is not None
    else None,
}
_LOGGER = logging.getLogger(__name__)
def split_statistic_id(entity_id: str) -> list[str]:
    """Split a statistic ID into its source domain and object ID.

    Statistic IDs have the form ``<domain>:<object_id>`` (see
    ``valid_statistic_id``); only the first ``:`` acts as separator. The
    parameter keeps the name ``entity_id`` for backward compatibility with
    callers passing it by keyword, but it receives a statistic ID.
    """
    # BUG FIX (docs only): the previous docstring claimed this splits a
    # "state entity ID"; the code splits a ':'-separated statistic ID.
    return entity_id.split(":", 1)
VALID_STATISTIC_ID = re.compile(r"^(?!.+__)(?!_)[\da-z_]+(?<!_):(?!_)[\da-z_]+(?<!_)$")


def valid_statistic_id(statistic_id: str) -> bool:
    """Check that statistic_id has the form ``<domain>:<statistic>``.

    Both halves must be slugs: lowercase letters, digits and underscores,
    with no leading/trailing underscore and no double underscore.
    """
    return bool(VALID_STATISTIC_ID.match(statistic_id))
@dataclasses.dataclass
class ValidationIssue:
    """Error or warning message."""

    # Machine-readable issue type identifier.
    type: str
    # Optional extra context; keys and values depend on `type`.
    data: dict[str, str | None] | None = None

    def as_dict(self) -> dict:
        """Return dictionary version."""
        return dataclasses.asdict(self)
def async_setup(hass: HomeAssistant) -> None:
    """Set up the history hooks."""
    # Per-instance caches of baked (pre-compiled) SQLAlchemy queries.
    hass.data[STATISTICS_BAKERY] = baked.bakery()
    hass.data[STATISTICS_META_BAKERY] = baked.bakery()
    hass.data[STATISTICS_SHORT_TERM_BAKERY] = baked.bakery()

    def entity_id_changed(event: Event) -> None:
        """Handle entity_id changed."""
        old_entity_id = event.data["old_entity_id"]
        entity_id = event.data["entity_id"]
        # Keep statistics metadata in sync with entity renames. Only rows
        # recorded by this integration (source == DOMAIN) are renamed.
        with session_scope(hass=hass) as session:
            session.query(StatisticsMeta).filter(
                (StatisticsMeta.statistic_id == old_entity_id)
                & (StatisticsMeta.source == DOMAIN)
            ).update({StatisticsMeta.statistic_id: entity_id})

    @callback
    def entity_registry_changed_filter(event: Event) -> bool:
        """Handle entity_id changed filter."""
        # Only registry updates that carry an old_entity_id are renames.
        if event.data["action"] != "update" or "old_entity_id" not in event.data:
            return False
        return True

    # NOTE(review): the listener is only attached when HA is already running
    # at setup time; renames happening during startup are not tracked here --
    # confirm this is intentional.
    if hass.is_running:
        hass.bus.async_listen(
            entity_registry.EVENT_ENTITY_REGISTRY_UPDATED,
            entity_id_changed,
            event_filter=entity_registry_changed_filter,
        )
def get_start_time() -> datetime:
    """Return the start of the most recently completed 5-minute period."""
    now = dt_util.utcnow()
    # Truncate to the start of the current 5-minute slot...
    current_period = now.replace(
        minute=now.minute - now.minute % 5, second=0, microsecond=0
    )
    # ...then step back one slot to get the last finished period.
    return current_period - timedelta(minutes=5)
def _update_or_add_metadata(
    hass: HomeAssistant,
    session: scoped_session,
    new_metadata: StatisticMetaData,
) -> int:
    """Get metadata_id for a statistic_id.

    If the statistic_id is previously unknown, add it. If it's already known, update
    metadata if needed.

    Updating metadata source is not possible.
    """
    statistic_id = new_metadata["statistic_id"]
    old_metadata_dict = get_metadata_with_session(
        hass, session, statistic_ids=[statistic_id]
    )
    if not old_metadata_dict:
        # Unknown statistic_id: insert a fresh metadata row.
        meta = StatisticsMeta.from_meta(new_metadata)
        session.add(meta)
        session.flush()  # Flush to get the metadata id assigned
        _LOGGER.debug(
            "Added new statistics metadata for %s, new_metadata: %s",
            statistic_id,
            new_metadata,
        )
        return meta.id  # type: ignore[no-any-return]

    metadata_id, old_metadata = old_metadata_dict[statistic_id]
    # Only has_mean / has_sum / unit_of_measurement are refreshed; the
    # `source` and `name` columns are deliberately left untouched here.
    if (
        old_metadata["has_mean"] != new_metadata["has_mean"]
        or old_metadata["has_sum"] != new_metadata["has_sum"]
        or old_metadata["unit_of_measurement"] != new_metadata["unit_of_measurement"]
    ):
        session.query(StatisticsMeta).filter_by(statistic_id=statistic_id).update(
            {
                StatisticsMeta.has_mean: new_metadata["has_mean"],
                StatisticsMeta.has_sum: new_metadata["has_sum"],
                StatisticsMeta.unit_of_measurement: new_metadata["unit_of_measurement"],
            },
            synchronize_session=False,
        )
        _LOGGER.debug(
            "Updated statistics metadata for %s, old_metadata: %s, new_metadata: %s",
            statistic_id,
            old_metadata,
            new_metadata,
        )

    return metadata_id
def _find_duplicates(
    session: scoped_session, table: type[Statistics | StatisticsShortTerm]
) -> tuple[list[int], list[dict]]:
    """Find duplicated statistics.

    Returns up to MAX_ROWS_TO_PURGE worth of duplicate row ids (the row with
    the highest id per (metadata_id, start) pair is kept as the "original")
    plus a list of duplicates whose values differ from their original.
    """
    # Mark every (metadata_id, start) pair that occurs more than once.
    subquery = (
        session.query(
            table.start,
            table.metadata_id,
            literal_column("1").label("is_duplicate"),
        )
        .group_by(table.metadata_id, table.start)
        .having(func.count() > 1)
        .subquery()
    )
    # Fetch all rows in duplicated pairs; within each pair the highest id
    # sorts first, so the first row seen per pair is treated as the original.
    query = (
        session.query(table)
        .outerjoin(
            subquery,
            (subquery.c.metadata_id == table.metadata_id)
            & (subquery.c.start == table.start),
        )
        .filter(subquery.c.is_duplicate == 1)
        .order_by(table.metadata_id, table.start, table.id.desc())
        .limit(MAX_ROWS_TO_PURGE)
    )
    duplicates = execute(query)
    original_as_dict = {}
    start = None
    metadata_id = None
    duplicate_ids: list[int] = []
    non_identical_duplicates_as_dict: list[dict] = []

    if not duplicates:
        return (duplicate_ids, non_identical_duplicates_as_dict)

    def columns_to_dict(duplicate: type[Statistics | StatisticsShortTerm]) -> dict:
        """Convert a SQLAlchemy row to dict."""
        dict_ = {}
        for key in duplicate.__mapper__.c.keys():
            dict_[key] = getattr(duplicate, key)
        return dict_

    def compare_statistic_rows(row1: dict, row2: dict) -> bool:
        """Compare two statistics rows, ignoring id and created."""
        ignore_keys = ["id", "created"]
        keys1 = set(row1).difference(ignore_keys)
        keys2 = set(row2).difference(ignore_keys)
        return keys1 == keys2 and all(row1[k] == row2[k] for k in keys1)

    for duplicate in duplicates:
        if start != duplicate.start or metadata_id != duplicate.metadata_id:
            # First row of a new (metadata_id, start) group: the original.
            original_as_dict = columns_to_dict(duplicate)
            start = duplicate.start
            metadata_id = duplicate.metadata_id
            continue
        duplicate_as_dict = columns_to_dict(duplicate)
        duplicate_ids.append(duplicate.id)
        if not compare_statistic_rows(original_as_dict, duplicate_as_dict):
            non_identical_duplicates_as_dict.append(
                {"duplicate": duplicate_as_dict, "original": original_as_dict}
            )

    return (duplicate_ids, non_identical_duplicates_as_dict)
def _delete_duplicates_from_table(
    session: scoped_session, table: type[Statistics | StatisticsShortTerm]
) -> tuple[int, list[dict]]:
    """Delete duplicated statistics from `table` in batches.

    Returns the total number of deleted rows together with the duplicates
    whose values differed from the row that was kept.
    """
    non_identical: list[dict] = []
    deleted_total = 0
    while True:
        batch_ids, batch_non_identical = _find_duplicates(session, table)
        if not batch_ids:
            break
        non_identical.extend(batch_non_identical)
        deleted_total += (
            session.query(table)
            .filter(table.id.in_(batch_ids))
            .delete(synchronize_session=False)
        )
        # Safety valve: stop once the overall cap has been reached.
        if deleted_total >= MAX_DUPLICATES:
            break
    return (deleted_total, non_identical)
def delete_duplicates(instance: Recorder, session: scoped_session) -> None:
    """Identify and delete duplicated statistics.

    A backup will be made of duplicated statistics before it is deleted.
    """
    deleted_statistics_rows, non_identical_duplicates = _delete_duplicates_from_table(
        session, Statistics
    )
    if deleted_statistics_rows:
        _LOGGER.info("Deleted %s duplicated statistics rows", deleted_statistics_rows)

    if non_identical_duplicates:
        # Duplicates that were not exact copies are dumped to a JSON backup
        # file before deletion so no data is silently lost.
        isotime = dt_util.utcnow().isoformat()
        backup_file_name = f"deleted_statistics.{isotime}.json"
        backup_path = instance.hass.config.path(STORAGE_DIR, backup_file_name)

        os.makedirs(os.path.dirname(backup_path), exist_ok=True)
        with open(backup_path, "w", encoding="utf8") as backup_file:
            json.dump(
                non_identical_duplicates,
                backup_file,
                indent=4,
                sort_keys=True,
                cls=JSONEncoder,
            )
        _LOGGER.warning(
            "Deleted %s non identical duplicated %s rows, a backup of the deleted rows "
            "has been saved to %s",
            len(non_identical_duplicates),
            Statistics.__tablename__,
            backup_path,
        )

    # Hitting the cap means more duplicates probably remain.
    if deleted_statistics_rows >= MAX_DUPLICATES:
        _LOGGER.warning(
            "Found more than %s duplicated statistic rows, please report at "
            'https://github.com/home-assistant/core/issues?q=is%%3Aissue+label%%3A"integration%%3A+recorder"+',
            MAX_DUPLICATES - 1,
        )

    deleted_short_term_statistics_rows, _ = _delete_duplicates_from_table(
        session, StatisticsShortTerm
    )
    if deleted_short_term_statistics_rows:
        # NOTE(review): no format args are passed here, so the "%%" escapes
        # remain literal in the logged URL -- confirm this is intended.
        _LOGGER.warning(
            "Deleted duplicated short term statistic rows, please report at "
            'https://github.com/home-assistant/core/issues?q=is%%3Aissue+label%%3A"integration%%3A+recorder"+'
        )
def compile_hourly_statistics(
    instance: Recorder, session: scoped_session, start: datetime
) -> None:
    """Compile hourly statistics.

    This will summarize 5-minute statistics for one hour:
    - average, min max is computed by a database query
    - sum is taken from the last 5-minute entry during the hour
    """
    start_time = start.replace(minute=0)
    end_time = start_time + timedelta(hours=1)
    # Compute last hour's average, min, max
    summary: dict[str, StatisticData] = {}
    baked_query = instance.hass.data[STATISTICS_SHORT_TERM_BAKERY](
        lambda session: session.query(*QUERY_STATISTICS_SUMMARY_MEAN)
    )
    baked_query += lambda q: q.filter(
        StatisticsShortTerm.start >= bindparam("start_time")
    )
    baked_query += lambda q: q.filter(StatisticsShortTerm.start < bindparam("end_time"))
    baked_query += lambda q: q.group_by(StatisticsShortTerm.metadata_id)
    baked_query += lambda q: q.order_by(StatisticsShortTerm.metadata_id)
    stats = execute(
        baked_query(session).params(start_time=start_time, end_time=end_time)
    )
    if stats:
        for stat in stats:
            # Column order matches QUERY_STATISTICS_SUMMARY_MEAN.
            metadata_id, _mean, _min, _max = stat
            summary[metadata_id] = {
                "start": start_time,
                "mean": _mean,
                "min": _min,
                "max": _max,
            }
    # Get last hour's last sum
    if instance._db_supports_row_number:  # pylint: disable=[protected-access]
        # Window-function path: rownum == 1 selects the newest 5-minute row
        # per metadata_id within the hour.
        subquery = (
            session.query(*QUERY_STATISTICS_SUMMARY_SUM)
            .filter(StatisticsShortTerm.start >= bindparam("start_time"))
            .filter(StatisticsShortTerm.start < bindparam("end_time"))
            .subquery()
        )
        query = (
            session.query(subquery)
            .filter(subquery.c.rownum == 1)
            .order_by(subquery.c.metadata_id)
        )
        stats = execute(query.params(start_time=start_time, end_time=end_time))
        if stats:
            for stat in stats:
                # Trailing `_` discards the rownum column.
                metadata_id, start, last_reset, state, _sum, _ = stat
                if metadata_id in summary:
                    summary[metadata_id].update(
                        {
                            "last_reset": process_timestamp(last_reset),
                            "state": state,
                            "sum": _sum,
                        }
                    )
                else:
                    summary[metadata_id] = {
                        "start": start_time,
                        "last_reset": process_timestamp(last_reset),
                        "state": state,
                        "sum": _sum,
                    }
    else:
        # Legacy path for databases without window functions: rows come back
        # newest-first per metadata_id, so the first row of each group holds
        # the hour's final sum.
        baked_query = instance.hass.data[STATISTICS_SHORT_TERM_BAKERY](
            lambda session: session.query(*QUERY_STATISTICS_SUMMARY_SUM_LEGACY)
        )
        baked_query += lambda q: q.filter(
            StatisticsShortTerm.start >= bindparam("start_time")
        )
        baked_query += lambda q: q.filter(
            StatisticsShortTerm.start < bindparam("end_time")
        )
        baked_query += lambda q: q.order_by(
            StatisticsShortTerm.metadata_id, StatisticsShortTerm.start.desc()
        )
        stats = execute(
            baked_query(session).params(start_time=start_time, end_time=end_time)
        )
        if stats:
            for metadata_id, group in groupby(stats, lambda stat: stat["metadata_id"]):  # type: ignore
                (
                    metadata_id,
                    last_reset,
                    state,
                    _sum,
                ) = next(group)
                if metadata_id in summary:
                    summary[metadata_id].update(
                        {
                            "start": start_time,
                            "last_reset": process_timestamp(last_reset),
                            "state": state,
                            "sum": _sum,
                        }
                    )
                else:
                    summary[metadata_id] = {
                        "start": start_time,
                        "last_reset": process_timestamp(last_reset),
                        "state": state,
                        "sum": _sum,
                    }
    # Insert compiled hourly statistics in the database
    for metadata_id, stat in summary.items():
        session.add(Statistics.from_stats(metadata_id, stat))
@retryable_database_job("statistics")
def compile_statistics(instance: Recorder, start: datetime) -> bool:
    """Compile 5-minute statistics for all integrations with a recorder platform.

    The actual calculation is delegated to the platforms.

    Returns True on success (including when the period was already compiled);
    the decorator retries on transient database errors.
    """
    start = dt_util.as_utc(start)
    end = start + timedelta(minutes=5)

    # Return if we already have 5-minute statistics for the requested period
    with session_scope(session=instance.get_session()) as session:  # type: ignore
        if session.query(StatisticsRuns).filter_by(start=start).first():
            _LOGGER.debug("Statistics already compiled for %s-%s", start, end)
            return True

    _LOGGER.debug("Compiling statistics for %s-%s", start, end)
    platform_stats: list[StatisticResult] = []
    # Collect statistics from all platforms implementing support
    for domain, platform in instance.hass.data[DOMAIN].items():
        if not hasattr(platform, "compile_statistics"):
            continue
        platform_stat = platform.compile_statistics(instance.hass, start, end)
        _LOGGER.debug(
            "Statistics for %s during %s-%s: %s", domain, start, end, platform_stat
        )
        platform_stats.extend(platform_stat)

    # Insert collected statistics in the database
    with session_scope(
        session=instance.get_session(),  # type: ignore
        exception_filter=_filter_unique_constraint_integrity_error(instance),
    ) as session:
        for stats in platform_stats:
            metadata_id = _update_or_add_metadata(instance.hass, session, stats["meta"])
            _insert_statistics(
                session,
                StatisticsShortTerm,
                metadata_id,
                stats["stat"],
            )
        if start.minute == 55:
            # A full hour is ready, summarize it
            compile_hourly_statistics(instance, session, start)

        session.add(StatisticsRuns(start=start))

    return True
def _insert_statistics(
session: scoped_session,
table: type[Statistics | StatisticsShortTerm],
metadata_id: int,
statistic: StatisticData,
) -> None:
"""Insert statistics in the database."""
try:
session.add(table.from_stats(metadata_id, statistic))
except SQLAlchemyError:
_LOGGER.exception(
"Unexpected exception when inserting statistics %s:%s ",
metadata_id,
statistic,
)
def _update_statistics(
session: scoped_session,
table: type[Statistics | StatisticsShortTerm],
stat_id: int,
statistic: StatisticData,
) -> None:
"""Insert statistics in the database."""
try:
session.query(table).filter_by(id=stat_id).update(
{
table.mean: statistic.get("mean"),
table.min: statistic.get("min"),
table.max: statistic.get("max"),
table.last_reset: statistic.get("last_reset"),
table.state: statistic.get("state"),
table.sum: statistic.get("sum"),
},
synchronize_session=False,
)
except SQLAlchemyError:
_LOGGER.exception(
"Unexpected exception when updating statistics %s:%s ",
id,
statistic,
)
def get_metadata_with_session(
    hass: HomeAssistant,
    session: scoped_session,
    *,
    statistic_ids: list[str] | tuple[str] | None = None,
    statistic_type: Literal["mean"] | Literal["sum"] | None = None,
    statistic_source: str | None = None,
) -> dict[str, tuple[int, StatisticMetaData]]:
    """Fetch meta data.

    Returns a dict of (metadata_id, StatisticMetaData) tuples indexed by statistic_id.

    If statistic_ids is given, fetch metadata only for the listed statistics_ids.
    If statistic_type is given, fetch metadata only for statistic_ids supporting it.
    If statistic_source is given, fetch metadata only for that source.
    """
    # Fetch metadata from the database via a cached (baked) query.
    baked_query = hass.data[STATISTICS_META_BAKERY](
        lambda session: session.query(*QUERY_STATISTIC_META)
    )
    if statistic_ids is not None:
        baked_query += lambda q: q.filter(
            StatisticsMeta.statistic_id.in_(bindparam("statistic_ids"))
        )
    if statistic_source is not None:
        baked_query += lambda q: q.filter(
            StatisticsMeta.source == bindparam("statistic_source")
        )
    if statistic_type == "mean":
        baked_query += lambda q: q.filter(StatisticsMeta.has_mean == true())
    elif statistic_type == "sum":
        baked_query += lambda q: q.filter(StatisticsMeta.has_sum == true())
    result = execute(
        baked_query(session).params(
            statistic_ids=statistic_ids, statistic_source=statistic_source
        )
    )
    if not result:
        return {}

    # Re-shape rows into {statistic_id: (metadata_id, StatisticMetaData)}.
    return {
        meta["statistic_id"]: (
            meta["id"],
            {
                "source": meta["source"],
                "statistic_id": meta["statistic_id"],
                "unit_of_measurement": meta["unit_of_measurement"],
                "has_mean": meta["has_mean"],
                "has_sum": meta["has_sum"],
                "name": meta["name"],
            },
        )
        for meta in result
    }
def get_metadata(
    hass: HomeAssistant,
    *,
    statistic_ids: list[str] | tuple[str] | None = None,
    statistic_type: Literal["mean"] | Literal["sum"] | None = None,
    statistic_source: str | None = None,
) -> dict[str, tuple[int, StatisticMetaData]]:
    """Return metadata for statistic_ids.

    Convenience wrapper around get_metadata_with_session that opens (and
    closes) its own database session.
    """
    with session_scope(hass=hass) as session:
        return get_metadata_with_session(
            hass,
            session,
            statistic_ids=statistic_ids,
            statistic_type=statistic_type,
            statistic_source=statistic_source,
        )
def _configured_unit(unit: str, units: UnitSystem) -> str:
    """Map a statistic's native unit to the user-configured display unit.

    Units without a configurable counterpart are returned unchanged.
    """
    if unit == PRESSURE_PA:
        return units.pressure_unit
    if unit == TEMP_CELSIUS:
        return units.temperature_unit
    if unit == VOLUME_CUBIC_METERS:
        return VOLUME_CUBIC_METERS if units.is_metric else VOLUME_CUBIC_FEET
    return unit
def clear_statistics(instance: Recorder, statistic_ids: list[str]) -> None:
    """Clear statistics for a list of statistic_ids."""
    with session_scope(session=instance.get_session()) as session:  # type: ignore
        # NOTE(review): only the metadata rows are deleted here; presumably
        # the statistics rows are removed via a FK cascade -- confirm in the
        # models module.
        session.query(StatisticsMeta).filter(
            StatisticsMeta.statistic_id.in_(statistic_ids)
        ).delete(synchronize_session=False)
def update_statistics_metadata(
    instance: Recorder, statistic_id: str, unit_of_measurement: str | None
) -> None:
    """Update statistics metadata for a statistic_id.

    Only the unit_of_measurement column is changed.
    """
    with session_scope(session=instance.get_session()) as session:  # type: ignore
        session.query(StatisticsMeta).filter(
            StatisticsMeta.statistic_id == statistic_id
        ).update({StatisticsMeta.unit_of_measurement: unit_of_measurement})
def list_statistic_ids(
    hass: HomeAssistant,
    statistic_type: Literal["mean"] | Literal["sum"] | None = None,
) -> list[dict | None]:
    """Return all statistic_ids and unit of measurement.

    Queries the database for existing statistic_ids, as well as integrations with
    a recorder platform for statistic_ids which will be added in the next statistics
    period.
    """
    units = hass.config.units
    statistic_ids = {}
    # Query the database
    with session_scope(hass=hass) as session:
        metadata = get_metadata_with_session(
            hass, session, statistic_type=statistic_type
        )

        # Rewrite units in place so the dict comprehension below picks up the
        # user-configured display unit.
        for _, meta in metadata.values():
            if (unit := meta["unit_of_measurement"]) is not None:
                # Display unit according to user settings
                unit = _configured_unit(unit, units)
            meta["unit_of_measurement"] = unit

        statistic_ids = {
            meta["statistic_id"]: {
                "name": meta["name"],
                "source": meta["source"],
                "unit_of_measurement": meta["unit_of_measurement"],
            }
            for _, meta in metadata.values()
        }

    # Query all integrations with a registered recorder platform
    for platform in hass.data[DOMAIN].values():
        if not hasattr(platform, "list_statistic_ids"):
            continue
        platform_statistic_ids = platform.list_statistic_ids(hass, statistic_type)

        for statistic_id, info in platform_statistic_ids.items():
            if (unit := info["unit_of_measurement"]) is not None:
                # Display unit according to user settings
                unit = _configured_unit(unit, units)
            platform_statistic_ids[statistic_id]["unit_of_measurement"] = unit

        # Database entries win over platform-provided ones.
        for key, value in platform_statistic_ids.items():
            statistic_ids.setdefault(key, value)

    # Return a list of statistic_id + metadata
    return [
        {
            "statistic_id": _id,
            "name": info.get("name"),
            "source": info["source"],
            "unit_of_measurement": info["unit_of_measurement"],
        }
        for _id, info in statistic_ids.items()
    ]
def _statistics_during_period_query(
    hass: HomeAssistant,
    end_time: datetime | None,
    statistic_ids: list[str] | None,
    bakery: Any,
    base_query: Iterable,
    table: type[Statistics | StatisticsShortTerm],
) -> Callable:
    """Prepare a database query for statistics during a given period.

    This prepares a baked query, so we don't insert the parameters yet.

    NOTE(review): the lambdas close over `table`; the two tables are kept
    apart by passing distinct bakeries (see statistics_during_period) --
    confirm the baked-query cache cannot mix them.
    """
    baked_query = hass.data[bakery](lambda session: session.query(*base_query))

    baked_query += lambda q: q.filter(table.start >= bindparam("start_time"))

    if end_time is not None:
        baked_query += lambda q: q.filter(table.start < bindparam("end_time"))

    if statistic_ids is not None:
        baked_query += lambda q: q.filter(
            table.metadata_id.in_(bindparam("metadata_ids"))
        )

    baked_query += lambda q: q.order_by(table.metadata_id, table.start)
    return baked_query  # type: ignore[no-any-return]
def _reduce_statistics(
stats: dict[str, list[dict[str, Any]]],
same_period: Callable[[datetime, datetime], bool],
period_start_end: Callable[[datetime], tuple[datetime, datetime]],
period: timedelta,
) -> dict[str, list[dict[str, Any]]]:
"""Reduce hourly statistics to daily or monthly statistics."""
result: dict[str, list[dict[str, Any]]] = defaultdict(list)
for statistic_id, stat_list in stats.items():
max_values: list[float] = []
mean_values: list[float] = []
min_values: list[float] = []
prev_stat: dict[str, Any] = stat_list[0]
# Loop over the hourly statistics + a fake entry to end the period
for statistic in chain(
stat_list, ({"start": stat_list[-1]["start"] + period},)
):
if not same_period(prev_stat["start"], statistic["start"]):
start, end = period_start_end(prev_stat["start"])
# The previous statistic was the last entry of the period
result[statistic_id].append(
{
"statistic_id": statistic_id,
"start": start.isoformat(),
"end": end.isoformat(),
"mean": mean(mean_values) if mean_values else None,
"min": min(min_values) if min_values else None,
"max": max(max_values) if max_values else None,
"last_reset": prev_stat.get("last_reset"),
"state": prev_stat.get("state"),
"sum": prev_stat["sum"],
}
)
max_values = []
mean_values = []
min_values = []
if statistic.get("max") is not None:
max_values.append(statistic["max"])
if statistic.get("mean") is not None:
mean_values.append(statistic["mean"])
if statistic.get("min") is not None:
min_values.append(statistic["min"])
prev_stat = statistic
return result
def same_day(time1: datetime, time2: datetime) -> bool:
    """Return True if time1 and time2 fall on the same local calendar day."""
    return dt_util.as_local(time1).date() == dt_util.as_local(time2).date()
def day_start_end(time: datetime) -> tuple[datetime, datetime]:
    """Return the UTC start and end of the local day containing `time`."""
    local_midnight = dt_util.as_local(time).replace(
        hour=0, minute=0, second=0, microsecond=0
    )
    day_start = dt_util.as_utc(local_midnight)
    return (day_start, day_start + timedelta(days=1))
def _reduce_statistics_per_day(
    stats: dict[str, list[dict[str, Any]]]
) -> dict[str, list[dict[str, Any]]]:
    """Group hourly statistics into one entry per local calendar day."""
    return _reduce_statistics(stats, same_day, day_start_end, timedelta(days=1))
def same_month(time1: datetime, time2: datetime) -> bool:
    """Return True if time1 and time2 fall in the same local year and month."""
    local1 = dt_util.as_local(time1).date()
    local2 = dt_util.as_local(time2).date()
    return (local1.year, local1.month) == (local2.year, local2.month)
def month_start_end(time: datetime) -> tuple[datetime, datetime]:
    """Return the UTC start and end of the local month containing `time`."""
    first_of_month = dt_util.as_local(time).replace(
        day=1, hour=0, minute=0, second=0, microsecond=0
    )
    # Jumping 31 days always lands in the next month; snap back to its day 1.
    first_of_next = (first_of_month + timedelta(days=31)).replace(day=1)
    return (dt_util.as_utc(first_of_month), dt_util.as_utc(first_of_next))
def _reduce_statistics_per_month(
    stats: dict[str, list[dict[str, Any]]],
) -> dict[str, list[dict[str, Any]]]:
    """Group hourly statistics into one entry per local calendar month."""
    return _reduce_statistics(stats, same_month, month_start_end, timedelta(days=31))
def statistics_during_period(
    hass: HomeAssistant,
    start_time: datetime,
    end_time: datetime | None = None,
    statistic_ids: list[str] | None = None,
    period: Literal["5minute", "day", "hour", "month"] = "hour",
    start_time_as_datetime: bool = False,
) -> dict[str, list[dict[str, Any]]]:
    """Return statistics during UTC period start_time - end_time for the statistic_ids.

    If end_time is omitted, returns statistics newer than or equal to start_time.
    If statistic_ids is omitted, returns statistics for all statistics ids.
    """
    metadata = None
    with session_scope(hass=hass) as session:
        # Fetch metadata for the given (or all) statistic_ids
        metadata = get_metadata_with_session(hass, session, statistic_ids=statistic_ids)
        if not metadata:
            return {}

        metadata_ids = None
        if statistic_ids is not None:
            metadata_ids = [metadata_id for metadata_id, _ in metadata.values()]

        # 5-minute data lives in the short-term table; hour/day/month are all
        # served from the long-term (hourly) table.
        if period == "5minute":
            bakery = STATISTICS_SHORT_TERM_BAKERY
            base_query = QUERY_STATISTICS_SHORT_TERM
            table = StatisticsShortTerm
        else:
            bakery = STATISTICS_BAKERY
            base_query = QUERY_STATISTICS
            table = Statistics

        baked_query = _statistics_during_period_query(
            hass, end_time, statistic_ids, bakery, base_query, table
        )

        stats = execute(
            baked_query(session).params(
                start_time=start_time, end_time=end_time, metadata_ids=metadata_ids
            )
        )
        if not stats:
            return {}
        # Return statistics combined with metadata
        if period not in ("day", "month"):
            return _sorted_statistics_to_dict(
                hass,
                session,
                stats,
                statistic_ids,
                metadata,
                True,
                table,
                start_time,
                start_time_as_datetime,
            )

        # Day/month periods are reduced from hourly rows; force datetimes so
        # the reducers can do date arithmetic on "start".
        result = _sorted_statistics_to_dict(
            hass, session, stats, statistic_ids, metadata, True, table, start_time, True
        )

        if period == "day":
            return _reduce_statistics_per_day(result)

        return _reduce_statistics_per_month(result)
def _get_last_statistics(
    hass: HomeAssistant,
    number_of_stats: int,
    statistic_id: str,
    convert_units: bool,
    table: type[Statistics | StatisticsShortTerm],
) -> dict[str, list[dict]]:
    """Return the last number_of_stats statistics for a given statistic_id.

    `table` selects the long-term or short-term statistics table; rows are
    returned newest-first.
    """
    statistic_ids = [statistic_id]
    with session_scope(hass=hass) as session:
        # Fetch metadata for the given statistic_id
        metadata = get_metadata_with_session(hass, session, statistic_ids=statistic_ids)
        if not metadata:
            return {}

        if table == StatisticsShortTerm:
            bakery = STATISTICS_SHORT_TERM_BAKERY
            base_query = QUERY_STATISTICS_SHORT_TERM
        else:
            bakery = STATISTICS_BAKERY
            base_query = QUERY_STATISTICS

        baked_query = hass.data[bakery](lambda session: session.query(*base_query))

        baked_query += lambda q: q.filter_by(metadata_id=bindparam("metadata_id"))
        metadata_id = metadata[statistic_id][0]

        # Newest rows first, capped at number_of_stats.
        baked_query += lambda q: q.order_by(table.metadata_id, table.start.desc())

        baked_query += lambda q: q.limit(bindparam("number_of_stats"))

        stats = execute(
            baked_query(session).params(
                number_of_stats=number_of_stats, metadata_id=metadata_id
            )
        )
        if not stats:
            return {}

        # Return statistics combined with metadata
        return _sorted_statistics_to_dict(
            hass,
            session,
            stats,
            statistic_ids,
            metadata,
            convert_units,
            table,
            None,
        )
def get_last_statistics(
    hass: HomeAssistant, number_of_stats: int, statistic_id: str, convert_units: bool
) -> dict[str, list[dict]]:
    """Return the last number_of_stats long-term statistics for a statistic_id."""
    return _get_last_statistics(
        hass, number_of_stats, statistic_id, convert_units, table=Statistics
    )
def get_last_short_term_statistics(
    hass: HomeAssistant, number_of_stats: int, statistic_id: str, convert_units: bool
) -> dict[str, list[dict]]:
    """Return the last number_of_stats short term statistics for a statistic_id."""
    return _get_last_statistics(
        hass, number_of_stats, statistic_id, convert_units, table=StatisticsShortTerm
    )
def _statistics_at_time(
    session: scoped_session,
    metadata_ids: set[int],
    table: type[Statistics | StatisticsShortTerm],
    start_time: datetime,
) -> list | None:
    """Return the newest statistics row older than start_time per metadata_id."""
    # Column list depends on which statistics table is being queried.
    if table == StatisticsShortTerm:
        columns = QUERY_STATISTICS_SHORT_TERM
    else:
        columns = QUERY_STATISTICS
    # Subquery: for each requested metadata_id, the id of its most recent
    # row that starts strictly before start_time.
    max_ids = (
        session.query(func.max(table.id).label("max_id"))
        .filter(table.start < start_time)
        .filter(table.metadata_id.in_(metadata_ids))
        .group_by(table.metadata_id)
        .subquery()
    )
    # Join back to the table to fetch the full rows for those ids.
    rows_query = session.query(*columns).join(
        max_ids,
        table.id == max_ids.c.max_id,
    )
    return execute(rows_query)
def _sorted_statistics_to_dict(
    hass: HomeAssistant,
    session: scoped_session,
    stats: list,
    statistic_ids: list[str] | None,
    _metadata: dict[str, tuple[int, StatisticMetaData]],
    convert_units: bool,
    table: type[Statistics | StatisticsShortTerm],
    start_time: datetime | None,
    start_time_as_datetime: bool = False,
) -> dict[str, list[dict]]:
    """Convert SQL results into JSON friendly data structure.

    ``stats`` must be ordered so that rows with equal metadata_id are
    adjacent (groupby only groups consecutive items).  When ``start_time``
    is given and a series has no row at exactly that time, the latest
    earlier row is fetched via _statistics_at_time and prepended so every
    series covers the requested start.
    """
    result: dict = defaultdict(list)
    units = hass.config.units
    # _metadata maps statistic_id -> (metadata_id, meta); re-key by metadata_id
    metadata = dict(_metadata.values())
    need_stat_at_start_time = set()
    stats_at_start_time = {}
    def no_conversion(val: Any, _: Any) -> float | None:
        """Return val unchanged (used when unit conversion is disabled)."""
        return val  # type: ignore
    # Set all statistic IDs to empty lists in result set to maintain the order
    if statistic_ids is not None:
        for stat_id in statistic_ids:
            result[stat_id] = []
    # Identify metadata IDs for which no data was available at the requested start time
    # (next(group) consumes only this pass's iterator; stats is a list, so the
    # second groupby pass below re-reads every row.)
    for meta_id, group in groupby(stats, lambda stat: stat.metadata_id): # type: ignore
        first_start_time = process_timestamp(next(group).start)
        if start_time and first_start_time > start_time:
            need_stat_at_start_time.add(meta_id)
    # Fetch last known statistics for the needed metadata IDs
    if need_stat_at_start_time:
        assert start_time  # Can not be None if need_stat_at_start_time is not empty
        tmp = _statistics_at_time(session, need_stat_at_start_time, table, start_time)
        if tmp:
            for stat in tmp:
                stats_at_start_time[stat.metadata_id] = (stat,)
    # Append all statistic entries, and optionally do unit conversion
    for meta_id, group in groupby(stats, lambda stat: stat.metadata_id): # type: ignore
        unit = metadata[meta_id]["unit_of_measurement"]
        statistic_id = metadata[meta_id]["statistic_id"]
        convert: Callable[[Any, Any], float | None]
        if convert_units:
            # Fall back to identity when the unit has no registered converter
            convert = UNIT_CONVERSIONS.get(unit, lambda x, units: x)  # type: ignore
        else:
            convert = no_conversion
        ent_results = result[meta_id]
        # Backfilled row (at/before start_time) first, then the sorted rows
        for db_state in chain(stats_at_start_time.get(meta_id, ()), group):
            start = process_timestamp(db_state.start)
            end = start + table.duration
            ent_results.append(
                {
                    "statistic_id": statistic_id,
                    "start": start if start_time_as_datetime else start.isoformat(),
                    "end": end.isoformat(),
                    "mean": convert(db_state.mean, units),
                    "min": convert(db_state.min, units),
                    "max": convert(db_state.max, units),
                    "last_reset": process_timestamp_to_utc_isoformat(
                        db_state.last_reset
                    ),
                    "state": convert(db_state.state, units),
                    "sum": convert(db_state.sum, units),
                }
            )
    # Filter out the empty lists if some states had 0 results.
    # (This also drops the placeholder entries keyed by statistic_id above.)
    return {metadata[key]["statistic_id"]: val for key, val in result.items() if val}
def validate_statistics(hass: HomeAssistant) -> dict[str, list[ValidationIssue]]:
    """Collect statistics validation issues from every capable platform."""
    issues: dict[str, list[ValidationIssue]] = {}
    for platform in hass.data[DOMAIN].values():
        # Platforms without a validate_statistics hook are skipped.
        validate = getattr(platform, "validate_statistics", None)
        if validate is None:
            continue
        issues.update(validate(hass))
    return issues
def _statistics_exists(
    session: scoped_session,
    table: type[Statistics | StatisticsShortTerm],
    metadata_id: int,
    start: datetime,
) -> int | None:
    """Return the row id of an existing statistics entry, or None."""
    existing = (
        session.query(table.id)
        .filter(table.metadata_id == metadata_id)
        .filter(table.start == start)
        .first()
    )
    if not existing:
        return None
    return existing["id"]
@callback
def async_add_external_statistics(
    hass: HomeAssistant,
    metadata: StatisticMetaData,
    statistics: Iterable[StatisticData],
) -> None:
    """Add hourly statistics from an external source.

    This inserts an add_external_statistics job in the recorder's queue.
    """
    statistic_id = metadata["statistic_id"]
    # The statistic_id has the same limitations as an entity_id, but uses
    # ':' as the separator.
    if not valid_statistic_id(statistic_id):
        raise HomeAssistantError("Invalid statistic_id")
    # The source must be non-empty and match the statistic_id's domain.
    domain, _object_id = split_statistic_id(statistic_id)
    source = metadata["source"]
    if not source or source != domain:
        raise HomeAssistantError("Invalid source")
    for statistic in statistics:
        start = statistic["start"]
        # Timestamps must be timezone-aware and aligned to a whole hour.
        if start.tzinfo is None or start.tzinfo.utcoffset(start) is None:
            raise HomeAssistantError("Naive timestamp")
        if start.minute or start.second or start.microsecond:
            raise HomeAssistantError("Invalid timestamp")
        statistic["start"] = dt_util.as_utc(start)
    # Hand the job to the recorder instance for asynchronous processing.
    hass.data[DATA_INSTANCE].async_external_statistics(metadata, statistics)
def _filter_unique_constraint_integrity_error(
    instance: Recorder,
) -> Callable[[Exception], bool]:
    """Build an exception filter that swallows duplicate-row insert errors.

    The returned predicate is meant for session_scope's exception_filter:
    it returns True (ignore the error) when the StatementError is a
    unique-constraint violation for the active database dialect, logging
    a warning instead of letting the transaction fail.
    """
    def _filter_unique_constraint_integrity_error(err: Exception) -> bool:
        """Handle unique constraint integrity errors."""
        if not isinstance(err, StatementError):
            return False
        ignore = False
        dialect = instance.engine.dialect.name
        if dialect == "sqlite" and "UNIQUE constraint failed" in str(err):
            ignore = True
        # PostgreSQL: SQLSTATE 23505 is unique_violation
        if (
            dialect == "postgresql"
            and hasattr(err.orig, "pgcode")
            and err.orig.pgcode == "23505"
        ):
            ignore = True
        # MySQL: error 1062 is ER_DUP_ENTRY
        if dialect == "mysql" and hasattr(err.orig, "args"):
            with contextlib.suppress(TypeError):
                if err.orig.args[0] == 1062:
                    ignore = True
        if ignore:
            # BUGFIX: no %-args are passed to the logger, so the message is
            # emitted verbatim (logging only %-formats when args are given);
            # the previous '%%' escapes appeared literally and broke the URL.
            _LOGGER.warning(
                "Blocked attempt to insert duplicated statistic rows, please report at "
                'https://github.com/home-assistant/core/issues?q=is%3Aissue+label%3A"integration%3A+recorder"+',
                exc_info=err,
            )
        return ignore
    return _filter_unique_constraint_integrity_error
@retryable_database_job("statistics")
def add_external_statistics(
    instance: Recorder,
    metadata: StatisticMetaData,
    statistics: Iterable[StatisticData],
) -> bool:
    """Process an add_statistics job: upsert external statistics rows."""
    exception_filter = _filter_unique_constraint_integrity_error(instance)
    with session_scope(
        session=instance.get_session(),  # type: ignore
        exception_filter=exception_filter,
    ) as session:
        metadata_id = _update_or_add_metadata(instance.hass, session, metadata)
        for stat in statistics:
            existing_id = _statistics_exists(
                session, Statistics, metadata_id, stat["start"]
            )
            if existing_id:
                # A row for this start already exists: update it in place.
                _update_statistics(session, Statistics, existing_id, stat)
            else:
                _insert_statistics(session, Statistics, metadata_id, stat)
        return True
| |
import logging
import random
import string
import pytest
import salt.config
import salt.loader
import salt.states.boto_cloudwatch_event as boto_cloudwatch_event
from tests.support.mock import MagicMock, patch
boto = pytest.importorskip("boto")
boto3 = pytest.importorskip("boto3", "1.2.1")
botocore = pytest.importorskip("botocore", "1.4.41")
log = logging.getLogger(__name__)
class GlobalConfig:
    """Shared fixture data for the boto_cloudwatch_event state tests."""

    # Fake AWS credentials/region handed to the state functions.
    region = "us-east-1"
    access_key = "GKTADJGHEIQSXMKKRBJ08H"
    secret_key = "askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs"
    conn_parameters = {
        "region": region,
        "key": access_key,
        "keyid": secret_key,
        "profile": {},
    }
    # Canned botocore-style error payloads.
    error_message = (
        "An error occurred (101) when calling the {0} operation: Test-defined error"
    )
    error_content = {"Error": {"Code": 101, "Message": "Test-defined error"}}
    # A single CloudWatch Events rule as the API would describe it.
    rule_name = "test_thing_type"
    rule_desc = "test_thing_type_desc"
    rule_sched = "rate(20 min)"
    rule_arn = "arn:::::rule/arn"
    rule_ret = {
        "Arn": rule_arn,
        "Description": rule_desc,
        "EventPattern": None,
        "Name": rule_name,
        "RoleArn": None,
        "ScheduleExpression": rule_sched,
        "State": "ENABLED",
    }
@pytest.fixture
def global_config():
    """Provide a fresh GlobalConfig instance for each test."""
    return GlobalConfig()
@pytest.fixture
def configure_loader_modules():
    """Wire real salt loader dunders into the boto_cloudwatch_event state module."""
    opts = salt.config.DEFAULT_MINION_OPTS.copy()
    opts["grains"] = salt.loader.grains(opts)
    ctx = {}
    utils = salt.loader.utils(
        opts,
        whitelist=["boto3", "args", "systemd", "path", "platform"],
        context=ctx,
    )
    serializers = salt.loader.serializers(opts)
    # BUGFIX: removed duplicated assignment ("funcs = funcs = ...").
    funcs = salt.loader.minion_mods(
        opts, context=ctx, utils=utils, whitelist=["boto_cloudwatch_event"]
    )
    salt_states = salt.loader.states(
        opts=opts,
        functions=funcs,
        utils=utils,
        whitelist=["boto_cloudwatch_event"],
        serializers=serializers,
    )
    return {
        boto_cloudwatch_event: {
            "__opts__": opts,
            "__salt__": funcs,
            "__utils__": utils,
            "__states__": salt_states,
            "__serializers__": serializers,
        }
    }
def test_present_when_failing_to_describe_rule(global_config):
    """
    Tests exceptions when checking rule existence.

    Fixes: the ``comment`` fallback was ``{}`` (a dict), so the substring
    assertion checked dict keys instead of message text, inconsistent with
    the sibling tests' ``""``; the patcher is now stopped so the boto3
    mock does not leak into later tests.
    """
    global_config.conn_parameters["key"] = "".join(
        random.choice(string.ascii_lowercase + string.digits) for _ in range(50)
    )
    patcher = patch("boto3.session.Session")
    mock_session = patcher.start()
    try:
        session_instance = mock_session.return_value
        conn = MagicMock()
        session_instance.client.return_value = conn
        conn.list_rules.side_effect = botocore.exceptions.ClientError(
            global_config.error_content, "error on list rules"
        )
        result = boto_cloudwatch_event.__states__["boto_cloudwatch_event.present"](
            name="test present",
            Name=global_config.rule_name,
            Description=global_config.rule_desc,
            ScheduleExpression=global_config.rule_sched,
            Targets=[{"Id": "target1", "Arn": "arn::::::*"}],
            **global_config.conn_parameters
        )
    finally:
        patcher.stop()
    assert result.get("result") is False
    assert "error on list rules" in result.get("comment", "")
def test_present_when_failing_to_create_a_new_rule(global_config):
    """
    present must fail when the rule does not exist yet and put_rule
    raises a ClientError.
    """
    global_config.conn_parameters["key"] = "".join(
        random.choices(string.ascii_lowercase + string.digits, k=50)
    )
    session_cls = patch("boto3.session.Session").start()
    conn = MagicMock()
    session_cls.return_value.client.return_value = conn
    conn.list_rules.return_value = {"Rules": []}
    conn.put_rule.side_effect = botocore.exceptions.ClientError(
        global_config.error_content, "put_rule"
    )
    present = boto_cloudwatch_event.__states__["boto_cloudwatch_event.present"]
    result = present(
        name="test present",
        Name=global_config.rule_name,
        Description=global_config.rule_desc,
        ScheduleExpression=global_config.rule_sched,
        Targets=[{"Id": "target1", "Arn": "arn::::::*"}],
        **global_config.conn_parameters
    )
    assert result.get("result") is False
    assert "put_rule" in result.get("comment", "")
def test_present_when_failing_to_describe_the_new_rule(global_config):
    """
    present must fail when the rule is freshly created but the follow-up
    describe_rule call raises a ClientError.
    """
    global_config.conn_parameters["key"] = "".join(
        random.choices(string.ascii_lowercase + string.digits, k=50)
    )
    session_cls = patch("boto3.session.Session").start()
    conn = MagicMock()
    session_cls.return_value.client.return_value = conn
    conn.list_rules.return_value = {"Rules": []}
    conn.put_rule.return_value = global_config.rule_ret
    conn.describe_rule.side_effect = botocore.exceptions.ClientError(
        global_config.error_content, "describe_rule"
    )
    present = boto_cloudwatch_event.__states__["boto_cloudwatch_event.present"]
    result = present(
        name="test present",
        Name=global_config.rule_name,
        Description=global_config.rule_desc,
        ScheduleExpression=global_config.rule_sched,
        Targets=[{"Id": "target1", "Arn": "arn::::::*"}],
        **global_config.conn_parameters
    )
    assert result.get("result") is False
    assert "describe_rule" in result.get("comment", "")
def test_present_when_failing_to_create_a_new_rules_targets(global_config):
    """
    present must fail when the new rule is created and described but
    put_targets raises a ClientError.
    """
    global_config.conn_parameters["key"] = "".join(
        random.choices(string.ascii_lowercase + string.digits, k=50)
    )
    session_cls = patch("boto3.session.Session").start()
    conn = MagicMock()
    session_cls.return_value.client.return_value = conn
    conn.list_rules.return_value = {"Rules": []}
    conn.put_rule.return_value = global_config.rule_ret
    conn.describe_rule.return_value = global_config.rule_ret
    conn.put_targets.side_effect = botocore.exceptions.ClientError(
        global_config.error_content, "put_targets"
    )
    present = boto_cloudwatch_event.__states__["boto_cloudwatch_event.present"]
    result = present(
        name="test present",
        Name=global_config.rule_name,
        Description=global_config.rule_desc,
        ScheduleExpression=global_config.rule_sched,
        Targets=[{"Id": "target1", "Arn": "arn::::::*"}],
        **global_config.conn_parameters
    )
    assert result.get("result") is False
    assert "put_targets" in result.get("comment", "")
def test_present_when_rule_does_not_exist(global_config):
    """
    Happy path: creating a brand-new rule and installing its targets
    succeeds.
    """
    global_config.conn_parameters["key"] = "".join(
        random.choices(string.ascii_lowercase + string.digits, k=50)
    )
    session_cls = patch("boto3.session.Session").start()
    conn = MagicMock()
    session_cls.return_value.client.return_value = conn
    conn.list_rules.return_value = {"Rules": []}
    conn.put_rule.return_value = global_config.rule_ret
    conn.describe_rule.return_value = global_config.rule_ret
    conn.put_targets.return_value = {"FailedEntryCount": 0}
    present = boto_cloudwatch_event.__states__["boto_cloudwatch_event.present"]
    result = present(
        name="test present",
        Name=global_config.rule_name,
        Description=global_config.rule_desc,
        ScheduleExpression=global_config.rule_sched,
        Targets=[{"Id": "target1", "Arn": "arn::::::*"}],
        **global_config.conn_parameters
    )
    assert result.get("result") is True
def test_present_when_failing_to_update_an_existing_rule(global_config):
    """
    present must fail when the rule already exists but describe_rule
    raises while reading its current properties.
    """
    global_config.conn_parameters["key"] = "".join(
        random.choices(string.ascii_lowercase + string.digits, k=50)
    )
    session_cls = patch("boto3.session.Session").start()
    conn = MagicMock()
    session_cls.return_value.client.return_value = conn
    conn.list_rules.return_value = {"Rules": [global_config.rule_ret]}
    conn.describe_rule.side_effect = botocore.exceptions.ClientError(
        global_config.error_content, "describe_rule"
    )
    present = boto_cloudwatch_event.__states__["boto_cloudwatch_event.present"]
    result = present(
        name="test present",
        Name=global_config.rule_name,
        Description=global_config.rule_desc,
        ScheduleExpression=global_config.rule_sched,
        Targets=[{"Id": "target1", "Arn": "arn::::::*"}],
        **global_config.conn_parameters
    )
    assert result.get("result") is False
    assert "describe_rule" in result.get("comment", "")
def test_present_when_failing_to_get_targets(global_config):
    """
    present must fail when the rule exists but reading its current
    targets (list_targets_by_rule) raises a ClientError.
    """
    global_config.conn_parameters["key"] = "".join(
        random.choices(string.ascii_lowercase + string.digits, k=50)
    )
    session_cls = patch("boto3.session.Session").start()
    conn = MagicMock()
    session_cls.return_value.client.return_value = conn
    conn.list_rules.return_value = {"Rules": [global_config.rule_ret]}
    conn.put_rule.return_value = global_config.rule_ret
    conn.describe_rule.return_value = global_config.rule_ret
    conn.list_targets_by_rule.side_effect = botocore.exceptions.ClientError(
        global_config.error_content, "list_targets"
    )
    present = boto_cloudwatch_event.__states__["boto_cloudwatch_event.present"]
    result = present(
        name="test present",
        Name=global_config.rule_name,
        Description=global_config.rule_desc,
        ScheduleExpression=global_config.rule_sched,
        Targets=[{"Id": "target1", "Arn": "arn::::::*"}],
        **global_config.conn_parameters
    )
    assert result.get("result") is False
    assert "list_targets" in result.get("comment", "")
def test_present_when_failing_to_put_targets(global_config):
    """
    present must fail when the rule is in place but put_targets raises a
    ClientError while installing targets.
    """
    global_config.conn_parameters["key"] = "".join(
        random.choices(string.ascii_lowercase + string.digits, k=50)
    )
    session_cls = patch("boto3.session.Session").start()
    conn = MagicMock()
    session_cls.return_value.client.return_value = conn
    conn.list_rules.return_value = {"Rules": []}
    conn.put_rule.return_value = global_config.rule_ret
    conn.describe_rule.return_value = global_config.rule_ret
    conn.list_targets.return_value = {"Targets": []}
    conn.put_targets.side_effect = botocore.exceptions.ClientError(
        global_config.error_content, "put_targets"
    )
    present = boto_cloudwatch_event.__states__["boto_cloudwatch_event.present"]
    result = present(
        name="test present",
        Name=global_config.rule_name,
        Description=global_config.rule_desc,
        ScheduleExpression=global_config.rule_sched,
        Targets=[{"Id": "target1", "Arn": "arn::::::*"}],
        **global_config.conn_parameters
    )
    assert result.get("result") is False
    assert "put_targets" in result.get("comment", "")
def test_present_when_putting_targets(global_config):
    """
    Happy path: the rule is in place and missing targets are added
    successfully.
    """
    global_config.conn_parameters["key"] = "".join(
        random.choices(string.ascii_lowercase + string.digits, k=50)
    )
    session_cls = patch("boto3.session.Session").start()
    conn = MagicMock()
    session_cls.return_value.client.return_value = conn
    conn.list_rules.return_value = {"Rules": []}
    conn.put_rule.return_value = global_config.rule_ret
    conn.describe_rule.return_value = global_config.rule_ret
    conn.list_targets.return_value = {"Targets": []}
    conn.put_targets.return_value = {"FailedEntryCount": 0}
    present = boto_cloudwatch_event.__states__["boto_cloudwatch_event.present"]
    result = present(
        name="test present",
        Name=global_config.rule_name,
        Description=global_config.rule_desc,
        ScheduleExpression=global_config.rule_sched,
        Targets=[{"Id": "target1", "Arn": "arn::::::*"}],
        **global_config.conn_parameters
    )
    assert result.get("result") is True
def test_present_when_removing_targets(global_config):
    """
    Happy path: the rule is in place and extra targets are removed
    successfully.
    """
    global_config.conn_parameters["key"] = "".join(
        random.choices(string.ascii_lowercase + string.digits, k=50)
    )
    session_cls = patch("boto3.session.Session").start()
    conn = MagicMock()
    session_cls.return_value.client.return_value = conn
    conn.list_rules.return_value = {"Rules": []}
    conn.put_rule.return_value = global_config.rule_ret
    conn.describe_rule.return_value = global_config.rule_ret
    conn.list_targets.return_value = {"Targets": [{"Id": "target1"}, {"Id": "target2"}]}
    conn.put_targets.return_value = {"FailedEntryCount": 0}
    present = boto_cloudwatch_event.__states__["boto_cloudwatch_event.present"]
    result = present(
        name="test present",
        Name=global_config.rule_name,
        Description=global_config.rule_desc,
        ScheduleExpression=global_config.rule_sched,
        Targets=[{"Id": "target1", "Arn": "arn::::::*"}],
        **global_config.conn_parameters
    )
    assert result.get("result") is True
def test_absent_when_failing_to_describe_rule(global_config):
    """
    Tests exceptions when checking rule existence.

    Fixes: the ``comment`` fallback was ``{}`` (a dict), so the substring
    assertion checked dict keys instead of message text, inconsistent with
    the sibling tests' ``""``; the patcher is now stopped so the boto3
    mock does not leak into later tests.
    """
    global_config.conn_parameters["key"] = "".join(
        random.choice(string.ascii_lowercase + string.digits) for _ in range(50)
    )
    patcher = patch("boto3.session.Session")
    mock_session = patcher.start()
    try:
        session_instance = mock_session.return_value
        conn = MagicMock()
        session_instance.client.return_value = conn
        conn.list_rules.side_effect = botocore.exceptions.ClientError(
            global_config.error_content, "error on list rules"
        )
        result = boto_cloudwatch_event.__states__["boto_cloudwatch_event.absent"](
            name="test present",
            Name=global_config.rule_name,
            **global_config.conn_parameters
        )
    finally:
        patcher.stop()
    assert result.get("result") is False
    assert "error on list rules" in result.get("comment", "")
def test_absent_when_rule_does_not_exist(global_config):
    """
    absent on a rule that is already gone succeeds with no changes.
    """
    global_config.conn_parameters["key"] = "".join(
        random.choices(string.ascii_lowercase + string.digits, k=50)
    )
    session_cls = patch("boto3.session.Session").start()
    conn = MagicMock()
    session_cls.return_value.client.return_value = conn
    conn.list_rules.return_value = {"Rules": []}
    absent = boto_cloudwatch_event.__states__["boto_cloudwatch_event.absent"]
    result = absent(
        name="test absent",
        Name=global_config.rule_name,
        **global_config.conn_parameters
    )
    assert result.get("result") is True
    assert result["changes"] == {}
def test_absent_when_failing_to_list_targets(global_config):
    """
    absent must fail when the rule exists but list_targets_by_rule raises
    a ClientError.
    """
    global_config.conn_parameters["key"] = "".join(
        random.choices(string.ascii_lowercase + string.digits, k=50)
    )
    session_cls = patch("boto3.session.Session").start()
    conn = MagicMock()
    session_cls.return_value.client.return_value = conn
    conn.list_rules.return_value = {"Rules": [global_config.rule_ret]}
    conn.list_targets_by_rule.side_effect = botocore.exceptions.ClientError(
        global_config.error_content, "list_targets"
    )
    absent = boto_cloudwatch_event.__states__["boto_cloudwatch_event.absent"]
    result = absent(
        name="test absent",
        Name=global_config.rule_name,
        **global_config.conn_parameters
    )
    assert result.get("result") is False
    assert "list_targets" in result.get("comment", "")
def test_absent_when_failing_to_remove_targets_exception(global_config):
    """
    absent must fail when remove_targets raises a ClientError.
    """
    global_config.conn_parameters["key"] = "".join(
        random.choices(string.ascii_lowercase + string.digits, k=50)
    )
    session_cls = patch("boto3.session.Session").start()
    conn = MagicMock()
    session_cls.return_value.client.return_value = conn
    conn.list_rules.return_value = {"Rules": [global_config.rule_ret]}
    conn.list_targets_by_rule.return_value = {"Targets": [{"Id": "target1"}]}
    conn.remove_targets.side_effect = botocore.exceptions.ClientError(
        global_config.error_content, "remove_targets"
    )
    absent = boto_cloudwatch_event.__states__["boto_cloudwatch_event.absent"]
    result = absent(
        name="test absent",
        Name=global_config.rule_name,
        **global_config.conn_parameters
    )
    assert result.get("result") is False
    assert "remove_targets" in result.get("comment", "")
def test_absent_when_failing_to_remove_targets_nonexception(global_config):
    """
    absent must fail when remove_targets reports failed entries without
    raising.
    """
    global_config.conn_parameters["key"] = "".join(
        random.choices(string.ascii_lowercase + string.digits, k=50)
    )
    session_cls = patch("boto3.session.Session").start()
    conn = MagicMock()
    session_cls.return_value.client.return_value = conn
    conn.list_rules.return_value = {"Rules": [global_config.rule_ret]}
    conn.list_targets_by_rule.return_value = {"Targets": [{"Id": "target1"}]}
    conn.remove_targets.return_value = {"FailedEntryCount": 1}
    absent = boto_cloudwatch_event.__states__["boto_cloudwatch_event.absent"]
    result = absent(
        name="test absent",
        Name=global_config.rule_name,
        **global_config.conn_parameters
    )
    assert result.get("result") is False
def test_absent_when_failing_to_delete_rule(global_config):
    """
    absent must fail when targets are removed but delete_rule raises a
    ClientError.
    """
    global_config.conn_parameters["key"] = "".join(
        random.choices(string.ascii_lowercase + string.digits, k=50)
    )
    session_cls = patch("boto3.session.Session").start()
    conn = MagicMock()
    session_cls.return_value.client.return_value = conn
    conn.list_rules.return_value = {"Rules": [global_config.rule_ret]}
    conn.list_targets_by_rule.return_value = {"Targets": [{"Id": "target1"}]}
    conn.remove_targets.return_value = {"FailedEntryCount": 0}
    conn.delete_rule.side_effect = botocore.exceptions.ClientError(
        global_config.error_content, "delete_rule"
    )
    absent = boto_cloudwatch_event.__states__["boto_cloudwatch_event.absent"]
    result = absent(
        name="test absent",
        Name=global_config.rule_name,
        **global_config.conn_parameters
    )
    assert result.get("result") is False
    assert "delete_rule" in result.get("comment", "")
def test_absent(global_config):
    """
    Happy path: an existing rule and its targets are removed successfully.
    """
    global_config.conn_parameters["key"] = "".join(
        random.choices(string.ascii_lowercase + string.digits, k=50)
    )
    session_cls = patch("boto3.session.Session").start()
    conn = MagicMock()
    session_cls.return_value.client.return_value = conn
    conn.list_rules.return_value = {"Rules": [global_config.rule_ret]}
    conn.list_targets_by_rule.return_value = {"Targets": [{"Id": "target1"}]}
    conn.remove_targets.return_value = {"FailedEntryCount": 0}
    absent = boto_cloudwatch_event.__states__["boto_cloudwatch_event.absent"]
    result = absent(
        name="test absent",
        Name=global_config.rule_name,
        **global_config.conn_parameters
    )
    assert result.get("result") is True
| |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012 Red Hat, Inc.
# Copyright 2013 Nicira, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urllib
from oslo_config import cfg
from nova import exception
from nova.i18n import _
from nova import utils
CONF = cfg.CONF
class SecurityGroupBase(object):
def __init__(self, skip_policy_check=False):
self.skip_policy_check = skip_policy_check
def parse_cidr(self, cidr):
if cidr:
try:
cidr = urllib.unquote(cidr).decode()
except Exception as e:
self.raise_invalid_cidr(cidr, e)
if not utils.is_valid_cidr(cidr):
self.raise_invalid_cidr(cidr)
return cidr
else:
return '0.0.0.0/0'
@staticmethod
def new_group_ingress_rule(grantee_group_id, protocol, from_port,
to_port):
return SecurityGroupBase._new_ingress_rule(
protocol, from_port, to_port, group_id=grantee_group_id)
@staticmethod
def new_cidr_ingress_rule(grantee_cidr, protocol, from_port, to_port):
return SecurityGroupBase._new_ingress_rule(
protocol, from_port, to_port, cidr=grantee_cidr)
@staticmethod
def _new_ingress_rule(ip_protocol, from_port, to_port,
group_id=None, cidr=None):
values = {}
if group_id:
values['group_id'] = group_id
# Open everything if an explicit port range or type/code are not
# specified, but only if a source group was specified.
ip_proto_upper = ip_protocol.upper() if ip_protocol else ''
if (ip_proto_upper == 'ICMP' and
from_port is None and to_port is None):
from_port = -1
to_port = -1
elif (ip_proto_upper in ['TCP', 'UDP'] and from_port is None
and to_port is None):
from_port = 1
to_port = 65535
elif cidr:
values['cidr'] = cidr
if ip_protocol and from_port is not None and to_port is not None:
ip_protocol = str(ip_protocol)
try:
# Verify integer conversions
from_port = int(from_port)
to_port = int(to_port)
except ValueError:
if ip_protocol.upper() == 'ICMP':
raise exception.InvalidInput(reason=_("Type and"
" Code must be integers for ICMP protocol type"))
else:
raise exception.InvalidInput(reason=_("To and From ports "
"must be integers"))
if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']:
raise exception.InvalidIpProtocol(protocol=ip_protocol)
# Verify that from_port must always be less than
# or equal to to_port
if (ip_protocol.upper() in ['TCP', 'UDP'] and
(from_port > to_port)):
raise exception.InvalidPortRange(from_port=from_port,
to_port=to_port, msg="Former value cannot"
" be greater than the later")
# Verify valid TCP, UDP port ranges
if (ip_protocol.upper() in ['TCP', 'UDP'] and
(from_port < 1 or to_port > 65535)):
raise exception.InvalidPortRange(from_port=from_port,
to_port=to_port, msg="Valid TCP ports should"
" be between 1-65535")
# Verify ICMP type and code
if (ip_protocol.upper() == "ICMP" and
(from_port < -1 or from_port > 255 or
to_port < -1 or to_port > 255)):
raise exception.InvalidPortRange(from_port=from_port,
to_port=to_port, msg="For ICMP, the"
" type:code must be valid")
values['protocol'] = ip_protocol
values['from_port'] = from_port
values['to_port'] = to_port
else:
# If cidr based filtering, protocol and ports are mandatory
if cidr:
return None
return values
def create_security_group_rule(self, context, security_group, new_rule):
if self.rule_exists(security_group, new_rule):
msg = (_('This rule already exists in group %s') %
new_rule['parent_group_id'])
self.raise_group_already_exists(msg)
return self.add_rules(context, new_rule['parent_group_id'],
security_group['name'],
[new_rule])[0]
def rule_exists(self, security_group, new_rule):
"""Indicates whether the specified rule is already
defined in the given security group.
"""
for rule in security_group['rules']:
keys = ('group_id', 'cidr', 'from_port', 'to_port', 'protocol')
for key in keys:
if rule.get(key) != new_rule.get(key):
break
else:
return rule.get('id') or True
return False
def validate_property(self, value, property, allowed):
pass
def ensure_default(self, context):
pass
def trigger_handler(self, event, *args):
pass
def trigger_rules_refresh(self, context, id):
"""Called when a rule is added to or removed from a security_group."""
pass
def trigger_members_refresh(self, context, group_ids):
"""Called when a security group gains a new or loses a member.
Sends an update request to each compute node for each instance for
which this is relevant.
"""
pass
def populate_security_groups(self, instance, security_groups):
"""Called when populating the database for an instances
security groups.
"""
raise NotImplementedError()
def create_security_group(self, context, name, description):
raise NotImplementedError()
def update_security_group(self, context, security_group,
name, description):
raise NotImplementedError()
def get(self, context, name=None, id=None, map_exception=False):
raise NotImplementedError()
def list(self, context, names=None, ids=None, project=None,
search_opts=None):
raise NotImplementedError()
def destroy(self, context, security_group):
raise NotImplementedError()
def add_rules(self, context, id, name, vals):
raise NotImplementedError()
def remove_rules(self, context, security_group, rule_ids):
raise NotImplementedError()
def get_rule(self, context, id):
raise NotImplementedError()
def get_instance_security_groups(self, context, instance_uuid,
detailed=False):
raise NotImplementedError()
def add_to_instance(self, context, instance, security_group_name):
"""Add security group to the instance.
:param context: The request context.
:param instance: nova.objects.instance.Instance object.
:param security_group_name: security group name to add
"""
raise NotImplementedError()
def remove_from_instance(self, context, instance, security_group_name):
"""Remove the security group associated with the instance.
:param context: The request context.
:param instance: nova.objects.instance.Instance object.
:param security_group_name: security group name to remove
"""
raise NotImplementedError()
@staticmethod
def raise_invalid_property(msg):
raise exception.Invalid(msg)
@staticmethod
def raise_group_already_exists(msg):
raise exception.Invalid(msg)
@staticmethod
def raise_invalid_group(msg):
raise exception.Invalid(msg)
@staticmethod
def raise_invalid_cidr(cidr, decoding_exception=None):
raise exception.InvalidCidr(cidr=cidr)
@staticmethod
def raise_over_quota(msg):
raise exception.SecurityGroupLimitExceeded(msg)
@staticmethod
def raise_not_found(msg):
    """Translate a missing-group error into SecurityGroupNotFound."""
    raise exception.SecurityGroupNotFound(msg)
| |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from lxml import etree as ET
except ImportError:
from xml.etree import ElementTree as ET
import sys
from libcloud.utils.py3 import httplib
from libcloud.common.dimensiondata import DimensionDataAPIException
from libcloud.common.types import InvalidCredsError
from libcloud.backup.base import BackupTargetJob
from libcloud.backup.drivers.dimensiondata import DimensionDataBackupDriver as DimensionData
from libcloud.backup.drivers.dimensiondata import DEFAULT_BACKUP_PLAN
from libcloud.test import MockHttp, unittest
from libcloud.test.file_fixtures import BackupFileFixtures
from libcloud.test.secrets import DIMENSIONDATA_PARAMS
class DimensionData_v2_3_Tests(unittest.TestCase):
    """Backup-driver tests against the DimensionData 2.3 API.

    All HTTP traffic is served from canned fixtures by
    DimensionDataMockHttp; the per-test mock behaviour is selected via
    ``DimensionDataMockHttp.type``.
    """

    def setUp(self):
        # Pin the API version and route requests through the mock layer.
        DimensionData.connectionCls.active_api_version = '2.3'
        DimensionData.connectionCls.conn_class = DimensionDataMockHttp
        DimensionDataMockHttp.type = None
        self.driver = DimensionData(*DIMENSIONDATA_PARAMS)

    def test_invalid_region(self):
        with self.assertRaises(ValueError):
            self.driver = DimensionData(*DIMENSIONDATA_PARAMS, region='blah')

    def test_invalid_creds(self):
        DimensionDataMockHttp.type = 'UNAUTHORIZED'
        with self.assertRaises(InvalidCredsError):
            self.driver.list_targets()

    def test_list_targets(self):
        targets = self.driver.list_targets()
        self.assertEqual(len(targets), 2)
        self.assertEqual(targets[0].id, '5579f3a7-4c32-4cf5-8a7e-b45c36a35c10')
        self.assertEqual(targets[0].address, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
        self.assertEqual(targets[0].extra['servicePlan'], 'Enterprise')

    def test_create_target(self):
        target = self.driver.create_target(
            'name',
            'e75ead52-692f-4314-8725-c8a4f4d13a87',
            extra={'servicePlan': 'Enterprise'})
        self.assertEqual(target.id, 'ee7c4b64-f7af-4a4f-8384-be362273530f')
        self.assertEqual(target.address, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
        self.assertEqual(target.extra['servicePlan'], 'Enterprise')

    def test_create_target_DEFAULT(self):
        DimensionDataMockHttp.type = 'DEFAULT'
        target = self.driver.create_target(
            'name',
            'e75ead52-692f-4314-8725-c8a4f4d13a87')
        self.assertEqual(target.id, 'ee7c4b64-f7af-4a4f-8384-be362273530f')
        self.assertEqual(target.address, 'e75ead52-692f-4314-8725-c8a4f4d13a87')

    def test_create_target_EXISTS(self):
        DimensionDataMockHttp.type = 'EXISTS'
        with self.assertRaises(DimensionDataAPIException) as context:
            self.driver.create_target(
                'name',
                'e75ead52-692f-4314-8725-c8a4f4d13a87',
                extra={'servicePlan': 'Enterprise'})
        self.assertEqual(context.exception.code, 'ERROR')
        self.assertEqual(
            context.exception.msg,
            'Cloud backup for this server is already enabled or being enabled (state: NORMAL).')

    def test_update_target(self):
        target = self.driver.list_targets()[0]
        extra = {'servicePlan': 'Essentials'}
        new_target = self.driver.update_target(target, extra=extra)
        self.assertEqual(new_target.extra['servicePlan'], 'Essentials')

    def test_update_target_DEFAULT(self):
        DimensionDataMockHttp.type = 'DEFAULT'
        target = 'e75ead52-692f-4314-8725-c8a4f4d13a87'
        self.driver.update_target(target)

    def test_update_target_STR(self):
        target = 'e75ead52-692f-4314-8725-c8a4f4d13a87'
        extra = {'servicePlan': 'Essentials'}
        new_target = self.driver.update_target(target, extra=extra)
        self.assertEqual(new_target.extra['servicePlan'], 'Essentials')

    def test_delete_target(self):
        target = self.driver.list_targets()[0]
        self.assertTrue(self.driver.delete_target(target))

    def test_ex_add_client_to_target(self):
        target = self.driver.list_targets()[0]
        client = self.driver.ex_list_available_client_types(target)[0]
        storage_policy = self.driver.ex_list_available_storage_policies(target)[0]
        schedule_policy = self.driver.ex_list_available_schedule_policies(target)[0]
        self.assertTrue(
            self.driver.ex_add_client_to_target(target, client, storage_policy,
                                                schedule_policy, 'ON_FAILURE', 'nobody@example.com')
        )

    def test_ex_add_client_to_target_STR(self):
        self.assertTrue(
            self.driver.ex_add_client_to_target('e75ead52-692f-4314-8725-c8a4f4d13a87', 'FA.Linux', '14 Day Storage Policy',
                                                '12AM - 6AM', 'ON_FAILURE', 'nobody@example.com')
        )

    def test_ex_get_backup_details_for_target(self):
        target = self.driver.list_targets()[0]
        response = self.driver.ex_get_backup_details_for_target(target)
        self.assertEqual(response.service_plan, 'Enterprise')
        client = response.clients[0]
        self.assertEqual(client.id, '30b1ff76-c76d-4d7c-b39d-3b72be0384c8')
        self.assertEqual(client.type.type, 'FA.Linux')
        self.assertEqual(client.running_job.progress, 5)
        self.assertTrue(isinstance(client.running_job, BackupTargetJob))
        self.assertEqual(len(client.alert.notify_list), 2)
        self.assertTrue(isinstance(client.alert.notify_list, list))

    def test_ex_get_backup_details_for_target_NOBACKUP(self):
        target = self.driver.list_targets()[0].address
        DimensionDataMockHttp.type = 'NOBACKUP'
        response = self.driver.ex_get_backup_details_for_target(target)
        self.assertTrue(response is None)

    def test_ex_cancel_target_job(self):
        target = self.driver.list_targets()[0]
        response = self.driver.ex_get_backup_details_for_target(target)
        client = response.clients[0]
        self.assertTrue(isinstance(client.running_job, BackupTargetJob))
        success = client.running_job.cancel()
        self.assertTrue(success)

    def test_ex_cancel_target_job_with_extras(self):
        success = self.driver.cancel_target_job(
            None,
            ex_client='30b1ff76_c76d_4d7c_b39d_3b72be0384c8',
            ex_target='e75ead52_692f_4314_8725_c8a4f4d13a87'
        )
        self.assertTrue(success)

    def test_ex_cancel_target_job_FAIL(self):
        DimensionDataMockHttp.type = 'FAIL'
        with self.assertRaises(DimensionDataAPIException) as context:
            self.driver.cancel_target_job(
                None,
                ex_client='30b1ff76_c76d_4d7c_b39d_3b72be0384c8',
                ex_target='e75ead52_692f_4314_8725_c8a4f4d13a87'
            )
        self.assertEqual(context.exception.code, 'ERROR')

    def test_ex_get_backup_details_for_target_NO_CLIENT(self):
        """Backup info for a target that does not have a client."""
        DimensionDataMockHttp.type = 'NOCLIENT'
        response = self.driver.ex_get_backup_details_for_target('e75ead52-692f-4314-8725-c8a4f4d13a87')
        self.assertEqual(response.service_plan, 'Essentials')
        self.assertEqual(len(response.clients), 0)

    def test_ex_get_backup_details_for_target_NO_JOB_OR_ALERT(self):
        """Backup details that has a client, but no alerting or running jobs."""
        DimensionDataMockHttp.type = 'NOJOB'
        # NOTE: the id previously read '...4314_8725...' (underscore typo);
        # the mock URL sanitizer maps both forms to the same handler, so
        # normalizing to the dashed form preserves behaviour.
        response = self.driver.ex_get_backup_details_for_target('e75ead52-692f-4314-8725-c8a4f4d13a87')
        self.assertEqual(response.service_plan, 'Enterprise')
        self.assertTrue(isinstance(response.clients, list))
        self.assertEqual(len(response.clients), 1)
        client = response.clients[0]
        self.assertEqual(client.id, '30b1ff76-c76d-4d7c-b39d-3b72be0384c8')
        self.assertEqual(client.type.type, 'FA.Linux')
        self.assertIsNone(client.running_job)
        self.assertIsNone(client.alert)

    def test_ex_get_backup_details_for_target_DISABLED(self):
        """Backup info for a server that has not been provisioned for backup."""
        DimensionDataMockHttp.type = 'DISABLED'
        with self.assertRaises(DimensionDataAPIException) as context:
            self.driver.ex_get_backup_details_for_target('e75ead52-692f-4314-8725-c8a4f4d13a87')
        self.assertEqual(context.exception.code, 'ERROR')
        self.assertEqual(
            context.exception.msg,
            'Server e75ead52-692f-4314-8725-c8a4f4d13a87 has not been provisioned for backup')

    def test_ex_list_available_client_types(self):
        target = self.driver.list_targets()[0]
        answer = self.driver.ex_list_available_client_types(target)
        self.assertEqual(len(answer), 1)
        self.assertEqual(answer[0].type, 'FA.Linux')
        self.assertEqual(answer[0].is_file_system, True)
        self.assertEqual(answer[0].description, 'Linux File system')

    def test_ex_list_available_storage_policies(self):
        target = self.driver.list_targets()[0]
        answer = self.driver.ex_list_available_storage_policies(target)
        self.assertEqual(len(answer), 1)
        self.assertEqual(answer[0].name,
                         '30 Day Storage Policy + Secondary Copy')
        self.assertEqual(answer[0].retention_period, 30)
        self.assertEqual(answer[0].secondary_location, 'Primary')

    def test_ex_list_available_schedule_policies(self):
        target = self.driver.list_targets()[0]
        answer = self.driver.ex_list_available_schedule_policies(target)
        self.assertEqual(len(answer), 1)
        self.assertEqual(answer[0].name, '12AM - 6AM')
        self.assertEqual(answer[0].description, 'Daily backup will start between 12AM - 6AM')

    def test_ex_remove_client_from_target(self):
        target = self.driver.list_targets()[0]
        client = self.driver.ex_get_backup_details_for_target('e75ead52-692f-4314-8725-c8a4f4d13a87').clients[0]
        self.assertTrue(self.driver.ex_remove_client_from_target(target, client))

    def test_ex_remove_client_from_target_STR(self):
        self.assertTrue(
            self.driver.ex_remove_client_from_target(
                'e75ead52-692f-4314-8725-c8a4f4d13a87',
                '30b1ff76-c76d-4d7c-b39d-3b72be0384c8'
            )
        )

    def test_ex_remove_client_from_target_FAIL(self):
        DimensionDataMockHttp.type = 'FAIL'
        with self.assertRaises(DimensionDataAPIException) as context:
            self.driver.ex_remove_client_from_target(
                'e75ead52-692f-4314-8725-c8a4f4d13a87',
                '30b1ff76-c76d-4d7c-b39d-3b72be0384c8'
            )
        self.assertEqual(context.exception.code, 'ERROR')
        self.assertTrue('Backup Client is currently performing another operation' in context.exception.msg)

    def test_priv_target_to_target_address(self):
        target = self.driver.list_targets()[0]
        self.assertEqual(
            self.driver._target_to_target_address(target),
            'e75ead52-692f-4314-8725-c8a4f4d13a87'
        )

    def test_priv_target_to_target_address_STR(self):
        self.assertEqual(
            self.driver._target_to_target_address('e75ead52-692f-4314-8725-c8a4f4d13a87'),
            'e75ead52-692f-4314-8725-c8a4f4d13a87'
        )

    def test_priv_target_to_target_address_TYPEERROR(self):
        with self.assertRaises(TypeError):
            self.driver._target_to_target_address([1, 2, 3])

    def test_priv_client_to_client_id(self):
        client = self.driver.ex_get_backup_details_for_target('e75ead52-692f-4314-8725-c8a4f4d13a87').clients[0]
        self.assertEqual(
            self.driver._client_to_client_id(client),
            '30b1ff76-c76d-4d7c-b39d-3b72be0384c8'
        )

    def test_priv_client_to_client_id_STR(self):
        self.assertEqual(
            self.driver._client_to_client_id('30b1ff76-c76d-4d7c-b39d-3b72be0384c8'),
            '30b1ff76-c76d-4d7c-b39d-3b72be0384c8'
        )

    def test_priv_client_to_client_id_TYPEERROR(self):
        with self.assertRaises(TypeError):
            self.driver._client_to_client_id([1, 2, 3])
class InvalidRequestError(Exception):
    """Raised by the mock HTTP layer when a test request is malformed."""

    def __init__(self, tag):
        message = "Invalid Request - %s" % tag
        super(InvalidRequestError, self).__init__(message)
class DimensionDataMockHttp(MockHttp):
    """Mock HTTP layer serving canned XML fixtures for the backup tests.

    MockHttp dispatches each request to a method named after the
    sanitized URL path, suffixed with the current ``type`` attribute.
    """

    fixtures = BackupFileFixtures('dimensiondata')

    def _oec_0_9_myaccount_UNAUTHORIZED(self, method, url, body, headers):
        return (httplib.UNAUTHORIZED, "", {}, httplib.responses[httplib.UNAUTHORIZED])

    def _oec_0_9_myaccount(self, method, url, body, headers):
        body = self.fixtures.load('oec_0_9_myaccount.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_myaccount_EXISTS(self, method, url, body, headers):
        body = self.fixtures.load('oec_0_9_myaccount.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_myaccount_DEFAULT(self, method, url, body, headers):
        body = self.fixtures.load('oec_0_9_myaccount.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_myaccount_INPROGRESS(self, method, url, body, headers):
        body = self.fixtures.load('oec_0_9_myaccount.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_myaccount_FAIL(self, method, url, body, headers):
        body = self.fixtures.load('oec_0_9_myaccount.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_myaccount_NOCLIENT(self, method, url, body, headers):
        body = self.fixtures.load('oec_0_9_myaccount.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_myaccount_DISABLED(self, method, url, body, headers):
        body = self.fixtures.load('oec_0_9_myaccount.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_myaccount_NOJOB(self, method, url, body, headers):
        body = self.fixtures.load('oec_0_9_myaccount.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87(self, method, url, body, headers):
        body = self.fixtures.load(
            'server_server_e75ead52_692f_4314_8725_c8a4f4d13a87.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_DEFAULT(self, method, url, body, headers):
        body = self.fixtures.load(
            'server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_DEFAULT.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_NOCLIENT(self, method, url, body, headers):
        body = self.fixtures.load(
            'server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_DEFAULT.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_NOJOB(self, method, url, body, headers):
        body = self.fixtures.load(
            'server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_DEFAULT.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_DISABLED(self, method, url, body, headers):
        body = self.fixtures.load(
            'server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_DEFAULT.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server(self, method, url, body, headers):
        body = self.fixtures.load(
            'server_server.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_client_type(self, method, url, body, headers):
        body = self.fixtures.load(
            '_backup_client_type.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_client_storagePolicy(
            self, method, url, body, headers):
        body = self.fixtures.load(
            '_backup_client_storagePolicy.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_client_schedulePolicy(
            self, method, url, body, headers):
        body = self.fixtures.load(
            '_backup_client_schedulePolicy.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_client(
            self, method, url, body, headers):
        if method == 'POST':
            body = self.fixtures.load(
                '_backup_client_SUCCESS_PUT.xml')
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])
        else:
            raise ValueError("Unknown Method {0}".format(method))

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_NOCLIENT(
            self, method, url, body, headers):
        # only GETs are implemented
        # If we get any other method something has gone wrong
        assert method == 'GET'
        body = self.fixtures.load(
            '_backup_INFO_NOCLIENT.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_DISABLED(
            self, method, url, body, headers):
        # only GETs are implemented
        # If we get any other method something has gone wrong
        assert method == 'GET'
        body = self.fixtures.load(
            '_backup_INFO_DISABLED.xml')
        return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_NOJOB(
            self, method, url, body, headers):
        # only GETs are implemented
        # If we get any other method something has gone wrong
        assert method == 'GET'
        body = self.fixtures.load(
            '_backup_INFO_NOJOB.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_DEFAULT(
            self, method, url, body, headers):
        if method != 'POST':
            raise InvalidRequestError('Only POST is accepted for this test')
        request = ET.fromstring(body)
        service_plan = request.get('servicePlan')
        if service_plan != DEFAULT_BACKUP_PLAN:
            raise InvalidRequestError('The default plan %s should have been passed in. Not %s' % (DEFAULT_BACKUP_PLAN, service_plan))
        body = self.fixtures.load(
            '_backup_ENABLE.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup(
            self, method, url, body, headers):
        if method == 'POST':
            body = self.fixtures.load(
                '_backup_ENABLE.xml')
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])
        elif method == 'GET':
            if url.endswith('disable'):
                body = self.fixtures.load(
                    '_backup_DISABLE.xml')
                return (httplib.OK, body, {}, httplib.responses[httplib.OK])
            body = self.fixtures.load(
                '_backup_INFO.xml')
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])
        else:
            raise ValueError("Unknown Method {0}".format(method))

    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87_NOBACKUP(
            self, method, url, body, headers):
        assert method == 'GET'
        body = self.fixtures.load('server_server_NOBACKUP.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_EXISTS(
            self, method, url, body, headers):
        # only POSTs are implemented
        # If we get any other method something has gone wrong
        assert method == 'POST'
        body = self.fixtures.load(
            '_backup_EXISTS.xml')
        return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_modify(
            self, method, url, body, headers):
        request = ET.fromstring(body)
        service_plan = request.get('servicePlan')
        if service_plan != 'Essentials':
            raise InvalidRequestError("Expected Essentials backup plan in request")
        body = self.fixtures.load('_backup_modify.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_modify_DEFAULT(
            self, method, url, body, headers):
        request = ET.fromstring(body)
        service_plan = request.get('servicePlan')
        if service_plan != DEFAULT_BACKUP_PLAN:
            # BUG FIX: the original format string was "Expected % backup
            # plan in test", which raised "ValueError: unsupported format
            # character" instead of the intended InvalidRequestError.
            raise InvalidRequestError(
                "Expected %s backup plan in test" % DEFAULT_BACKUP_PLAN)
        body = self.fixtures.load('_backup_modify.xml')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_client_30b1ff76_c76d_4d7c_b39d_3b72be0384c8(
            self, method, url, body, headers):
        if url.endswith('disable'):
            body = self.fixtures.load(
                '_remove_backup_client.xml')
        elif url.endswith('cancelJob'):
            body = self.fixtures.load(
                '_backup_client_30b1ff76_c76d_4d7c_b39d_3b72be0384c8_cancelJob.xml')
        else:
            raise ValueError("Unknown URL: %s" % url)
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_backup_client_30b1ff76_c76d_4d7c_b39d_3b72be0384c8_FAIL(
            self, method, url, body, headers):
        if url.endswith('disable'):
            body = self.fixtures.load(
                '_remove_backup_client_FAIL.xml')
        elif url.endswith('cancelJob'):
            body = self.fixtures.load(
                '_backup_client_30b1ff76_c76d_4d7c_b39d_3b72be0384c8_cancelJob_FAIL.xml')
        else:
            raise ValueError("Unknown URL: %s" % url)
        return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
# Allow running this module directly; propagate unittest's exit status.
if __name__ == '__main__':
    sys.exit(unittest.main())
| |
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import time
import zlib
import mock
from oslo_log import log as logging
from oslo_serialization import base64
from oslo_utils import timeutils
from nova.compute import api as compute_api
from nova.compute import rpcapi
from nova import context
from nova import exception
from nova import objects
from nova.objects import block_device as block_device_obj
from nova import test
from nova.tests.functional.api import client
from nova.tests.functional import integrated_helpers
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_network
from nova import volume
LOG = logging.getLogger(__name__)
class ServersTestBase(integrated_helpers._IntegratedTestBase):
    """Shared plumbing for the v2 server-API functional tests."""

    api_major_version = 'v2'
    _force_delete_parameter = 'forceDelete'
    _image_ref_parameter = 'imageRef'
    _flavor_ref_parameter = 'flavorRef'
    _access_ipv4_parameter = 'accessIPv4'
    _access_ipv6_parameter = 'accessIPv6'
    _return_resv_id_parameter = 'return_reservation_id'
    _min_count_parameter = 'min_count'

    def setUp(self):
        super(ServersTestBase, self).setUp()
        # Server creation calls into the network service, but the test db
        # has no networks populated; the networks are irrelevant to what is
        # being tested, so stub the network methods out.
        fake_network.set_stub_network_methods(self)
        self.conductor = self.start_service(
            'conductor', manager='nova.conductor.manager.ConductorManager')

    def _wait_for_state_change(self, server, from_status):
        """Poll until the server leaves *from_status* (up to ~5 seconds)."""
        for _ in range(50):
            server = self.api.get_server(server['id'])
            if server['status'] != from_status:
                break
            time.sleep(.1)
        return server

    def _wait_for_deletion(self, server_id):
        """Poll briefly until a GET on the server returns 404."""
        found_server = None
        for _ in range(50):
            try:
                found_server = self.api.get_server(server_id)
            except client.OpenStackApiNotFoundException:
                found_server = None
                LOG.debug("Got 404, proceeding")
                break
            LOG.debug("Found_server=%s" % found_server)
            # TODO(justinsb): Mock doesn't yet do accurate state changes
            # if found_server['status'] != 'deleting':
            #     break
            time.sleep(.1)
        # Should be gone
        self.assertFalse(found_server)

    def _delete_server(self, server_id):
        """Delete the server and wait for it to disappear."""
        self.api.delete_server(server_id)
        self._wait_for_deletion(server_id)

    def _get_access_ips_params(self):
        """Return request parameters setting both access IPs."""
        return {self._access_ipv4_parameter: "172.19.0.2",
                self._access_ipv6_parameter: "fe80::2"}

    def _verify_access_ips(self, server):
        """Assert the server carries the access IPs set above."""
        self.assertEqual('172.19.0.2',
                         server[self._access_ipv4_parameter])
        self.assertEqual('fe80::2', server[self._access_ipv6_parameter])
class ServersTest(ServersTestBase):
def test_get_servers(self):
    """Simple check that listing servers works."""
    servers = self.api.get_servers()
    for server in servers:
        # Lazy %-args: the string is only formatted when DEBUG is enabled.
        LOG.debug("server: %s", server)
def test_create_server_with_error(self):
    """Create a server whose spawn fails and verify it enters ERROR."""

    def throw_error(*args, **kwargs):
        raise exception.BuildAbortException(reason='',
                                            instance_uuid='fake')

    self.stub_out('nova.virt.fake.FakeDriver.spawn', throw_error)

    request = self._build_minimal_create_server_request()
    created = self.api.post_server({"server": request})
    server_id = created['id']

    fetched = self.api.get_server(server_id)
    self.assertEqual(server_id, fetched['id'])

    fetched = self._wait_for_state_change(fetched, 'BUILD')
    self.assertEqual('ERROR', fetched['status'])

    self._delete_server(server_id)
def test_create_and_delete_server(self):
    """Create a server incrementally, checking validation errors along
    the way, then verify it is listed, becomes ACTIVE, and deletes.
    """
    # Build the server data gradually, checking errors along the way
    server = {}
    good_server = self._build_minimal_create_server_request()

    post = {'server': server}

    # Without an imageRef, this throws 500.
    # TODO(justinsb): Check whatever the spec says should be thrown here
    self.assertRaises(client.OpenStackApiException,
                      self.api.post_server, post)

    # With an invalid imageRef, this throws 500.
    server[self._image_ref_parameter] = self.get_invalid_image()
    # TODO(justinsb): Check whatever the spec says should be thrown here
    self.assertRaises(client.OpenStackApiException,
                      self.api.post_server, post)

    # Add a valid imageRef
    server[self._image_ref_parameter] = good_server.get(
        self._image_ref_parameter)

    # Without flavorRef, this throws 500
    # TODO(justinsb): Check whatever the spec says should be thrown here
    self.assertRaises(client.OpenStackApiException,
                      self.api.post_server, post)

    server[self._flavor_ref_parameter] = good_server.get(
        self._flavor_ref_parameter)

    # Without a name, this throws 500
    # TODO(justinsb): Check whatever the spec says should be thrown here
    self.assertRaises(client.OpenStackApiException,
                      self.api.post_server, post)

    # Set a valid server name
    server['name'] = good_server['name']

    created_server = self.api.post_server(post)
    # Lazy %-args instead of eager "%" formatting in the log call.
    LOG.debug("created_server: %s", created_server)
    self.assertTrue(created_server['id'])
    created_server_id = created_server['id']

    # Check it's there
    found_server = self.api.get_server(created_server_id)
    self.assertEqual(created_server_id, found_server['id'])

    # It should also be in the all-servers list
    servers = self.api.get_servers()
    server_ids = [s['id'] for s in servers]
    self.assertIn(created_server_id, server_ids)

    found_server = self._wait_for_state_change(found_server, 'BUILD')
    # It should be available...
    # TODO(justinsb): Mock doesn't yet do this...
    self.assertEqual('ACTIVE', found_server['status'])
    servers = self.api.get_servers(detail=True)
    for server in servers:
        self.assertIn("image", server)
        self.assertIn("flavor", server)

    self._delete_server(created_server_id)
def _force_reclaim(self):
    """Advance the service clock so queued deletes look expired."""
    # Jump an hour ahead so the compute manager considers the
    # soft-deleted instance old enough to be reclaimed.
    override = timeutils.utcnow() + datetime.timedelta(hours=1)
    timeutils.set_time_override(override_time=override)
    self.addCleanup(timeutils.clear_time_override)
    admin_ctxt = context.get_admin_context()
    self.compute._reclaim_queued_deletes(admin_ctxt)
def test_deferred_delete(self):
    """Creates, deletes and waits for server to be reclaimed."""
    self.flags(reclaim_instance_interval=1)

    # Create server
    server = self._build_minimal_create_server_request()

    created_server = self.api.post_server({'server': server})
    # Lazy %-args instead of eager "%" formatting in the log call.
    LOG.debug("created_server: %s", created_server)
    self.assertTrue(created_server['id'])
    created_server_id = created_server['id']

    # Wait for it to finish being created
    found_server = self._wait_for_state_change(created_server, 'BUILD')

    # It should be available...
    self.assertEqual('ACTIVE', found_server['status'])

    # Cannot restore unless instance is deleted
    self.assertRaises(client.OpenStackApiException,
                      self.api.post_server_action, created_server_id,
                      {'restore': {}})

    # Delete the server
    self.api.delete_server(created_server_id)

    # Wait for queued deletion
    found_server = self._wait_for_state_change(found_server, 'ACTIVE')
    self.assertEqual('SOFT_DELETED', found_server['status'])

    self._force_reclaim()

    # Wait for real deletion
    self._wait_for_deletion(created_server_id)
def test_deferred_delete_restore(self):
    """Creates, deletes and restores a server."""
    self.flags(reclaim_instance_interval=3600)

    # Create server
    server = self._build_minimal_create_server_request()

    created_server = self.api.post_server({'server': server})
    # Lazy %-args instead of eager "%" formatting in the log call.
    LOG.debug("created_server: %s", created_server)
    self.assertTrue(created_server['id'])
    created_server_id = created_server['id']

    # Wait for it to finish being created
    found_server = self._wait_for_state_change(created_server, 'BUILD')

    # It should be available...
    self.assertEqual('ACTIVE', found_server['status'])

    # Delete the server
    self.api.delete_server(created_server_id)

    # Wait for queued deletion
    found_server = self._wait_for_state_change(found_server, 'ACTIVE')
    self.assertEqual('SOFT_DELETED', found_server['status'])

    # Restore server
    self.api.post_server_action(created_server_id, {'restore': {}})

    # Wait for server to become active again
    found_server = self._wait_for_state_change(found_server, 'DELETED')
    self.assertEqual('ACTIVE', found_server['status'])
def test_deferred_delete_force(self):
    """Creates, deletes and force deletes a server."""
    self.flags(reclaim_instance_interval=3600)

    # Create server
    server = self._build_minimal_create_server_request()

    created_server = self.api.post_server({'server': server})
    # Lazy %-args instead of eager "%" formatting in the log call.
    LOG.debug("created_server: %s", created_server)
    self.assertTrue(created_server['id'])
    created_server_id = created_server['id']

    # Wait for it to finish being created
    found_server = self._wait_for_state_change(created_server, 'BUILD')

    # It should be available...
    self.assertEqual('ACTIVE', found_server['status'])

    # Delete the server
    self.api.delete_server(created_server_id)

    # Wait for queued deletion
    found_server = self._wait_for_state_change(found_server, 'ACTIVE')
    self.assertEqual('SOFT_DELETED', found_server['status'])

    # Force delete server
    self.api.post_server_action(created_server_id,
                                {self._force_delete_parameter: {}})

    # Wait for real deletion
    self._wait_for_deletion(created_server_id)
def test_create_server_with_metadata(self):
    """Creates a server with metadata and checks detail/summary views."""
    # Build the server data gradually, checking errors along the way
    server = self._build_minimal_create_server_request()

    metadata = {}
    for i in range(30):
        metadata['key_%s' % i] = 'value_%s' % i

    server['metadata'] = metadata

    post = {'server': server}
    created_server = self.api.post_server(post)
    # Lazy %-args instead of eager "%" formatting in the log call.
    LOG.debug("created_server: %s", created_server)
    self.assertTrue(created_server['id'])
    created_server_id = created_server['id']

    found_server = self.api.get_server(created_server_id)
    self.assertEqual(created_server_id, found_server['id'])
    self.assertEqual(metadata, found_server.get('metadata'))

    # The server should also be in the all-servers details list
    servers = self.api.get_servers(detail=True)
    server_map = {server['id']: server for server in servers}
    found_server = server_map.get(created_server_id)
    self.assertTrue(found_server)
    # Details do include metadata
    self.assertEqual(metadata, found_server.get('metadata'))

    # The server should also be in the all-servers summary list
    servers = self.api.get_servers(detail=False)
    server_map = {server['id']: server for server in servers}
    found_server = server_map.get(created_server_id)
    self.assertTrue(found_server)
    # Summary should not include metadata
    self.assertFalse(found_server.get('metadata'))

    # Cleanup
    self._delete_server(created_server_id)
def test_server_metadata_actions_negative_invalid_state(self):
# Create server with metadata
server = self._build_minimal_create_server_request()
metadata = {'key_1': 'value_1'}
server['metadata'] = metadata
post = {'server': server}
created_server = self.api.post_server(post)
found_server = self._wait_for_state_change(created_server, 'BUILD')
self.assertEqual('ACTIVE', found_server['status'])
self.assertEqual(metadata, found_server.get('metadata'))
server_id = found_server['id']
# Change status from ACTIVE to SHELVED for negative test
self.flags(shelved_offload_time = -1)
self.api.post_server_action(server_id, {'shelve': {}})
found_server = self._wait_for_state_change(found_server, 'ACTIVE')
self.assertEqual('SHELVED', found_server['status'])
metadata = {'key_2': 'value_2'}
# Update Metadata item in SHELVED (not ACTIVE, etc.)
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_metadata,
server_id, metadata)
self.assertEqual(409, ex.response.status_code)
self.assertEqual('SHELVED', found_server['status'])
# Delete Metadata item in SHELVED (not ACTIVE, etc.)
ex = self.assertRaises(client.OpenStackApiException,
self.api.delete_server_metadata,
server_id, 'key_1')
self.assertEqual(409, ex.response.status_code)
self.assertEqual('SHELVED', found_server['status'])
# Cleanup
self._delete_server(server_id)
def test_create_and_rebuild_server(self):
# Rebuild a server with metadata.
# create a server with initially has no metadata
server = self._build_minimal_create_server_request()
server_post = {'server': server}
metadata = {}
for i in range(30):
metadata['key_%s' % i] = 'value_%s' % i
server_post['server']['metadata'] = metadata
created_server = self.api.post_server(server_post)
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
created_server = self._wait_for_state_change(created_server, 'BUILD')
# rebuild the server with metadata and other server attributes
post = {}
post['rebuild'] = {
self._image_ref_parameter: "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
"name": "blah",
self._access_ipv4_parameter: "172.19.0.2",
self._access_ipv6_parameter: "fe80::2",
"metadata": {'some': 'thing'},
}
post['rebuild'].update(self._get_access_ips_params())
self.api.post_server_action(created_server_id, post)
LOG.debug("rebuilt server: %s" % created_server)
self.assertTrue(created_server['id'])
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
self.assertEqual({'some': 'thing'}, found_server.get('metadata'))
self.assertEqual('blah', found_server.get('name'))
self.assertEqual(post['rebuild'][self._image_ref_parameter],
found_server.get('image')['id'])
self._verify_access_ips(found_server)
# rebuild the server with empty metadata and nothing else
post = {}
post['rebuild'] = {
self._image_ref_parameter: "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
"metadata": {},
}
self.api.post_server_action(created_server_id, post)
LOG.debug("rebuilt server: %s" % created_server)
self.assertTrue(created_server['id'])
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
self.assertEqual({}, found_server.get('metadata'))
self.assertEqual('blah', found_server.get('name'))
self.assertEqual(post['rebuild'][self._image_ref_parameter],
found_server.get('image')['id'])
self._verify_access_ips(found_server)
# Cleanup
self._delete_server(created_server_id)
def test_rename_server(self):
# Test building and renaming a server.
# Create a server
server = self._build_minimal_create_server_request()
created_server = self.api.post_server({'server': server})
LOG.debug("created_server: %s" % created_server)
server_id = created_server['id']
self.assertTrue(server_id)
# Rename the server to 'new-name'
self.api.put_server(server_id, {'server': {'name': 'new-name'}})
# Check the name of the server
created_server = self.api.get_server(server_id)
self.assertEqual(created_server['name'], 'new-name')
# Cleanup
self._delete_server(server_id)
def test_create_multiple_servers(self):
# Creates multiple servers and checks for reservation_id.
# Create 2 servers, setting 'return_reservation_id, which should
# return a reservation_id
server = self._build_minimal_create_server_request()
server[self._min_count_parameter] = 2
server[self._return_resv_id_parameter] = True
post = {'server': server}
response = self.api.post_server(post)
self.assertIn('reservation_id', response)
reservation_id = response['reservation_id']
self.assertNotIn(reservation_id, ['', None])
# Create 1 more server, which should not return a reservation_id
server = self._build_minimal_create_server_request()
post = {'server': server}
created_server = self.api.post_server(post)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# lookup servers created by the first request.
servers = self.api.get_servers(detail=True,
search_opts={'reservation_id': reservation_id})
server_map = {server['id']: server for server in servers}
found_server = server_map.get(created_server_id)
# The server from the 2nd request should not be there.
self.assertIsNone(found_server)
# Should have found 2 servers.
self.assertEqual(len(server_map), 2)
# Cleanup
self._delete_server(created_server_id)
for server_id in server_map:
self._delete_server(server_id)
def test_create_server_with_injected_files(self):
# Creates a server with injected_files.
personality = []
# Inject a text file
data = 'Hello, World!'
personality.append({
'path': '/helloworld.txt',
'contents': base64.encode_as_bytes(data),
})
# Inject a binary file
data = zlib.compress(b'Hello, World!')
personality.append({
'path': '/helloworld.zip',
'contents': base64.encode_as_bytes(data),
})
# Create server
server = self._build_minimal_create_server_request()
server['personality'] = personality
post = {'server': server}
created_server = self.api.post_server(post)
LOG.debug("created_server: %s" % created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# Check it's there
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
found_server = self._wait_for_state_change(found_server, 'BUILD')
self.assertEqual('ACTIVE', found_server['status'])
# Cleanup
self._delete_server(created_server_id)
def test_stop_start_servers_negative_invalid_state(self):
# Create server
server = self._build_minimal_create_server_request()
created_server = self.api.post_server({"server": server})
created_server_id = created_server['id']
found_server = self._wait_for_state_change(created_server, 'BUILD')
self.assertEqual('ACTIVE', found_server['status'])
# Start server in ACTIVE
# NOTE(mkoshiya): When os-start API runs, the server status
# must be SHUTOFF.
# By returning 409, I want to confirm that the ACTIVE server does not
# cause unexpected behavior.
post = {'os-start': {}}
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_action,
created_server_id, post)
self.assertEqual(409, ex.response.status_code)
self.assertEqual('ACTIVE', found_server['status'])
# Stop server
post = {'os-stop': {}}
self.api.post_server_action(created_server_id, post)
found_server = self._wait_for_state_change(found_server, 'ACTIVE')
self.assertEqual('SHUTOFF', found_server['status'])
# Stop server in SHUTOFF
# NOTE(mkoshiya): When os-stop API runs, the server status
# must be ACTIVE or ERROR.
# By returning 409, I want to confirm that the SHUTOFF server does not
# cause unexpected behavior.
post = {'os-stop': {}}
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_action,
created_server_id, post)
self.assertEqual(409, ex.response.status_code)
self.assertEqual('SHUTOFF', found_server['status'])
# Cleanup
self._delete_server(created_server_id)
class ServersTestV21(ServersTest):
    # Re-run the entire ServersTest suite against the v2.1 API endpoint.
    api_major_version = 'v2.1'
class ServersTestV219(ServersTestBase):
    """Tests for the server 'description' field added in microversion 2.19."""
    api_major_version = 'v2.1'

    def _create_server(self, set_desc=True, desc=None):
        """Create a server, optionally setting its description.

        Returns a (request_server_dict, response_server_dict) tuple.
        """
        server = self._build_minimal_create_server_request()
        if set_desc:
            server['description'] = desc
        post = {'server': server}
        response = self.api.api_post('/servers', post).body
        return (server, response['server'])

    def _update_server(self, server_id, set_desc=True, desc=None):
        """Rename a server, optionally setting its description too."""
        new_name = integrated_helpers.generate_random_alphanumeric(8)
        server = {'server': {'name': new_name}}
        if set_desc:
            server['server']['description'] = desc
        self.api.api_put('/servers/%s' % server_id, server)

    def _rebuild_server(self, server_id, set_desc=True, desc=None):
        """Rebuild a server, optionally setting its description."""
        new_name = integrated_helpers.generate_random_alphanumeric(8)
        post = {}
        post['rebuild'] = {
            "name": new_name,
            self._image_ref_parameter: "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
            self._access_ipv4_parameter: "172.19.0.2",
            self._access_ipv6_parameter: "fe80::2",
            "metadata": {'some': 'thing'},
        }
        post['rebuild'].update(self._get_access_ips_params())
        if set_desc:
            post['rebuild']['description'] = desc
        self.api.api_post('/servers/%s/action' % server_id, post)

    def _create_server_and_verify(self, set_desc=True, expected_desc=None):
        # Creates a server with a description and verifies it is
        # in the GET responses.
        created_server_id = self._create_server(set_desc,
                                                expected_desc)[1]['id']
        self._verify_server_description(created_server_id, expected_desc)
        self._delete_server(created_server_id)

    def _update_server_and_verify(self, server_id, set_desc=True,
                                  expected_desc=None):
        # Updates a server with a description and verifies it is
        # in the GET responses.
        self._update_server(server_id, set_desc, expected_desc)
        self._verify_server_description(server_id, expected_desc)

    def _rebuild_server_and_verify(self, server_id, set_desc=True,
                                   expected_desc=None):
        # Rebuilds a server with a description and verifies it is
        # in the GET responses.
        self._rebuild_server(server_id, set_desc, expected_desc)
        self._verify_server_description(server_id, expected_desc)

    def _verify_server_description(self, server_id, expected_desc=None,
                                   desc_in_resp=True):
        """GET the server (show and detail list) and check its description.

        If desc_in_resp is False, assert the field is entirely absent
        (pre-2.19 behavior).
        """
        response = self.api.api_get('/servers/%s' % server_id)
        found_server = response.body['server']
        self.assertEqual(server_id, found_server['id'])
        if desc_in_resp:
            # Verify the description is set as expected (can be None).
            self.assertEqual(expected_desc, found_server.get('description'))
        else:
            # Verify the description is not included in the response.
            self.assertNotIn('description', found_server)
        servers = self.api.api_get('/servers/detail').body['servers']
        server_map = {server['id']: server for server in servers}
        found_server = server_map.get(server_id)
        self.assertTrue(found_server)
        if desc_in_resp:
            # Verify the description is set as expected (can be None).
            self.assertEqual(expected_desc, found_server.get('description'))
        else:
            # Verify the description is not included in the response.
            self.assertNotIn('description', found_server)

    def _create_assertRaisesRegex(self, desc):
        # Verifies that a 400 error is thrown on create server.
        with self.assertRaisesRegex(client.OpenStackApiException,
                                    ".*Unexpected status code.*") as cm:
            self._create_server(True, desc)
        self.assertEqual(400, cm.exception.response.status_code)

    def _update_assertRaisesRegex(self, server_id, desc):
        # Verifies that a 400 error is thrown on update server.
        with self.assertRaisesRegex(client.OpenStackApiException,
                                    ".*Unexpected status code.*") as cm:
            self._update_server(server_id, True, desc)
        self.assertEqual(400, cm.exception.response.status_code)

    def _rebuild_assertRaisesRegex(self, server_id, desc):
        # Verifies that a 400 error is thrown on rebuild server.
        with self.assertRaisesRegex(client.OpenStackApiException,
                                    ".*Unexpected status code.*") as cm:
            self._rebuild_server(server_id, True, desc)
        self.assertEqual(400, cm.exception.response.status_code)

    def test_create_server_with_description(self):
        self.api.microversion = '2.19'
        # Create and get a server with a description.
        self._create_server_and_verify(True, 'test description')
        # Create and get a server with an empty description.
        self._create_server_and_verify(True, '')
        # Create and get a server with description set to None.
        self._create_server_and_verify()
        # Create and get a server without setting the description.
        self._create_server_and_verify(False)

    def test_update_server_with_description(self):
        self.api.microversion = '2.19'
        # Create a server with an initial description.
        server_id = self._create_server(True, 'test desc 1')[1]['id']
        # Update and get the server with a description.
        self._update_server_and_verify(server_id, True, 'updated desc')
        # Update and get the server name without changing the description.
        self._update_server_and_verify(server_id, False, 'updated desc')
        # Update and get the server with an empty description.
        self._update_server_and_verify(server_id, True, '')
        # Update and get the server by removing the description (set to None).
        self._update_server_and_verify(server_id)
        # Update and get the server with a 2nd new description.
        self._update_server_and_verify(server_id, True, 'updated desc2')
        # Cleanup
        self._delete_server(server_id)

    def test_rebuild_server_with_description(self):
        self.api.microversion = '2.19'
        # Create a server with an initial description.
        server = self._create_server(True, 'test desc 1')[1]
        server_id = server['id']
        self._wait_for_state_change(server, 'BUILD')
        # Rebuild and get the server with a description.
        self._rebuild_server_and_verify(server_id, True, 'updated desc')
        # Rebuild and get the server name without changing the description.
        self._rebuild_server_and_verify(server_id, False, 'updated desc')
        # Rebuild and get the server with an empty description.
        self._rebuild_server_and_verify(server_id, True, '')
        # Rebuild and get the server by removing the description (set to None).
        self._rebuild_server_and_verify(server_id)
        # Rebuild and get the server with a 2nd new description.
        self._rebuild_server_and_verify(server_id, True, 'updated desc2')
        # Cleanup
        self._delete_server(server_id)

    def test_version_compatibility(self):
        # Create a server with microversion v2.19 and a description.
        self.api.microversion = '2.19'
        server_id = self._create_server(True, 'test desc 1')[1]['id']
        # Verify that the description is not included on v2.18 GETs.
        self.api.microversion = '2.18'
        self._verify_server_description(server_id, desc_in_resp=False)
        # Verify that updating the server with description on v2.18
        # results in a 400 error.
        self._update_assertRaisesRegex(server_id, 'test update 2.18')
        # Verify that rebuilding the server with description on v2.18
        # results in a 400 error.
        self._rebuild_assertRaisesRegex(server_id, 'test rebuild 2.18')
        # Cleanup
        self._delete_server(server_id)
        # Create a server on v2.18 and verify that the description
        # defaults to the name on a v2.19 GET.
        server_req, response = self._create_server(False)
        server_id = response['id']
        self.api.microversion = '2.19'
        self._verify_server_description(server_id, server_req['name'])
        # Cleanup
        self._delete_server(server_id)
        # Verify that creating a server with description on v2.18
        # results in a 400 error.
        self.api.microversion = '2.18'
        self._create_assertRaisesRegex('test create 2.18')

    def test_description_errors(self):
        self.api.microversion = '2.19'
        # Create servers with invalid descriptions. These throw 400.
        # Invalid unicode with non-printable control char.
        self._create_assertRaisesRegex(u'invalid\0dstring')
        # Description is longer than 255 chars.
        self._create_assertRaisesRegex('x' * 256)
        # Update and rebuild servers with invalid descriptions.
        # These throw 400.
        server_id = self._create_server(True, "desc")[1]['id']
        # Invalid unicode with non-printable control char.
        self._update_assertRaisesRegex(server_id, u'invalid\u0604string')
        self._rebuild_assertRaisesRegex(server_id, u'invalid\u0604string')
        # Description is longer than 255 chars.
        self._update_assertRaisesRegex(server_id, 'x' * 256)
        self._rebuild_assertRaisesRegex(server_id, 'x' * 256)
class ServerTestV220(ServersTestBase):
    """Tests for microversion 2.20: volume attach/detach on shelved servers."""
    api_major_version = 'v2.1'

    def setUp(self):
        super(ServerTestV220, self).setUp()
        self.api.microversion = '2.20'
        fake_network.set_stub_network_methods(self)
        self.ctxt = context.get_admin_context()

    def _create_server(self):
        """Create a minimal server; return (request_body, response_body)."""
        server = self._build_minimal_create_server_request()
        post = {'server': server}
        response = self.api.api_post('/servers', post).body
        return (server, response['server'])

    def _shelve_server(self):
        """Create a server, shelve it, and return the resulting server."""
        server = self._create_server()[1]
        server_id = server['id']
        self._wait_for_state_change(server, 'BUILD')
        self.api.post_server_action(server_id, {'shelve': None})
        return self._wait_for_state_change(server, 'ACTIVE')

    def _get_fake_bdms(self, ctxt):
        """Return a one-entry fake volume BDM list for the given context."""
        # Bug fix: use the ctxt argument; previously this ignored its
        # parameter and always read self.ctxt (callers pass self.ctxt, so
        # behavior is unchanged, but the helper now honors its signature).
        return block_device_obj.block_device_make_list(ctxt,
                   [fake_block_device.FakeDbBlockDeviceDict(
                       {'device_name': '/dev/vda',
                        'source_type': 'volume',
                        'destination_type': 'volume',
                        'volume_id': '5d721593-f033-4f6d-ab6f-b5b067e61bc4'})])

    def test_attach_detach_vol_to_shelved_server(self):
        """Volume attach/detach works on a SHELVED (not offloaded) server."""
        # A negative offload time keeps the instance SHELVED.
        self.flags(shelved_offload_time=-1)
        found_server = self._shelve_server()
        self.assertEqual('SHELVED', found_server['status'])
        server_id = found_server['id']
        # Test attach volume: the compute RPC must be invoked.
        with test.nested(mock.patch.object(compute_api.API,
                                           '_check_attach_and_reserve_volume'),
                         mock.patch.object(rpcapi.ComputeAPI,
                                           'attach_volume')) as (mock_reserve,
                                                                 mock_attach):
            volume_attachment = {"volumeAttachment": {"volumeId":
                                     "5d721593-f033-4f6d-ab6f-b5b067e61bc4"}}
            self.api.api_post(
                '/servers/%s/os-volume_attachments' % (server_id),
                volume_attachment)
            self.assertTrue(mock_reserve.called)
            self.assertTrue(mock_attach.called)
        # Test detach volume: the compute RPC must be invoked.
        self.stub_out('nova.volume.cinder.API.get', fakes.stub_volume_get)
        with test.nested(mock.patch.object(compute_api.API,
                                           '_check_and_begin_detach'),
                         mock.patch.object(objects.BlockDeviceMappingList,
                                           'get_by_instance_uuid'),
                         mock.patch.object(rpcapi.ComputeAPI,
                                           'detach_volume')
                         ) as (mock_check, mock_get_bdms, mock_rpc):
            mock_get_bdms.return_value = self._get_fake_bdms(self.ctxt)
            attachment_id = mock_get_bdms.return_value[0]['volume_id']
            self.api.api_delete('/servers/%s/os-volume_attachments/%s' %
                                (server_id, attachment_id))
            self.assertTrue(mock_check.called)
            self.assertTrue(mock_rpc.called)
        self._delete_server(server_id)

    def test_attach_detach_vol_to_shelved_offloaded_server(self):
        """Volume attach/detach works on a SHELVED_OFFLOADED server."""
        # A zero offload time offloads the instance immediately.
        self.flags(shelved_offload_time=0)
        found_server = self._shelve_server()
        self.assertEqual('SHELVED_OFFLOADED', found_server['status'])
        server_id = found_server['id']
        # Test attach volume: cinder is called directly (no compute host),
        # and no device name can be assigned yet.
        with test.nested(mock.patch.object(compute_api.API,
                                           '_check_attach_and_reserve_volume'),
                         mock.patch.object(volume.cinder.API,
                                           'attach')) as (mock_reserve,
                                                          mock_vol):
            volume_attachment = {"volumeAttachment": {"volumeId":
                                     "5d721593-f033-4f6d-ab6f-b5b067e61bc4"}}
            attach_response = self.api.api_post(
                '/servers/%s/os-volume_attachments' % (server_id),
                volume_attachment).body['volumeAttachment']
            self.assertTrue(mock_reserve.called)
            self.assertTrue(mock_vol.called)
            self.assertIsNone(attach_response['device'])
        # Test detach volume: BDMs are cleaned up locally (no compute host).
        self.stub_out('nova.volume.cinder.API.get', fakes.stub_volume_get)
        with test.nested(mock.patch.object(compute_api.API,
                                           '_check_and_begin_detach'),
                         mock.patch.object(objects.BlockDeviceMappingList,
                                           'get_by_instance_uuid'),
                         mock.patch.object(compute_api.API,
                                           '_local_cleanup_bdm_volumes')
                         ) as (mock_check, mock_get_bdms, mock_clean_vols):
            mock_get_bdms.return_value = self._get_fake_bdms(self.ctxt)
            attachment_id = mock_get_bdms.return_value[0]['volume_id']
            self.api.api_delete('/servers/%s/os-volume_attachments/%s' %
                                (server_id, attachment_id))
            self.assertTrue(mock_check.called)
            self.assertTrue(mock_clean_vols.called)
        self._delete_server(server_id)
| |
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
from generic_benchmark_tools import schema_creator
from generic_benchmark_tools import staging_table_generator
from generic_benchmark_tools import table_util
from generic_benchmark_tools import benchmark_runner
from load_benchmark_tools import load_file_generator
from load_benchmark_tools import load_file_parameters
def _check_required_args(parser, required_args, command):
    """Raise a parser error if any arg required by *command* is unset.

    Args:
        parser: the argparse.ArgumentParser in use.
        required_args: dict mapping an arg's flag string to its parsed value.
        command: flag string of the command that requires the args.
    """
    missing_arguments = ", ".join(
        [arg for arg in required_args if not required_args[arg]])
    if missing_arguments:
        parser.error('Missing arg(s): {0:s} required with the '
                     '{1:s} command.'.format(missing_arguments, command))


def parse_args(argv):
    """Parses arguments from command line.

    Args:
        argv: list of arguments.

    Returns:
        parsed_args: parsed arguments.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--create_results_table',
        help='Flag to initiate the process of creating a results table to '
             'store the results of the benchmark loads.',
        action='store_true')
    parser.add_argument(
        '--create_benchmark_schemas',
        help='Flag to initiate the process of creating schemas for the '
             'benchmarked tables based off of parameters in '
             'load_file_parameters.py.',
        action='store_true')
    parser.add_argument(
        '--benchmark_table_schemas_directory',
        default='json_schemas/benchmark_table_schemas',
        help='Directory that stores the JSON files that hold the schemas for '
             'the benchmark tables.')
    parser.add_argument(
        '--create_staging_tables',
        help='Flag to initiate process of creating staging tables using '
             'file parameters, which will be used to create files for '
             'loading into benchmarked tables.',
        action='store_true')
    parser.add_argument(
        '--create_files',
        help='Flag to initiate process of creating files for loading '
             'into benchmarked tables.',
        action='store_true')
    parser.add_argument(
        '--restart_file',
        help='File to start with when creating files if program failed in '
             'the middle of file creation. Can only be used with '
             '--create_files flag.')
    parser.add_argument(
        '--create_benchmark_tables',
        help='Flag to initiate process of creating benchmarked tables '
             'from files and storing results for comparison.',
        action='store_true')
    parser.add_argument(
        '--duplicate_benchmark_tables',
        help='Flag that will create new benchmark tables from files that '
             'have already been used to create benchmark tables. Without '
             'this flag, tables will only be created from files that have not '
             'yet been loaded into benchmark tables. Can only be used with '
             '--create_benchmark_tables flag.',
        action='store_true')
    parser.add_argument(
        '--bq_project_id',
        help='Project ID that contains bigquery resources for running '
             'the benchmark.')
    parser.add_argument(
        '--benchmark_dataset_id',
        help='Dataset ID that benchmarked tables will be loaded to. ')
    parser.add_argument(
        '--staging_project_id',
        help='Name of the project that will hold resources for staging tables.')
    parser.add_argument(
        '--staging_dataset_id',
        help='Dataset ID that staging tables will be loaded to.')
    parser.add_argument(
        '--resized_staging_dataset_id',
        help='Dataset ID that resized staging tables will be loaded to.')
    parser.add_argument('--results_table_name',
                        help='Name of table that will store results of '
                             'benchmark loads.')
    parser.add_argument(
        '--results_dataset_id',
        help='Name of the dataset that will hold the results table.')
    parser.add_argument(
        '--results_table_schema_path',
        default='json_schemas/results_table_schema.json',
        help='Path of JSON file that holds the schema for the table '
             'that the benchmark results will be loaded into. ')
    parser.add_argument(
        '--gcs_project_id',
        help='Project ID that contains GCS resources for running '
             'the benchmark.')
    parser.add_argument(
        '--bucket_name',
        help='Name of bucket that will contain files for loading into '
             'benchmarked tables.')
    parser.add_argument('--dataflow_temp_location',
                        help='Temporary location for Dataflow jobs on GCS.')
    parser.add_argument('--dataflow_staging_location',
                        help='Staging location for Dataflow jobs on GCS.')
    parser.add_argument(
        '--bq_logs_dataset',
        help='Dataset that holds the table storing logs for BQ jobs '
             'in --bq_project_id')
    parser.add_argument(
        '--run_federated_query_benchmark',
        help='Flag to initiate the process running the Federated Query '
             'Benchmark by creating tables from files, running queries on both '
             'the table and the files, and storing performance results. ',
        action='store_true')
    parser.add_argument(
        '--include_federated_query_benchmark',
        help='This flag can be included with --run_file_loader_benchmark to '
             'run the Federated Query Benchmark along with the File Load '
             'Benchmark.',
        action='store_true')
    parser.add_argument(
        '--run_file_loader_benchmark',
        help='Flag to initiate process of running the File Loader benchmark by'
             ' creating tables from files and storing results for comparison.',
        action='store_true')
    args = parser.parse_args(args=argv)
    # Only certain args are required depending on the command. Rather than
    # making each arg globally required, validate each command's required
    # combination and raise a parser error when one is missing.
    if args.create_results_table:
        _check_required_args(parser, {
            '--results_table_name': args.results_table_name,
            '--results_dataset_id': args.results_dataset_id,
        }, '--create_results_table')
    if args.create_staging_tables:
        _check_required_args(parser, {
            '--bq_project_id': args.bq_project_id,
            '--staging_dataset_id': args.staging_dataset_id,
            '--resized_staging_dataset_id': args.resized_staging_dataset_id,
            '--dataflow_staging_location': args.dataflow_staging_location,
            '--dataflow_temp_location': args.dataflow_temp_location,
        }, '--create_staging_tables')
    if args.create_files:
        _check_required_args(parser, {
            '--gcs_project_id': args.gcs_project_id,
            '--resized_staging_dataset_id': args.resized_staging_dataset_id,
            '--bucket_name': args.bucket_name,
            '--dataflow_staging_location': args.dataflow_staging_location,
            '--dataflow_temp_location': args.dataflow_temp_location,
        }, '--create_files')
    if args.restart_file:
        _check_required_args(parser, {'--create_files': args.create_files},
                             '--restart_file')
    if args.run_file_loader_benchmark:
        _check_required_args(parser, {
            '--bq_project_id': args.bq_project_id,
            '--gcs_project_id': args.gcs_project_id,
            '--staging_project_id': args.staging_project_id,
            '--staging_dataset_id': args.staging_dataset_id,
            '--benchmark_dataset_id': args.benchmark_dataset_id,
            '--bucket_name': args.bucket_name,
            '--results_table_name': args.results_table_name,
            '--results_dataset_id': args.results_dataset_id,
            '--bq_logs_dataset': args.bq_logs_dataset,
        }, '--run_file_loader_benchmark')
    if args.run_federated_query_benchmark:
        _check_required_args(parser, {
            '--bq_project_id': args.bq_project_id,
            '--gcs_project_id': args.gcs_project_id,
            '--staging_project_id': args.staging_project_id,
            '--staging_dataset_id': args.staging_dataset_id,
            '--benchmark_dataset_id': args.benchmark_dataset_id,
            '--bucket_name': args.bucket_name,
            '--results_table_name': args.results_table_name,
            '--results_dataset_id': args.results_dataset_id,
            '--bq_logs_dataset': args.bq_logs_dataset,
        }, '--run_federated_query_benchmark')
    return args
def main(argv=None):
    """Run the benchmark stages selected by the command-line flags.

    Args:
        argv: list of command-line arguments, or None to use sys.argv.
    """
    args = parse_args(argv)
    create_results_table = args.create_results_table
    create_benchmark_schemas = args.create_benchmark_schemas
    benchmark_table_schemas_dir = args.benchmark_table_schemas_directory
    create_staging_tables = args.create_staging_tables
    create_files = args.create_files
    restart_file = args.restart_file
    run_file_loader_benchmark = args.run_file_loader_benchmark
    run_federated_query_benchmark = args.run_federated_query_benchmark
    duplicate_benchmark_tables = args.duplicate_benchmark_tables
    bq_project_id = args.bq_project_id
    benchmark_dataset_id = args.benchmark_dataset_id
    staging_project_id = args.staging_project_id
    staging_dataset_id = args.staging_dataset_id
    resized_staging_dataset_id = args.resized_staging_dataset_id
    results_table_name = args.results_table_name
    results_dataset_id = args.results_dataset_id
    results_table_schema_path = args.results_table_schema_path
    gcs_project_id = args.gcs_project_id
    bucket_name = args.bucket_name
    dataflow_temp_location = args.dataflow_temp_location
    # Bug fix: this previously read args.dataflow_temp_location, silently
    # using the temp location as the staging location as well.
    dataflow_staging_location = args.dataflow_staging_location
    bq_logs_dataset = args.bq_logs_dataset
    include_federated_query_benchmark = args.include_federated_query_benchmark
    file_params = load_file_parameters.FILE_PARAMETERS
    # Run provided commands.
    if create_results_table:
        logging.info('Creating results table {0:s} from schema in '
                     '{1:s}.'.format(
                         results_table_name,
                         results_table_schema_path,
                     ))
        results_table_util = table_util.TableUtil(
            table_id=results_table_name,
            dataset_id=results_dataset_id,
            json_schema_filename=results_table_schema_path,
        )
        results_table_util.create_table()
        logging.info('Done creating results table.')
    if create_benchmark_schemas:
        benchmark_schema_creator = schema_creator.SchemaCreator(
            schemas_dir=benchmark_table_schemas_dir, file_params=file_params)
        benchmark_schema_creator.create_schemas()
    if create_staging_tables:
        benchmark_staging_table_generator = (
            staging_table_generator.StagingTableGenerator(
                project=bq_project_id,
                staging_dataset_id=staging_dataset_id,
                resized_dataset_id=resized_staging_dataset_id,
                json_schema_path=benchmark_table_schemas_dir,
                file_params=file_params,
                num_rows=500))
        # Bug fix: the dataflow_temp_location keyword previously received
        # the staging location instead of the temp location.
        benchmark_staging_table_generator.create_staging_tables(
            dataflow_staging_location=dataflow_staging_location,
            dataflow_temp_location=dataflow_temp_location,
        )
        benchmark_staging_table_generator.create_resized_tables()
    if create_files:
        benchmark_load_file_generator = load_file_generator.FileGenerator(
            project_id=gcs_project_id,
            primitive_staging_dataset_id=resized_staging_dataset_id,
            bucket_name=bucket_name,
            file_params=file_params,
            dataflow_staging_location=dataflow_staging_location,
            dataflow_temp_location=dataflow_temp_location,
        )
        if restart_file:
            benchmark_load_file_generator.restart_incomplete_combination(
                restart_file)
        benchmark_load_file_generator.create_files()
    if run_file_loader_benchmark:
        load_benchmark_runner = benchmark_runner.BenchmarkRunner(
            bq_project=bq_project_id,
            gcs_project=gcs_project_id,
            staging_project=staging_project_id,
            staging_dataset_id=staging_dataset_id,
            dataset_id=benchmark_dataset_id,
            bucket_name=bucket_name,
            results_table_name=results_table_name,
            results_table_dataset_id=results_dataset_id,
            duplicate_benchmark_tables=duplicate_benchmark_tables,
            file_params=file_params,
            bq_logs_dataset=bq_logs_dataset,
            include_federated_query_benchmark=include_federated_query_benchmark)
        load_benchmark_runner.execute_file_loader_benchmark()
    if run_federated_query_benchmark:
        federated_query_benchmark_runner = benchmark_runner.BenchmarkRunner(
            bq_project=bq_project_id,
            gcs_project=gcs_project_id,
            staging_project=staging_project_id,
            staging_dataset_id=staging_dataset_id,
            dataset_id=benchmark_dataset_id,
            bucket_name=bucket_name,
            results_table_name=results_table_name,
            results_table_dataset_id=results_dataset_id,
            duplicate_benchmark_tables=duplicate_benchmark_tables,
            file_params=file_params,
            bq_logs_dataset=bq_logs_dataset,
            run_federated_query_benchmark=run_federated_query_benchmark)
        federated_query_benchmark_runner.execute_federated_query_benchmark()
if __name__ == '__main__':
    # Default the root logger to INFO so the stage progress messages
    # emitted via logging.info() are visible.
    logging.getLogger().setLevel(logging.INFO)
    main()
| |
#
# Candelabra
#
# Copyright Alvaro Saurin 2013 - All right Reserved
#
"""
Plugins infrastructure for Candelabra.
You can add a plugin by defining an entry-point in your software distribution. For example, for a provider
for VMware, you should define an entry point like this in your `setup.py` file:
>>> entry_points={
>>> 'candelabra.provider': [
>>> 'vmware_provider = candelabra_vmware.plugin:register_me',
>>> ]
>>> }
Then, in your candelabra_vmware/plugin.py, there must be a register_me function like this:
>>> from candelabra.plugins import ProviderPlugin
>>>
>>> class VMWareProvider(ProviderPlugin):
>>> MACHINE = VMWareMachine
>>>
>>> provider_instance = VMWareProvider()
>>>
>>> def register_me(registry_instance):
>>> registry_instance.register('vmware', provider_instance)
"""
from logging import getLogger
import os
import sys
import pkg_resources
from candelabra.config import config
from candelabra.constants import CFG_DEFAULT_PROVIDER
from candelabra.errors import TopologyException, ComponentNotFoundException
logger = getLogger(__name__)
########################################################################################################################
class PluginsRegistry(object):
    """Keeps track of the plugins registered for one plugin family.

    Plugins are stored in a name -> plugin-instance mapping; a plugin is
    only accepted when :meth:`validate` approves it.
    """

    def __init__(self):
        # mapping: plugin name -> plugin instance
        self.plugins = {}

    def register(self, name, plugin):
        """Add *plugin* under *name* if it passes validation."""
        if not self.validate(plugin):
            return
        self.plugins[name] = plugin

    def validate(self, plugin):
        """Validation hook for subclasses; the base registry accepts everything."""
        return True

    @property
    def names(self):
        """The names of all registered plugins."""
        return self.plugins.keys()
# The known plugin families; entry points in other packages are grouped
# under these names (see register_all()).
_PLUGIN_FAMILIES = (
    'candelabra.provider',
    'candelabra.provisioner',
    'candelabra.guest',
    'candelabra.command',
    'candelabra.communicator',
)

# One independent registry per plugin family.
PLUGINS_REGISTRIES = dict((family, PluginsRegistry()) for family in _PLUGIN_FAMILIES)
def register_all():
    """ Register all plugins we can find in the system.

    For each plugin family we scan the matching ``pkg_resources`` entry points
    and invoke each entry point with the family's :class:`PluginsRegistry`
    instance as the only parameter. The entry point must then call the
    registry's :meth:`~PluginsRegistry.register` method in order to register
    itself.
    """
    logger.debug('registering all plugins')
    # .items() instead of the Python-2-only .iteritems(): identical iteration
    # behavior on Python 2 (the dict is tiny, the extra list copy is free)
    # and it keeps this module importable on Python 3.
    for family_name, registry_instance in PLUGINS_REGISTRIES.items():
        logger.debug('... looking for plugins for %s', family_name)
        for entrypoint in pkg_resources.iter_entry_points(group=family_name):
            # Grab the function that is the actual plugin.
            plugin_entrypoint = entrypoint.load()
            # Call the plugin with the registry so it can register itself.
            plugin_entrypoint(registry_instance)
########################################################################################################################
class CommandPlugin(object):
    """ A command-line command.

    Subclasses set NAME/DESCRIPTION and implement :meth:`run`; commands that
    need a loaded topology call :meth:`run_with_topology` instead.
    """
    NAME = 'unknown'          # command name as used on the command line
    DESCRIPTION = 'unknown'   # one-line help text

    def run(self, args, command):
        """ Run the command.

        Must be overridden by concrete command plugins.
        """
        raise NotImplementedError('must be implemented')

    def run_with_topology(self, args, topology_file, command=None, save_state=True):
        """ Run a command, managing the topology.

        Loads (or guesses) the topology file, schedules the tasks that
        *command* produces and runs them. Exits the process on load errors.

        :param args: parsed command-line arguments (currently unused here)
        :param topology_file: path to the topology file, or None to auto-detect
        :param command: optional command name used to obtain tasks from the topology
        :param save_state: when True, persist topology state if any task completed
        :returns: the loaded TopologyRoot instance
        """
        if command:
            logger.info('running command "%s"', command)

        # no explicit topology file: try to find one, bail out if none exists
        if topology_file is None:
            from candelabra.topology.root import guess_topology_file
            topology_file = guess_topology_file()
            if topology_file is None:
                logger.critical('no topology file provided')
                sys.exit(1)
        if not os.path.exists(topology_file):
            logger.critical('topology file %s does not exist', topology_file)
            sys.exit(1)

        # local imports: these modules in turn import from this plugins module,
        # so importing them at module level would create a cycle
        from candelabra.errors import TopologyException, ProviderNotFoundException, CandelabraException
        from candelabra.scheduler.base import TasksScheduler

        # load the topology file and create a tree
        try:
            from candelabra.topology.root import TopologyRoot
            topology = TopologyRoot()
            topology.load(topology_file)
        except TopologyException, e:
            logger.critical(str(e))
            sys.exit(1)
        except ProviderNotFoundException, e:
            logger.critical(str(e))
            sys.exit(1)
        except KeyboardInterrupt:
            logger.critical('interrupted with Ctrl-C... bye!')
            sys.exit(0)

        # schedule and run the tasks produced by the command; CandelabraException
        # propagates untouched, Ctrl-C is converted into one
        scheduler = None
        try:
            if command:
                scheduler = TasksScheduler()
                tasks = topology.get_tasks(command)
                # get_tasks() is expected to yield (task, ...) tuples
                assert all(isinstance(t, tuple) for t in tasks)
                scheduler.append(tasks)
                scheduler.run()
        except CandelabraException:
            raise
        except KeyboardInterrupt:
            raise CandelabraException('interrupted with Ctrl-C... bye!')
        except Exception, e:
            logger.critical('uncaught exception')
            raise
        finally:
            # persist state even on failure, but only if something actually ran
            if save_state:
                if scheduler and scheduler.num_completed > 0:
                    topology.state.save()

        return topology
class ProviderPlugin(object):
    """ A virtualization provider (e.g. VirtualBox, VMware).

    Concrete providers override the class attributes below with the classes
    the build_*_instance() factories should instantiate.
    """
    NAME = 'unknown'
    DESCRIPTION = 'unknown'
    MACHINE = None  # the machine class that will be instantiated for each definition in the topology
    APPLIANCE = None  # the appliance class that will be instantiated for each definition in the topology
    INTERFACE = None  # network-interface class (see build_interface_instance)
    SHARED = None  # shared-folder class (see build_shared_instance)
    COMMUNICATORS = None  # communicators supported by this provider
    # NOTE(review): build_network_instance() looks up a NETWORK attribute on
    # provider plugins, but no NETWORK default is declared here — confirm
    # whether it should be added.
class ProvisionerPlugin(object):
    """ A provisioner (software that configures a machine after boot).
    """
    NAME = 'unknown'
    DESCRIPTION = 'unknown'
    PROVISIONER = None  # the provisioner class that will be instantiated for each machine

    def run(self, command):
        """ Run a command.

        Must be overridden by concrete provisioner plugins.
        """
        raise NotImplementedError('must be implemented')
class GuestPlugin(object):
    """ A guest (operating system) definition.
    """
    NAME = 'unknown'
    DESCRIPTION = 'unknown'
class CommunicatorPlugin(object):
    """ A communicator (transport used to talk to a machine, e.g. SSH).
    """
    NAME = 'unknown'
    DESCRIPTION = 'unknown'
    # Providers this communicator is restricted to; empty means "any".
    # NOTE(review): a mutable class-level list is shared by all subclasses
    # that do not override it — mutate with care.
    ONLY_PROVIDERS = []
    COMMUNICATOR = None  # the communicator class that will be instantiated for each machine
########################################################################################################################
def get_provider(name):
    """Return the ProviderPlugin registered under *name*.

    Raises KeyError when no provider was registered under that name.
    """
    providers = PLUGINS_REGISTRIES['candelabra.provider'].plugins
    return providers[name]
def _get_provider_class_from_dict(**kwargs):
""" Get a a provider class from a dictionary
"""
for name in ['class', 'cfg_class', '_class']:
if name in kwargs:
return kwargs[name]
for alternative in ['_container', '_parent']:
if alternative in kwargs:
alternative_attr = kwargs[alternative]
if alternative_attr:
for name in ['class', 'cfg_class', '_class']:
if hasattr(alternative_attr, name):
return getattr(alternative_attr, name)
return config.get_key(CFG_DEFAULT_PROVIDER)
def _get_instance_from_plugin(_family, _attr, _name, **kwargs):
    """Instantiate the class stored as attribute *_attr* of a registered plugin.

    :param _family: plugin family key in PLUGINS_REGISTRIES (e.g. 'candelabra.provider')
    :param _attr: attribute on the plugin that holds the class to instantiate
    :param _name: human-readable component name, used only in error messages
    :param kwargs: forwarded to the class constructor; also used to guess the
                   provider class name (see _get_provider_class_from_dict)
    :raises TopologyException: when no class name can be determined
    :raises ComponentNotFoundException: when no plugin matches the class name
    """
    _class_name = _get_provider_class_from_dict(**kwargs).lower()
    if not _class_name:
        raise TopologyException('internal: no %s class available' % (_name))
    try:
        _class = getattr(PLUGINS_REGISTRIES[_family].plugins[_class_name], _attr)
    except KeyError:
        # was `except KeyError, e`: the bound exception was never used, and the
        # comma syntax is Python-2-only — `except KeyError:` works everywhere.
        m = 'cannot build a %s of class "%s"' % (_name, _class_name)
        logger.warning(m)
        raise ComponentNotFoundException(m)
    # NOTE(review): an AttributeError from getattr() (plugin present but missing
    # *_attr*) is not caught here — confirm whether it should also map to
    # ComponentNotFoundException.
    return _class(**kwargs)
def build_machine_instance(**kwargs):
    """ The factory for machines: returns a subclass of MachineNode for the right provider.
    """
    # the component label was previously 'communicator' (copy/paste slip from
    # build_communicator_instance); it is only used in error messages, and
    # 'machine' is the accurate description for this factory.
    return _get_instance_from_plugin('candelabra.provider', 'MACHINE', 'machine', **kwargs)
def build_provisioner_instance(**kwargs):
    """ The factory for provisioners: returns a subclass of Provisioner for the right plugin.
    """
    return _get_instance_from_plugin('candelabra.provisioner', 'PROVISIONER', 'provisioner', **kwargs)
def build_shared_instance(**kwargs):
    """ The factory for shared folders: returns a subclass of SharedNode for the right provider.
    """
    return _get_instance_from_plugin('candelabra.provider', 'SHARED', 'shared folder', **kwargs)
def build_network_instance(**kwargs):
    """ The factory for networks: returns a subclass of NetworkNode for the right provider.

    NOTE(review): ProviderPlugin declares no NETWORK default attribute, so a
    provider that does not define one will raise AttributeError here — confirm.
    """
    return _get_instance_from_plugin('candelabra.provider', 'NETWORK', 'network', **kwargs)
def build_interface_instance(**kwargs):
    """ The factory for interfaces: returns a subclass of InterfaceNode for the right provider.
    """
    return _get_instance_from_plugin('candelabra.provider', 'INTERFACE', 'interface', **kwargs)
def build_guest_instance(**kwargs):
    """ The factory for guests: returns a subclass of Guest for the right plugin.
    """
    return _get_instance_from_plugin('candelabra.guest', 'GUEST', 'guest', **kwargs)
def build_communicator_instance(**kwargs):
    """ The factory for communicators: returns a subclass of Communicator for the right plugin.
    """
    return _get_instance_from_plugin('candelabra.communicator', 'COMMUNICATOR', 'communicator', **kwargs)
| |
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Run Regression Test Suite
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts, other
than:
- `-extended`: run the "extended" test suite in addition to the basic one.
- `-win`: signal that this is running in a Windows environment, and we
should run the tests.
- `--coverage`: this generates a basic coverage report for the RPC
interface.
For a description of arguments recognized by test scripts, see
`qa/pull-tester/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
from tests_config import *
#If imported values are not defined then set to zero (or disabled)
# (these names normally come from the `from tests_config import *` above;
# vars() sees them only when the build actually enabled the feature)
if 'ENABLE_WALLET' not in vars():
    ENABLE_WALLET=0
if 'ENABLE_BITCOIND' not in vars():
    ENABLE_BITCOIND=0
if 'ENABLE_UTILS' not in vars():
    ENABLE_UTILS=0
if 'ENABLE_ZMQ' not in vars():
    ENABLE_ZMQ=0

# Coverage is off unless --coverage is passed below.
ENABLE_COVERAGE=0

#Create a set to store arguments and create the passOn string
# `opts` collects bare test names / flags; anything starting with `--`
# (or `-h`) is forwarded verbatim to the individual test scripts.
opts = set()
passOn = ""
p = re.compile("^--")

# ANSI bold escape codes, only on POSIX terminals.
bold = ("","")
if (os.name == 'posix'):
    bold = ('\033[0m', '\033[1m')

for arg in sys.argv[1:]:
    if arg == '--coverage':
        ENABLE_COVERAGE = 1
    elif (p.match(arg) or arg == "-h"):
        passOn += " " + arg
    else:
        opts.add(arg)
#Set env vars
buildDir = BUILDDIR
if "DASHD" not in os.environ:
    os.environ["DASHD"] = buildDir + '/src/dashd' + EXEEXT
if "DASHCLI" not in os.environ:
    os.environ["DASHCLI"] = buildDir + '/src/dash-cli' + EXEEXT

if EXEEXT == ".exe" and "-win" not in opts:
    # https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
    # https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
    # Use the print() call form (identical for a single argument on Python 2
    # and 3) for consistency with the rest of this file, which already uses
    # print() everywhere else.
    print("Win tests currently disabled by default. Use -win option to enable")
    sys.exit(0)

if not (ENABLE_WALLET == 1 and ENABLE_UTILS == 1 and ENABLE_BITCOIND == 1):
    print("No rpc tests to run. Wallet, utils, and bitcoind must all be enabled")
    sys.exit(0)

# python-zmq may not be installed. Handle this gracefully and with some helpful info
if ENABLE_ZMQ:
    try:
        import zmq
    except ImportError as e:
        print("ERROR: \"import zmq\" failed. Set ENABLE_ZMQ=0 or " \
              "to run zmq tests, see dependency info in /qa/README.md.")
        raise e
#Tests
# Base test suite: always considered when no explicit test names are given.
testScripts = [
    'bip68-112-113-p2p.py',
    'wallet.py',
    'wallet-hd.py',
    'listtransactions.py',
    'receivedby.py',
    'mempool_resurrect_test.py',
    'txn_doublespend.py --mineblock',
    'txn_clone.py',
    'getchaintips.py',
    'rawtransactions.py',
    'rest.py',
    'mempool_spendcoinbase.py',
    'mempool_reorg.py',
    'mempool_limit.py',
    'httpbasics.py',
    'multi_rpc.py',
    'zapwallettxes.py',
    'proxy_test.py',
    'merkle_blocks.py',
    'fundrawtransaction.py',
    'fundrawtransaction-hd.py',
    'signrawtransactions.py',
    'walletbackup.py',
    'nodehandling.py',
    'reindex.py',
    'addressindex.py',
    'timestampindex.py',
    'spentindex.py',
    'decodescript.py',
    'p2p-fullblocktest.py', # NOTE: needs dash_hash to pass
    'blockchain.py',
    'disablewallet.py',
    'sendheaders.py', # NOTE: needs dash_hash to pass
    'keypool.py',
    'keypool-hd.py',
    'prioritise_transaction.py',
    'invalidblockrequest.py', # NOTE: needs dash_hash to pass
    'invalidtxrequest.py', # NOTE: needs dash_hash to pass
    'abandonconflict.py',
    'p2p-versionbits-warning.py',
]
# The ZMQ test needs both the build flag and python-zmq (checked above).
if ENABLE_ZMQ:
    testScripts.append('zmq_test.py')

# Extended suite: only run with -extended (or when named explicitly).
testScriptsExt = [
    'bip9-softforks.py',
    'bip65-cltv.py',
    'bip65-cltv-p2p.py', # NOTE: needs dash_hash to pass
    'bip68-sequence.py',
    'bipdersig-p2p.py', # NOTE: needs dash_hash to pass
    'bipdersig.py',
    'getblocktemplate_longpoll.py', # FIXME: "socket.error: [Errno 54] Connection reset by peer" on my Mac, same as https://github.com/bitcoin/bitcoin/issues/6651
    'getblocktemplate_proposals.py',
    'txn_doublespend.py',
    'txn_clone.py --mineblock',
    # 'pruning.py', # Prune mode is incompatible with -txindex.
    'forknotify.py',
    'invalidateblock.py',
    # 'rpcbind_test.py', #temporary, bug in libevent, see #6655
    'smartfees.py',
    'maxblocksinflight.py',
    'p2p-acceptblock.py', # NOTE: needs dash_hash to pass
    'mempool_packages.py',
    'maxuploadtarget.py',
    # 'replace-by-fee.py', # RBF is disabled in Dash Core
]
def runtests():
    """Run the selected RPC test scripts, optionally tracking RPC coverage.

    Selection rules: with no options every base test runs; with -extended the
    extended suite runs too; otherwise only tests named in `opts` (with or
    without the .py suffix) are run. Unrecognized `--` flags in `passOn` are
    forwarded to each script.
    """
    coverage = None

    if ENABLE_COVERAGE:
        coverage = RPCCoverage()
        print("Initializing coverage directory at %s\n" % coverage.dir)

    rpcTestDir = buildDir + '/qa/rpc-tests/'
    run_extended = '-extended' in opts
    cov_flag = coverage.flag if coverage else ''
    flags = " --srcdir %s/src %s %s" % (buildDir, cov_flag, passOn)

    #Run Tests
    # Iterate the scripts directly instead of the `for i in range(len(...))`
    # index dance; behavior is unchanged.
    for script in testScripts:
        if (len(opts) == 0
                or (len(opts) == 1 and "-win" in opts)
                or run_extended
                or script in opts
                or re.sub(".py$", "", script) in opts):
            print("Running testscript %s%s%s ..." % (bold[1], script, bold[0]))
            time0 = time.time()
            subprocess.check_call(
                rpcTestDir + script + flags, shell=True)
            print("Duration: %s s\n" % (int(time.time() - time0)))

            # exit if help is called so we print just one set of
            # instructions
            p = re.compile(" -h| --help")
            if p.match(passOn):
                sys.exit(0)

    # Run Extended Tests
    for script in testScriptsExt:
        if (run_extended or script in opts
                or re.sub(".py$", "", script) in opts):
            print(
                "Running 2nd level testscript "
                + "%s%s%s ..." % (bold[1], script, bold[0]))
            time0 = time.time()
            subprocess.check_call(
                rpcTestDir + script + flags, shell=True)
            print("Duration: %s s\n" % (int(time.time() - time0)))

    if coverage:
        coverage.report_rpc_coverage()
        print("Cleaning up coverage data")
        coverage.cleanup()
class RPCCoverage(object):
    """
    Coverage reporting utilities for pull-tester.

    Each test-script subprocess writes coverage files into a shared temporary
    directory: one `coverage.*` file per test listing the RPC commands it
    invoked, plus a reference file (`rpc_interface.txt`) holding the complete
    RPC command list per `bitcoin-cli help`.

    After all tests complete, the invoked commands are merged and diff'd
    against the reference list to report uncovered RPC commands.

    See also: qa/rpc-tests/test_framework/coverage.py
    """
    def __init__(self):
        self.dir = tempfile.mkdtemp(prefix="coverage")
        # command-line flag handed to every test script
        self.flag = '--coveragedir %s' % self.dir

    def report_rpc_coverage(self):
        """
        Print out RPC commands that were unexercised by tests.
        """
        uncovered = self._get_uncovered_rpc_commands()

        if not uncovered:
            print("All RPC commands covered.")
            return

        print("Uncovered RPC commands:")
        print("".join((" - %s\n" % i) for i in sorted(uncovered)))

    def cleanup(self):
        return shutil.rmtree(self.dir)

    def _get_uncovered_rpc_commands(self):
        """
        Return a set of currently untested RPC commands.
        """
        # This is shared from `qa/rpc-tests/test-framework/coverage.py`
        reference_filename = os.path.join(self.dir, 'rpc_interface.txt')
        coverage_file_prefix = 'coverage.'

        if not os.path.isfile(reference_filename):
            raise RuntimeError("No coverage reference found")

        with open(reference_filename, 'r') as f:
            all_cmds = set(line.strip() for line in f.readlines())

        covered_cmds = set()
        for root, _, files in os.walk(self.dir):
            for filename in files:
                if filename.startswith(coverage_file_prefix):
                    with open(os.path.join(root, filename), 'r') as f:
                        covered_cmds.update(line.strip() for line in f.readlines())

        return all_cmds - covered_cmds
# Script entry point.
if __name__ == '__main__':
    runtests()
| |
from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urlparse,
compat_HTTPError,
)
from ..utils import (
ExtractorError,
int_or_none,
parse_iso8601,
)
class VevoBaseIE(InfoExtractor):
    def _extract_json(self, webpage, video_id):
        """Pull the `window.__INITIAL_STORE__` JSON blob out of a watch page."""
        initial_store = self._search_regex(
            r'window\.__INITIAL_STORE__\s*=\s*({.+?});\s*</script>',
            webpage, 'initial store')
        return self._parse_json(initial_store, video_id)
class VevoIE(VevoBaseIE):
    '''
    Accepts urls from vevo.com or in the format 'vevo:{id}'
    (currently used by MTVIE and MySpaceIE)
    '''
    # Matches watch pages, the legacy cache/embed players, embed.vevo.com
    # isrc links, and bare 'vevo:{id}' pseudo-URLs.
    _VALID_URL = r'''(?x)
        (?:https?://(?:www\.)?vevo\.com/watch/(?!playlist|genre)(?:[^/]+/(?:[^/]+/)?)?|
           https?://cache\.vevo\.com/m/html/embed\.html\?video=|
           https?://videoplayer\.vevo\.com/embed/embedded\?videoId=|
           https?://embed\.vevo\.com/.*?[?&]isrc=|
           vevo:)
        (?P<id>[^&?#]+)'''
    _TESTS = [{
        'url': 'http://www.vevo.com/watch/hurts/somebody-to-die-for/GB1101300280',
        'md5': '95ee28ee45e70130e3ab02b0f579ae23',
        'info_dict': {
            'id': 'GB1101300280',
            'ext': 'mp4',
            'title': 'Hurts - Somebody to Die For',
            'timestamp': 1372057200,
            'upload_date': '20130624',
            'uploader': 'Hurts',
            'track': 'Somebody to Die For',
            'artist': 'Hurts',
            'genre': 'Pop',
        },
        'expected_warnings': ['Unable to download SMIL file', 'Unable to download info'],
    }, {
        'note': 'v3 SMIL format',
        'url': 'http://www.vevo.com/watch/cassadee-pope/i-wish-i-could-break-your-heart/USUV71302923',
        'md5': 'f6ab09b034f8c22969020b042e5ac7fc',
        'info_dict': {
            'id': 'USUV71302923',
            'ext': 'mp4',
            'title': 'Cassadee Pope - I Wish I Could Break Your Heart',
            'timestamp': 1392796919,
            'upload_date': '20140219',
            'uploader': 'Cassadee Pope',
            'track': 'I Wish I Could Break Your Heart',
            'artist': 'Cassadee Pope',
            'genre': 'Country',
        },
        'expected_warnings': ['Unable to download SMIL file', 'Unable to download info'],
    }, {
        'note': 'Age-limited video',
        'url': 'https://www.vevo.com/watch/justin-timberlake/tunnel-vision-explicit/USRV81300282',
        'info_dict': {
            'id': 'USRV81300282',
            'ext': 'mp4',
            'title': 'Justin Timberlake - Tunnel Vision (Explicit)',
            'age_limit': 18,
            'timestamp': 1372888800,
            'upload_date': '20130703',
            'uploader': 'Justin Timberlake',
            'track': 'Tunnel Vision (Explicit)',
            'artist': 'Justin Timberlake',
            'genre': 'Pop',
        },
        'expected_warnings': ['Unable to download SMIL file', 'Unable to download info'],
    }, {
        'note': 'No video_info',
        'url': 'http://www.vevo.com/watch/k-camp-1/Till-I-Die/USUV71503000',
        'md5': '8b83cc492d72fc9cf74a02acee7dc1b0',
        'info_dict': {
            'id': 'USUV71503000',
            'ext': 'mp4',
            'title': 'K Camp ft. T.I. - Till I Die',
            'age_limit': 18,
            'timestamp': 1449468000,
            'upload_date': '20151207',
            'uploader': 'K Camp',
            'track': 'Till I Die',
            'artist': 'K Camp',
            'genre': 'Hip-Hop',
        },
        'expected_warnings': ['Unable to download SMIL file', 'Unable to download info'],
    }, {
        'note': 'Featured test',
        'url': 'https://www.vevo.com/watch/lemaitre/Wait/USUV71402190',
        'md5': 'd28675e5e8805035d949dc5cf161071d',
        'info_dict': {
            'id': 'USUV71402190',
            'ext': 'mp4',
            'title': 'Lemaitre ft. LoLo - Wait',
            'age_limit': 0,
            'timestamp': 1413432000,
            'upload_date': '20141016',
            'uploader': 'Lemaitre',
            'track': 'Wait',
            'artist': 'Lemaitre',
            'genre': 'Electronic',
        },
        'expected_warnings': ['Unable to download SMIL file', 'Unable to download info'],
    }, {
        'note': 'Only available via webpage',
        'url': 'http://www.vevo.com/watch/GBUV71600656',
        'md5': '67e79210613865b66a47c33baa5e37fe',
        'info_dict': {
            'id': 'GBUV71600656',
            'ext': 'mp4',
            'title': 'ABC - Viva Love',
            'age_limit': 0,
            'timestamp': 1461830400,
            'upload_date': '20160428',
            'uploader': 'ABC',
            'track': 'Viva Love',
            'artist': 'ABC',
            'genre': 'Pop',
        },
        'expected_warnings': ['Failed to download video versions info'],
    }, {
        # no genres available
        'url': 'http://www.vevo.com/watch/INS171400764',
        'only_matching': True,
    }, {
        # Another case available only via the webpage; using streams/streamsV3 formats
        # Geo-restricted to Netherlands/Germany
        'url': 'http://www.vevo.com/watch/boostee/pop-corn-clip-officiel/FR1A91600909',
        'only_matching': True,
    }, {
        'url': 'https://embed.vevo.com/?isrc=USH5V1923499&partnerId=4d61b777-8023-4191-9ede-497ed6c24647&partnerAdCode=',
        'only_matching': True,
    }]
    # Maps the numeric 'version' field of a stream entry to a source name;
    # unknown values fall back to 'generic' in _real_extract().
    _VERSIONS = {
        0: 'youtube',  # only in AuthenticateVideo videoVersions
        1: 'level3',
        2: 'akamai',
        3: 'level3',
        4: 'amazon',
    }

    def _initialize_api(self, video_id):
        """Fetch an anonymous OAuth token and build the v2 API URL template.

        Sets self._api_url_template with the legacy token appended; raises a
        geo-restriction error when the token endpoint says the page is
        unavailable in this region.
        """
        webpage = self._download_webpage(
            'https://accounts.vevo.com/token', None,
            note='Retrieving oauth token',
            errnote='Unable to retrieve oauth token',
            data=json.dumps({
                'client_id': 'SPupX1tvqFEopQ1YS6SS',
                'grant_type': 'urn:vevo:params:oauth:grant-type:anonymous',
            }).encode('utf-8'),
            headers={
                'Content-Type': 'application/json',
            })

        if re.search(r'(?i)THIS PAGE IS CURRENTLY UNAVAILABLE IN YOUR REGION', webpage):
            self.raise_geo_restricted(
                '%s said: This page is currently unavailable in your region' % self.IE_NAME)

        auth_info = self._parse_json(webpage, video_id)
        self._api_url_template = self.http_scheme() + '//apiv2.vevo.com/%s?token=' + auth_info['legacy_token']

    def _call_api(self, path, *args, **kwargs):
        """Call an apiv2 endpoint; re-raise API error messages as ExtractorError.

        *args/**kwargs are passed through to _download_json (video id, notes,
        fatal flag, ...).
        """
        try:
            data = self._download_json(self._api_url_template % path, *args, **kwargs)
        except ExtractorError as e:
            # HTTP errors carry a JSON body with an 'errors' list; surface it
            if isinstance(e.cause, compat_HTTPError):
                errors = self._parse_json(e.cause.read().decode(), None)['errors']
                error_message = ', '.join([error['message'] for error in errors])
                raise ExtractorError('%s said: %s' % (self.IE_NAME, error_message), expected=True)
            raise
        return data

    def _real_extract(self, url):
        video_id = self._match_id(url)

        self._initialize_api(video_id)

        video_info = self._call_api(
            'video/%s' % video_id, video_id, 'Downloading api video info',
            'Failed to download video info')

        # non-fatal: some videos have no API stream info and fall back to the
        # watch page below
        video_versions = self._call_api(
            'video/%s/streams' % video_id, video_id,
            'Downloading video versions info',
            'Failed to download video versions info',
            fatal=False)

        # Some videos are only available via webpage (e.g.
        # https://github.com/ytdl-org/youtube-dl/issues/9366)
        if not video_versions:
            webpage = self._download_webpage(url, video_id)
            json_data = self._extract_json(webpage, video_id)
            if 'streams' in json_data.get('default', {}):
                video_versions = json_data['default']['streams'][video_id][0]
            else:
                video_versions = [
                    value
                    for key, value in json_data['apollo']['data'].items()
                    if key.startswith('%s.streams' % video_id)]

        uploader = None
        artist = None
        featured_artist = None
        # NOTE(review): video_info.get('artists') may be None if the API omits
        # the field; the loop below would then raise TypeError — confirm the
        # API always returns a list here.
        artists = video_info.get('artists')
        for curr_artist in artists:
            if curr_artist.get('role') == 'Featured':
                featured_artist = curr_artist['name']
            else:
                artist = uploader = curr_artist['name']

        formats = []
        for video_version in video_versions:
            version = self._VERSIONS.get(video_version.get('version'), 'generic')
            version_url = video_version.get('url')
            if not version_url:
                continue

            if '.ism' in version_url:
                # Smooth Streaming manifests are skipped entirely
                continue
            elif '.mpd' in version_url:
                formats.extend(self._extract_mpd_formats(
                    version_url, video_id, mpd_id='dash-%s' % version,
                    note='Downloading %s MPD information' % version,
                    errnote='Failed to download %s MPD information' % version,
                    fatal=False))
            elif '.m3u8' in version_url:
                formats.extend(self._extract_m3u8_formats(
                    version_url, video_id, 'mp4', 'm3u8_native',
                    m3u8_id='hls-%s' % version,
                    note='Downloading %s m3u8 information' % version,
                    errnote='Failed to download %s m3u8 information' % version,
                    fatal=False))
            else:
                # progressive HTTP URLs encode their properties in the
                # filename: _WIDTHxHEIGHT_VCODEC_VBR_ACODEC_ABR.EXT
                m = re.search(r'''(?xi)
                    _(?P<width>[0-9]+)x(?P<height>[0-9]+)
                    _(?P<vcodec>[a-z0-9]+)
                    _(?P<vbr>[0-9]+)
                    _(?P<acodec>[a-z0-9]+)
                    _(?P<abr>[0-9]+)
                    \.(?P<ext>[a-z0-9]+)''', version_url)
                if not m:
                    continue

                formats.append({
                    'url': version_url,
                    'format_id': 'http-%s-%s' % (version, video_version['quality']),
                    'vcodec': m.group('vcodec'),
                    'acodec': m.group('acodec'),
                    'vbr': int(m.group('vbr')),
                    'abr': int(m.group('abr')),
                    'ext': m.group('ext'),
                    'width': int(m.group('width')),
                    'height': int(m.group('height')),
                })
        self._sort_formats(formats)

        track = video_info['title']
        if featured_artist:
            artist = '%s ft. %s' % (artist, featured_artist)
        title = '%s - %s' % (artist, track) if artist else track

        genres = video_info.get('genres')
        genre = (
            genres[0] if genres and isinstance(genres, list)
            and isinstance(genres[0], compat_str) else None)

        is_explicit = video_info.get('isExplicit')
        if is_explicit is True:
            age_limit = 18
        elif is_explicit is False:
            age_limit = 0
        else:
            age_limit = None

        # NOTE(review): 'artist' below is the plain uploader, while the
        # featured-combined `artist` string is only used in the title —
        # confirm this asymmetry is intentional.
        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'thumbnail': video_info.get('imageUrl') or video_info.get('thumbnailUrl'),
            'timestamp': parse_iso8601(video_info.get('releaseDate')),
            'uploader': uploader,
            'duration': int_or_none(video_info.get('duration')),
            'view_count': int_or_none(video_info.get('views', {}).get('total')),
            'age_limit': age_limit,
            'track': track,
            'artist': uploader,
            'genre': genre,
        }
class VevoPlaylistIE(VevoBaseIE):
    """Extractor for vevo.com playlist and genre pages."""
    _VALID_URL = r'https?://(?:www\.)?vevo\.com/watch/(?P<kind>playlist|genre)/(?P<id>[^/?#&]+)'

    _TESTS = [{
        'url': 'http://www.vevo.com/watch/playlist/dadbf4e7-b99f-4184-9670-6f0e547b6a29',
        'info_dict': {
            'id': 'dadbf4e7-b99f-4184-9670-6f0e547b6a29',
            'title': 'Best-Of: Birdman',
        },
        'playlist_count': 10,
    }, {
        'url': 'http://www.vevo.com/watch/genre/rock',
        'info_dict': {
            'id': 'rock',
            'title': 'Rock',
        },
        'playlist_count': 20,
    }, {
        'url': 'http://www.vevo.com/watch/playlist/dadbf4e7-b99f-4184-9670-6f0e547b6a29?index=0',
        'md5': '32dcdfddddf9ec6917fc88ca26d36282',
        'info_dict': {
            'id': 'USCMV1100073',
            'ext': 'mp4',
            'title': 'Birdman - Y.U. MAD',
            'timestamp': 1323417600,
            'upload_date': '20111209',
            'uploader': 'Birdman',
            'track': 'Y.U. MAD',
            'artist': 'Birdman',
            'genre': 'Rap/Hip-Hop',
        },
        'expected_warnings': ['Unable to download SMIL file'],
    }, {
        'url': 'http://www.vevo.com/watch/genre/rock?index=0',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        playlist_id = mobj.group('id')
        playlist_kind = mobj.group('kind')

        webpage = self._download_webpage(url, playlist_id)

        # an ?index=N query selects a single video from the playlist page
        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
        index = qs.get('index', [None])[0]

        if index:
            video_id = self._search_regex(
                r'<meta[^>]+content=(["\'])vevo://video/(?P<id>.+?)\1[^>]*>',
                webpage, 'video id', default=None, group='id')
            if video_id:
                # delegate the single video to VevoIE via a vevo: pseudo-URL
                return self.url_result('vevo:%s' % video_id, VevoIE.ie_key())

        # 'playlists' entries are keyed by internal id (take the first one);
        # 'genres' entries are keyed by the genre slug itself
        playlists = self._extract_json(webpage, playlist_id)['default']['%ss' % playlist_kind]

        playlist = (list(playlists.values())[0]
                    if playlist_kind == 'playlist' else playlists[playlist_id])

        entries = [
            self.url_result('vevo:%s' % src, VevoIE.ie_key())
            for src in playlist['isrcs']]

        return self.playlist_result(
            entries, playlist.get('playlistId') or playlist_id,
            playlist.get('name'), playlist.get('description'))
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training script for RetinaNet.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
import numpy as np
import tensorflow as tf
import dataloader
import retinanet_model
# Cloud TPU Cluster Resolvers
flags.DEFINE_string(
    'tpu',
    default=None,
    help='The Cloud TPU to use for training. This should be either the name '
    'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 '
    'url.')
flags.DEFINE_string(
    'gcp_project',
    default=None,
    help='Project name for the Cloud TPU-enabled project. If not specified, we '
    'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
    'tpu_zone',
    default=None,
    help='GCE zone where the Cloud TPU is located in. If not specified, we '
    'will attempt to automatically detect the GCE project from metadata.')

# Model specific parameters
flags.DEFINE_string(
    'eval_master',
    default='',
    help='GRPC URL of the eval master. Set to an appropiate value when running '
    'on CPU/GPU')
flags.DEFINE_bool('use_tpu', True, 'Use TPUs rather than CPUs')
flags.DEFINE_bool(
    'use_xla', False,
    'Use XLA even if use_tpu is false. If use_tpu is true, we always use XLA, '
    'and this flag has no effect.')
flags.DEFINE_string('model_dir', None, 'Location of model_dir')
flags.DEFINE_string(
    'resnet_checkpoint', '',
    'Location of the ResNet50 checkpoint to use for model '
    'initialization.')
flags.DEFINE_string('hparams', '',
                    'Comma separated k=v pairs of hyperparameters.')
flags.DEFINE_integer(
    'num_cores', default=8, help='Number of TPU cores for training')

# Spatial partitioning: splits each input tensor across several cores
# (see the partition logic in main()).
flags.DEFINE_bool('use_spatial_partition', False, 'Use spatial partition.')
flags.DEFINE_integer(
    'num_cores_per_replica',
    default=8,
    help='Number of TPU cores per'
    'replica when using spatial partition.')
flags.DEFINE_multi_integer(
    'input_partition_dims', [1, 4, 2, 1],
    'A list that describes the partition dims for all the tensors.')

# Input pipeline / training-loop sizes.
flags.DEFINE_integer('train_batch_size', 64, 'training batch size')
flags.DEFINE_integer('eval_batch_size', 1, 'evaluation batch size')
flags.DEFINE_integer('eval_samples', 5000, 'The number of samples for '
                     'evaluation.')
flags.DEFINE_integer('iterations_per_loop', 100,
                     'Number of iterations per TPU training loop')
flags.DEFINE_string(
    'training_file_pattern', None,
    'Glob for training data files (e.g., COCO train - minival set)')
flags.DEFINE_string('validation_file_pattern', None,
                    'Glob for evaluation tfrecords (e.g., COCO val2017 set)')
flags.DEFINE_string('val_json_file', None,
                    'COCO validation JSON containing golden bounding boxes.')
flags.DEFINE_integer('num_examples_per_epoch', 120000,
                     'Number of examples in one epoch')
flags.DEFINE_integer('num_epochs', 15, 'Number of epochs for training')
flags.DEFINE_string('mode', 'train',
                    'Mode to run: train or eval (default: train)')
flags.DEFINE_bool('eval_after_training', False, 'Run one eval after the '
                  'training finishes.')

# For Eval mode
flags.DEFINE_integer('min_eval_interval', 180,
                     'Minimum seconds between evaluations.')
flags.DEFINE_integer(
    'eval_timeout', None,
    'Maximum seconds between checkpoints before evaluation terminates.')

FLAGS = flags.FLAGS
def serving_input_fn(image_size):
  """Input function for SavedModels and TF serving."""

  def _preprocess(raw_bytes):
    # Decode JPEG bytes, center-crop/pad to a square, and scale to [0, 1].
    decoded = tf.image.decode_jpeg(raw_bytes)
    decoded = tf.image.resize_image_with_crop_or_pad(
        decoded, image_size, image_size)
    return tf.image.convert_image_dtype(decoded, tf.float32)

  # Batched serialized JPEG images arrive as a 1-D string tensor.
  image_bytes_list = tf.placeholder(shape=[None], dtype=tf.string)
  images = tf.map_fn(
      _preprocess, image_bytes_list, back_prop=False, dtype=tf.float32)
  images = tf.reshape(images, [-1, image_size, image_size, 3])
  return tf.estimator.export.TensorServingInputReceiver(
      images, {'image_bytes': image_bytes_list})
def main(argv):
del argv # Unused.
if FLAGS.use_tpu:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
tpu_grpc_url = tpu_cluster_resolver.get_master()
tf.Session.reset(tpu_grpc_url)
else:
tpu_cluster_resolver = None
# Check data path
if FLAGS.mode in ('train',
'train_and_eval') and FLAGS.training_file_pattern is None:
raise RuntimeError('You must specify --training_file_pattern for training.')
if FLAGS.mode in ('eval', 'train_and_eval'):
if FLAGS.validation_file_pattern is None:
raise RuntimeError('You must specify --validation_file_pattern '
'for evaluation.')
if FLAGS.val_json_file is None:
raise RuntimeError('You must specify --val_json_file for evaluation.')
# Parse hparams
hparams = retinanet_model.default_hparams()
hparams.parse(FLAGS.hparams)
# The following is for spatial partitioning. `features` has one tensor while
# `labels` had 4 + (`max_level` - `min_level` + 1) * 2 tensors. The input
# partition is performed on `features` and all partitionable tensors of
# `labels`, see the partition logic below.
# In the TPUEstimator context, the meaning of `shard` and `replica` is the
# same; follwing the API, here has mixed use of both.
if FLAGS.use_spatial_partition:
# Checks input_partition_dims agrees with num_cores_per_replica.
if FLAGS.num_cores_per_replica != np.prod(FLAGS.input_partition_dims):
raise RuntimeError('--num_cores_per_replica must be a product of array'
'elements in --input_partition_dims.')
labels_partition_dims = {
'mean_num_positives': None,
'source_ids': None,
'groundtruth_data': None,
'image_scales': None,
}
# The Input Partition Logic: We partition only the partition-able tensors.
# Spatial partition requires that the to-be-partitioned tensors must have a
# dimension that is a multiple of `partition_dims`. Depending on the
# `partition_dims` and the `image_size` and the `max_level` in hparams, some
# high-level anchor labels (i.e., `cls_targets` and `box_targets`) cannot
# be partitioned. For example, when `partition_dims` is [1, 4, 2, 1], image
# size is 1536, `max_level` is 9, `cls_targets_8` has a shape of
# [batch_size, 6, 6, 9], which cannot be partitioned (6 % 4 != 0). In this
# case, the level-8 and level-9 target tensors are not partition-able, and
# the highest partition-able level is 7.
image_size = hparams.get('image_size')
for level in range(hparams.get('min_level'), hparams.get('max_level') + 1):
def _can_partition(spatial_dim):
partitionable_index = np.where(
spatial_dim % np.array(FLAGS.input_partition_dims) == 0)
return len(partitionable_index[0]) == len(FLAGS.input_partition_dims)
spatial_dim = image_size // (2**level)
if _can_partition(spatial_dim):
labels_partition_dims['box_targets_%d' %
level] = FLAGS.input_partition_dims
labels_partition_dims['cls_targets_%d' %
level] = FLAGS.input_partition_dims
else:
labels_partition_dims['box_targets_%d' % level] = None
labels_partition_dims['cls_targets_%d' % level] = None
num_cores_per_replica = FLAGS.num_cores_per_replica
input_partition_dims = [FLAGS.input_partition_dims, labels_partition_dims]
num_shards = FLAGS.num_cores // num_cores_per_replica
else:
num_cores_per_replica = None
input_partition_dims = None
num_shards = FLAGS.num_cores
params = dict(
hparams.values(),
num_shards=num_shards,
num_examples_per_epoch=FLAGS.num_examples_per_epoch,
use_tpu=FLAGS.use_tpu,
resnet_checkpoint=FLAGS.resnet_checkpoint,
val_json_file=FLAGS.val_json_file,
mode=FLAGS.mode,
)
config_proto = tf.ConfigProto(
allow_soft_placement=True, log_device_placement=False)
if FLAGS.use_xla and not FLAGS.use_tpu:
config_proto.graph_options.optimizer_options.global_jit_level = (
tf.OptimizerOptions.ON_1)
tpu_config = tf.contrib.tpu.TPUConfig(
FLAGS.iterations_per_loop,
num_shards=num_shards,
num_cores_per_replica=num_cores_per_replica,
input_partition_dims=input_partition_dims,
per_host_input_for_training=tf.contrib.tpu.InputPipelineConfig
.PER_HOST_V2)
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
evaluation_master=FLAGS.eval_master,
model_dir=FLAGS.model_dir,
log_step_count_steps=FLAGS.iterations_per_loop,
session_config=config_proto,
tpu_config=tpu_config,
)
# TPU Estimator
if FLAGS.mode == 'train':
tf.logging.info(params)
train_estimator = tf.contrib.tpu.TPUEstimator(
model_fn=retinanet_model.retinanet_model_fn,
use_tpu=FLAGS.use_tpu,
train_batch_size=FLAGS.train_batch_size,
config=run_config,
params=params)
train_estimator.train(
input_fn=dataloader.InputReader(
FLAGS.training_file_pattern, is_training=True),
max_steps=int((FLAGS.num_epochs * FLAGS.num_examples_per_epoch) /
FLAGS.train_batch_size))
# Run evaluation after training finishes.
eval_params = dict(
params,
use_tpu=False,
input_rand_hflip=False,
resnet_checkpoint=None,
is_training_bn=False,
use_bfloat16=False,
)
eval_estimator = tf.contrib.tpu.TPUEstimator(
model_fn=retinanet_model.retinanet_model_fn,
use_tpu=False,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
config=run_config,
params=eval_params)
if FLAGS.eval_after_training:
eval_results = eval_estimator.evaluate(
input_fn=dataloader.InputReader(
FLAGS.validation_file_pattern, is_training=False),
steps=FLAGS.eval_samples // FLAGS.eval_batch_size)
tf.logging.info('Eval results: %s' % eval_results)
if FLAGS.model_dir:
eval_estimator.export_saved_model(
export_dir_base=FLAGS.model_dir,
serving_input_receiver_fn=lambda: serving_input_fn(hparams.image_size)
)
elif FLAGS.mode == 'eval':
# Eval only runs on CPU or GPU host with batch_size = 1.
# Override the default options: disable randomization in the input pipeline
# and don't run on the TPU.
# Also, disable use_bfloat16 for eval on CPU/GPU.
eval_params = dict(
params,
use_tpu=False,
input_rand_hflip=False,
resnet_checkpoint=None,
is_training_bn=False,
use_bfloat16=False,
)
eval_estimator = tf.contrib.tpu.TPUEstimator(
model_fn=retinanet_model.retinanet_model_fn,
use_tpu=False,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
config=run_config,
params=eval_params)
def terminate_eval():
tf.logging.info('Terminating eval after %d seconds of no checkpoints' %
FLAGS.eval_timeout)
return True
# Run evaluation when there's a new checkpoint
for ckpt in tf.contrib.training.checkpoints_iterator(
FLAGS.model_dir,
min_interval_secs=FLAGS.min_eval_interval,
timeout=FLAGS.eval_timeout,
timeout_fn=terminate_eval):
tf.logging.info('Starting to evaluate.')
try:
eval_results = eval_estimator.evaluate(
input_fn=dataloader.InputReader(
FLAGS.validation_file_pattern, is_training=False),
steps=FLAGS.eval_samples // FLAGS.eval_batch_size)
tf.logging.info('Eval results: %s' % eval_results)
# Terminate eval job when final checkpoint is reached
current_step = int(os.path.basename(ckpt).split('-')[1])
total_step = int((FLAGS.num_epochs * FLAGS.num_examples_per_epoch) /
FLAGS.train_batch_size)
if current_step >= total_step:
tf.logging.info(
'Evaluation finished after training step %d' % current_step)
break
eval_estimator.export_saved_model(
export_dir_base=FLAGS.model_dir,
serving_input_receiver_fn=
lambda: serving_input_fn(hparams.image_size))
except tf.errors.NotFoundError:
# Since the coordinator is on a different job than the TPU worker,
# sometimes the TPU worker does not finish initializing until long after
# the CPU job tells it to start evaluating. In this case, the checkpoint
# file could have been deleted already.
tf.logging.info(
'Checkpoint %s no longer exists, skipping checkpoint' % ckpt)
elif FLAGS.mode == 'train_and_eval':
for cycle in range(FLAGS.num_epochs):
tf.logging.info('Starting training cycle, epoch: %d.' % cycle)
train_estimator = tf.contrib.tpu.TPUEstimator(
model_fn=retinanet_model.retinanet_model_fn,
use_tpu=FLAGS.use_tpu,
train_batch_size=FLAGS.train_batch_size,
config=run_config,
params=params)
train_estimator.train(
input_fn=dataloader.InputReader(
FLAGS.training_file_pattern, is_training=True),
steps=int(FLAGS.num_examples_per_epoch / FLAGS.train_batch_size))
tf.logging.info('Starting evaluation cycle, epoch: %d.' % cycle)
# Run evaluation after every epoch.
eval_params = dict(
params,
use_tpu=False,
input_rand_hflip=False,
resnet_checkpoint=None,
is_training_bn=False,
)
eval_estimator = tf.contrib.tpu.TPUEstimator(
model_fn=retinanet_model.retinanet_model_fn,
use_tpu=False,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
config=run_config,
params=eval_params)
eval_results = eval_estimator.evaluate(
input_fn=dataloader.InputReader(
FLAGS.validation_file_pattern, is_training=False),
steps=FLAGS.eval_samples // FLAGS.eval_batch_size)
tf.logging.info('Evaluation results: %s' % eval_results)
eval_estimator.export_saved_model(
export_dir_base=FLAGS.model_dir,
serving_input_receiver_fn=lambda: serving_input_fn(hparams.image_size))
else:
tf.logging.info('Mode not found.')
if __name__ == '__main__':
    # Show INFO-level TF logs, then hand control to the app runner, which
    # parses command-line flags and invokes main().
    tf.logging.set_verbosity(tf.logging.INFO)
    app.run(main)
| |
""" Parser for bibtex files """
DEBUG = False
from os.path import dirname, join as pjoin
import sys
from .ply import lex
from .ply import yacc
# We are doing our own error recovery, and so don't allow post error tokens
# without further p_error calls. This is a module global
yacc.error_count = 1
from .btlex import tokens, make_lexer, reset_lexer
class Macro(object):
    """Class to encapsulate undefined macro references.

    Two macros compare equal when their names are equal.  Comparisons
    against non-``Macro`` objects return ``NotImplemented`` so Python can
    fall back to the reflected operation (previously such comparisons
    raised ``AttributeError`` on ``other.name``).  A name-based
    ``__hash__`` keeps instances hashable and consistent with ``__eq__``.
    """

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return 'Macro("%s")' % self.name

    def __eq__(self, other):
        if not isinstance(other, Macro):
            return NotImplemented
        return self.name == other.name

    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __hash__(self):
        # Defining __eq__ without __hash__ would make instances unhashable
        # on Python 3; hash by name to agree with equality.
        return hash(self.name)
def make_error(t):
    """Build an ERRORMARKER token at the position of token `t`.

    The parser's error-recovery rules use this marker to signal a read
    error and resynchronize the grammar (see ``BibTeXParser.p_error``).
    """
    marker = lex.LexToken()
    marker.type = 'ERRORMARKER'
    marker.value = 'error marker'
    # Preserve the source location of the offending token so warnings
    # can report where the problem occurred.
    marker.lineno = t.lineno
    marker.lexpos = t.lexpos
    return marker
# Extra token type used only by our error recovery (see make_error).
tokens += ('ERRORMARKER',)

# Directory of this module; used to locate the pickled parser tables.
_MYPATH = dirname(__file__)
class BibTeXEntries(object):
    """Container for the results of parsing a BibTeX source.

    Attributes
    ----------
    entries : dict
        Maps citekey -> dict of fields (plus the 'entry type' key).
    preamble : list
        Accumulated @preamble expression chunks.
    defined_macros : dict
        Macro name -> expansion.
    undefined_macros : dict
        Macro name -> list of placeholder references awaiting definition.
    """

    def __init__(self, entries=None, preamble=None,
                 defined_macros=None, undefined_macros=None):
        # Fresh containers per instance; never share mutable defaults.
        self.entries = {} if entries is None else entries
        self.preamble = [] if preamble is None else preamble
        self.defined_macros = ({} if defined_macros is None
                               else defined_macros)
        self.undefined_macros = ({} if undefined_macros is None
                                 else undefined_macros)

    def __eq__(self, other):
        # Only entries and preamble participate in equality; the macro
        # bookkeeping dictionaries are not compared.
        return (self.entries == other.entries and
                self.preamble == other.preamble)
class BibTeXParser(object):
    """PLY yacc-based parser for BibTeX text.

    NOTE: the docstrings of the ``p_*`` methods below ARE the yacc grammar
    rules -- PLY reads them when building the parse tables -- so they must
    not be edited as ordinary documentation.
    """

    def __init__(self,
                 lexer=None,
                 tokens=tokens,
                 debug=DEBUG,
                 picklefile=None,
                 **kwargs):
        # lexer : lexer instance; a fresh one is built when None.
        # tokens : token names for yacc (module default + ERRORMARKER).
        # debug : build tables in debug mode; disables the pickle cache.
        # picklefile : cached parser tables; defaults to btparse.pkl next
        #     to this module when not debugging.
        # **kwargs : passed through to yacc.yacc().
        if lexer is None:
            lexer = make_lexer()
        if not debug and picklefile is None:
            # Cache generated parse tables alongside this module.
            picklefile = pjoin(_MYPATH, 'btparse.pkl')
        self.lexer = lexer
        # Pushback stack used by error recovery (see p_error / get_token).
        self._stack = []
        self.tokens = tokens
        self.debug = debug
        self.parser = yacc.yacc(
            debug=DEBUG,
            module=self,
            picklefile=picklefile,
            **kwargs)

    def parse(self, txt, debug=False):
        """Parse BibTeX text `txt`; return a BibTeXEntries result."""
        reset_lexer(self.lexer)
        # Macros and preamble accumulate on self._results as side effects
        # of the grammar actions; entries come back from yacc directly.
        self._results = BibTeXEntries()
        self._results.entries = self.parser.parse(txt,
                                                  lexer=self.lexer,
                                                  tokenfunc=self.get_token,
                                                  debug=debug)
        return self._results

    def warn(self, msg):
        """ Emit warning `msg` """
        sys.stderr.write(msg + '\n')

    def get_token(self):
        # Serve tokens pushed back by error recovery before reading new
        # ones from the lexer.
        stack = self._stack
        if len(stack):
            return stack.pop()
        return self.lexer.token()

    def p_start(self, p):
        """ definitions : entry
                        | throwout
                        | empty
        """
        # Start rule: begin the citekey -> fields dictionary.
        p[0] = {}
        if not p[1] is None:  # entry is tuple, others return None
            key, value = p[1]
            p[0][key] = value

    def p_empty(self, p):
        "empty :"
        pass

    def p_definitions_throwout(self, p):
        " definitions : definitions throwout "
        # Discarded items (macros, preambles, comments) leave the entry
        # dictionary unchanged.
        p[0] = p[1]

    def p_definitionss_entry(self, p):
        " definitions : definitions entry "
        p[0] = p[1]
        key, value = p[2]
        # Later entries with the same citekey replace earlier ones.
        if key in p[0]:
            self.warn('Duplicate key "%s"; replacing' % key)
        p[0][key] = value

    def p_throwouts(self, p):
        """ throwout : macro
                     | preamble
                     | IMPLICIT_COMMENT
                     | EXPLICIT_COMMENT
        """
        # Nothing to do; these productions only exist to consume input.

    def p_entry(self, p):
        """ entry : AT ENTRY LBRACKET CITEKEY COMMA fieldlist RBRACKET
                  | AT ENTRY LBRACKET CITEKEY COMMA fieldlist COMMA RBRACKET
                  | AT ENTRY LBRACKET CITEKEY COMMA fieldlisterr
        """
        # entries are (citekey, dict) tuples. Entry types are in ENTRY, and
        # are not case sensitive. They go in the fieldlist dictionary as key
        # 'entry type'. The space in the key makes it an illegal bibtex field
        # name, so it can't clash with bibtex fields. Citekeys are case
        # sensitive.
        p[6]['entry type'] = p[2].lower()
        p[0] = (p[4], p[6])

    def p_entry_error(self, p):
        " throwout : AT error ERRORMARKER "
        # Entry is unrecoverable
        self.warn("Syntax error in entry; discarding")

    def p_macro(self, p):
        "macro : AT MACRO LBRACKET NAME EQUALS expression RBRACKET"
        # Macro names are case insensitive; store lowercased.
        name = p[4].lower()
        self._results.defined_macros[name] = p[6]

    def p_macro_error(self, p):
        "macro : AT MACRO error ERRORMARKER"
        self.warn("Syntax error in macro; discarding")

    def p_preamble(self, p):
        "preamble : AT PREAMBLE LBRACKET expression RBRACKET"
        # Preamble chunks from multiple @preamble blocks are concatenated.
        self._results.preamble += p[4]

    def p_preamble_error(self, p):
        "preamble : AT PREAMBLE error ERRORMARKER"
        self.warn("Syntax error in preamble; discarding")

    def p_fieldlist_from_def(self, p):
        """ fieldlist : fielddef """
        # fielddef is a tuple; seed the field dictionary with it.
        p[0] = dict((p[1],))

    def p_fieldlisterr_from_def(self, p):
        """ fieldlisterr : error ERRORMARKER """
        self.warn("Error in field definition, discarding")
        # Return an empty field dictionary so the entry can still be built.
        p[0] = {}

    def p_fieldlist_from_list_def(self, p):
        """ fieldlist : fieldlist COMMA fielddef
        """
        # fielddef is a tuple, fieldlist is a dictionary
        key, value = p[3]
        p[0] = p[1]
        p[0][key] = value

    def p_fieldlisterr_from_list_error(self, p):
        """ fieldlisterr : fieldlist error ERRORMARKER
        """
        # Signals error to entry
        self.warn("Syntax error in field list; discarding remainder")
        # Try and keep fieldlist up til now
        p[0] = p[1]

    def p_fielddef(self, p):
        """ fielddef : NAME EQUALS expression"""
        # Field names are case insensitive; store lowercased.
        p[0] = (p[1].lower(), p[3])

    def p_expr_name(self, p):
        " expression : NAME "
        # reference to a macro
        name = p[1].lower()
        d_macros = self._results.defined_macros
        if name in d_macros:
            p[0] = d_macros[name]
            return
        # Placeholder and reference to undefined macros; each reference is
        # recorded so it could be resolved later if the macro is defined.
        p[0] = [Macro(name)]
        ud_macros = self._results.undefined_macros
        if name not in ud_macros:
            ud_macros[name] = [p[0]]
        else:
            ud_macros[name].append(p[0])

    def p_expr_number(self, p):
        """ expression : NUMBER """
        # Expressions are lists of chunks so HASH concatenation is cheap.
        p[0] = [p[1]]

    def p_expr_string(self, p):
        """ expression : quotedstring
                       | curlystring
        """
        p[0] = p[1]

    def p_string_quoted(self, p):
        " quotedstring : QUOTE stringcontents QUOTE "
        p[0] = p[2]

    def p_string_curlied(self, p):
        " curlystring : LCURLY stringcontents RCURLY "
        p[0] = p[2]

    def p_scont_basic(self, p):
        """ stringcontents : STRING
                           | curlystring
        """
        p[0] = [p[1]]

    def p_scont_empty(self, p):
        " stringcontents : empty "
        p[0] = []

    def p_scont_appending(self, p):
        """ stringcontents : stringcontents STRING
                           | stringcontents curlystring
        """
        p[0] = p[1] + [p[2]]

    def p_expr_hash(self, p):
        """ expression : expression HASH expression """
        # BibTeX '#' concatenation: join the chunk lists.
        p[0] = p[1] + p[3]

    def p_error(self, t):
        # Called by yacc on a syntax error. Skip ahead to the next '@'
        # (start of the next BibTeX item) and push it back, followed by an
        # ERRORMARKER so the error productions above can resynchronize.
        self.warn("Syntax error at token %s, value %s, line no %d"
                  % (t.type, t.value, t.lineno))
        # Read ahead to next AT, and put back on the stack
        while 1:
            tok = self.get_token()  # Get the next token
            if not tok or tok.type == 'AT':
                break
        if tok:
            self._stack.append(tok)
        # Put an error marker on the stack to resynchronize
        self._stack.append(make_error(t))
# Top level convenience object and function.
# NOTE: constructing the parser at import time triggers yacc table
# generation (or loading of the pickled tables).
parser = BibTeXParser()
parse = parser.parse
| |
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import audioop
from time import sleep, time as get_time
import collections
import datetime
import json
import os
import pyaudio
import requests
import speech_recognition
from hashlib import md5
from io import BytesIO, StringIO
from speech_recognition import (
Microphone,
AudioSource,
AudioData
)
from threading import Thread, Lock
from mycroft.api import DeviceApi
from mycroft.configuration import Configuration
from mycroft.session import SessionManager
from mycroft.util import (
check_for_signal,
get_ipc_directory,
resolve_resource_file,
play_wav
)
from mycroft.util.log import LOG
class MutableStream:
    """Wrapper around a pyaudio input stream that can be muted.

    While muted, ``read`` still drains the wrapped stream (keeping the
    device buffer from overflowing) but returns silence to the caller.
    """

    def __init__(self, wrapped_stream, format, muted=False):
        # wrapped_stream: an opened pyaudio input stream (must not be None).
        # format: pyaudio sample format, used to compute the sample width.
        assert wrapped_stream is not None
        self.wrapped_stream = wrapped_stream
        self.muted = muted

        self.SAMPLE_WIDTH = pyaudio.get_sample_size(format)
        # One sample's worth of zero bytes, returned while muted.
        self.muted_buffer = b''.join([b'\x00' * self.SAMPLE_WIDTH])

    def mute(self):
        self.muted = True

    def unmute(self):
        self.muted = False

    def read(self, size, of_exc=False):
        """
        Read data from stream.

        Arguments:
            size (int): Number of bytes to read
            of_exc (bool): flag determining if the audio producer thread
                           should throw IOError at overflows.

        Returns:
            Data read from device
        """
        frames = collections.deque()
        remaining = size
        while remaining > 0:
            # Only ask for as much as the device has ready, to avoid
            # blocking; if nothing is available, back off briefly.
            to_read = min(self.wrapped_stream.get_read_available(), remaining)
            if to_read == 0:
                sleep(.01)
                continue
            result = self.wrapped_stream.read(to_read,
                                              exception_on_overflow=of_exc)
            frames.append(result)
            remaining -= to_read

        if self.muted:
            # NOTE(review): returns only one sample of silence regardless
            # of the requested `size` -- confirm callers tolerate a short
            # read while muted.
            return self.muted_buffer

        input_latency = self.wrapped_stream.get_input_latency()
        if input_latency > 0.2:
            LOG.warning("High input latency: %f" % input_latency)
        audio = b"".join(list(frames))
        return audio

    def close(self):
        self.wrapped_stream.close()
        self.wrapped_stream = None

    def is_stopped(self):
        return self.wrapped_stream.is_stopped()

    def stop_stream(self):
        return self.wrapped_stream.stop_stream()
class MutableMicrophone(Microphone):
    """Microphone whose input stream can be muted/unmuted on demand.

    Extends speech_recognition.Microphone by wrapping the pyaudio stream
    in a MutableStream when the context manager is entered.
    """

    def __init__(self, device_index=None, sample_rate=16000, chunk_size=1024,
                 mute=False):
        Microphone.__init__(
            self, device_index=device_index, sample_rate=sample_rate,
            chunk_size=chunk_size)
        self.muted = False
        if mute:
            self.mute()

    def __enter__(self):
        # Open the pyaudio device and wrap it so mute state carries over.
        assert self.stream is None, \
            "This audio source is already inside a context manager"
        self.audio = pyaudio.PyAudio()
        self.stream = MutableStream(self.audio.open(
            input_device_index=self.device_index, channels=1,
            format=self.format, rate=self.SAMPLE_RATE,
            frames_per_buffer=self.CHUNK,
            input=True,  # stream is an input stream
        ), self.format, self.muted)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Stop before closing to release the device cleanly.
        if not self.stream.is_stopped():
            self.stream.stop_stream()
        self.stream.close()
        self.stream = None
        self.audio.terminate()

    def mute(self):
        # Remember the mute state so it survives stream re-creation.
        self.muted = True
        if self.stream:
            self.stream.mute()

    def unmute(self):
        self.muted = False
        if self.stream:
            self.stream.unmute()

    def is_muted(self):
        return self.muted
def get_silence(num_bytes):
    """Return `num_bytes` bytes of audio silence (all zero bytes)."""
    return num_bytes * b'\0'
class ResponsiveRecognizer(speech_recognition.Recognizer):
    """Recognizer that listens for a wake word, then records one phrase.

    The main entry point is ``listen``: it blocks until the wake word is
    detected (or a button/skip signal arrives), records audio until the
    speaker falls silent or a timeout is hit, and returns the phrase as
    AudioData. Energy thresholds adapt continuously to ambient noise.
    """

    # Padding of silence when feeding to pocketsphinx
    SILENCE_SEC = 0.01

    # The minimum seconds of noise before a
    # phrase can be considered complete
    MIN_LOUD_SEC_PER_PHRASE = 0.5

    # The minimum seconds of silence required at the end
    # before a phrase will be considered complete
    MIN_SILENCE_AT_END = 0.25

    # The maximum seconds a phrase can be recorded,
    # provided there is noise the entire time
    RECORDING_TIMEOUT = 10.0

    # The maximum time it will continue to record silence
    # when not enough noise has been detected
    RECORDING_TIMEOUT_WITH_SILENCE = 3.0

    # Time between pocketsphinx checks for the wake word
    SEC_BETWEEN_WW_CHECKS = 0.2

    def __init__(self, wake_word_recognizer):
        # wake_word_recognizer: hotword engine providing key_phrase,
        # num_phonemes, update() and found_wake_word().
        self.config = Configuration.get()
        listener_config = self.config.get('listener')
        self.upload_url = listener_config['wake_word_upload']['url']
        self.upload_disabled = listener_config['wake_word_upload']['disable']
        self.wake_word_name = wake_word_recognizer.key_phrase
        # Whether overflows in the audio producer raise IOError.
        self.overflow_exc = listener_config.get('overflow_exception', False)

        speech_recognition.Recognizer.__init__(self)
        self.wake_word_recognizer = wake_word_recognizer
        self.audio = pyaudio.PyAudio()
        self.multiplier = listener_config.get('multiplier')
        self.energy_ratio = listener_config.get('energy_ratio')
        # check the config for the flag to save wake words.
        self.save_utterances = listener_config.get('record_utterances', False)

        # NOTE(review): upload_lock / filenames_to_upload appear unused in
        # this class body -- possibly used by subclasses or callers.
        self.upload_lock = Lock()
        self.filenames_to_upload = []
        # IPC file other processes read to display the current mic level.
        self.mic_level_file = os.path.join(get_ipc_directory(), "mic_level")
        self._stop_signaled = False

        # The maximum audio in seconds to keep for transcribing a phrase
        # The wake word must fit in this time
        num_phonemes = wake_word_recognizer.num_phonemes
        len_phoneme = listener_config.get('phoneme_duration', 120) / 1000.0
        self.TEST_WW_SEC = num_phonemes * len_phoneme
        self.SAVED_WW_SEC = max(3, self.TEST_WW_SEC)

        try:
            self.account_id = DeviceApi().get()['user']['uuid']
        except (requests.RequestException, AttributeError):
            # Offline or unpaired device: fall back to an anonymous id.
            self.account_id = '0'

    def record_sound_chunk(self, source):
        """Read one CHUNK of raw audio bytes from the source stream."""
        return source.stream.read(source.CHUNK, self.overflow_exc)

    @staticmethod
    def calc_energy(sound_chunk, sample_width):
        """Return the RMS energy (loudness) of a raw audio chunk."""
        return audioop.rms(sound_chunk, sample_width)

    def _record_phrase(self, source, sec_per_buffer):
        """Record an entire spoken phrase.

        Essentially, this code waits for a period of silence and then
        returns the audio.  If silence isn't detected, it will terminate
        and return a buffer of RECORDING_TIMEOUT duration.

        Args:
            source (AudioSource):  Source producing the audio chunks
            sec_per_buffer (float):  Fractional number of seconds in each
                chunk

        Returns:
            bytearray: complete audio buffer recorded, including any
                       silence at the end of the user's utterance
        """
        num_loud_chunks = 0
        noise = 0

        # `noise` is a bounded accumulator: it rises while the input is
        # loud and decays while quiet; reaching min_noise means "quiet".
        max_noise = 25
        min_noise = 0

        silence_duration = 0

        def increase_noise(level):
            if level < max_noise:
                return level + 200 * sec_per_buffer
            return level

        def decrease_noise(level):
            if level > min_noise:
                return level - 100 * sec_per_buffer
            return level

        # Smallest number of loud chunks required to return
        min_loud_chunks = int(self.MIN_LOUD_SEC_PER_PHRASE / sec_per_buffer)

        # Maximum number of chunks to record before timing out
        max_chunks = int(self.RECORDING_TIMEOUT / sec_per_buffer)
        num_chunks = 0

        # Will return if exceeded this even if there's not enough loud chunks
        max_chunks_of_silence = int(self.RECORDING_TIMEOUT_WITH_SILENCE /
                                    sec_per_buffer)

        # bytearray to store audio in
        byte_data = get_silence(source.SAMPLE_WIDTH)

        phrase_complete = False
        while num_chunks < max_chunks and not phrase_complete:
            chunk = self.record_sound_chunk(source)
            byte_data += chunk
            num_chunks += 1

            energy = self.calc_energy(chunk, source.SAMPLE_WIDTH)
            test_threshold = self.energy_threshold * self.multiplier
            is_loud = energy > test_threshold
            if is_loud:
                noise = increase_noise(noise)
                num_loud_chunks += 1
            else:
                noise = decrease_noise(noise)
                # Periodically adapt the threshold during quiet stretches.
                self._adjust_threshold(energy, sec_per_buffer)

            # Write mic level to the IPC file every 10 chunks.
            if num_chunks % 10 == 0:
                with open(self.mic_level_file, 'w') as f:
                    f.write("Energy:  cur=" + str(energy) + " thresh=" +
                            str(self.energy_threshold))
                    # NOTE(review): redundant -- the `with` block already
                    # closes the file.
                    f.close()

            was_loud_enough = num_loud_chunks > min_loud_chunks

            quiet_enough = noise <= min_noise
            if quiet_enough:
                silence_duration += sec_per_buffer
                if silence_duration < self.MIN_SILENCE_AT_END:
                    quiet_enough = False  # gotta be silent for min of 1/4 sec
            else:
                silence_duration = 0
            recorded_too_much_silence = num_chunks > max_chunks_of_silence
            if quiet_enough and (was_loud_enough or recorded_too_much_silence):
                phrase_complete = True

            # Pressing top-button will end recording immediately
            if check_for_signal('buttonPress'):
                phrase_complete = True

        return byte_data

    @staticmethod
    def sec_to_bytes(sec, source):
        """Convert a duration in seconds to a byte count for `source`."""
        return int(sec * source.SAMPLE_RATE) * source.SAMPLE_WIDTH

    def _skip_wake_word(self):
        """Return True when recording should begin without a wake word."""
        # Check if told programatically to skip the wake word, like
        # when we are in a dialog with the user.
        if check_for_signal('startListening'):
            return True

        # Pressing the Mark 1 button can start recording (unless
        # it is being used to mean 'stop' instead)
        if check_for_signal('buttonPress', 1):
            # give other processes time to consume this signal if
            # it was meant to be a 'stop'
            sleep(0.25)
            if check_for_signal('buttonPress'):
                # Signal is still here, assume it was intended to
                # begin recording
                LOG.debug("Button Pressed, wakeword not needed")
                return True

        return False

    def stop(self):
        """
        Signal stop and exit waiting state.
        """
        self._stop_signaled = True

    def _upload_wake_word(self, audio):
        """POST a captured wake-word sample plus metadata to upload_url."""
        ww_module = self.wake_word_recognizer.__class__.__name__
        if ww_module == 'PreciseHotword':
            # Identify which Precise model heard this sample.
            model_path = self.wake_word_recognizer.precise_model
            with open(model_path, 'rb') as f:
                model_hash = md5(f.read()).hexdigest()
        else:
            model_hash = '0'

        metadata = {
            'name': self.wake_word_name.replace(' ', '-'),
            'engine': md5(ww_module.encode('utf-8')).hexdigest(),
            'time': str(int(1000 * get_time())),
            'sessionId': SessionManager.get().session_id,
            'accountId': self.account_id,
            'model': str(model_hash)
        }
        requests.post(
            self.upload_url, files={
                'audio': BytesIO(audio.get_wav_data()),
                'metadata': StringIO(json.dumps(metadata))
            }
        )

    def _wait_until_wake_word(self, source, sec_per_buffer):
        """Listen continuously on source until a wake word is spoken

        Args:
            source (AudioSource):  Source producing the audio chunks
            sec_per_buffer (float):  Fractional number of seconds in each
                chunk
        """
        num_silent_bytes = int(self.SILENCE_SEC * source.SAMPLE_RATE *
                               source.SAMPLE_WIDTH)

        silence = get_silence(num_silent_bytes)

        # bytearray to store audio in
        byte_data = silence

        buffers_per_check = self.SEC_BETWEEN_WW_CHECKS / sec_per_buffer
        buffers_since_check = 0.0

        # Max bytes for byte_data before audio is removed from the front
        max_size = self.sec_to_bytes(self.SAVED_WW_SEC, source)
        test_size = self.sec_to_bytes(self.TEST_WW_SEC, source)

        said_wake_word = False

        # Rolling buffer to track the audio energy (loudness) heard on
        # the source recently.  An average audio energy is maintained
        # based on these levels.
        energies = []
        idx_energy = 0
        avg_energy = 0.0
        energy_avg_samples = int(5 / sec_per_buffer)  # avg over last 5 secs
        counter = 0

        while not said_wake_word and not self._stop_signaled:
            if self._skip_wake_word():
                break
            chunk = self.record_sound_chunk(source)

            energy = self.calc_energy(chunk, source.SAMPLE_WIDTH)
            if energy < self.energy_threshold * self.multiplier:
                self._adjust_threshold(energy, sec_per_buffer)

            if len(energies) < energy_avg_samples:
                # build the average
                energies.append(energy)
                avg_energy += float(energy) / energy_avg_samples
            else:
                # maintain the running average and rolling buffer
                avg_energy -= float(energies[idx_energy]) / energy_avg_samples
                avg_energy += float(energy) / energy_avg_samples
                energies[idx_energy] = energy
                idx_energy = (idx_energy + 1) % energy_avg_samples

                # maintain the threshold using average
                if energy < avg_energy * 1.5:
                    if energy > self.energy_threshold:
                        # bump the threshold to just above this value
                        self.energy_threshold = energy * 1.2

            # Periodically output energy level stats.  This can be used to
            # visualize the microphone input, e.g. a needle on a meter.
            # NOTE(review): `counter % 3` is truthy on 2 of every 3 loops,
            # so this writes more often than "every third" -- confirm
            # intent.
            if counter % 3:
                with open(self.mic_level_file, 'w') as f:
                    f.write("Energy:  cur=" + str(energy) + " thresh=" +
                            str(self.energy_threshold))
                    # NOTE(review): redundant close inside `with`.
                    f.close()
            counter += 1

            # At first, the buffer is empty and must fill up.  After that
            # just drop the first chunk bytes to keep it the same size.
            needs_to_grow = len(byte_data) < max_size
            if needs_to_grow:
                byte_data += chunk
            else:  # Remove beginning of audio and add new chunk to end
                byte_data = byte_data[len(chunk):] + chunk

            buffers_since_check += 1.0
            # Feed the hotword engine continuously; only poll its verdict
            # every SEC_BETWEEN_WW_CHECKS seconds.
            self.wake_word_recognizer.update(chunk)
            if buffers_since_check > buffers_per_check:
                buffers_since_check -= buffers_per_check
                chopped = byte_data[-test_size:] \
                    if test_size < len(byte_data) else byte_data
                audio_data = chopped + silence
                said_wake_word = \
                    self.wake_word_recognizer.found_wake_word(audio_data)
                # If a wake word detection succeeded then upload the wake
                # word sample (only with user opt-in and uploads enabled).
                if said_wake_word and self.config['opt_in'] and not \
                        self.upload_disabled:
                    Thread(
                        target=self._upload_wake_word, daemon=True,
                        args=[self._create_audio_data(byte_data, source)]
                    ).start()

    @staticmethod
    def _create_audio_data(raw_data, source):
        """
        Constructs an AudioData instance with the same parameters
        as the source and the specified frame_data
        """
        return AudioData(raw_data, source.SAMPLE_RATE, source.SAMPLE_WIDTH)

    def listen(self, source, emitter):
        """Listens for chunks of audio that Mycroft should perform STT on.

        This will listen continuously for a wake-up-word, then return the
        audio chunk containing the spoken phrase that comes immediately
        afterwards.

        Args:
            source (AudioSource):  Source producing the audio chunks
            emitter (EventEmitter): Emitter for notifications of when
                recording begins and ends.

        Returns:
            AudioData: audio with the user's utterance, minus the
                wake-up-word
        """
        assert isinstance(source, AudioSource), "Source must be an AudioSource"

        #        bytes_per_sec = source.SAMPLE_RATE * source.SAMPLE_WIDTH
        sec_per_buffer = float(source.CHUNK) / source.SAMPLE_RATE

        # Every time a new 'listen()' request begins, reset the threshold
        # used for silence detection.  This is as good of a reset point as
        # any, as we expect the user and Mycroft to not be talking.
        # NOTE: adjust_for_ambient_noise() doc claims it will stop early if
        #       speech is detected, but there is no code to actually do that.
        self.adjust_for_ambient_noise(source, 1.0)

        LOG.debug("Waiting for wake word...")
        self._wait_until_wake_word(source, sec_per_buffer)
        if self._stop_signaled:
            # Stopped while waiting; return None implicitly.
            return

        LOG.debug("Recording...")
        emitter.emit("recognizer_loop:record_begin")

        # If enabled, play a wave file with a short sound to audibly
        # indicate recording has begun.
        if self.config.get('confirm_listening'):
            file = resolve_resource_file(
                self.config.get('sounds').get('start_listening'))
            if file:
                play_wav(file)

        frame_data = self._record_phrase(source, sec_per_buffer)
        audio_data = self._create_audio_data(frame_data, source)
        emitter.emit("recognizer_loop:record_end")
        if self.save_utterances:
            LOG.info("Recording utterance")
            stamp = str(datetime.datetime.now())
            filename = "/tmp/mycroft_utterance%s.wav" % stamp
            with open(filename, 'wb') as filea:
                filea.write(audio_data.get_wav_data())
            LOG.debug("Thinking...")

        return audio_data

    def _adjust_threshold(self, energy, seconds_per_buffer):
        """Drift energy_threshold toward energy * energy_ratio."""
        if self.dynamic_energy_threshold and energy > 0:
            # account for different chunk sizes and rates; damping is an
            # exponential decay factor scaled to the buffer duration.
            damping = (
                self.dynamic_energy_adjustment_damping ** seconds_per_buffer)
            target_energy = energy * self.energy_ratio
            self.energy_threshold = (
                self.energy_threshold * damping +
                target_energy * (1 - damping))
| |
from __future__ import absolute_import, division, print_function
from . import ops
from .groupby import DataArrayGroupBy, DatasetGroupBy
from .pycompat import OrderedDict, dask_array_type
RESAMPLE_DIM = '__resample_dim__'
class Resample(object):
    """An object that extends the `GroupBy` object with additional logic
    for handling specialized re-sampling operations.

    You should create a `Resample` object by using the `DataArray.resample`
    or `Dataset.resample` methods. The dimension along which re-sampling is
    performed is recorded on the instance by the subclass constructors.

    See Also
    --------
    DataArray.resample
    Dataset.resample
    """

    def _upsample(self, method, *args, **kwargs):
        """Dispatch function to call appropriate up-sampling methods on
        data.

        This method should not be called directly; instead, use one of the
        wrapper functions supplied by `Resample`.

        Parameters
        ----------
        method : str {'asfreq', 'pad', 'ffill', 'backfill', 'bfill',
            'nearest', 'interpolate'}
            Method to use for up-sampling

        Raises
        ------
        ValueError
            If `method` is not one of the supported names.

        See Also
        --------
        Resample.asfreq
        Resample.pad
        Resample.backfill
        Resample.interpolate
        """
        upsampled_index = self._full_index

        # Drop non-dimension coordinates along the resampled dimension;
        # they cannot be carried over to the up-sampled index.
        for k, v in self._obj.coords.items():
            if k == self._dim:
                continue
            if self._dim in v.dims:
                self._obj = self._obj.drop(k)

        if method == 'asfreq':
            # NOTE(review): relies on each up-sampling group holding at
            # most one value, so mean() acts as a plain re-index with NaN
            # fill -- confirm against the grouping set up by the caller.
            return self.mean(self._dim)

        elif method in ['pad', 'ffill', 'backfill', 'bfill', 'nearest']:
            # Copy before mutating so the caller's kwargs are untouched.
            kwargs = kwargs.copy()
            kwargs.update(**{self._dim: upsampled_index})
            return self._obj.reindex(method=method, *args, **kwargs)

        elif method == 'interpolate':
            return self._interpolate(*args, **kwargs)

        else:
            # BUG FIX: the two literals previously concatenated without a
            # space, yielding '... one of"asfreq", ...'.
            raise ValueError('Specified method was "{}" but must be one of '
                             '"asfreq", "ffill", "bfill", or "interpolate"'
                             .format(method))

    def asfreq(self):
        """Return values of original object at the new up-sampling frequency;
        essentially a re-index with new times set to NaN.
        """
        return self._upsample('asfreq')

    def pad(self):
        """Forward fill new values at up-sampled frequency.
        """
        return self._upsample('pad')
    # `ffill` is an alias for `pad` (pandas-compatible name).
    ffill = pad

    def backfill(self):
        """Backward fill new values at up-sampled frequency.
        """
        return self._upsample('backfill')
    # `bfill` is an alias for `backfill` (pandas-compatible name).
    bfill = backfill

    def nearest(self):
        """Take new values from nearest original coordinate to up-sampled
        frequency coordinates.
        """
        return self._upsample('nearest')

    def interpolate(self, kind='linear'):
        """Interpolate up-sampled data using the original data
        as knots.

        Parameters
        ----------
        kind : str {'linear', 'nearest', 'zero', 'slinear',
            'quadratic', 'cubic'}
            Interpolation scheme to use

        See Also
        --------
        scipy.interpolate.interp1d
        """
        return self._interpolate(kind=kind)

    def _interpolate(self, kind='linear'):
        # Implemented by the DataArray/Dataset-specific subclasses.
        raise NotImplementedError
class DataArrayResample(DataArrayGroupBy, Resample):
"""DataArrayGroupBy object specialized to time resampling operations over a
specified dimension
"""
    def __init__(self, *args, **kwargs):
        """Create a resampling GroupBy over a DataArray.

        Keyword-only extras (popped before delegating to the GroupBy
        constructor):
            dim: the actual dimension being resampled.
            resample_dim: the proxy dimension used internally during the
                group/apply step; it is renamed back to `dim` afterwards,
                so the two names must differ.
        """
        self._dim = kwargs.pop('dim', None)
        self._resample_dim = kwargs.pop('resample_dim', None)

        if self._dim == self._resample_dim:
            raise ValueError("Proxy resampling dimension ('{}') "
                             "cannot have the same name as actual dimension "
                             "('{}')! ".format(self._resample_dim, self._dim))
        super(DataArrayResample, self).__init__(*args, **kwargs)
    def apply(self, func, shortcut=False, **kwargs):
        """Apply a function over each array in the group and concatenate them
        together into a new array.

        `func` is called like `func(ar, *args, **kwargs)` for each array `ar`
        in this group.

        Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how
        to stack together the array. The rule is:

        1. If the dimension along which the group coordinate is defined is
           still in the first grouped array after applying `func`, then stack
           over this dimension.
        2. Otherwise, stack over the new dimension given by name of this
           grouping (the argument to the `groupby` function).

        Parameters
        ----------
        func : function
            Callable to apply to each array.
        shortcut : bool, optional
            Whether or not to shortcut evaluation under the assumptions that:
            (1) The action of `func` does not depend on any of the array
            metadata (attributes or coordinates) but only on the data and
            dimensions.
            (2) The action of `func` creates arrays with homogeneous metadata,
            that is, with the same dimensions and attributes.
            If these conditions are satisfied `shortcut` provides significant
            speedup. This should be the case for many common groupby
            operations (e.g., applying numpy ufuncs).
        **kwargs
            Used to call `func(ar, **kwargs)` for each array `ar`.

        Returns
        -------
        applied : DataArray
            The result of splitting, applying and combining this array.
        """
        combined = super(DataArrayResample, self).apply(
            func, shortcut=shortcut, **kwargs)

        # If the aggregation function didn't drop the original resampling
        # dimension, then we need to do so before we can rename the proxy
        # dimension we used.
        if self._dim in combined.coords:
            combined = combined.drop(self._dim)

        # Rename the proxy dimension back to the user-visible one.
        if self._resample_dim in combined.dims:
            combined = combined.rename({self._resample_dim: self._dim})

        return combined
def _interpolate(self, kind='linear'):
"""Apply scipy.interpolate.interp1d along resampling dimension."""
from .dataarray import DataArray
from scipy.interpolate import interp1d
if isinstance(self._obj.data, dask_array_type):
raise TypeError(
"Up-sampling via interpolation was attempted on the the "
"variable '{}', but it is a dask array; dask arrays are not "
"yet supported in resample.interpolate(). Load into "
"memory with Dataset.load() before resampling."
.format(self._obj.data.name)
)
x = self._obj[self._dim].astype('float')
y = self._obj.data
axis = self._obj.get_axis_num(self._dim)
f = interp1d(x, y, kind=kind, axis=axis, bounds_error=True,
assume_sorted=True)
new_x = self._full_index.values.astype('float')
# construct new up-sampled DataArray
dummy = self._obj.copy()
dims = dummy.dims
# drop any existing non-dimension coordinates along the resampling
# dimension
coords = OrderedDict()
for k, v in dummy.coords.items():
# is the resampling dimension
if k == self._dim:
coords[self._dim] = self._full_index
# else, check if resampling dim is in coordinate dimensions
elif self._dim not in v.dims:
coords[k] = v
return DataArray(f(new_x), coords, dims, name=dummy.name,
attrs=dummy.attrs)
# Attach the standard reduction methods (mean, sum, ...) and binary
# arithmetic operators to DataArrayResample.
ops.inject_reduce_methods(DataArrayResample)
ops.inject_binary_ops(DataArrayResample)
class DatasetResample(DatasetGroupBy, Resample):
    """DatasetGroupBy object specialized to resampling a specified dimension
    """
    def __init__(self, *args, **kwargs):
        # 'dim' is the real dimension being resampled; 'resample_dim' is the
        # proxy dimension used internally during grouping.
        self._dim = kwargs.pop('dim', None)
        self._resample_dim = kwargs.pop('resample_dim', None)
        if self._dim == self._resample_dim:
            raise ValueError("Proxy resampling dimension ('{}') "
                             "cannot have the same name as actual dimension "
                             "('{}')! ".format(self._resample_dim, self._dim))
        super(DatasetResample, self).__init__(*args, **kwargs)
    def apply(self, func, **kwargs):
        """Apply a function over each Dataset in the groups generated for
        resampling and concatenate them together into a new Dataset.
        `func` is called like `func(ds, *args, **kwargs)` for each dataset `ds`
        in this group.
        Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how
        to stack together the datasets. The rule is:
        1. If the dimension along which the group coordinate is defined is
           still in the first grouped item after applying `func`, then stack
           over this dimension.
        2. Otherwise, stack over the new dimension given by name of this
           grouping (the argument to the `groupby` function).
        Parameters
        ----------
        func : function
            Callable to apply to each sub-dataset.
        **kwargs
            Used to call `func(ds, **kwargs)` for each sub-dataset `ds`.
        Returns
        -------
        applied : Dataset or DataArray
            The result of splitting, applying and combining this dataset.
        """
        kwargs.pop('shortcut', None)  # ignore shortcut if set (for now)
        applied = (func(ds, **kwargs) for ds in self._iter_grouped())
        combined = self._combine(applied)
        # Rename the proxy dimension back to the user-visible one.
        return combined.rename({self._resample_dim: self._dim})
    def reduce(self, func, dim=None, keep_attrs=False, **kwargs):
        """Reduce the items in this group by applying `func` along the
        pre-defined resampling dimension.
        Note that `dim` is set by default here and ignored if passed by the
        user; this ensures compatibility with the existing reduce interface.
        Parameters
        ----------
        func : function
            Function which can be called in the form
            `func(x, axis=axis, **kwargs)` to return the result of collapsing
            an np.ndarray over an integer valued axis.
        keep_attrs : bool, optional
            If True, the datasets's attributes (`attrs`) will be copied from
            the original object to the new one. If False (default), the new
            object will be returned without attributes.
        **kwargs : dict
            Additional keyword arguments passed on to `func`.
        Returns
        -------
        reduced : Array
            Array with summarized data and the indicated dimension(s)
            removed.
        """
        return super(DatasetResample, self).reduce(
            func, self._dim, keep_attrs, **kwargs)
    def _interpolate(self, kind='linear'):
        """Apply scipy.interpolate.interp1d along resampling dimension."""
        from .dataset import Dataset
        from .variable import Variable
        from scipy.interpolate import interp1d
        old_times = self._obj[self._dim].astype(float)
        new_times = self._full_index.values.astype(float)
        data_vars = OrderedDict()
        coords = OrderedDict()
        # Apply the interpolation to each DataArray in our original Dataset
        for name, variable in self._obj.variables.items():
            if name in self._obj.coords:
                if name == self._dim:
                    coords[self._dim] = self._full_index
                elif self._dim not in variable.dims:
                    coords[name] = variable
            else:
                if isinstance(variable.data, dask_array_type):
                    # BUG FIX: message previously misspelled "supported".
                    raise TypeError(
                        "Up-sampling via interpolation was attempted on the "
                        "variable '{}', but it is a dask array; dask arrays "
                        "are not yet supported in resample.interpolate(). "
                        "Load into memory with Dataset.load() before "
                        "resampling."
                        .format(name)
                    )
                axis = variable.get_axis_num(self._dim)
                # We've previously checked for monotonicity along the
                # re-sampling dimension (in __init__ via the GroupBy
                # constructor), so we can avoid sorting the data again by
                # passing 'assume_sorted=True'
                f = interp1d(old_times, variable.data, kind=kind,
                             axis=axis, bounds_error=True,
                             assume_sorted=True)
                interpolated = Variable(variable.dims, f(new_times))
                data_vars[name] = interpolated
        return Dataset(data_vars, coords)
# Attach the standard reduction methods (mean, sum, ...) and binary
# arithmetic operators to DatasetResample.
ops.inject_reduce_methods(DatasetResample)
ops.inject_binary_ops(DatasetResample)
| |
# Copyright 2010 OpenStack Foundation
# Copyright 2012 University Of Minho
# Copyright 2014 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import uuid
import eventlet
from eventlet import greenthread
import mock
from nova.compute import arch
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.virt.libvirt import fakelibvirt
from nova.virt import event
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import driver as libvirt_driver
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import host
# Point the host and guest modules at the fake libvirt implementation so
# that no real libvirt connection is made while running these tests.
host.libvirt = fakelibvirt
libvirt_guest.libvirt = fakelibvirt
class FakeVirtDomain(object):
    """Minimal stand-in for a libvirt virDomain used by the list tests."""

    def __init__(self, id=-1, name=None):
        # -1 mirrors libvirt's id for a defined-but-not-running domain.
        self._name = name
        self._id = id
        self._uuid = str(uuid.uuid4())

    def ID(self):
        return self._id

    def name(self):
        return self._name

    def UUIDString(self):
        # Random UUID fixed at construction time.
        return self._uuid
class HostTestCase(test.NoDBTestCase):
    def setUp(self):
        """Route every Host connection through the fake libvirt fixture."""
        super(HostTestCase, self).setUp()
        self.useFixture(fakelibvirt.FakeLibvirtFixture())
        self.host = host.Host("qemu:///system")
    @mock.patch.object(fakelibvirt.virConnect, "registerCloseCallback")
    def test_close_callback(self, mock_close):
        """Opening a connection registers a close callback with libvirt."""
        self.close_callback = None
        def set_close_callback(cb, opaque):
            self.close_callback = cb
        mock_close.side_effect = set_close_callback
        # verify that the driver registers for the close callback
        self.host.get_connection()
        self.assertTrue(self.close_callback)
    @mock.patch.object(fakelibvirt.virConnect, "registerCloseCallback")
    def test_close_callback_bad_signature(self, mock_close):
        '''Validates that a connection to libvirt exist,
        even when registerCloseCallback method has a different
        number of arguments in the libvirt python library.
        '''
        # A signature mismatch surfaces as TypeError; it must be tolerated.
        mock_close.side_effect = TypeError('dd')
        connection = self.host.get_connection()
        self.assertTrue(connection)
    @mock.patch.object(fakelibvirt.virConnect, "registerCloseCallback")
    def test_close_callback_not_defined(self, mock_close):
        '''Validates that a connection to libvirt exist,
        even when registerCloseCallback method missing from
        the libvirt python library.
        '''
        # Older python-libvirt bindings lack the method entirely.
        mock_close.side_effect = AttributeError('dd')
        connection = self.host.get_connection()
        self.assertTrue(connection)
    @mock.patch.object(fakelibvirt.virConnect, "getLibVersion")
    def test_broken_connection(self, mock_ver):
        """_test_connection returns False for known broken-connection errors."""
        # Each (error code, error domain) pair below is treated by the host
        # code as evidence that the connection has died.
        for (error, domain) in (
                (fakelibvirt.VIR_ERR_SYSTEM_ERROR,
                 fakelibvirt.VIR_FROM_REMOTE),
                (fakelibvirt.VIR_ERR_SYSTEM_ERROR,
                 fakelibvirt.VIR_FROM_RPC),
                (fakelibvirt.VIR_ERR_INTERNAL_ERROR,
                 fakelibvirt.VIR_FROM_RPC)):
            conn = self.host._connect("qemu:///system", False)
            mock_ver.side_effect = fakelibvirt.make_libvirtError(
                fakelibvirt.libvirtError,
                "Connection broken",
                error_code=error,
                error_domain=domain)
            self.assertFalse(self.host._test_connection(conn))
    @mock.patch.object(host, 'LOG')
    def test_connect_auth_cb_exception(self, log_mock):
        """_connect_auth_cb must raise and must not log the credentials."""
        creds = dict(authname='nova', password='verybadpass')
        self.assertRaises(exception.NovaException,
                          self.host._connect_auth_cb, creds, False)
        self.assertEqual(0, len(log_mock.method_calls),
                         'LOG should not be used in _connect_auth_cb.')
    @mock.patch.object(greenthread, 'spawn_after')
    def test_event_dispatch(self, mock_spawn_after):
        """Events queued over the self-pipe are dispatched in order."""
        # Validate that the libvirt self-pipe for forwarding
        # events between threads is working sanely
        def handler(event):
            got_events.append(event)
        hostimpl = host.Host("qemu:///system",
                             lifecycle_event_handler=handler)
        got_events = []
        hostimpl._init_events_pipe()
        event1 = event.LifecycleEvent(
            "cef19ce0-0ca2-11df-855d-b19fbce37686",
            event.EVENT_LIFECYCLE_STARTED)
        event2 = event.LifecycleEvent(
            "cef19ce0-0ca2-11df-855d-b19fbce37686",
            event.EVENT_LIFECYCLE_PAUSED)
        hostimpl._queue_event(event1)
        hostimpl._queue_event(event2)
        hostimpl._dispatch_events()
        want_events = [event1, event2]
        self.assertEqual(want_events, got_events)
        event3 = event.LifecycleEvent(
            "cef19ce0-0ca2-11df-855d-b19fbce37686",
            event.EVENT_LIFECYCLE_RESUMED)
        event4 = event.LifecycleEvent(
            "cef19ce0-0ca2-11df-855d-b19fbce37686",
            event.EVENT_LIFECYCLE_STOPPED)
        hostimpl._queue_event(event3)
        hostimpl._queue_event(event4)
        hostimpl._dispatch_events()
        want_events = [event1, event2, event3]
        self.assertEqual(want_events, got_events)
        # STOPPED is delayed so it's handled separately
        mock_spawn_after.assert_called_once_with(
            hostimpl._lifecycle_delay, hostimpl._event_emit, event4)
def test_event_lifecycle(self):
got_events = []
# Validate that libvirt events are correctly translated
# to Nova events
def spawn_after(seconds, func, *args, **kwargs):
got_events.append(args[0])
return mock.Mock(spec=greenthread.GreenThread)
greenthread.spawn_after = mock.Mock(side_effect=spawn_after)
hostimpl = host.Host("qemu:///system",
lifecycle_event_handler=lambda e: None)
conn = hostimpl.get_connection()
hostimpl._init_events_pipe()
fake_dom_xml = """
<domain type='kvm'>
<uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
<devices>
<disk type='file'>
<source file='filename'/>
</disk>
</devices>
</domain>
"""
dom = fakelibvirt.Domain(conn,
fake_dom_xml,
False)
hostimpl._event_lifecycle_callback(
conn, dom, fakelibvirt.VIR_DOMAIN_EVENT_STOPPED, 0, hostimpl)
hostimpl._dispatch_events()
self.assertEqual(len(got_events), 1)
self.assertIsInstance(got_events[0], event.LifecycleEvent)
self.assertEqual(got_events[0].uuid,
"cef19ce0-0ca2-11df-855d-b19fbce37686")
self.assertEqual(got_events[0].transition,
event.EVENT_LIFECYCLE_STOPPED)
def test_event_emit_delayed_call_delayed(self):
ev = event.LifecycleEvent(
"cef19ce0-0ca2-11df-855d-b19fbce37686",
event.EVENT_LIFECYCLE_STOPPED)
for uri in ("qemu:///system", "xen:///"):
spawn_after_mock = mock.Mock()
greenthread.spawn_after = spawn_after_mock
hostimpl = host.Host(uri,
lifecycle_event_handler=lambda e: None)
hostimpl._event_emit_delayed(ev)
spawn_after_mock.assert_called_once_with(
15, hostimpl._event_emit, ev)
    @mock.patch.object(greenthread, 'spawn_after')
    def test_event_emit_delayed_call_delayed_pending(self, spawn_after_mock):
        """A pending delayed event for the same UUID is cancelled first."""
        hostimpl = host.Host("xen:///",
                             lifecycle_event_handler=lambda e: None)
        uuid = "cef19ce0-0ca2-11df-855d-b19fbce37686"
        gt_mock = mock.Mock()
        hostimpl._events_delayed[uuid] = gt_mock
        ev = event.LifecycleEvent(
            uuid, event.EVENT_LIFECYCLE_STOPPED)
        hostimpl._event_emit_delayed(ev)
        gt_mock.cancel.assert_called_once_with()
        self.assertTrue(spawn_after_mock.called)
    def test_event_delayed_cleanup(self):
        """A non-STOPPED event cancels and removes a pending delayed emit."""
        hostimpl = host.Host("xen:///",
                             lifecycle_event_handler=lambda e: None)
        uuid = "cef19ce0-0ca2-11df-855d-b19fbce37686"
        ev = event.LifecycleEvent(
            uuid, event.EVENT_LIFECYCLE_STARTED)
        gt_mock = mock.Mock()
        hostimpl._events_delayed[uuid] = gt_mock
        hostimpl._event_emit_delayed(ev)
        gt_mock.cancel.assert_called_once_with()
        self.assertNotIn(uuid, hostimpl._events_delayed.keys())
    @mock.patch.object(fakelibvirt.virConnect, "domainEventRegisterAny")
    @mock.patch.object(host.Host, "_connect")
    def test_get_connection_serial(self, mock_conn, mock_event):
        """Back-to-back get_connection calls share one connection/registration."""
        def get_conn_currency(host):
            host.get_connection().getLibVersion()
        def connect_with_block(*a, **k):
            # enough to allow another connect to run
            eventlet.sleep(0)
            self.connect_calls += 1
            return fakelibvirt.openAuth("qemu:///system",
                                        [[], lambda: 1, None], 0)
        def fake_register(*a, **k):
            self.register_calls += 1
        self.connect_calls = 0
        self.register_calls = 0
        mock_conn.side_effect = connect_with_block
        mock_event.side_effect = fake_register
        # call serially
        get_conn_currency(self.host)
        get_conn_currency(self.host)
        self.assertEqual(self.connect_calls, 1)
        self.assertEqual(self.register_calls, 1)
    @mock.patch.object(fakelibvirt.virConnect, "domainEventRegisterAny")
    @mock.patch.object(host.Host, "_connect")
    def test_get_connection_concurrency(self, mock_conn, mock_event):
        """Concurrent get_connection calls must not open two connections."""
        def get_conn_currency(host):
            host.get_connection().getLibVersion()
        def connect_with_block(*a, **k):
            # enough to allow another connect to run
            eventlet.sleep(0)
            self.connect_calls += 1
            return fakelibvirt.openAuth("qemu:///system",
                                        [[], lambda: 1, None], 0)
        def fake_register(*a, **k):
            self.register_calls += 1
        self.connect_calls = 0
        self.register_calls = 0
        mock_conn.side_effect = connect_with_block
        mock_event.side_effect = fake_register
        # call concurrently
        thr1 = eventlet.spawn(get_conn_currency, self.host)
        thr2 = eventlet.spawn(get_conn_currency, self.host)
        # let threads run
        eventlet.sleep(0)
        thr1.wait()
        thr2.wait()
        self.assertEqual(self.connect_calls, 1)
        self.assertEqual(self.register_calls, 1)
    @mock.patch.object(fakelibvirt.virConnect, "getLibVersion")
    @mock.patch.object(fakelibvirt.virConnect, "getVersion")
    @mock.patch.object(fakelibvirt.virConnect, "getType")
    def test_has_min_version(self, fake_hv_type, fake_hv_ver, fake_lv_ver):
        """has_min_version checks lib/hv versions and type; None skips a check."""
        # 1002003 encodes 1.2.3; 4005006 encodes 4.5.6.
        fake_lv_ver.return_value = 1002003
        fake_hv_ver.return_value = 4005006
        fake_hv_type.return_value = 'xyz'
        lv_ver = (1, 2, 3)
        hv_ver = (4, 5, 6)
        hv_type = 'xyz'
        self.assertTrue(self.host.has_min_version(lv_ver, hv_ver, hv_type))
        self.assertFalse(self.host.has_min_version(lv_ver, hv_ver, 'abc'))
        self.assertFalse(self.host.has_min_version(lv_ver, (4, 5, 7), hv_type))
        self.assertFalse(self.host.has_min_version((1, 3, 3), hv_ver, hv_type))
        self.assertTrue(self.host.has_min_version(lv_ver, hv_ver, None))
        self.assertTrue(self.host.has_min_version(lv_ver, None, hv_type))
        self.assertTrue(self.host.has_min_version(None, hv_ver, hv_type))
@mock.patch.object(fakelibvirt.virConnect, "getLibVersion")
@mock.patch.object(fakelibvirt.virConnect, "getVersion")
@mock.patch.object(fakelibvirt.virConnect, "getType")
def test_has_version(self, fake_hv_type, fake_hv_ver, fake_lv_ver):
fake_lv_ver.return_value = 1002003
fake_hv_ver.return_value = 4005006
fake_hv_type.return_value = 'xyz'
lv_ver = (1, 2, 3)
hv_ver = (4, 5, 6)
hv_type = 'xyz'
self.assertTrue(self.host.has_version(lv_ver, hv_ver, hv_type))
for lv_ver_ in [(1, 2, 2), (1, 2, 4)]:
self.assertFalse(self.host.has_version(lv_ver_, hv_ver, hv_type))
for hv_ver_ in [(4, 4, 6), (4, 6, 6)]:
self.assertFalse(self.host.has_version(lv_ver, hv_ver_, hv_type))
self.assertFalse(self.host.has_version(lv_ver, hv_ver, 'abc'))
self.assertTrue(self.host.has_min_version(lv_ver, hv_ver, None))
self.assertTrue(self.host.has_min_version(lv_ver, None, hv_type))
self.assertTrue(self.host.has_min_version(None, hv_ver, hv_type))
@mock.patch.object(fakelibvirt.virConnect, "lookupByID")
def test_get_domain_by_id(self, fake_lookup):
dom = fakelibvirt.virDomain(self.host.get_connection(),
"<domain id='7'/>")
fake_lookup.return_value = dom
self.assertEqual(dom, self.host._get_domain_by_id(7))
fake_lookup.assert_called_once_with(7)
@mock.patch.object(fakelibvirt.virConnect, "lookupByID")
def test_get_domain_by_id_raises(self, fake_lookup):
fake_lookup.side_effect = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
'Domain not found: no domain with matching id 7',
error_code=fakelibvirt.VIR_ERR_NO_DOMAIN,
error_domain=fakelibvirt.VIR_FROM_QEMU)
self.assertRaises(exception.InstanceNotFound,
self.host._get_domain_by_id,
7)
fake_lookup.assert_called_once_with(7)
@mock.patch.object(fakelibvirt.virConnect, "lookupByName")
def test_get_domain_by_name(self, fake_lookup):
dom = fakelibvirt.virDomain(self.host.get_connection(),
"<domain id='7'/>")
fake_lookup.return_value = dom
self.assertEqual(dom, self.host._get_domain_by_name("wibble"))
fake_lookup.assert_called_once_with("wibble")
@mock.patch.object(fakelibvirt.virConnect, "lookupByName")
def test_get_domain_by_name_raises(self, fake_lookup):
fake_lookup.side_effect = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
'Domain not found: no domain with matching name',
error_code=fakelibvirt.VIR_ERR_NO_DOMAIN,
error_domain=fakelibvirt.VIR_FROM_QEMU)
self.assertRaises(exception.InstanceNotFound,
self.host._get_domain_by_name,
"wibble")
fake_lookup.assert_called_once_with("wibble")
@mock.patch.object(host.Host, "_get_domain_by_name")
def test_get_domain(self, fake_get_domain):
dom = fakelibvirt.virDomain(self.host.get_connection(),
"<domain id='7'/>")
fake_get_domain.return_value = dom
instance = objects.Instance(id="124")
self.assertEqual(dom, self.host.get_domain(instance))
fake_get_domain.assert_called_once_with("instance-0000007c")
@mock.patch.object(host.Host, "_get_domain_by_name")
def test_get_guest(self, fake_get_domain):
dom = fakelibvirt.virDomain(self.host.get_connection(),
"<domain id='7'/>")
fake_get_domain.return_value = dom
instance = objects.Instance(id="124")
guest = self.host.get_guest(instance)
self.assertEqual(dom, guest._domain)
self.assertIsInstance(guest, libvirt_guest.Guest)
fake_get_domain.assert_called_once_with("instance-0000007c")
    @mock.patch.object(fakelibvirt.Connection, "listAllDomains")
    def test_list_instance_domains_fast(self, mock_list_all):
        """The fast path uses listAllDomains with the right ACTIVE/INACTIVE flags."""
        vm1 = FakeVirtDomain(id=3, name="instance00000001")
        vm2 = FakeVirtDomain(id=17, name="instance00000002")
        vm3 = FakeVirtDomain(name="instance00000003")
        vm4 = FakeVirtDomain(name="instance00000004")
        def fake_list_all(flags):
            vms = []
            if flags & fakelibvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE:
                vms.extend([vm1, vm2])
            if flags & fakelibvirt.VIR_CONNECT_LIST_DOMAINS_INACTIVE:
                vms.extend([vm3, vm4])
            return vms
        mock_list_all.side_effect = fake_list_all
        # Default (only_running=True): only the ACTIVE flag is passed.
        doms = self.host._list_instance_domains_fast()
        mock_list_all.assert_called_once_with(
            fakelibvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE)
        mock_list_all.reset_mock()
        self.assertEqual(len(doms), 2)
        self.assertEqual(doms[0].name(), vm1.name())
        self.assertEqual(doms[1].name(), vm2.name())
        # only_running=False: both ACTIVE and INACTIVE are requested.
        doms = self.host._list_instance_domains_fast(only_running=False)
        mock_list_all.assert_called_once_with(
            fakelibvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE |
            fakelibvirt.VIR_CONNECT_LIST_DOMAINS_INACTIVE)
        self.assertEqual(len(doms), 4)
        self.assertEqual(doms[0].name(), vm1.name())
        self.assertEqual(doms[1].name(), vm2.name())
        self.assertEqual(doms[2].name(), vm3.name())
        self.assertEqual(doms[3].name(), vm4.name())
    @mock.patch.object(fakelibvirt.Connection, "numOfDomains")
    @mock.patch.object(fakelibvirt.Connection, "listDefinedDomains")
    @mock.patch.object(fakelibvirt.Connection, "listDomainsID")
    @mock.patch.object(host.Host, "_get_domain_by_name")
    @mock.patch.object(host.Host, "_get_domain_by_id")
    def test_list_instance_domains_slow(self,
                                        mock_get_id, mock_get_name,
                                        mock_list_ids, mock_list_names,
                                        mock_num_ids):
        """The slow path tolerates stale ids/names and active/inactive races."""
        vm1 = FakeVirtDomain(id=3, name="instance00000001")
        vm2 = FakeVirtDomain(id=17, name="instance00000002")
        vm3 = FakeVirtDomain(name="instance00000003")
        vm4 = FakeVirtDomain(name="instance00000004")
        vms = [vm1, vm2, vm3, vm4]
        def fake_get_domain_by_id(id):
            for vm in vms:
                if vm.ID() == id:
                    return vm
            raise exception.InstanceNotFound(instance_id=id)
        def fake_get_domain_by_name(name):
            for vm in vms:
                if vm.name() == name:
                    return vm
            raise exception.InstanceNotFound(instance_id=name)
        def fake_list_ids():
            # Include one ID that no longer exists
            return [vm1.ID(), vm2.ID(), 666]
        def fake_list_names():
            # Include one name that no longer exists and
            # one dup from running list to show race in
            # transition from inactive -> running
            return [vm1.name(), vm3.name(), vm4.name(), "fishfood"]
        mock_get_id.side_effect = fake_get_domain_by_id
        mock_get_name.side_effect = fake_get_domain_by_name
        mock_list_ids.side_effect = fake_list_ids
        mock_list_names.side_effect = fake_list_names
        mock_num_ids.return_value = 2
        # only_running default: only the id-based listing is consulted.
        doms = self.host._list_instance_domains_slow()
        mock_list_ids.assert_called_once_with()
        mock_num_ids.assert_called_once_with()
        self.assertFalse(mock_list_names.called)
        mock_list_ids.reset_mock()
        mock_list_names.reset_mock()
        mock_num_ids.reset_mock()
        self.assertEqual(len(doms), 2)
        self.assertEqual(doms[0].name(), vm1.name())
        self.assertEqual(doms[1].name(), vm2.name())
        # only_running=False adds the name-based (defined) listing,
        # de-duplicated against the running set.
        doms = self.host._list_instance_domains_slow(only_running=False)
        mock_list_ids.assert_called_once_with()
        mock_num_ids.assert_called_once_with()
        mock_list_names.assert_called_once_with()
        self.assertEqual(len(doms), 4)
        self.assertEqual(doms[0].name(), vm1.name())
        self.assertEqual(doms[1].name(), vm2.name())
        self.assertEqual(doms[2].name(), vm3.name())
        self.assertEqual(doms[3].name(), vm4.name())
    @mock.patch.object(fakelibvirt.Connection, "listAllDomains")
    @mock.patch.object(fakelibvirt.Connection, "numOfDomains")
    @mock.patch.object(fakelibvirt.Connection, "listDomainsID")
    @mock.patch.object(host.Host, "_get_domain_by_id")
    def test_list_instance_domains_fallback(self,
                                            mock_get_id, mock_list_ids,
                                            mock_num_ids, mock_list_all):
        """VIR_ERR_NO_SUPPORT from listAllDomains falls back to the slow path."""
        vm1 = FakeVirtDomain(id=3, name="instance00000001")
        vm2 = FakeVirtDomain(id=17, name="instance00000002")
        vms = [vm1, vm2]
        def fake_get_domain_by_id(id):
            for vm in vms:
                if vm.ID() == id:
                    return vm
            raise exception.InstanceNotFound(instance_id=id)
        def fake_list_doms():
            return [vm1.ID(), vm2.ID()]
        def fake_list_all(flags):
            ex = fakelibvirt.make_libvirtError(
                fakelibvirt.libvirtError,
                "API is not supported",
                error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
            raise ex
        mock_get_id.side_effect = fake_get_domain_by_id
        mock_list_ids.side_effect = fake_list_doms
        mock_num_ids.return_value = 2
        mock_list_all.side_effect = fake_list_all
        doms = self.host.list_instance_domains()
        # Fast path tried first, then the id-based slow path takes over.
        mock_list_all.assert_called_once_with(
            fakelibvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE)
        mock_list_ids.assert_called_once_with()
        mock_num_ids.assert_called_once_with()
        self.assertEqual(len(doms), 2)
        self.assertEqual(doms[0].ID(), vm1.ID())
        self.assertEqual(doms[1].ID(), vm2.ID())
    @mock.patch.object(host.Host, "_list_instance_domains_fast")
    def test_list_instance_domains_filtering(self, mock_list):
        """Non-guest domains (Xen dom-0) are filtered unless only_guests=False."""
        vm0 = FakeVirtDomain(id=0, name="Domain-0")  # Xen dom-0
        vm1 = FakeVirtDomain(id=3, name="instance00000001")
        vm2 = FakeVirtDomain(id=17, name="instance00000002")
        vm3 = FakeVirtDomain(name="instance00000003")
        vm4 = FakeVirtDomain(name="instance00000004")
        mock_list.return_value = [vm0, vm1, vm2]
        # Default: dom-0 is dropped from the result.
        doms = self.host.list_instance_domains()
        self.assertEqual(len(doms), 2)
        self.assertEqual(doms[0].name(), vm1.name())
        self.assertEqual(doms[1].name(), vm2.name())
        mock_list.assert_called_with(True)
        mock_list.return_value = [vm0, vm1, vm2, vm3, vm4]
        doms = self.host.list_instance_domains(only_running=False)
        self.assertEqual(len(doms), 4)
        self.assertEqual(doms[0].name(), vm1.name())
        self.assertEqual(doms[1].name(), vm2.name())
        self.assertEqual(doms[2].name(), vm3.name())
        self.assertEqual(doms[3].name(), vm4.name())
        mock_list.assert_called_with(False)
        mock_list.return_value = [vm0, vm1, vm2]
        # only_guests=False keeps dom-0 in the result.
        doms = self.host.list_instance_domains(only_guests=False)
        self.assertEqual(len(doms), 3)
        self.assertEqual(doms[0].name(), vm0.name())
        self.assertEqual(doms[1].name(), vm1.name())
        self.assertEqual(doms[2].name(), vm2.name())
        mock_list.assert_called_with(True)
    def test_cpu_features_bug_1217630(self):
        """CPU features only appear when baselineCPU feature expansion exists."""
        self.host.get_connection()
        # Test old version of libvirt, it shouldn't see the `aes' feature
        with mock.patch('nova.virt.libvirt.host.libvirt') as mock_libvirt:
            del mock_libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES
            caps = self.host.get_capabilities()
            self.assertNotIn('aes', [x.name for x in caps.host.cpu.features])
        # Cleanup the capabilities cache firstly
        self.host._caps = None
        # Test new version of libvirt, should find the `aes' feature
        with mock.patch('nova.virt.libvirt.host.libvirt') as mock_libvirt:
            # NOTE(review): item assignment on a MagicMock does not set the
            # attribute; attribute access on the mock auto-creates it anyway,
            # so the "new libvirt" branch is taken regardless — confirm intent.
            mock_libvirt['VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES'] = 1
            caps = self.host.get_capabilities()
            self.assertIn('aes', [x.name for x in caps.host.cpu.features])
    def test_cpu_features_are_not_duplicated(self):
        """A feature present in both CPU and baseline lists appears only once."""
        self.host.get_connection()
        # Test old version of libvirt. Should return single 'hypervisor'
        with mock.patch('nova.virt.libvirt.host.libvirt') as mock_libvirt:
            del mock_libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES
            caps = self.host.get_capabilities()
            cnt = [x.name for x in caps.host.cpu.features].count('xtpr')
            self.assertEqual(1, cnt)
        # Cleanup the capabilities cache firstly
        self.host._caps = None
        # Test new version of libvirt. Should still return single 'hypervisor'
        with mock.patch('nova.virt.libvirt.host.libvirt') as mock_libvirt:
            # NOTE(review): item assignment on a MagicMock does not set the
            # attribute; see test_cpu_features_bug_1217630 — confirm intent.
            mock_libvirt['VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES'] = 1
            caps = self.host.get_capabilities()
            cnt = [x.name for x in caps.host.cpu.features].count('xtpr')
            self.assertEqual(1, cnt)
    def test_baseline_cpu_not_supported(self):
        """Only VIR_ERR_NO_SUPPORT from baselineCPU is tolerated."""
        # Handle just the NO_SUPPORT error
        not_supported_exc = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            'this function is not supported by the connection driver:'
            ' virConnectBaselineCPU',
            error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
        with mock.patch.object(fakelibvirt.virConnect, 'baselineCPU',
                               side_effect=not_supported_exc):
            caps = self.host.get_capabilities()
            self.assertEqual(vconfig.LibvirtConfigCaps, type(caps))
            self.assertNotIn('aes', [x.name for x in caps.host.cpu.features])
        # Clear cached result so we can test again...
        self.host._caps = None
        # Other errors should not be caught
        other_exc = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            'other exc',
            error_code=fakelibvirt.VIR_ERR_NO_DOMAIN)
        with mock.patch.object(fakelibvirt.virConnect, 'baselineCPU',
                               side_effect=other_exc):
            self.assertRaises(fakelibvirt.libvirtError,
                              self.host.get_capabilities)
def test_lxc_get_host_capabilities_failed(self):
with mock.patch.object(fakelibvirt.virConnect, 'baselineCPU',
return_value=-1):
caps = self.host.get_capabilities()
self.assertEqual(vconfig.LibvirtConfigCaps, type(caps))
self.assertNotIn('aes', [x.name for x in caps.host.cpu.features])
    def test_get_capabilities_no_host_cpu_model(self):
        """Tests that cpu features are not retrieved when the host cpu model
        is not in the capabilities.
        """
        # Capabilities XML deliberately lacking a <model> under <cpu>.
        fake_caps_xml = '''
<capabilities>
  <host>
    <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
    <cpu>
      <arch>x86_64</arch>
      <vendor>Intel</vendor>
    </cpu>
  </host>
</capabilities>'''
        with mock.patch.object(fakelibvirt.virConnect, 'getCapabilities',
                               return_value=fake_caps_xml):
            caps = self.host.get_capabilities()
            self.assertEqual(vconfig.LibvirtConfigCaps, type(caps))
            self.assertIsNone(caps.host.cpu.model)
            self.assertEqual(0, len(caps.host.cpu.features))
    @mock.patch.object(fakelibvirt.virConnect, "getHostname")
    def test_get_hostname_caching(self, mock_hostname):
        """The first hostname returned is cached; later changes are ignored."""
        mock_hostname.return_value = "foo"
        self.assertEqual('foo', self.host.get_hostname())
        mock_hostname.assert_called_with()
        mock_hostname.reset_mock()
        # Even though libvirt now reports "bar", the cached value wins.
        mock_hostname.return_value = "bar"
        self.assertEqual('foo', self.host.get_hostname())
        mock_hostname.assert_called_with()
@mock.patch.object(fakelibvirt.virConnect, "getType")
def test_get_driver_type(self, mock_type):
mock_type.return_value = "qemu"
self.assertEqual("qemu", self.host.get_driver_type())
mock_type.assert_called_once_with()
@mock.patch.object(fakelibvirt.virConnect, "getVersion")
def test_get_version(self, mock_version):
mock_version.return_value = 1005001
self.assertEqual(1005001, self.host.get_version())
mock_version.assert_called_once_with()
    @mock.patch.object(fakelibvirt.virConnect, "secretLookupByUsage")
    def test_find_secret(self, mock_sec):
        """finding secrets with various usage_type."""
        expected = [
            mock.call(fakelibvirt.VIR_SECRET_USAGE_TYPE_CEPH, 'rbdvol'),
            mock.call(fakelibvirt.VIR_SECRET_USAGE_TYPE_CEPH, 'cephvol'),
            mock.call(fakelibvirt.VIR_SECRET_USAGE_TYPE_ISCSI, 'iscsivol'),
            mock.call(fakelibvirt.VIR_SECRET_USAGE_TYPE_VOLUME, 'vol')]
        self.host.find_secret('rbd', 'rbdvol')
        self.host.find_secret('ceph', 'cephvol')
        self.host.find_secret('iscsi', 'iscsivol')
        self.host.find_secret('volume', 'vol')
        self.assertEqual(expected, mock_sec.mock_calls)
        # An unknown usage type is rejected outright.
        self.assertRaises(exception.NovaException,
                          self.host.find_secret, "foo", "foovol")
        # err code 66 is libvirt's VIR_ERR_NO_SECRET: lookup miss yields None.
        mock_sec.side_effect = fakelibvirt.libvirtError("")
        mock_sec.side_effect.err = (66, )
        self.assertIsNone(self.host.find_secret('rbd', 'rbdvol'))
    @mock.patch.object(fakelibvirt.virConnect, "secretDefineXML")
    def test_create_secret(self, mock_sec):
        """creating secrets with various usage_type."""
        self.host.create_secret('rbd', 'rbdvol')
        self.host.create_secret('ceph', 'cephvol')
        self.host.create_secret('iscsi', 'iscsivol')
        self.host.create_secret('volume', 'vol')
        # An unknown usage type is rejected outright.
        self.assertRaises(exception.NovaException,
                          self.host.create_secret, "foo", "foovol")
        # Supplying a password sets the value on the defined secret.
        secret = mock.MagicMock()
        mock_sec.return_value = secret
        self.host.create_secret('iscsi', 'iscsivol', password="foo")
        secret.setValue.assert_called_once_with("foo")
@mock.patch('nova.virt.libvirt.host.Host.find_secret')
def test_delete_secret(self, mock_find_secret):
"""deleting secret."""
secret = mock.MagicMock()
mock_find_secret.return_value = secret
expected = [mock.call('rbd', 'rbdvol'),
mock.call().undefine()]
self.host.delete_secret('rbd', 'rbdvol')
self.assertEqual(expected, mock_find_secret.mock_calls)
mock_find_secret.return_value = None
self.host.delete_secret("rbd", "rbdvol")
def test_get_cpu_count(self):
with mock.patch.object(host.Host, "get_connection") as mock_conn:
mock_conn().getInfo.return_value = ['zero', 'one', 'two']
self.assertEqual('two', self.host.get_cpu_count())
def test_get_memory_total(self):
with mock.patch.object(host.Host, "get_connection") as mock_conn:
mock_conn().getInfo.return_value = ['zero', 'one', 'two']
self.assertEqual('one', self.host.get_memory_mb_total())
    def test_get_memory_used(self):
        """get_memory_mb_used derives used memory from /proc/meminfo fields.

        NOTE(review): contextlib.nested and __builtin__ exist only on
        Python 2; this test cannot run unmodified on Python 3.
        """
        m = mock.mock_open(read_data="""
MemTotal:       16194180 kB
MemFree:          233092 kB
MemAvailable:    8892356 kB
Buffers:          567708 kB
Cached:          8362404 kB
SwapCached:            0 kB
Active:          8381604 kB
""")
        with contextlib.nested(
                mock.patch("__builtin__.open", m, create=True),
                mock.patch.object(host.Host,
                                  "get_connection"),
                mock.patch('sys.platform', 'linux2'),
                ) as (mock_file, mock_conn, mock_platform):
            mock_conn().getInfo.return_value = [
                arch.X86_64, 15814, 8, 1208, 1, 1, 4, 2]
            # 15814 total - (233092 + 567708 + 8362404) kB free/buff/cache
            self.assertEqual(6866, self.host.get_memory_mb_used())
    def test_get_memory_used_xen(self):
        """get_memory_mb_used() under virt_type=xen sums guest domain
        memory plus host overhead instead of reading only /proc/meminfo.
        """
        self.flags(virt_type='xen', group='libvirt')

        # Minimal stand-in for a libvirt domain: only the methods the
        # driver touches when accounting memory are implemented.
        class DiagFakeDomain(object):
            def __init__(self, id, memmb):
                self.id = id
                self.memmb = memmb

            def info(self):
                # [state, maxMem, memory] -- memory reported in kB.
                return [0, 0, self.memmb * 1024]

            def ID(self):
                return self.id

            def name(self):
                return "instance000001"

            def UUIDString(self):
                return str(uuid.uuid4())

        # Canned /proc/meminfo content (values in kB).
        m = mock.mock_open(read_data="""
MemTotal:       16194180 kB
MemFree:          233092 kB
MemAvailable:    8892356 kB
Buffers:          567708 kB
Cached:          8362404 kB
SwapCached:            0 kB
Active:          8381604 kB
""")
        # NOTE(review): contextlib.nested and __builtin__ are Python 2
        # only; this test cannot run on Python 3 as written.
        with contextlib.nested(
                mock.patch("__builtin__.open", m, create=True),
                mock.patch.object(host.Host,
                                  "list_instance_domains"),
                mock.patch.object(libvirt_driver.LibvirtDriver,
                                  "_conn"),
                mock.patch('sys.platform', 'linux2'),
                ) as (mock_file, mock_list, mock_conn, mock_platform):
            # Domain id 0 is dom0; guests are ids 1 and 2.
            mock_list.return_value = [
                DiagFakeDomain(0, 15814),
                DiagFakeDomain(1, 750),
                DiagFakeDomain(2, 1042)]
            mock_conn.getInfo.return_value = [
                arch.X86_64, 15814, 8, 1208, 1, 1, 4, 2]

            self.assertEqual(8657, self.host.get_memory_mb_used())
            # Memory accounting must include non-guest (dom0) domains.
            mock_list.assert_called_with(only_guests=False)
def test_get_cpu_stats(self):
stats = self.host.get_cpu_stats()
self.assertEqual(
{'kernel': 5664160000000,
'idle': 1592705190000000,
'frequency': 800,
'user': 26728850000000,
'iowait': 6121490000000},
stats)
@mock.patch.object(fakelibvirt.virConnect, "defineXML")
def test_write_instance_config(self, mock_defineXML):
xml = "<x><name>foo</name></x>"
self.host.write_instance_config(xml)
mock_defineXML.assert_called_once_with(xml)
@mock.patch.object(fakelibvirt.virConnect, "nodeDeviceLookupByName")
def test_device_lookup_by_name(self, mock_nodeDeviceLookupByName):
self.host.device_lookup_by_name("foo")
mock_nodeDeviceLookupByName.assert_called_once_with("foo")
@mock.patch.object(fakelibvirt.virConnect, "listDevices")
def test_list_pci_devices(self, mock_listDevices):
self.host.list_pci_devices(8)
mock_listDevices.assert_called_once_with('pci', 8)
@mock.patch.object(fakelibvirt.virConnect, "compareCPU")
def test_compare_cpu(self, mock_compareCPU):
self.host.compare_cpu("cpuxml")
mock_compareCPU.assert_called_once_with("cpuxml", 0)
class DomainJobInfoTestCase(test.NoDBTestCase):
    """Tests for host.DomainJobInfo.for_domain().

    Exercises the fallback chain: the modern virDomainGetJobStats API
    (jobStats) is tried first; on VIR_ERR_NO_SUPPORT or AttributeError
    the legacy virDomainGetJobInfo API (jobInfo) is used; a vanished or
    stopped domain yields a synthetic "completed" job info.
    """

    def setUp(self):
        super(DomainJobInfoTestCase, self).setUp()

        self.useFixture(fakelibvirt.FakeLibvirtFixture())

        self.conn = fakelibvirt.openAuth("qemu:///system",
                                         [[], lambda: True])
        xml = ("<domain type='kvm'>"
               "  <name>instance-0000000a</name>"
               "</domain>")
        self.dom = self.conn.createXML(xml, 0)
        # Reset the cached capability flag so every test starts out
        # assuming jobStats is available.
        host.DomainJobInfo._have_job_stats = True

    @mock.patch.object(fakelibvirt.virDomain, "jobInfo")
    @mock.patch.object(fakelibvirt.virDomain, "jobStats")
    def test_job_stats(self, mock_stats, mock_info):
        """jobStats result is mapped onto DomainJobInfo; unknown stats are
        ignored and missing ones default to 0."""
        mock_stats.return_value = {
            "type": fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED,
            "memory_total": 75,
            "memory_processed": 50,
            "memory_remaining": 33,
            "some_new_libvirt_stat_we_dont_know_about": 83
        }

        info = host.DomainJobInfo.for_domain(self.dom)

        self.assertIsInstance(info, host.DomainJobInfo)
        self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, info.type)
        self.assertEqual(75, info.memory_total)
        self.assertEqual(50, info.memory_processed)
        self.assertEqual(33, info.memory_remaining)
        # Stats not present in the jobStats dict default to zero.
        self.assertEqual(0, info.disk_total)
        self.assertEqual(0, info.disk_processed)
        self.assertEqual(0, info.disk_remaining)

        mock_stats.assert_called_once_with()
        # Legacy API must not be consulted when jobStats succeeds.
        self.assertFalse(mock_info.called)

    @mock.patch.object(fakelibvirt.virDomain, "jobInfo")
    @mock.patch.object(fakelibvirt.virDomain, "jobStats")
    def test_job_info_no_support(self, mock_stats, mock_info):
        """VIR_ERR_NO_SUPPORT from jobStats falls back to legacy jobInfo."""
        mock_stats.side_effect = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            "virDomainGetJobStats not implemented",
            fakelibvirt.VIR_ERR_NO_SUPPORT)

        # Legacy positional result: type, time_elapsed, time_remaining,
        # data_total/processed/remaining, memory_*, disk_*.
        mock_info.return_value = [
            fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED,
            100, 99, 10, 11, 12, 75, 50, 33, 1, 2, 3]

        info = host.DomainJobInfo.for_domain(self.dom)

        self.assertIsInstance(info, host.DomainJobInfo)
        self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, info.type)
        self.assertEqual(100, info.time_elapsed)
        self.assertEqual(99, info.time_remaining)
        self.assertEqual(10, info.data_total)
        self.assertEqual(11, info.data_processed)
        self.assertEqual(12, info.data_remaining)
        self.assertEqual(75, info.memory_total)
        self.assertEqual(50, info.memory_processed)
        self.assertEqual(33, info.memory_remaining)
        self.assertEqual(1, info.disk_total)
        self.assertEqual(2, info.disk_processed)
        self.assertEqual(3, info.disk_remaining)

        mock_stats.assert_called_once_with()
        mock_info.assert_called_once_with()

    @mock.patch.object(fakelibvirt.virDomain, "jobInfo")
    @mock.patch.object(fakelibvirt.virDomain, "jobStats")
    def test_job_info_attr_error(self, mock_stats, mock_info):
        """AttributeError (python binding too old for jobStats) also
        falls back to legacy jobInfo."""
        mock_stats.side_effect = AttributeError("No such API")

        mock_info.return_value = [
            fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED,
            100, 99, 10, 11, 12, 75, 50, 33, 1, 2, 3]

        info = host.DomainJobInfo.for_domain(self.dom)

        self.assertIsInstance(info, host.DomainJobInfo)
        self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, info.type)
        self.assertEqual(100, info.time_elapsed)
        self.assertEqual(99, info.time_remaining)
        self.assertEqual(10, info.data_total)
        self.assertEqual(11, info.data_processed)
        self.assertEqual(12, info.data_remaining)
        self.assertEqual(75, info.memory_total)
        self.assertEqual(50, info.memory_processed)
        self.assertEqual(33, info.memory_remaining)
        self.assertEqual(1, info.disk_total)
        self.assertEqual(2, info.disk_processed)
        self.assertEqual(3, info.disk_remaining)

        mock_stats.assert_called_once_with()
        mock_info.assert_called_once_with()

    @mock.patch.object(fakelibvirt.virDomain, "jobInfo")
    @mock.patch.object(fakelibvirt.virDomain, "jobStats")
    def test_job_stats_no_domain(self, mock_stats, mock_info):
        """A vanished domain (VIR_ERR_NO_DOMAIN) yields a zeroed
        'completed' job info rather than an exception."""
        mock_stats.side_effect = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            "No such domain with UUID blah",
            fakelibvirt.VIR_ERR_NO_DOMAIN)

        info = host.DomainJobInfo.for_domain(self.dom)

        self.assertIsInstance(info, host.DomainJobInfo)
        self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_COMPLETED, info.type)
        self.assertEqual(0, info.time_elapsed)
        self.assertEqual(0, info.time_remaining)
        self.assertEqual(0, info.memory_total)
        self.assertEqual(0, info.memory_processed)
        self.assertEqual(0, info.memory_remaining)

        mock_stats.assert_called_once_with()
        self.assertFalse(mock_info.called)

    @mock.patch.object(fakelibvirt.virDomain, "jobInfo")
    @mock.patch.object(fakelibvirt.virDomain, "jobStats")
    def test_job_info_no_domain(self, mock_stats, mock_info):
        """VIR_ERR_NO_DOMAIN from the legacy fallback also yields a
        zeroed 'completed' job info."""
        mock_stats.side_effect = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            "virDomainGetJobStats not implemented",
            fakelibvirt.VIR_ERR_NO_SUPPORT)

        mock_info.side_effect = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            "No such domain with UUID blah",
            fakelibvirt.VIR_ERR_NO_DOMAIN)

        info = host.DomainJobInfo.for_domain(self.dom)

        self.assertIsInstance(info, host.DomainJobInfo)
        self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_COMPLETED, info.type)
        self.assertEqual(0, info.time_elapsed)
        self.assertEqual(0, info.time_remaining)
        self.assertEqual(0, info.memory_total)
        self.assertEqual(0, info.memory_processed)
        self.assertEqual(0, info.memory_remaining)

        mock_stats.assert_called_once_with()
        mock_info.assert_called_once_with()

    @mock.patch.object(fakelibvirt.virDomain, "jobInfo")
    @mock.patch.object(fakelibvirt.virDomain, "jobStats")
    def test_job_stats_operation_invalid(self, mock_stats, mock_info):
        """A stopped domain (VIR_ERR_OPERATION_INVALID) yields a zeroed
        'completed' job info."""
        mock_stats.side_effect = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            "Domain is not running",
            fakelibvirt.VIR_ERR_OPERATION_INVALID)

        info = host.DomainJobInfo.for_domain(self.dom)

        self.assertIsInstance(info, host.DomainJobInfo)
        self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_COMPLETED, info.type)
        self.assertEqual(0, info.time_elapsed)
        self.assertEqual(0, info.time_remaining)
        self.assertEqual(0, info.memory_total)
        self.assertEqual(0, info.memory_processed)
        self.assertEqual(0, info.memory_remaining)

        mock_stats.assert_called_once_with()
        self.assertFalse(mock_info.called)

    @mock.patch.object(fakelibvirt.virDomain, "jobInfo")
    @mock.patch.object(fakelibvirt.virDomain, "jobStats")
    def test_job_info_operation_invalid(self, mock_stats, mock_info):
        """VIR_ERR_OPERATION_INVALID from the legacy fallback also yields
        a zeroed 'completed' job info."""
        mock_stats.side_effect = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            "virDomainGetJobStats not implemented",
            fakelibvirt.VIR_ERR_NO_SUPPORT)

        mock_info.side_effect = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            "Domain is not running",
            fakelibvirt.VIR_ERR_OPERATION_INVALID)

        info = host.DomainJobInfo.for_domain(self.dom)

        self.assertIsInstance(info, host.DomainJobInfo)
        self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_COMPLETED, info.type)
        self.assertEqual(0, info.time_elapsed)
        self.assertEqual(0, info.time_remaining)
        self.assertEqual(0, info.memory_total)
        self.assertEqual(0, info.memory_processed)
        self.assertEqual(0, info.memory_remaining)

        mock_stats.assert_called_once_with()
        mock_info.assert_called_once_with()
| |
# -*- coding: utf-8 -*-
import json
import re
from distutils.version import LooseVersion
from django.conf.urls import url
from django.contrib.admin.utils import unquote
from django.core import signing
from django.core.exceptions import PermissionDenied, ValidationError
from django.db import transaction
from django.forms.fields import CharField
from django.http import (
Http404, HttpResponse, HttpResponseBadRequest, HttpResponseForbidden,
HttpResponseRedirect,
)
from django.shortcuts import get_object_or_404
from django.template import RequestContext
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.encoding import force_text
from django.utils.translation import ugettext
from django.views.decorators.clickjacking import xframe_options_sameorigin
from django.views.decorators.http import require_POST
import cms
from cms.models import CMSPlugin
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cms.utils.placeholder import get_toolbar_plugin_struct
from cms.utils.urlutils import admin_reverse
from six import text_type
from . import settings
from .forms import (
ActionTokenValidationForm, DeleteOnCancelForm, RenderPluginForm, TextForm,
)
from .models import Text
from .utils import (
OBJ_ADMIN_WITH_CONTENT_RE_PATTERN, _plugin_tags_to_html,
plugin_tags_to_admin_html, plugin_tags_to_id_list,
plugin_tags_to_user_html, plugin_to_tag, random_comment_exempt,
replace_plugin_tags,
)
from .widgets import TextEditorWidget
CMS_34 = LooseVersion(cms.__version__) >= LooseVersion('3.4')
def _user_can_change_placeholder(request, placeholder):
    """Return whether the request's user may change ``placeholder``.

    Bridges the API change in django CMS 3.4: newer versions expect the
    user object, older ones expect the full request.
    """
    target = request.user if CMS_34 else request
    return placeholder.has_change_permission(target)
def post_add_plugin(operation, **kwargs):
    """djangocms-history hook: after a text plugin is added, extend the
    recorded ADD_PLUGIN action with the plugins embedded in the text body
    so that undo/redo restores them as well.
    """
    from djangocms_history.actions import ADD_PLUGIN
    from djangocms_history.helpers import get_bound_plugins, get_plugin_data
    from djangocms_history.models import dump_json

    inline_ids = set(kwargs['plugin']._get_inline_plugin_ids())

    if not inline_ids:
        # User has not embedded any plugins on the text
        return

    bound_plugins = get_bound_plugins(
        CMSPlugin.objects.filter(pk__in=inline_ids))

    # Extend the recorded added plugins to include the inline plugins (if any)
    action = operation.actions.only('post_action_data').get(
        action=ADD_PLUGIN, order=1)
    payload = json.loads(action.post_action_data)
    payload['plugins'].extend(get_plugin_data(p) for p in bound_plugins)
    action.post_action_data = dump_json(payload)
    action.save(update_fields=['post_action_data'])
def pre_change_plugin(operation, **kwargs):
    """djangocms-history hook: before a text plugin change is recorded,
    create extra ADD_PLUGIN / DELETE_PLUGIN actions for embedded plugins
    that were added to or removed from the text body.

    Expects ``old_plugin``, ``new_plugin`` and ``placeholder`` in kwargs.
    """
    from djangocms_history.actions import ADD_PLUGIN, DELETE_PLUGIN
    from djangocms_history.helpers import get_bound_plugins, get_plugin_data

    old_text_plugin = kwargs['old_plugin']
    old_plugin_ids = set(old_text_plugin._get_inline_plugin_ids())

    new_text_plugin = kwargs['new_plugin']
    new_plugin_ids = set(new_text_plugin._get_inline_plugin_ids())

    added_plugin_ids = new_plugin_ids.difference(old_plugin_ids)
    deleted_plugin_ids = old_plugin_ids.difference(new_plugin_ids)
    plugin_ids = added_plugin_ids | deleted_plugin_ids

    # The two difference sets are disjoint, so they are only equal when
    # both are empty.
    if added_plugin_ids == deleted_plugin_ids:
        # User has not added or removed embedded plugins
        return

    # Action order within the operation; the plugin-change action itself
    # occupies order 1.
    order = 1

    # This app is a special case.
    # We know the old and new tree orders because inline plugins
    # have already been set on the database when this pre operation
    # is executed.
    old_tree = (
        old_text_plugin
        .cmsplugin_set
        .filter(pk__in=old_plugin_ids)
        .order_by('position')
        .values_list('pk', flat=True)
    )
    old_tree = list(old_tree)

    new_tree = (
        new_text_plugin
        .cmsplugin_set
        .filter(pk__in=new_plugin_ids)
        .order_by('position')
        .values_list('pk', flat=True)
    )
    new_tree = list(new_tree)

    plugins = CMSPlugin.objects.filter(pk__in=plugin_ids)
    bound_plugins = list(get_bound_plugins(plugins))

    if added_plugin_ids:
        order += 1

        pre_action_data = {
            'order': old_tree,
            'parent_id': old_text_plugin.pk,
        }

        post_plugin_data = [get_plugin_data(plugin) for plugin in bound_plugins
                            if plugin.pk in added_plugin_ids]
        post_action_data = {
            'order': new_tree,
            'parent_id': old_text_plugin.pk,
            'plugins': post_plugin_data,
        }

        operation.create_action(
            action=ADD_PLUGIN,
            language=old_text_plugin.language,
            placeholder=kwargs['placeholder'],
            pre_data=pre_action_data,
            post_data=post_action_data,
            order=order,
        )

    if deleted_plugin_ids:
        order += 1

        deleted_plugins = [plugin for plugin in bound_plugins if plugin.pk in deleted_plugin_ids]

        pre_plugin_data = [get_plugin_data(plugin) for plugin in deleted_plugins]
        pre_action_data = {
            'order': old_tree,
            'parent_id': old_text_plugin.pk,
            'plugins': pre_plugin_data,
        }

        # Deleted plugins only need their metadata on the "post" side.
        post_plugin_data = [get_plugin_data(plugin, only_meta=True) for plugin in deleted_plugins]
        post_action_data = {
            'order': new_tree,
            'parent_id': old_text_plugin.pk,
            'plugins': post_plugin_data,
        }

        operation.create_action(
            action=DELETE_PLUGIN,
            language=old_text_plugin.language,
            placeholder=kwargs['placeholder'],
            pre_data=pre_action_data,
            post_data=post_action_data,
            order=order,
        )
class TextPlugin(CMSPluginBase):
    """CKEditor-backed rich text plugin.

    Renders a ``Text`` model instance and supports embedding child
    plugins inside the text body via plugin tags. Also provides the
    custom add/cancel/render admin endpoints the editor widget needs.
    """
    model = Text
    name = settings.TEXT_PLUGIN_NAME
    module = settings.TEXT_PLUGIN_MODULE_NAME
    form = TextForm
    render_template = 'cms/plugins/text.html'
    change_form_template = 'cms/plugins/text_plugin_change_form.html'
    ckeditor_configuration = settings.TEXT_CKEDITOR_CONFIGURATION
    disable_child_plugins = True

    # These are executed by the djangocms-history app
    # We use them to inject inline plugin data
    operation_handler_callbacks = {
        'post_add_plugin': post_add_plugin,
        'pre_change_plugin': pre_change_plugin,
    }

    if CMS_34:
        # On django CMS 3.5 this attribute is set automatically
        # when do_post_copy is defined in the plugin class.
        _has_do_post_copy = True

    @classmethod
    def do_post_copy(cls, instance, source_map):
        """After a copy, rewrite embedded plugin tags in the copied text
        so they point at the copied child plugins from ``source_map``.

        NOTE: the first parameter was previously (misleadingly) named
        ``self``; this is a classmethod, so it receives the class.
        """
        ids = plugin_tags_to_id_list(instance.body)
        ids_map = {pk: source_map[pk].pk for pk in ids if pk in source_map}
        new_text = replace_plugin_tags(instance.body, ids_map)
        cls.model.objects.filter(pk=instance.pk).update(body=new_text)

    @staticmethod
    def get_translation_export_content(field, plugin_data):
        """Return (content, child plugin ids) for djangocms-translations
        export, inlining each child plugin's translatable text.
        """
        def _render_plugin_with_content(obj, match):
            # Embed the child plugin's own translatable field (if any)
            # inside its tag so translators can see/translate it.
            from djangocms_translations.utils import get_text_field_child_label
            field = get_text_field_child_label(obj.plugin_type)
            content = getattr(obj, field) if field else ''
            return plugin_to_tag(obj, content)

        content = _plugin_tags_to_html(plugin_data[field], output_func=_render_plugin_with_content)
        subplugins_within_this_content = plugin_tags_to_id_list(content)
        return content, subplugins_within_this_content

    @staticmethod
    def set_translation_import_content(content, plugin):
        """Map child plugin id -> translated content extracted from the
        imported ``content`` (djangocms-translations import side).
        """
        data = [x.groups() for x in re.finditer(OBJ_ADMIN_WITH_CONTENT_RE_PATTERN, content)]
        data = {int(pk): value for pk, value in data}

        return {
            subplugin_id: data[subplugin_id]
            for subplugin_id in plugin_tags_to_id_list(content)
        }

    def get_editor_widget(self, request, plugins, plugin):
        """
        Returns the Django form Widget to be used for
        the text area
        """
        cancel_url_name = self.get_admin_url_name('delete_on_cancel')
        cancel_url = reverse('admin:%s' % cancel_url_name)

        render_plugin_url_name = self.get_admin_url_name('render_plugin')
        render_plugin_url = reverse('admin:%s' % render_plugin_url_name)

        action_token = self.get_action_token(request, plugin)

        # should we delete the text plugin when
        # the user cancels?
        delete_text_on_cancel = (
            'delete-on-cancel' in request.GET and  # noqa
            not plugin.get_plugin_instance()[0]
        )

        widget = TextEditorWidget(
            installed_plugins=plugins, pk=plugin.pk,
            placeholder=plugin.placeholder,
            plugin_language=plugin.language,
            configuration=self.ckeditor_configuration,
            render_plugin_url=render_plugin_url,
            cancel_url=cancel_url,
            action_token=action_token,
            delete_on_cancel=delete_text_on_cancel,
        )
        return widget

    def get_form_class(self, request, plugins, plugin):
        """
        Returns a subclass of Form to be used by this plugin
        """
        widget = self.get_editor_widget(
            request=request,
            plugins=plugins,
            plugin=plugin,
        )

        instance = plugin.get_plugin_instance()[0]

        if instance:
            context = RequestContext(request)
            context['request'] = request
            rendered_text = plugin_tags_to_admin_html(
                text=instance.body,
                context=context,
            )
        else:
            rendered_text = None

        # We avoid mutating the Form declared above by subclassing
        class TextPluginForm(self.form):
            body = CharField(widget=widget, required=False)

            def __init__(self, *args, **kwargs):
                initial = kwargs.pop('initial', {})

                if rendered_text:
                    initial['body'] = rendered_text

                super(TextPluginForm, self).__init__(*args, initial=initial, **kwargs)
        return TextPluginForm

    @xframe_options_sameorigin
    def add_view(self, request, form_url='', extra_context=None):
        """Custom add view: creates a stub CMSPlugin record on GET so the
        editor can attach child plugins before the text itself is saved,
        then redirects to the real add endpoint with delete-on-cancel set.
        """
        if 'plugin' in request.GET:
            # CMS >= 3.4 compatibility
            self.cms_plugin_instance = self._get_plugin_or_404(request.GET['plugin'])

        if getattr(self, 'cms_plugin_instance', None):
            # This can happen if the user did not properly cancel the plugin
            # and so a "ghost" plugin instance is left over.
            # The instance is a record that points to the Text plugin
            # but is not a real text plugin instance.
            return super(TextPlugin, self).add_view(
                request, form_url, extra_context
            )

        if not self.has_add_permission(request):
            # this permission check is done by Django on the normal
            # workflow of adding a plugin.
            # This is NOT the normal workflow because we create a plugin
            # on GET request to the /add/ endpoint and so we bypass
            # django's add_view, thus bypassing permission check.
            message = ugettext('You do not have permission to add a plugin.')
            return HttpResponseForbidden(force_text(message))

        try:
            # CMS 3.3 compatibility
            data = self.validate_add_request(request)
        except AttributeError:
            # CMS >= 3.4 compatibility
            _data = self._cms_initial_attributes
            data = {
                'plugin_language': _data['language'],
                'placeholder_id': _data['placeholder'],
                'parent': _data['parent'],
                'position': _data['position'],
                'plugin_type': _data['plugin_type'],
                'plugin_parent': _data['parent'],
            }
        except PermissionDenied:
            message = ugettext('You do not have permission to add a plugin.')
            return HttpResponseForbidden(force_text(message))
        except ValidationError as error:
            return HttpResponseBadRequest(error.message)

        # Sadly we have to create the CMSPlugin record on add GET request
        # because we need this record in order to allow the user to add
        # child plugins to the text (image, link, etc..)
        plugin = CMSPlugin.objects.create(
            language=data['plugin_language'],
            plugin_type=data['plugin_type'],
            position=data['position'],
            placeholder=data['placeholder_id'],
            parent=data.get('plugin_parent'),
        )
        query = request.GET.copy()
        query['plugin'] = text_type(plugin.pk)

        success_url = admin_reverse('cms_page_add_plugin')
        # Because we've created the cmsplugin record
        # we need to delete the plugin when a user cancels.
        success_url += '?delete-on-cancel&' + query.urlencode()
        return HttpResponseRedirect(success_url)

    def get_plugin_urls(self):
        """Register the extra admin endpoints used by the editor widget."""
        def pattern(regex, func):
            name = self.get_admin_url_name(func.__name__)
            return url(regex, func, name=name)

        url_patterns = [
            pattern(r'^render-plugin/$', self.render_plugin),
            pattern(r'^delete-on-cancel/$', self.delete_on_cancel),
        ]
        return url_patterns

    def get_admin_url_name(self, name):
        """Build the namespaced admin url name for view ``name``."""
        plugin_type = self.__class__.__name__.lower()
        url_name = '%s_%s_%s' % (self.model._meta.app_label, plugin_type, name)
        return url_name

    def _get_text_plugin_from_request(self, request, data):
        """Resolve the text plugin referenced by the signed action token
        in ``data``; raises PermissionDenied for non-staff users and
        ValidationError for a missing/invalid token.
        """
        if not (request.user.is_active and request.user.is_staff):
            raise PermissionDenied

        form = ActionTokenValidationForm(data)

        if form.is_valid():
            session_key = request.session.session_key
            text_plugin_id = form.get_id_from_token(session_key)

            if text_plugin_id:
                return self._get_plugin_or_404(text_plugin_id)

        message = ugettext('Unable to process your request. Invalid token.')
        raise ValidationError(message=force_text(message))

    @random_comment_exempt
    @xframe_options_sameorigin
    def render_plugin(self, request):
        """Admin endpoint: render one child plugin for editor preview."""
        try:
            text_plugin = self._get_text_plugin_from_request(request, data=request.GET)
        except ValidationError as error:
            return HttpResponseBadRequest(error.message)

        form = RenderPluginForm(request.GET, text_plugin=text_plugin)

        if not form.is_valid():
            message = ugettext('Unable to process your request.')
            return HttpResponseBadRequest(message)

        plugin_class = text_plugin.get_plugin_class_instance()
        # The following is needed for permission checking
        plugin_class.opts = plugin_class.model._meta

        if not (plugin_class.has_change_permission(request, obj=text_plugin) and  # noqa
                _user_can_change_placeholder(request, text_plugin.placeholder)):
            raise PermissionDenied
        return HttpResponse(form.render_plugin(request))

    @method_decorator(require_POST)
    @xframe_options_sameorigin
    @transaction.atomic
    def delete_on_cancel(self, request):
        """Admin endpoint: delete a ghost/child plugin when the user
        cancels the editor, bypassing the normal delete permissions.
        """
        # This view is responsible for deleting a plugin
        # bypassing the delete permissions.
        try:
            text_plugin = self._get_text_plugin_from_request(request, data=request.POST)
        except ValidationError as error:
            return HttpResponseBadRequest(error.message)

        # This form validates that the given plugin is a child
        # of the text plugin or is a text plugin.
        # If the plugin is a child then we validate that this child
        # is not present in the text plugin (because then it's not a cancel).
        # If the plugin is a text plugin then we validate that the text
        # plugin does NOT have a real instance attached.
        form = DeleteOnCancelForm(request.POST, text_plugin=text_plugin)

        if not form.is_valid():
            message = ugettext('Unable to process your request.')
            return HttpResponseBadRequest(message)

        plugin_class = text_plugin.get_plugin_class_instance()
        # The following is needed for permission checking
        plugin_class.opts = plugin_class.model._meta

        # Check for add permissions because this view is meant
        # only for plugins created through the ckeditor
        # and the ckeditor plugin itself.
        if not (plugin_class.has_add_permission(request) and  # noqa
                _user_can_change_placeholder(request, text_plugin.placeholder)):
            raise PermissionDenied
        # Token is validated after checking permissions
        # to avoid non-auth users from triggering validation mechanism.
        form.delete()
        # 204 -> request was successful but no response returned.
        return HttpResponse(status=204)

    @classmethod
    def get_child_plugin_candidates(cls, slot, page):
        # This plugin can only have text_enabled plugins
        # as children.
        text_enabled_plugins = plugin_pool.get_text_enabled_plugins(
            placeholder=slot,
            page=page,
        )
        return text_enabled_plugins

    def get_form(self, request, obj=None, **kwargs):
        """Build the plugin form with the CKEditor widget wired up for the
        current placeholder's allowed child plugins.
        """
        plugin = getattr(self, 'cms_plugin_instance', None) or obj
        get_plugin = plugin_pool.get_plugin
        child_plugin_types = self.get_child_classes(
            slot=plugin.placeholder.slot,
            page=self.page,
        )
        child_plugins = (get_plugin(name) for name in child_plugin_types)
        plugins = get_toolbar_plugin_struct(
            child_plugins,
            plugin.placeholder.slot,
            self.page,
        )
        form = self.get_form_class(
            request=request,
            plugins=plugins,
            plugin=plugin,
        )
        kwargs['form'] = form  # override standard form
        return super(TextPlugin, self).get_form(request, obj, **kwargs)

    def render(self, context, instance, placeholder):
        """Render the text body with plugin tags expanded for end users."""
        context.update({
            'body': plugin_tags_to_user_html(
                instance.body,
                context,
            ),
            'placeholder': placeholder,
            'object': instance
        })
        return context

    def save_model(self, request, obj, form, change):
        if getattr(self, 'cms_plugin_instance', None):
            # Because the plugin was created by manually
            # creating the CMSPlugin record, it's important
            # to assign all the values from the CMSPlugin record
            # to the real "non ghost" instance.
            fields = self.cms_plugin_instance._meta.fields

            for field in fields:
                # assign all the fields - we can do this, because object is
                # subclassing cms_plugin_instance (one to one relation)
                value = getattr(self.cms_plugin_instance, field.name)
                setattr(obj, field.name, value)

        super(TextPlugin, self).save_model(request, obj, form, change)
        # This must come after calling save
        # If `clean_plugins()` deletes child plugins, django-treebeard will call
        # save() again on the Text instance (aka obj in this context) to update mptt values (numchild, etc).
        # See this ticket for details https://github.com/divio/djangocms-text-ckeditor/issues/212
        obj.clean_plugins()

    def get_action_token(self, request, obj):
        """Sign the plugin pk with a per-session salt for the editor
        endpoints (render_plugin / delete_on_cancel).
        """
        plugin_id = force_text(obj.pk)
        # salt is different for every user
        signer = signing.Signer(salt=request.session.session_key)
        return signer.sign(plugin_id)

    def _get_plugin_or_404(self, pk):
        """Fetch a CMSPlugin of this plugin type by pk or raise Http404."""
        plugin_type = self.__class__.__name__
        plugins = (
            CMSPlugin
            .objects
            .select_related('placeholder', 'parent')
            .filter(plugin_type=plugin_type)
        )

        field = self.model._meta.pk

        try:
            object_id = field.to_python(unquote(pk))
        except (ValidationError, ValueError):
            raise Http404('Invalid plugin id')
        return get_object_or_404(plugins, pk=object_id)
plugin_pool.register_plugin(TextPlugin)
| |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.net.proto import ProtocolBuffer
import array
import dummy_thread as thread
__pychecker__ = """maxreturns=0 maxbranches=0 no-callinit
unusednames=printElemNumber,debug_strs no-special"""
from google.appengine.api.api_base_pb import *
class BlobstoreServiceError(ProtocolBuffer.ProtocolMessage):
  """Blobstore service error codes.

  NOTE(review): this is machine-generated Python 2 protocol buffer code
  (``xrange``, ``dummy_thread``); keep hand edits to comments only.
  This message carries no fields -- only the error-code enum below.
  """

  # Enum values for the service error codes.
  OK = 0
  INTERNAL_ERROR = 1
  URL_TOO_LONG = 2
  PERMISSION_DENIED = 3
  BLOB_NOT_FOUND = 4
  DATA_INDEX_OUT_OF_RANGE = 5
  BLOB_FETCH_SIZE_TOO_LARGE = 6

  _ErrorCode_NAMES = {
    0: "OK",
    1: "INTERNAL_ERROR",
    2: "URL_TOO_LONG",
    3: "PERMISSION_DENIED",
    4: "BLOB_NOT_FOUND",
    5: "DATA_INDEX_OUT_OF_RANGE",
    6: "BLOB_FETCH_SIZE_TOO_LARGE",
  }

  # Map an error-code value to its symbolic name ("" when unknown).
  def ErrorCode_Name(cls, x): return cls._ErrorCode_NAMES.get(x, "")
  ErrorCode_Name = classmethod(ErrorCode_Name)

  def __init__(self, contents=None):
    pass
    if contents is not None: self.MergeFromString(contents)

  def MergeFrom(self, x):
    assert x is not self

  def Equals(self, x):
    # All instances are equal: the message has no fields.
    if x is self: return 1
    return 1

  def IsInitialized(self, debug_strs=None):
    initialized = 1
    return initialized

  def ByteSize(self):
    n = 0
    return n + 0

  def Clear(self):
    pass

  def OutputUnchecked(self, out):
    pass

  def TryMerge(self, d):
    # Skip every field: none are known for this message.
    while d.avail() > 0:
      tt = d.getVarInt32()
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    return res

  # Evaluated at class-body time to build the tag lookup tables below.
  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
  }, 0)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
  }, 0, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
class CreateUploadURLRequest(ProtocolBuffer.ProtocolMessage):
  """Generated message with one required string field: success_path.

  NOTE(review): machine-generated Python 2 protocol buffer code; keep
  hand edits to comments only.
  """
  # Presence flag and default value for the success_path field.
  has_success_path_ = 0
  success_path_ = ""

  def __init__(self, contents=None):
    if contents is not None: self.MergeFromString(contents)

  def success_path(self): return self.success_path_

  def set_success_path(self, x):
    self.has_success_path_ = 1
    self.success_path_ = x

  def clear_success_path(self):
    if self.has_success_path_:
      self.has_success_path_ = 0
      self.success_path_ = ""

  def has_success_path(self): return self.has_success_path_

  def MergeFrom(self, x):
    assert x is not self
    if (x.has_success_path()): self.set_success_path(x.success_path())

  def Equals(self, x):
    if x is self: return 1
    if self.has_success_path_ != x.has_success_path_: return 0
    if self.has_success_path_ and self.success_path_ != x.success_path_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    # success_path is a required field.
    initialized = 1
    if (not self.has_success_path_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: success_path not set.')
    return initialized

  def ByteSize(self):
    n = 0
    n += self.lengthString(len(self.success_path_))
    return n + 1

  def Clear(self):
    self.clear_success_path()

  def OutputUnchecked(self, out):
    # Tag 10 = field 1, wire type 2 (length-delimited).
    out.putVarInt32(10)
    out.putPrefixedString(self.success_path_)

  def TryMerge(self, d):
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        self.set_success_path(d.getPrefixedString())
        continue
      # Tag 0 is invalid; anything else unknown is skipped.
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    if self.has_success_path_: res+=prefix+("success_path: %s\n" % self.DebugFormatString(self.success_path_))
    return res

  # Evaluated at class-body time to build the tag lookup tables below.
  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  ksuccess_path = 1

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "success_path",
  }, 1)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
  }, 1, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
class CreateUploadURLResponse(ProtocolBuffer.ProtocolMessage):
  """Generated message with one required string field: url.

  NOTE(review): machine-generated Python 2 protocol buffer code; keep
  hand edits to comments only.
  """
  # Presence flag and default value for the url field.
  has_url_ = 0
  url_ = ""

  def __init__(self, contents=None):
    if contents is not None: self.MergeFromString(contents)

  def url(self): return self.url_

  def set_url(self, x):
    self.has_url_ = 1
    self.url_ = x

  def clear_url(self):
    if self.has_url_:
      self.has_url_ = 0
      self.url_ = ""

  def has_url(self): return self.has_url_

  def MergeFrom(self, x):
    assert x is not self
    if (x.has_url()): self.set_url(x.url())

  def Equals(self, x):
    if x is self: return 1
    if self.has_url_ != x.has_url_: return 0
    if self.has_url_ and self.url_ != x.url_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    # url is a required field.
    initialized = 1
    if (not self.has_url_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: url not set.')
    return initialized

  def ByteSize(self):
    n = 0
    n += self.lengthString(len(self.url_))
    return n + 1

  def Clear(self):
    self.clear_url()

  def OutputUnchecked(self, out):
    # Tag 10 = field 1, wire type 2 (length-delimited).
    out.putVarInt32(10)
    out.putPrefixedString(self.url_)

  def TryMerge(self, d):
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        self.set_url(d.getPrefixedString())
        continue
      # Tag 0 is invalid; anything else unknown is skipped.
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    if self.has_url_: res+=prefix+("url: %s\n" % self.DebugFormatString(self.url_))
    return res

  # Evaluated at class-body time to build the tag lookup tables below.
  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  kurl = 1

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "url",
  }, 1)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
  }, 1, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
class DeleteBlobRequest(ProtocolBuffer.ProtocolMessage):
  """Generated message with one repeated string field: blob_key.

  NOTE(review): machine-generated Python 2 protocol buffer code; keep
  hand edits to comments only.
  """

  def __init__(self, contents=None):
    # Repeated field storage: list of blob key strings.
    self.blob_key_ = []
    if contents is not None: self.MergeFromString(contents)

  def blob_key_size(self): return len(self.blob_key_)
  def blob_key_list(self): return self.blob_key_

  def blob_key(self, i):
    return self.blob_key_[i]

  def set_blob_key(self, i, x):
    self.blob_key_[i] = x

  def add_blob_key(self, x):
    self.blob_key_.append(x)

  def clear_blob_key(self):
    self.blob_key_ = []

  def MergeFrom(self, x):
    assert x is not self
    for i in xrange(x.blob_key_size()): self.add_blob_key(x.blob_key(i))

  def Equals(self, x):
    if x is self: return 1
    # Repeated fields compare element-wise, order-sensitive.
    if len(self.blob_key_) != len(x.blob_key_): return 0
    for e1, e2 in zip(self.blob_key_, x.blob_key_):
      if e1 != e2: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    initialized = 1
    return initialized

  def ByteSize(self):
    n = 0
    # One byte of tag per element plus each length-prefixed string.
    n += 1 * len(self.blob_key_)
    for i in xrange(len(self.blob_key_)): n += self.lengthString(len(self.blob_key_[i]))
    return n + 0

  def Clear(self):
    self.clear_blob_key()

  def OutputUnchecked(self, out):
    for i in xrange(len(self.blob_key_)):
      # Tag 10 = field 1, wire type 2 (length-delimited).
      out.putVarInt32(10)
      out.putPrefixedString(self.blob_key_[i])

  def TryMerge(self, d):
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        self.add_blob_key(d.getPrefixedString())
        continue
      # Tag 0 is invalid; anything else unknown is skipped.
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    cnt=0
    for e in self.blob_key_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("blob_key%s: %s\n" % (elm, self.DebugFormatString(e)))
      cnt+=1
    return res

  # Evaluated at class-body time to build the tag lookup tables below.
  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  kblob_key = 1

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "blob_key",
  }, 1)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
  }, 1, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
class FetchDataRequest(ProtocolBuffer.ProtocolMessage):
has_blob_key_ = 0
blob_key_ = ""
has_start_index_ = 0
start_index_ = 0
has_end_index_ = 0
end_index_ = 0
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def blob_key(self): return self.blob_key_
def set_blob_key(self, x):
self.has_blob_key_ = 1
self.blob_key_ = x
def clear_blob_key(self):
if self.has_blob_key_:
self.has_blob_key_ = 0
self.blob_key_ = ""
def has_blob_key(self): return self.has_blob_key_
def start_index(self): return self.start_index_
def set_start_index(self, x):
self.has_start_index_ = 1
self.start_index_ = x
def clear_start_index(self):
if self.has_start_index_:
self.has_start_index_ = 0
self.start_index_ = 0
def has_start_index(self): return self.has_start_index_
def end_index(self): return self.end_index_
def set_end_index(self, x):
self.has_end_index_ = 1
self.end_index_ = x
def clear_end_index(self):
if self.has_end_index_:
self.has_end_index_ = 0
self.end_index_ = 0
def has_end_index(self): return self.has_end_index_
def MergeFrom(self, x):
assert x is not self
if (x.has_blob_key()): self.set_blob_key(x.blob_key())
if (x.has_start_index()): self.set_start_index(x.start_index())
if (x.has_end_index()): self.set_end_index(x.end_index())
def Equals(self, x):
if x is self: return 1
if self.has_blob_key_ != x.has_blob_key_: return 0
if self.has_blob_key_ and self.blob_key_ != x.blob_key_: return 0
if self.has_start_index_ != x.has_start_index_: return 0
if self.has_start_index_ and self.start_index_ != x.start_index_: return 0
if self.has_end_index_ != x.has_end_index_: return 0
if self.has_end_index_ and self.end_index_ != x.end_index_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_blob_key_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: blob_key not set.')
if (not self.has_start_index_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: start_index not set.')
if (not self.has_end_index_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: end_index not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.blob_key_))
n += self.lengthVarInt64(self.start_index_)
n += self.lengthVarInt64(self.end_index_)
return n + 3
def Clear(self):
self.clear_blob_key()
self.clear_start_index()
self.clear_end_index()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.blob_key_)
out.putVarInt32(16)
out.putVarInt64(self.start_index_)
out.putVarInt32(24)
out.putVarInt64(self.end_index_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_blob_key(d.getPrefixedString())
continue
if tt == 16:
self.set_start_index(d.getVarInt64())
continue
if tt == 24:
self.set_end_index(d.getVarInt64())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_blob_key_: res+=prefix+("blob_key: %s\n" % self.DebugFormatString(self.blob_key_))
if self.has_start_index_: res+=prefix+("start_index: %s\n" % self.DebugFormatInt64(self.start_index_))
if self.has_end_index_: res+=prefix+("end_index: %s\n" % self.DebugFormatInt64(self.end_index_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kblob_key = 1
kstart_index = 2
kend_index = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "blob_key",
2: "start_index",
3: "end_index",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.NUMERIC,
3: ProtocolBuffer.Encoder.NUMERIC,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
class FetchDataResponse(ProtocolBuffer.ProtocolMessage):
has_data_ = 0
data_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def data(self): return self.data_
def set_data(self, x):
self.has_data_ = 1
self.data_ = x
def clear_data(self):
if self.has_data_:
self.has_data_ = 0
self.data_ = ""
def has_data(self): return self.has_data_
def MergeFrom(self, x):
assert x is not self
if (x.has_data()): self.set_data(x.data())
def Equals(self, x):
if x is self: return 1
if self.has_data_ != x.has_data_: return 0
if self.has_data_ and self.data_ != x.data_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_data_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: data not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.data_))
return n + 2
def Clear(self):
self.clear_data()
def OutputUnchecked(self, out):
out.putVarInt32(8002)
out.putPrefixedString(self.data_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8002:
self.set_data(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_data_: res+=prefix+("data: %s\n" % self.DebugFormatString(self.data_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kdata = 1000
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1000: "data",
}, 1000)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1000: ProtocolBuffer.Encoder.STRING,
}, 1000, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
class DecodeBlobKeyRequest(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
self.blob_key_ = []
if contents is not None: self.MergeFromString(contents)
def blob_key_size(self): return len(self.blob_key_)
def blob_key_list(self): return self.blob_key_
def blob_key(self, i):
return self.blob_key_[i]
def set_blob_key(self, i, x):
self.blob_key_[i] = x
def add_blob_key(self, x):
self.blob_key_.append(x)
def clear_blob_key(self):
self.blob_key_ = []
def MergeFrom(self, x):
assert x is not self
for i in xrange(x.blob_key_size()): self.add_blob_key(x.blob_key(i))
def Equals(self, x):
if x is self: return 1
if len(self.blob_key_) != len(x.blob_key_): return 0
for e1, e2 in zip(self.blob_key_, x.blob_key_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
n += 1 * len(self.blob_key_)
for i in xrange(len(self.blob_key_)): n += self.lengthString(len(self.blob_key_[i]))
return n + 0
def Clear(self):
self.clear_blob_key()
def OutputUnchecked(self, out):
for i in xrange(len(self.blob_key_)):
out.putVarInt32(10)
out.putPrefixedString(self.blob_key_[i])
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.add_blob_key(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
cnt=0
for e in self.blob_key_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("blob_key%s: %s\n" % (elm, self.DebugFormatString(e)))
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kblob_key = 1
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "blob_key",
}, 1)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
}, 1, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
class DecodeBlobKeyResponse(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
self.decoded_ = []
if contents is not None: self.MergeFromString(contents)
def decoded_size(self): return len(self.decoded_)
def decoded_list(self): return self.decoded_
def decoded(self, i):
return self.decoded_[i]
def set_decoded(self, i, x):
self.decoded_[i] = x
def add_decoded(self, x):
self.decoded_.append(x)
def clear_decoded(self):
self.decoded_ = []
def MergeFrom(self, x):
assert x is not self
for i in xrange(x.decoded_size()): self.add_decoded(x.decoded(i))
def Equals(self, x):
if x is self: return 1
if len(self.decoded_) != len(x.decoded_): return 0
for e1, e2 in zip(self.decoded_, x.decoded_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
n += 1 * len(self.decoded_)
for i in xrange(len(self.decoded_)): n += self.lengthString(len(self.decoded_[i]))
return n + 0
def Clear(self):
self.clear_decoded()
def OutputUnchecked(self, out):
for i in xrange(len(self.decoded_)):
out.putVarInt32(10)
out.putPrefixedString(self.decoded_[i])
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.add_decoded(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
cnt=0
for e in self.decoded_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("decoded%s: %s\n" % (elm, self.DebugFormatString(e)))
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kdecoded = 1
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "decoded",
}, 1)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
}, 1, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
__all__ = ['BlobstoreServiceError','CreateUploadURLRequest','CreateUploadURLResponse','DeleteBlobRequest','FetchDataRequest','FetchDataResponse','DecodeBlobKeyRequest','DecodeBlobKeyResponse']
| |
# Based on github.com/lambci/docker-lambda/python2.7/run/runtime-mock.py version cf93693
from __future__ import print_function
import sys
import os
import random
import uuid
import time
import resource
import datetime
import json
# hijack stdout so it won't be taken by mistake
orig_stdout = os.fdopen(os.dup(1),"w")
os.dup2(2,1)
orig_stderr = sys.stderr
if os.environ.get('_MLESS_DEBUGGER') == "ptvsd":
import ptvsd
ptvsd.enable_attach(secret=None)
def eprint(*args, **kwargs):
    """Log to the real stderr and flush immediately.

    Stdout is reserved for the JSON control protocol, so all diagnostics
    go through orig_stderr.  Flush the stream we actually wrote to —
    the original flushed sys.stdout, which is a different object and
    leaves the just-printed message sitting in orig_stderr's buffer.
    """
    print(*args, file=orig_stderr, **kwargs)
    orig_stderr.flush()
def _random_invoke_id():
return str(uuid.uuid4())
def jsonStreamReader(f):
    """Yield JSON values decoded from a line-oriented text stream.

    Reads *f* line by line, buffering partial input so a single JSON value
    may span multiple lines.  Raises ValueError when the buffered data is
    malformed: either the same decode error repeats with no progress, or
    undecoded data remains at end of stream.

    Fixes vs. the original: Python 3 exceptions have no ``.message``
    attribute (compare ``str(e)`` instead), the internal
    ``json.decoder.WHITESPACE`` regex is replaced by an explicit strip of
    the four JSON whitespace characters, and the loop variable no longer
    shadows the ``next`` builtin.
    """
    data = ""
    decoder = json.JSONDecoder()
    ex = None
    while True:
        line = f.readline()
        if line == "":
            break  # EOF
        data = data + line
        while data != "":
            # Skip inter-value JSON whitespace (space, tab, CR, LF).
            data = data.lstrip(" \t\n\r")
            if data == "":
                break
            try:
                obj, end = decoder.raw_decode(data)
                yield obj
                data = data[end:]
                ex = None
            except ValueError as e:
                # The same error repeating without any data being consumed
                # means the input is malformed, not merely incomplete.
                if ex is not None and str(ex) == str(e):
                    raise ValueError("%s while decoding '%s'" % (e, data))
                ex = e
                break
    if ex:
        raise ValueError("%s while decoding '%s'" % (ex, data))
_GLOBAL_MEM_SIZE = os.environ.get('AWS_LAMBDA_FUNCTION_MEMORY_SIZE', '1536') # TODO
_GLOBAL_TIMEOUT = int(os.environ.get('AWS_LAMBDA_FUNCTION_TIMEOUT', '300')) # TODO
_GLOBAL_MODE = 'event' # Either 'http' or 'event'
_GLOBAL_SUPRESS_INIT = True # Forces calling _get_handlers_delayed()
_GLOBAL_DATA_SOCK = -1
_GLOBAL_INVOKED = False
_GLOBAL_ERRORED = False
_GLOBAL_START_TIME = None
_GLOBAL_MESSAGE_READER = jsonStreamReader(sys.stdin)
def report_user_init_start():
return
def report_user_init_end():
return
def report_user_invoke_start():
return
def report_user_invoke_end():
return
def receive_start():
sys.stdout = orig_stderr
sys.stderr = orig_stderr
eprint("recieved start called")
msg=next(_GLOBAL_MESSAGE_READER)
eprint("start message:", msg)
handler = msg["handler"]
os.environ["_HANDLER"] = handler
eprint("Setting handler:",msg["handler"])
global _GLOBAL_CREDENTIALS
_GLOBAL_CREDENTIALS = {
"key": os.environ.get("AWS_ACCESS_KEY_ID"),
"secret": os.environ.get("AWS_SECRET_ACCESS_KEY"),
"session":os.environ.get("AWS_SESSION_TOKEN"),
}
for k in msg["env"]:
eprint("setting env %s=%s" % (k,msg["env"][k]))
os.environ[k] = msg["env"][k]
eprint("Debugger: ", os.environ.get('_MLESS_DEBUGGER'))
if os.environ.get('_MLESS_DEBUGGER') == "pydevd":
dhost = os.environ.get('_MLESS_DEBUG_HOST')
eprint("Need to start debugger: "+dhost)
if os.environ.get('PATHS_FROM_ECLIPSE_TO_PYTHON') == None:
os.environ['PATHS_FROM_ECLIPSE_TO_PYTHON'] = json.dumps( [ [os.environ.get('_MLESS_DESKTOP_SOURCES'),'/var/task']])
eprint("Pathes: " + os.environ['PATHS_FROM_ECLIPSE_TO_PYTHON'])
import pydevd
pydevd.settrace(host=dhost, suspend=False, stdoutToServer=True, stderrToServer=True)
eprint("after calling settrace")
elif os.environ.get('_MLESS_DEBUGGER') == "ptvsd":
eprint("Waiting for debugger to attach")
ptvsd.wait_for_attach()
eprint("Debugger attached")
return (
_random_invoke_id(),
_GLOBAL_MODE,
handler,
_GLOBAL_SUPRESS_INIT,
_GLOBAL_CREDENTIALS
)
def report_running(invokeid):
    """Acknowledge on the control channel that the runtime is up.

    The invokeid argument is accepted for interface compatibility but is
    not included in the acknowledgement.
    """
    ack = { "ok": True }
    print(json.dumps(ack)+"\n",file=orig_stdout)
    orig_stdout.flush()
    return
def receive_invoke():
eprint("receive_invoke:")
msg=next(_GLOBAL_MESSAGE_READER)
global _GLOBAL_INVOKED
global _GLOBAL_START_TIME
global _GLOBAL_DEADLINE
_GLOBAL_INVOKED = True
_GLOBAL_START_TIME = time.time()
_GLOBAL_DEADLINE = msg.get("deadline")/1000
context = msg['context']
context_objs = {
'clientcontext': context.get('client_context'),
}
identity=context.get('identity')
if identity != None:
context_objs['cognitoidentityid'] = identity['cognito_identity_id']
context_objs['cognitopoolid'] = identity['cognito_identity_pool_id']
eprint("Start RequestId: %s Version: %s" % (context['aws_request_id'], os.environ.get("AWS_LAMBDA_FUNCTION_VERSION") ))
return (
context['aws_request_id'],
_GLOBAL_DATA_SOCK,
_GLOBAL_CREDENTIALS,
json.dumps(msg['event']),
context_objs,
context['invoked_function_arn'],
None, # What do we for xray_trace_id?
)
def report_fault(invokeid, msg, except_value, trace):
global _GLOBAL_ERRORED
_GLOBAL_ERRORED = True
if msg and except_value:
eprint('%s: %s' % (msg, except_value))
if trace:
eprint('%s' % trace)
return
def report_done(invokeid, errortype, result):
    """Report invocation completion on the control channel.

    Emits a JSON record (result, error info, billing stats) to the real
    stdout and resets the per-invoke error flag.  No-op unless an invoke
    was actually received.  The original computed an unused
    billed_duration value; that dead code is removed.
    """
    global _GLOBAL_INVOKED
    global _GLOBAL_ERRORED
    if not _GLOBAL_INVOKED:
        return
    eprint("END RequestId: %s" % invokeid)
    # Wall-clock duration of the invocation, in milliseconds.
    duration = int((time.time() - _GLOBAL_START_TIME) * 1000)
    # Peak resident set size in MB (ru_maxrss is KB on Linux; NOTE: it is
    # bytes on macOS, so this figure is only accurate on Linux).
    max_mem = int(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024)
    res = {
        "result": result,
        "errortype": errortype,
        "invokeid": invokeid, # TODO needed?
        "errors": _GLOBAL_ERRORED,
        "billing": {
            "duration": duration,
            "memory": _GLOBAL_MEM_SIZE,
            "used": max_mem,
        }
    }
    print(json.dumps(res)+"\n",file=orig_stdout)
    orig_stdout.flush()
    _GLOBAL_ERRORED = False
def report_xray_exception(xray_json):
return
def log_bytes(msg, fileno):
eprint(msg)
return
def log_sb(msg):
return
def get_remaining_time():
return int(1000*(_GLOBAL_DEADLINE - time.time()))
def send_console_message(msg):
eprint(msg)
return
| |
from __future__ import unicode_literals
from __future__ import absolute_import
import os
from os import path
from docker.errors import APIError
from mock import patch
import tempfile
import shutil
from six import StringIO, text_type
from compose import __version__
from compose.const import (
LABEL_CONTAINER_NUMBER,
LABEL_ONE_OFF,
LABEL_PROJECT,
LABEL_SERVICE,
LABEL_VERSION,
)
from compose.service import (
ConfigError,
ConvergencePlan,
Service,
build_extra_hosts,
)
from compose.container import Container
from .testcases import DockerClientTestCase
def create_and_start_container(service, **override_options):
    """Create a container for *service* (honouring overrides) and start it."""
    new_container = service.create_container(**override_options)
    return service.start_container(new_container)
class ServiceTest(DockerClientTestCase):
def test_containers(self):
foo = self.create_service('foo')
bar = self.create_service('bar')
create_and_start_container(foo)
self.assertEqual(len(foo.containers()), 1)
self.assertEqual(foo.containers()[0].name, 'composetest_foo_1')
self.assertEqual(len(bar.containers()), 0)
create_and_start_container(bar)
create_and_start_container(bar)
self.assertEqual(len(foo.containers()), 1)
self.assertEqual(len(bar.containers()), 2)
names = [c.name for c in bar.containers()]
self.assertIn('composetest_bar_1', names)
self.assertIn('composetest_bar_2', names)
def test_containers_one_off(self):
db = self.create_service('db')
container = db.create_container(one_off=True)
self.assertEqual(db.containers(stopped=True), [])
self.assertEqual(db.containers(one_off=True, stopped=True), [container])
def test_project_is_added_to_container_name(self):
service = self.create_service('web')
create_and_start_container(service)
self.assertEqual(service.containers()[0].name, 'composetest_web_1')
def test_start_stop(self):
service = self.create_service('scalingtest')
self.assertEqual(len(service.containers(stopped=True)), 0)
service.create_container()
self.assertEqual(len(service.containers()), 0)
self.assertEqual(len(service.containers(stopped=True)), 1)
service.start()
self.assertEqual(len(service.containers()), 1)
self.assertEqual(len(service.containers(stopped=True)), 1)
service.stop(timeout=1)
self.assertEqual(len(service.containers()), 0)
self.assertEqual(len(service.containers(stopped=True)), 1)
service.stop(timeout=1)
self.assertEqual(len(service.containers()), 0)
self.assertEqual(len(service.containers(stopped=True)), 1)
def test_kill_remove(self):
service = self.create_service('scalingtest')
create_and_start_container(service)
self.assertEqual(len(service.containers()), 1)
service.remove_stopped()
self.assertEqual(len(service.containers()), 1)
service.kill()
self.assertEqual(len(service.containers()), 0)
self.assertEqual(len(service.containers(stopped=True)), 1)
service.remove_stopped()
self.assertEqual(len(service.containers(stopped=True)), 0)
def test_create_container_with_one_off(self):
db = self.create_service('db')
container = db.create_container(one_off=True)
self.assertEqual(container.name, 'composetest_db_run_1')
def test_create_container_with_one_off_when_existing_container_is_running(self):
db = self.create_service('db')
db.start()
container = db.create_container(one_off=True)
self.assertEqual(container.name, 'composetest_db_run_1')
def test_create_container_with_unspecified_volume(self):
service = self.create_service('db', volumes=['/var/db'])
container = service.create_container()
service.start_container(container)
self.assertIn('/var/db', container.get('Volumes'))
def test_create_container_with_volume_driver(self):
service = self.create_service('db', volume_driver='foodriver')
container = service.create_container()
service.start_container(container)
self.assertEqual('foodriver', container.get('Config.VolumeDriver'))
def test_create_container_with_cpu_shares(self):
service = self.create_service('db', cpu_shares=73)
container = service.create_container()
service.start_container(container)
self.assertEqual(container.get('HostConfig.CpuShares'), 73)
def test_build_extra_hosts(self):
# string
self.assertRaises(ConfigError, lambda: build_extra_hosts("www.example.com: 192.168.0.17"))
# list of strings
self.assertEqual(build_extra_hosts(
["www.example.com:192.168.0.17"]),
{'www.example.com': '192.168.0.17'})
self.assertEqual(build_extra_hosts(
["www.example.com: 192.168.0.17"]),
{'www.example.com': '192.168.0.17'})
self.assertEqual(build_extra_hosts(
["www.example.com: 192.168.0.17",
"static.example.com:192.168.0.19",
"api.example.com: 192.168.0.18"]),
{'www.example.com': '192.168.0.17',
'static.example.com': '192.168.0.19',
'api.example.com': '192.168.0.18'})
# list of dictionaries
self.assertRaises(ConfigError, lambda: build_extra_hosts(
[{'www.example.com': '192.168.0.17'},
{'api.example.com': '192.168.0.18'}]))
# dictionaries
self.assertEqual(build_extra_hosts(
{'www.example.com': '192.168.0.17',
'api.example.com': '192.168.0.18'}),
{'www.example.com': '192.168.0.17',
'api.example.com': '192.168.0.18'})
def test_create_container_with_extra_hosts_list(self):
extra_hosts = ['somehost:162.242.195.82', 'otherhost:50.31.209.229']
service = self.create_service('db', extra_hosts=extra_hosts)
container = service.create_container()
service.start_container(container)
self.assertEqual(set(container.get('HostConfig.ExtraHosts')), set(extra_hosts))
def test_create_container_with_extra_hosts_string(self):
extra_hosts = 'somehost:162.242.195.82'
service = self.create_service('db', extra_hosts=extra_hosts)
self.assertRaises(ConfigError, lambda: service.create_container())
def test_create_container_with_extra_hosts_list_of_dicts(self):
extra_hosts = [{'somehost': '162.242.195.82'}, {'otherhost': '50.31.209.229'}]
service = self.create_service('db', extra_hosts=extra_hosts)
self.assertRaises(ConfigError, lambda: service.create_container())
def test_create_container_with_extra_hosts_dicts(self):
extra_hosts = {'somehost': '162.242.195.82', 'otherhost': '50.31.209.229'}
extra_hosts_list = ['somehost:162.242.195.82', 'otherhost:50.31.209.229']
service = self.create_service('db', extra_hosts=extra_hosts)
container = service.create_container()
service.start_container(container)
self.assertEqual(set(container.get('HostConfig.ExtraHosts')), set(extra_hosts_list))
def test_create_container_with_cpu_set(self):
service = self.create_service('db', cpuset='0')
container = service.create_container()
service.start_container(container)
self.assertEqual(container.get('HostConfig.CpusetCpus'), '0')
def test_create_container_with_read_only_root_fs(self):
read_only = True
service = self.create_service('db', read_only=read_only)
container = service.create_container()
service.start_container(container)
self.assertEqual(container.get('HostConfig.ReadonlyRootfs'), read_only, container.get('HostConfig'))
def test_create_container_with_security_opt(self):
security_opt = ['label:disable']
service = self.create_service('db', security_opt=security_opt)
container = service.create_container()
service.start_container(container)
self.assertEqual(set(container.get('HostConfig.SecurityOpt')), set(security_opt))
def test_create_container_with_mac_address(self):
service = self.create_service('db', mac_address='02:42:ac:11:65:43')
container = service.create_container()
service.start_container(container)
self.assertEqual(container.inspect()['Config']['MacAddress'], '02:42:ac:11:65:43')
def test_create_container_with_specified_volume(self):
host_path = '/tmp/host-path'
container_path = '/container-path'
service = self.create_service('db', volumes=['%s:%s' % (host_path, container_path)])
container = service.create_container()
service.start_container(container)
volumes = container.inspect()['Volumes']
self.assertIn(container_path, volumes)
# Match the last component ("host-path"), because boot2docker symlinks /tmp
actual_host_path = volumes[container_path]
self.assertTrue(path.basename(actual_host_path) == path.basename(host_path),
msg=("Last component differs: %s, %s" % (actual_host_path, host_path)))
def test_recreate_preserves_volume_with_trailing_slash(self):
"""
When the Compose file specifies a trailing slash in the container path, make
sure we copy the volume over when recreating.
"""
service = self.create_service('data', volumes=['/data/'])
old_container = create_and_start_container(service)
volume_path = old_container.get('Volumes')['/data']
new_container = service.recreate_container(old_container)
self.assertEqual(new_container.get('Volumes')['/data'], volume_path)
def test_duplicate_volume_trailing_slash(self):
"""
When an image specifies a volume, and the Compose file specifies a host path
but adds a trailing slash, make sure that we don't create duplicate binds.
"""
host_path = '/tmp/data'
container_path = '/data'
volumes = ['{}:{}/'.format(host_path, container_path)]
tmp_container = self.client.create_container(
'busybox', 'true',
volumes={container_path: {}},
labels={'com.docker.compose.test_image': 'true'},
)
image = self.client.commit(tmp_container)['Id']
service = self.create_service('db', image=image, volumes=volumes)
old_container = create_and_start_container(service)
self.assertEqual(
old_container.get('Config.Volumes'),
{container_path: {}},
)
service = self.create_service('db', image=image, volumes=volumes)
new_container = service.recreate_container(old_container)
self.assertEqual(
new_container.get('Config.Volumes'),
{container_path: {}},
)
self.assertEqual(service.containers(stopped=False), [new_container])
@patch.dict(os.environ)
def test_create_container_with_home_and_env_var_in_volume_path(self):
os.environ['VOLUME_NAME'] = 'my-volume'
os.environ['HOME'] = '/tmp/home-dir'
expected_host_path = os.path.join(os.environ['HOME'], os.environ['VOLUME_NAME'])
host_path = '~/${VOLUME_NAME}'
container_path = '/container-path'
service = self.create_service('db', volumes=['%s:%s' % (host_path, container_path)])
container = service.create_container()
service.start_container(container)
actual_host_path = container.get('Volumes')[container_path]
components = actual_host_path.split('/')
self.assertTrue(components[-2:] == ['home-dir', 'my-volume'],
msg="Last two components differ: %s, %s" % (actual_host_path, expected_host_path))
def test_create_container_with_volumes_from(self):
volume_service = self.create_service('data')
volume_container_1 = volume_service.create_container()
volume_container_2 = Container.create(
self.client,
image='busybox:latest',
command=["top"],
labels={LABEL_PROJECT: 'composetest'},
)
host_service = self.create_service('host', volumes_from=[volume_service, volume_container_2])
host_container = host_service.create_container()
host_service.start_container(host_container)
self.assertIn(volume_container_1.id,
host_container.get('HostConfig.VolumesFrom'))
self.assertIn(volume_container_2.id,
host_container.get('HostConfig.VolumesFrom'))
def test_execute_convergence_plan_recreate(self):
service = self.create_service(
'db',
environment={'FOO': '1'},
volumes=['/etc'],
entrypoint=['top'],
command=['-d', '1']
)
old_container = service.create_container()
self.assertEqual(old_container.get('Config.Entrypoint'), ['top'])
self.assertEqual(old_container.get('Config.Cmd'), ['-d', '1'])
self.assertIn('FOO=1', old_container.get('Config.Env'))
self.assertEqual(old_container.name, 'composetest_db_1')
service.start_container(old_container)
old_container.inspect() # reload volume data
volume_path = old_container.get('Volumes')['/etc']
num_containers_before = len(self.client.containers(all=True))
service.options['environment']['FOO'] = '2'
new_container, = service.execute_convergence_plan(
ConvergencePlan('recreate', [old_container]))
self.assertEqual(new_container.get('Config.Entrypoint'), ['top'])
self.assertEqual(new_container.get('Config.Cmd'), ['-d', '1'])
self.assertIn('FOO=2', new_container.get('Config.Env'))
self.assertEqual(new_container.name, 'composetest_db_1')
self.assertEqual(new_container.get('Volumes')['/etc'], volume_path)
self.assertIn(
'affinity:container==%s' % old_container.id,
new_container.get('Config.Env'))
self.assertEqual(len(self.client.containers(all=True)), num_containers_before)
self.assertNotEqual(old_container.id, new_container.id)
self.assertRaises(APIError,
self.client.inspect_container,
old_container.id)
def test_execute_convergence_plan_when_containers_are_stopped(self):
service = self.create_service(
'db',
environment={'FOO': '1'},
volumes=['/var/db'],
entrypoint=['top'],
command=['-d', '1']
)
service.create_container()
containers = service.containers(stopped=True)
self.assertEqual(len(containers), 1)
container, = containers
self.assertFalse(container.is_running)
service.execute_convergence_plan(ConvergencePlan('start', [container]))
containers = service.containers()
self.assertEqual(len(containers), 1)
container.inspect()
self.assertEqual(container, containers[0])
self.assertTrue(container.is_running)
def test_execute_convergence_plan_with_image_declared_volume(self):
service = Service(
project='composetest',
name='db',
client=self.client,
build='tests/fixtures/dockerfile-with-volume',
)
old_container = create_and_start_container(service)
self.assertEqual(old_container.get('Volumes').keys(), ['/data'])
volume_path = old_container.get('Volumes')['/data']
new_container, = service.execute_convergence_plan(
ConvergencePlan('recreate', [old_container]))
self.assertEqual(new_container.get('Volumes').keys(), ['/data'])
self.assertEqual(new_container.get('Volumes')['/data'], volume_path)
def test_start_container_passes_through_options(self):
db = self.create_service('db')
create_and_start_container(db, environment={'FOO': 'BAR'})
self.assertEqual(db.containers()[0].environment['FOO'], 'BAR')
def test_start_container_inherits_options_from_constructor(self):
db = self.create_service('db', environment={'FOO': 'BAR'})
create_and_start_container(db)
self.assertEqual(db.containers()[0].environment['FOO'], 'BAR')
def test_start_container_creates_links(self):
db = self.create_service('db')
web = self.create_service('web', links=[(db, None)])
create_and_start_container(db)
create_and_start_container(db)
create_and_start_container(web)
self.assertEqual(
set(web.containers()[0].links()),
set([
'composetest_db_1', 'db_1',
'composetest_db_2', 'db_2',
'db'])
)
def test_start_container_creates_links_with_names(self):
db = self.create_service('db')
web = self.create_service('web', links=[(db, 'custom_link_name')])
create_and_start_container(db)
create_and_start_container(db)
create_and_start_container(web)
self.assertEqual(
set(web.containers()[0].links()),
set([
'composetest_db_1', 'db_1',
'composetest_db_2', 'db_2',
'custom_link_name'])
)
def test_start_container_with_external_links(self):
db = self.create_service('db')
web = self.create_service('web', external_links=['composetest_db_1',
'composetest_db_2',
'composetest_db_3:db_3'])
for _ in range(3):
create_and_start_container(db)
create_and_start_container(web)
self.assertEqual(
set(web.containers()[0].links()),
set([
'composetest_db_1',
'composetest_db_2',
'db_3']),
)
def test_start_normal_container_does_not_create_links_to_its_own_service(self):
db = self.create_service('db')
create_and_start_container(db)
create_and_start_container(db)
c = create_and_start_container(db)
self.assertEqual(set(c.links()), set([]))
def test_start_one_off_container_creates_links_to_its_own_service(self):
db = self.create_service('db')
create_and_start_container(db)
create_and_start_container(db)
c = create_and_start_container(db, one_off=True)
self.assertEqual(
set(c.links()),
set([
'composetest_db_1', 'db_1',
'composetest_db_2', 'db_2',
'db'])
)
def test_start_container_builds_images(self):
    """Starting a service configured with a `build` directory builds
    and tags the image, and the resulting container runs to success."""
    service = Service(
        name='test',
        client=self.client,
        build='tests/fixtures/simple-dockerfile',
        project='composetest',
    )
    container = create_and_start_container(service)
    container.wait()
    self.assertIn('success', container.logs())
    self.assertEqual(len(self.client.images(name='composetest_test')), 1)

def test_start_container_uses_tagged_image_if_it_exists(self):
    # Pre-build the tagged image; the bogus build path proves that the
    # existing tag is used instead of triggering a rebuild.
    self.check_build('tests/fixtures/simple-dockerfile', tag='composetest_test')
    service = Service(
        name='test',
        client=self.client,
        build='this/does/not/exist/and/will/throw/error',
        project='composetest',
    )
    container = create_and_start_container(service)
    container.wait()
    self.assertIn('success', container.logs())

def test_start_container_creates_ports(self):
    """A bare container port is published on a dynamically assigned
    host port."""
    service = self.create_service('web', ports=[8000])
    container = create_and_start_container(service).inspect()
    self.assertEqual(list(container['NetworkSettings']['Ports'].keys()), ['8000/tcp'])
    # Host port is dynamically assigned, so it must differ from 8000.
    self.assertNotEqual(container['NetworkSettings']['Ports']['8000/tcp'][0]['HostPort'], '8000')
def test_build(self):
    """A service configured with a build directory produces exactly one
    tagged image after build()."""
    build_context = tempfile.mkdtemp()
    self.addCleanup(shutil.rmtree, build_context)

    dockerfile_path = os.path.join(build_context, 'Dockerfile')
    with open(dockerfile_path, 'w') as f:
        f.write("FROM busybox\n")

    service = self.create_service('web', build=build_context)
    service.build()

    images = self.client.images(name='composetest_web')
    self.assertEqual(len(images), 1)
def test_build_non_ascii_filename(self):
    """Building an image works when the build context contains a file
    whose name is not valid UTF-8."""
    base_dir = tempfile.mkdtemp()
    self.addCleanup(shutil.rmtree, base_dir)
    with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
        f.write("FROM busybox\n")
    # The filename is deliberately undecodable bytes. Join on a bytes
    # base path: os.path.join() raises TypeError on Python 3 when str
    # and bytes components are mixed.
    with open(os.path.join(base_dir.encode('utf-8'), b'foo\xE2bar'), 'w') as f:
        f.write("hello world\n")
    self.create_service('web', build=text_type(base_dir)).build()
    self.assertEqual(len(self.client.images(name='composetest_web')), 1)
def test_start_container_stays_unpriviliged(self):
    # Containers are unprivileged by default.
    service = self.create_service('web')
    container = create_and_start_container(service).inspect()
    self.assertEqual(container['HostConfig']['Privileged'], False)

def test_start_container_becomes_priviliged(self):
    service = self.create_service('web', privileged=True)
    container = create_and_start_container(service).inspect()
    self.assertEqual(container['HostConfig']['Privileged'], True)

def test_expose_does_not_publish_ports(self):
    # `expose` makes the port visible but binds no host port.
    service = self.create_service('web', expose=[8000])
    container = create_and_start_container(service).inspect()
    self.assertEqual(container['NetworkSettings']['Ports'], {'8000/tcp': None})

def test_start_container_creates_port_with_explicit_protocol(self):
    service = self.create_service('web', ports=['8000/udp'])
    container = create_and_start_container(service).inspect()
    self.assertEqual(list(container['NetworkSettings']['Ports'].keys()), ['8000/udp'])

def test_start_container_creates_fixed_external_ports(self):
    # 'host:container' syntax pins the host port.
    service = self.create_service('web', ports=['8000:8000'])
    container = create_and_start_container(service).inspect()
    self.assertIn('8000/tcp', container['NetworkSettings']['Ports'])
    self.assertEqual(container['NetworkSettings']['Ports']['8000/tcp'][0]['HostPort'], '8000')

def test_start_container_creates_fixed_external_ports_when_it_is_different_to_internal_port(self):
    service = self.create_service('web', ports=['8001:8000'])
    container = create_and_start_container(service).inspect()
    self.assertIn('8000/tcp', container['NetworkSettings']['Ports'])
    self.assertEqual(container['NetworkSettings']['Ports']['8000/tcp'][0]['HostPort'], '8001')

def test_port_with_explicit_interface(self):
    """'ip:host:container' bindings record the requested interface."""
    service = self.create_service('web', ports=[
        '127.0.0.1:8001:8000',
        '0.0.0.0:9001:9000/udp',
    ])
    container = create_and_start_container(service).inspect()
    self.assertEqual(container['NetworkSettings']['Ports'], {
        '8000/tcp': [
            {
                'HostIp': '127.0.0.1',
                'HostPort': '8001',
            },
        ],
        '9000/udp': [
            {
                'HostIp': '0.0.0.0',
                'HostPort': '9001',
            },
        ],
    })
def test_create_with_image_id(self):
    # Image id for the current busybox:latest
    service = self.create_service('foo', image='8c2e06607696')
    service.create_container()

def test_scale(self):
    """scale() converges the container count both up and down."""
    service = self.create_service('web')
    service.scale(1)
    self.assertEqual(len(service.containers()), 1)

    # Ensure containers don't have stdout or stdin connected
    container = service.containers()[0]
    config = container.inspect()['Config']
    self.assertFalse(config['AttachStderr'])
    self.assertFalse(config['AttachStdout'])
    self.assertFalse(config['AttachStdin'])

    service.scale(3)
    self.assertEqual(len(service.containers()), 3)
    service.scale(1)
    self.assertEqual(len(service.containers()), 1)
    service.scale(0)
    self.assertEqual(len(service.containers()), 0)
@patch('sys.stdout', new_callable=StringIO)
def test_scale_with_stopped_containers(self, mock_stdout):
    """
    Given there are some stopped containers and scale is called with a
    desired number that is the same as the number of stopped containers,
    test that those containers are restarted and not removed/recreated.
    """
    service = self.create_service('web')
    next_number = service._next_container_number()
    valid_numbers = [next_number, next_number + 1]
    service.create_container(number=next_number, quiet=True)
    service.create_container(number=next_number + 1, quiet=True)

    for container in service.containers():
        self.assertFalse(container.is_running)

    service.scale(2)

    self.assertEqual(len(service.containers()), 2)
    for container in service.containers():
        self.assertTrue(container.is_running)
        self.assertTrue(container.number in valid_numbers)

    # Stopped containers must be started, never recreated.
    captured_output = mock_stdout.getvalue()
    self.assertNotIn('Creating', captured_output)
    self.assertIn('Starting', captured_output)

@patch('sys.stdout', new_callable=StringIO)
def test_scale_with_stopped_containers_and_needing_creation(self, mock_stdout):
    """
    Given there are some stopped containers and scale is called with a
    desired number that is greater than the number of stopped containers,
    test that those containers are restarted and required number are created.
    """
    service = self.create_service('web')
    next_number = service._next_container_number()
    service.create_container(number=next_number, quiet=True)

    for container in service.containers():
        self.assertFalse(container.is_running)

    service.scale(2)

    self.assertEqual(len(service.containers()), 2)
    for container in service.containers():
        self.assertTrue(container.is_running)

    # Both a restart and a fresh creation must have been reported.
    captured_output = mock_stdout.getvalue()
    self.assertIn('Creating', captured_output)
    self.assertIn('Starting', captured_output)

@patch('sys.stdout', new_callable=StringIO)
def test_scale_with_api_returns_errors(self, mock_stdout):
    """
    Test that when scaling if the API returns an error, that error is handled
    and the remaining threads continue.
    """
    service = self.create_service('web')
    next_number = service._next_container_number()
    service.create_container(number=next_number, quiet=True)

    with patch(
            'compose.container.Container.create',
            side_effect=APIError(message="testing", response={}, explanation="Boom")):
        service.scale(3)

    # Only the pre-existing container survives; the failure is reported.
    self.assertEqual(len(service.containers()), 1)
    self.assertTrue(service.containers()[0].is_running)
    self.assertIn("ERROR: for 2 Boom", mock_stdout.getvalue())
@patch('compose.service.log')
def test_scale_with_desired_number_already_achieved(self, mock_log):
    """
    Test that calling scale with a desired number that is equal to the
    number of containers already running results in no change.
    """
    service = self.create_service('web')
    next_number = service._next_container_number()
    container = service.create_container(number=next_number, quiet=True)
    container.start()

    self.assertTrue(container.is_running)
    self.assertEqual(len(service.containers()), 1)

    service.scale(1)

    self.assertEqual(len(service.containers()), 1)
    # Refresh inspection data: the same container is still running.
    container.inspect()
    self.assertTrue(container.is_running)

    captured_output = mock_log.info.call_args[0]
    self.assertIn('Desired container number already achieved', captured_output)

@patch('compose.service.log')
def test_scale_with_custom_container_name_outputs_warning(self, mock_log):
    """
    Test that calling scale on a service that has a custom container name
    results in warning output.
    """
    service = self.create_service('web', container_name='custom-container')
    self.assertEqual(service.custom_container_name(), 'custom-container')

    service.scale(3)

    captured_output = mock_log.warn.call_args[0][0]

    # Scaling is refused: still exactly one container.
    self.assertEqual(len(service.containers()), 1)
    self.assertIn(
        "Remove the custom name to scale the service.",
        captured_output
    )

def test_scale_sets_ports(self):
    # Every scaled-up container publishes the configured port.
    service = self.create_service('web', ports=['8000'])
    service.scale(2)
    containers = service.containers()
    self.assertEqual(len(containers), 2)
    for container in containers:
        self.assertEqual(list(container.inspect()['HostConfig']['PortBindings'].keys()), ['8000/tcp'])
# --- simple create/start option pass-through checks -------------------

def test_network_mode_none(self):
    service = self.create_service('web', net='none')
    container = create_and_start_container(service)
    self.assertEqual(container.get('HostConfig.NetworkMode'), 'none')

def test_network_mode_bridged(self):
    service = self.create_service('web', net='bridge')
    container = create_and_start_container(service)
    self.assertEqual(container.get('HostConfig.NetworkMode'), 'bridge')

def test_network_mode_host(self):
    service = self.create_service('web', net='host')
    container = create_and_start_container(service)
    self.assertEqual(container.get('HostConfig.NetworkMode'), 'host')

def test_pid_mode_none_defined(self):
    # No pid setting leaves the daemon default (empty string).
    service = self.create_service('web', pid=None)
    container = create_and_start_container(service)
    self.assertEqual(container.get('HostConfig.PidMode'), '')

def test_pid_mode_host(self):
    service = self.create_service('web', pid='host')
    container = create_and_start_container(service)
    self.assertEqual(container.get('HostConfig.PidMode'), 'host')

def test_dns_no_value(self):
    service = self.create_service('web')
    container = create_and_start_container(service)
    self.assertIsNone(container.get('HostConfig.Dns'))

def test_dns_single_value(self):
    # A scalar dns entry is normalized to a one-element list.
    service = self.create_service('web', dns='8.8.8.8')
    container = create_and_start_container(service)
    self.assertEqual(container.get('HostConfig.Dns'), ['8.8.8.8'])

def test_dns_list(self):
    service = self.create_service('web', dns=['8.8.8.8', '9.9.9.9'])
    container = create_and_start_container(service)
    self.assertEqual(container.get('HostConfig.Dns'), ['8.8.8.8', '9.9.9.9'])

def test_restart_always_value(self):
    service = self.create_service('web', restart='always')
    container = create_and_start_container(service)
    self.assertEqual(container.get('HostConfig.RestartPolicy.Name'), 'always')

def test_restart_on_failure_value(self):
    # 'on-failure:N' splits into policy name and retry count.
    service = self.create_service('web', restart='on-failure:5')
    container = create_and_start_container(service)
    self.assertEqual(container.get('HostConfig.RestartPolicy.Name'), 'on-failure')
    self.assertEqual(container.get('HostConfig.RestartPolicy.MaximumRetryCount'), 5)

def test_cap_add_list(self):
    service = self.create_service('web', cap_add=['SYS_ADMIN', 'NET_ADMIN'])
    container = create_and_start_container(service)
    self.assertEqual(container.get('HostConfig.CapAdd'), ['SYS_ADMIN', 'NET_ADMIN'])

def test_cap_drop_list(self):
    service = self.create_service('web', cap_drop=['SYS_ADMIN', 'NET_ADMIN'])
    container = create_and_start_container(service)
    self.assertEqual(container.get('HostConfig.CapDrop'), ['SYS_ADMIN', 'NET_ADMIN'])

def test_dns_search_no_value(self):
    service = self.create_service('web')
    container = create_and_start_container(service)
    self.assertIsNone(container.get('HostConfig.DnsSearch'))

def test_dns_search_single_value(self):
    service = self.create_service('web', dns_search='example.com')
    container = create_and_start_container(service)
    self.assertEqual(container.get('HostConfig.DnsSearch'), ['example.com'])

def test_dns_search_list(self):
    service = self.create_service('web', dns_search=['dc1.example.com', 'dc2.example.com'])
    container = create_and_start_container(service)
    self.assertEqual(container.get('HostConfig.DnsSearch'), ['dc1.example.com', 'dc2.example.com'])

def test_working_dir_param(self):
    service = self.create_service('container', working_dir='/working/dir/sample')
    container = service.create_container()
    self.assertEqual(container.get('Config.WorkingDir'), '/working/dir/sample')
def test_split_env(self):
    """Environment entries split on the first '=' only; a trailing '='
    yields an empty value."""
    service = self.create_service('web', environment=['NORMAL=F1', 'CONTAINS_EQUALS=F=2', 'TRAILING_EQUALS='])
    env = create_and_start_container(service).environment
    for k, v in {'NORMAL': 'F1', 'CONTAINS_EQUALS': 'F=2', 'TRAILING_EQUALS': ''}.items():
        self.assertEqual(env[k], v)

def test_env_from_file_combined_with_env(self):
    # Explicit `environment` values win over env_file contents.
    service = self.create_service('web', environment=['ONE=1', 'TWO=2', 'THREE=3'], env_file=['tests/fixtures/env/one.env', 'tests/fixtures/env/two.env'])
    env = create_and_start_container(service).environment
    for k, v in {'ONE': '1', 'TWO': '2', 'THREE': '3', 'FOO': 'baz', 'DOO': 'dah'}.items():
        self.assertEqual(env[k], v)

@patch.dict(os.environ)
def test_resolve_env(self):
    """A None value pulls the variable from the host environment; an
    unset host variable resolves to the empty string."""
    os.environ['FILE_DEF'] = 'E1'
    os.environ['FILE_DEF_EMPTY'] = 'E2'
    os.environ['ENV_DEF'] = 'E3'
    service = self.create_service('web', environment={'FILE_DEF': 'F1', 'FILE_DEF_EMPTY': '', 'ENV_DEF': None, 'NO_DEF': None})
    env = create_and_start_container(service).environment
    for k, v in {'FILE_DEF': 'F1', 'FILE_DEF_EMPTY': '', 'ENV_DEF': 'E3', 'NO_DEF': ''}.items():
        self.assertEqual(env[k], v)
def test_labels(self):
    """Labels given as a dict or as a `k=v` list both end up on the
    container, alongside the compose bookkeeping labels."""
    labels_dict = {
        'com.example.description': "Accounting webapp",
        'com.example.department': "Finance",
        'com.example.label-with-empty-value': "",
    }
    compose_labels = {
        LABEL_CONTAINER_NUMBER: '1',
        LABEL_ONE_OFF: 'False',
        LABEL_PROJECT: 'composetest',
        LABEL_SERVICE: 'web',
        LABEL_VERSION: __version__,
    }
    expected = dict(labels_dict, **compose_labels)

    service = self.create_service('web', labels=labels_dict)
    labels = create_and_start_container(service).labels.items()
    for pair in expected.items():
        self.assertIn(pair, labels)

    service.kill()
    service.remove_stopped()

    # Same expectations when labels are given in list form.
    labels_list = ["%s=%s" % pair for pair in labels_dict.items()]

    service = self.create_service('web', labels=labels_list)
    labels = create_and_start_container(service).labels.items()
    for pair in expected.items():
        self.assertIn(pair, labels)

def test_empty_labels(self):
    # Bare label names (no '=') get an empty-string value.
    labels_list = ['foo', 'bar']

    service = self.create_service('web', labels=labels_list)
    labels = create_and_start_container(service).labels.items()
    for name in labels_list:
        self.assertIn((name, ''), labels)
def test_custom_container_name(self):
    """container_name pins the service container's name; one-off
    containers never reuse the custom name."""
    service = self.create_service('web', container_name='my-web-container')
    self.assertEqual(service.custom_container_name(), 'my-web-container')

    container = create_and_start_container(service)
    self.assertEqual(container.name, 'my-web-container')

    one_off_container = service.create_container(one_off=True)
    self.assertNotEqual(one_off_container.name, 'my-web-container')

def test_log_drive_invalid(self):
    # An unknown log driver is rejected before the container starts.
    service = self.create_service('web', log_driver='xxx')
    self.assertRaises(ValueError, lambda: create_and_start_container(service))

def test_log_drive_empty_default_jsonfile(self):
    # Default logging driver is json-file with no extra options.
    service = self.create_service('web')
    log_config = create_and_start_container(service).log_config

    self.assertEqual('json-file', log_config['Type'])
    self.assertFalse(log_config['Config'])

def test_log_drive_none(self):
    service = self.create_service('web', log_driver='none')
    log_config = create_and_start_container(service).log_config

    self.assertEqual('none', log_config['Type'])
    self.assertFalse(log_config['Config'])
def test_devices(self):
    """A host:container device mapping is applied with default 'rwm'
    cgroup permissions."""
    service = self.create_service('web', devices=["/dev/random:/dev/mapped-random"])
    device_config = create_and_start_container(service).get('HostConfig.Devices')

    device_dict = {
        'PathOnHost': '/dev/random',
        'CgroupPermissions': 'rwm',
        'PathInContainer': '/dev/mapped-random'
    }

    self.assertEqual(1, len(device_config))
    self.assertDictEqual(device_dict, device_config[0])

def test_duplicate_containers(self):
    """Two containers created from identical options (but different
    names) are detected as duplicates."""
    service = self.create_service('web')

    options = service._get_container_create_options({}, 1)
    original = Container.create(service.client, **options)

    self.assertEqual(set(service.containers(stopped=True)), set([original]))
    self.assertEqual(set(service.duplicate_containers()), set())

    options['name'] = 'temporary_container_name'
    duplicate = Container.create(service.client, **options)

    self.assertEqual(set(service.containers(stopped=True)), set([original, duplicate]))
    self.assertEqual(set(service.duplicate_containers()), set([duplicate]))
# ---------------------------------------------------------------------------
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For CellsScheduler
"""
import copy
import time
import mock
from oslo_utils import uuidutils
from nova import block_device
from nova.cells import filters
from nova.cells import weights
from nova.compute import vm_states
import nova.conf
from nova import context
from nova import db
from nova import exception
from nova import objects
from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests.unit.cells import fakes
from nova.tests.unit import fake_block_device
from nova.tests import uuidsentinel
from nova import utils
CONF = nova.conf.CONF
class FakeFilterClass1(filters.BaseCellFilter):
    """No-op cell filter used to verify filter-class loading/order."""
    pass

class FakeFilterClass2(filters.BaseCellFilter):
    """Second no-op cell filter, paired with FakeFilterClass1."""
    pass

class FakeWeightClass1(weights.BaseCellWeigher):
    """No-op cell weigher; _weigh_object intentionally returns None."""
    def _weigh_object(self, obj, weight_properties):
        pass

class FakeWeightClass2(weights.BaseCellWeigher):
    """Second no-op cell weigher, paired with FakeWeightClass1."""
    def _weigh_object(self, obj, weight_properties):
        pass
class CellsSchedulerTestCase(test.TestCase):
"""Test case for CellsScheduler class."""
def setUp(self):
    """Reset cells config to empty filter/weight lists, then build the
    fake cells-scheduler environment."""
    super(CellsSchedulerTestCase, self).setUp()
    self.flags(scheduler_filter_classes=[], scheduler_weight_classes=[],
               group='cells')
    self._init_cells_scheduler()
def _init_cells_scheduler(self):
    # Build the fake cells environment and grab the api-cell's
    # scheduler and state manager for direct inspection in tests.
    fakes.init(self)
    self.msg_runner = fakes.get_message_runner('api-cell')
    self.scheduler = self.msg_runner.scheduler
    self.state_manager = self.msg_runner.state_manager
    self.my_cell_state = self.state_manager.get_my_state()
    self.ctxt = context.RequestContext('fake', 'fake')
    instance_uuids = []
    for x in range(3):
        instance_uuids.append(uuidutils.generate_uuid())
    self.instance_uuids = instance_uuids
    self.instances = [objects.Instance(uuid=uuid, id=id)
                      for id, uuid in enumerate(instance_uuids)]
    # Canonical request spec / build kwargs reused by the tests below.
    self.request_spec = {
        'num_instances': len(instance_uuids),
        'instance_properties': self.instances[0],
        'instance_type': 'fake_type',
        'image': 'fake_image'}
    self.build_inst_kwargs = {
        'instances': self.instances,
        'image': 'fake_image',
        'filter_properties': {'instance_type': 'fake_type'},
        'security_groups': 'fake_sec_groups',
        'block_device_mapping': 'fake_bdm'}
def test_create_instances_here(self):
    """_create_instances_here creates DB entries for every uuid and
    pushes an update for each instance to the top-level cell."""
    # Just grab the first instance type
    inst_type = objects.Flavor.get_by_id(self.ctxt, 1)
    image = {'properties': {}}
    instance_uuids = self.instance_uuids
    # 'removed' markers are fields expected to be stripped before
    # creation; the rest should be persisted as-is.
    instance_props = {'id': 'removed',
                      'security_groups': 'removed',
                      'info_cache': 'removed',
                      'name': 'instance-00000001',
                      'hostname': 'meow',
                      'display_name': 'moo',
                      'image_ref': uuidsentinel.fake_image_ref,
                      'user_id': self.ctxt.user_id,
                      # Test these as lists
                      'metadata': {'moo': 'cow'},
                      'system_metadata': {'meow': 'cat'},
                      'flavor': inst_type,
                      'project_id': self.ctxt.project_id}

    call_info = {'uuids': []}
    block_device_mapping = objects.BlockDeviceMappingList(
        objects=[
            objects.BlockDeviceMapping(context=self.ctxt,
                **fake_block_device.FakeDbBlockDeviceDict(
                    block_device.create_image_bdm(
                        uuidsentinel.fake_image_ref),
                    anon=True))
        ])

    def _fake_instance_update_at_top(_ctxt, instance):
        call_info['uuids'].append(instance['uuid'])

    self.stubs.Set(self.msg_runner, 'instance_update_at_top',
                   _fake_instance_update_at_top)

    self.scheduler._create_instances_here(self.ctxt, instance_uuids,
                                          instance_props, inst_type, image,
                                          ['default'], block_device_mapping)

    # Every instance must have been pushed "at top", in order.
    self.assertEqual(instance_uuids, call_info['uuids'])

    for count, instance_uuid in enumerate(instance_uuids):
        bdms = db.block_device_mapping_get_all_by_instance(self.ctxt,
                                                           instance_uuid)
        self.assertIsNotNone(bdms)
        instance = db.instance_get_by_uuid(self.ctxt, instance_uuid)
        meta = utils.instance_meta(instance)
        self.assertEqual('cow', meta['moo'])
        sys_meta = utils.instance_sys_meta(instance)
        self.assertEqual('cat', sys_meta['meow'])
        self.assertEqual('meow', instance['hostname'])
        # display_name gets a per-instance numeric suffix.
        self.assertEqual('moo-%d' % (count + 1),
                         instance['display_name'])
        self.assertEqual(uuidsentinel.fake_image_ref,
                         instance['image_ref'])
@mock.patch('nova.objects.Instance.update')
def test_create_instances_here_pops_problematic_properties(self,
                                                           mock_update):
    """Complex properties that would be mangled over RPC are stripped
    before the instance object is updated."""
    values = {
        'uuid': uuidsentinel.instance,
        'metadata': [],
        'id': 1,
        'name': 'foo',
        'info_cache': 'bar',
        'security_groups': 'not secure',
        'flavor': 'chocolate',
        'pci_requests': 'no thanks',
        'ec2_ids': 'prime',
    }
    block_device_mapping = [
        objects.BlockDeviceMapping(context=self.ctxt,
            **fake_block_device.FakeDbBlockDeviceDict(
                block_device.create_image_bdm(
                    uuidsentinel.fake_image_ref),
                anon=True))
    ]

    # Mock out the compute_api calls so only Instance.update is real.
    @mock.patch.object(self.scheduler.compute_api,
                       'create_db_entry_for_new_instance')
    @mock.patch.object(self.scheduler.compute_api,
                       '_bdm_validate_set_size_and_instance')
    def test(mock_bdm_validate, mock_create_db):
        self.scheduler._create_instances_here(
            self.ctxt, [uuidsentinel.instance], values,
            objects.Flavor(), 'foo', [], block_device_mapping)

    test()

    # NOTE(danms): Make sure that only the expected properties
    # are applied to the instance object. The complex ones that
    # would have been mangled over RPC should be removed.
    mock_update.assert_called_once_with(
        {'uuid': uuidsentinel.instance,
         'metadata': {}})
def test_build_instances_selects_child_cell(self):
    """With no local capacity info, scheduling recurses into one of
    the child cells."""
    # Make sure there's no capacity info so we're sure to
    # select a child cell
    our_cell_info = self.state_manager.get_my_state()
    our_cell_info.capacities = {}

    call_info = {'times': 0}

    orig_fn = self.msg_runner.build_instances

    def msg_runner_build_instances(ctxt, target_cell, build_inst_kwargs):
        # This gets called twice. Once for our running it
        # in this cell.. and then it'll get called when the
        # child cell is picked. So, first time.. just run it
        # like normal.
        if not call_info['times']:
            call_info['times'] += 1
            return orig_fn(ctxt, target_cell, build_inst_kwargs)
        call_info['ctxt'] = ctxt
        call_info['target_cell'] = target_cell
        call_info['build_inst_kwargs'] = build_inst_kwargs

    def fake_build_request_spec(ctxt, image, instances):
        request_spec = {
            'num_instances': len(instances),
            'image': image}
        return request_spec

    self.stubs.Set(self.msg_runner, 'build_instances',
                   msg_runner_build_instances)
    self.stubs.Set(scheduler_utils, 'build_request_spec',
                   fake_build_request_spec)

    self.msg_runner.build_instances(self.ctxt, self.my_cell_state,
                                    self.build_inst_kwargs)

    self.assertEqual(self.ctxt, call_info['ctxt'])
    self.assertEqual(self.build_inst_kwargs,
                     call_info['build_inst_kwargs'])
    # The second (recursive) call must have targeted a child cell.
    child_cells = self.state_manager.get_child_cells()
    self.assertIn(call_info['target_cell'], child_cells)
def test_build_instances_selects_current_cell(self):
    """With no child cells, instances are built in the current cell.

    Stubs out instance creation and the compute-task RPC, then checks
    that the arguments passed through match the build kwargs.
    """
    # Make sure there's no child cells so that we will be
    # selected
    self.state_manager.child_cells = {}
    call_info = {}
    build_inst_kwargs = copy.deepcopy(self.build_inst_kwargs)

    def fake_create_instances_here(ctxt, instance_uuids,
            instance_properties, instance_type, image, security_groups,
            block_device_mapping):
        call_info['ctxt'] = ctxt
        call_info['instance_uuids'] = instance_uuids
        call_info['instance_properties'] = instance_properties
        call_info['instance_type'] = instance_type
        call_info['image'] = image
        call_info['security_groups'] = security_groups
        call_info['block_device_mapping'] = block_device_mapping
        return self.instances

    def fake_rpc_build_instances(ctxt, **build_inst_kwargs):
        call_info['build_inst_kwargs'] = build_inst_kwargs

    def fake_build_request_spec(ctxt, image, instances):
        request_spec = {
            'num_instances': len(instances),
            'image': image}
        return request_spec

    self.stubs.Set(self.scheduler, '_create_instances_here',
                   fake_create_instances_here)
    self.stubs.Set(self.scheduler.compute_task_api,
                   'build_instances', fake_rpc_build_instances)
    self.stubs.Set(scheduler_utils, 'build_request_spec',
                   fake_build_request_spec)

    self.msg_runner.build_instances(self.ctxt, self.my_cell_state,
                                    build_inst_kwargs)

    self.assertEqual(self.ctxt, call_info['ctxt'])
    self.assertEqual(self.instance_uuids, call_info['instance_uuids'])
    self.assertEqual(self.build_inst_kwargs['instances'][0]['id'],
                     call_info['instance_properties']['id'])
    self.assertEqual(
        self.build_inst_kwargs['filter_properties']['instance_type'],
        call_info['instance_type'])
    self.assertEqual(self.build_inst_kwargs['image'], call_info['image'])
    self.assertEqual(self.build_inst_kwargs['security_groups'],
                     call_info['security_groups'])
    self.assertEqual(self.build_inst_kwargs['block_device_mapping'],
                     call_info['block_device_mapping'])
    self.assertEqual(build_inst_kwargs,
                     call_info['build_inst_kwargs'])
    # NOTE: a second, redundant assertion on instance_uuids used to
    # follow here; it duplicated the check above and was removed.
def test_build_instances_retries_when_no_cells_avail(self):
    """NoCellsAvailable is retried scheduler_retries times, after which
    every instance is put into the ERROR state."""
    self.flags(scheduler_retries=7, group='cells')

    call_info = {'num_tries': 0, 'errored_uuids': []}

    def fake_grab_target_cells(filter_properties):
        call_info['num_tries'] += 1
        raise exception.NoCellsAvailable()

    def fake_sleep(_secs):
        # Skip the real backoff sleep between retries.
        return

    def fake_instance_save(inst):
        self.assertEqual(vm_states.ERROR, inst.vm_state)
        call_info['errored_uuids'].append(inst.uuid)

    def fake_build_request_spec(ctxt, image, instances):
        request_spec = {
            'num_instances': len(instances),
            'image': image}
        return request_spec

    self.stubs.Set(self.scheduler, '_grab_target_cells',
                   fake_grab_target_cells)
    self.stubs.Set(time, 'sleep', fake_sleep)
    self.stubs.Set(objects.Instance, 'save', fake_instance_save)
    self.stubs.Set(scheduler_utils, 'build_request_spec',
                   fake_build_request_spec)

    self.msg_runner.build_instances(self.ctxt, self.my_cell_state,
                                    self.build_inst_kwargs)

    # 1 initial attempt + 7 configured retries.
    self.assertEqual(8, call_info['num_tries'])
    self.assertEqual(self.instance_uuids, call_info['errored_uuids'])
def test_schedule_method_on_random_exception(self):
    """An unexpected exception does NOT retry; instances are errored
    both locally (Instance.save) and at the top cell."""
    self.flags(scheduler_retries=7, group='cells')

    instances = [objects.Instance(uuid=uuid) for uuid in
                 self.instance_uuids]
    method_kwargs = {
        'image': 'fake_image',
        'instances': instances,
        'filter_properties': {}}
    call_info = {'num_tries': 0,
                 'errored_uuids1': [],
                 'errored_uuids2': []}

    def fake_grab_target_cells(filter_properties):
        call_info['num_tries'] += 1
        raise test.TestingException()

    def fake_instance_save(inst):
        self.assertEqual(vm_states.ERROR, inst.vm_state)
        call_info['errored_uuids1'].append(inst.uuid)

    def fake_instance_update_at_top(ctxt, instance):
        self.assertEqual(vm_states.ERROR, instance['vm_state'])
        call_info['errored_uuids2'].append(instance['uuid'])

    def fake_build_request_spec(ctxt, image, instances):
        request_spec = {
            'num_instances': len(instances),
            'image': image}
        return request_spec

    self.stubs.Set(self.scheduler, '_grab_target_cells',
                   fake_grab_target_cells)
    self.stubs.Set(objects.Instance, 'save', fake_instance_save)
    self.stubs.Set(self.msg_runner, 'instance_update_at_top',
                   fake_instance_update_at_top)
    self.stubs.Set(scheduler_utils, 'build_request_spec',
                   fake_build_request_spec)

    self.msg_runner.build_instances(self.ctxt, self.my_cell_state,
                                    method_kwargs)

    # Shouldn't retry
    self.assertEqual(1, call_info['num_tries'])
    self.assertEqual(self.instance_uuids, call_info['errored_uuids1'])
    self.assertEqual(self.instance_uuids, call_info['errored_uuids2'])
def test_filter_schedule_skipping(self):
    """When _grab_target_cells returns None (a filter handled the
    scheduling itself), the build callback must never run."""
    build_callback = mock.Mock()
    self.scheduler._grab_target_cells = mock.Mock(return_value=None)

    self.scheduler._schedule_build_to_cells(
        None, None, None, build_callback, None)

    build_callback.assert_not_called()
def test_cells_filter_args_correct(self):
    """Filters receive the expected (objects, cells, properties)
    arguments, and the local cell is chosen when filters pass it
    straight through."""
    # Re-init our fakes with some filters.
    our_path = 'nova.tests.unit.cells.test_cells_scheduler'
    cls_names = [our_path + '.' + 'FakeFilterClass1',
                 our_path + '.' + 'FakeFilterClass2']
    self.flags(scheduler_filter_classes=cls_names, group='cells')
    self._init_cells_scheduler()

    # Make sure there's no child cells so that we will be
    # selected. Makes stubbing easier.
    self.state_manager.child_cells = {}

    call_info = {}

    def fake_create_instances_here(ctxt, instance_uuids,
            instance_properties, instance_type, image, security_groups,
            block_device_mapping):
        call_info['ctxt'] = ctxt
        call_info['instance_uuids'] = instance_uuids
        call_info['instance_properties'] = instance_properties
        call_info['instance_type'] = instance_type
        call_info['image'] = image
        call_info['security_groups'] = security_groups
        call_info['block_device_mapping'] = block_device_mapping

    def fake_rpc_build_instances(ctxt, **host_sched_kwargs):
        call_info['host_sched_kwargs'] = host_sched_kwargs

    def fake_get_filtered_objs(filters, cells, filt_properties):
        call_info['filt_objects'] = filters
        call_info['filt_cells'] = cells
        call_info['filt_props'] = filt_properties
        # Pass all cells through unchanged.
        return cells

    def fake_build_request_spec(ctxt, image, instances):
        request_spec = {
            'num_instances': len(instances),
            'instance_properties': instances[0],
            'image': image,
            'instance_type': 'fake_type'}
        return request_spec

    self.stubs.Set(self.scheduler, '_create_instances_here',
                   fake_create_instances_here)
    self.stubs.Set(self.scheduler.compute_task_api,
                   'build_instances', fake_rpc_build_instances)
    self.stubs.Set(scheduler_utils, 'build_request_spec',
                   fake_build_request_spec)
    filter_handler = self.scheduler.filter_handler
    self.stubs.Set(filter_handler, 'get_filtered_objects',
                   fake_get_filtered_objs)

    host_sched_kwargs = {'image': 'fake_image',
                         'instances': self.instances,
                         'filter_properties':
                             {'instance_type': 'fake_type'},
                         'security_groups': 'fake_sec_groups',
                         'block_device_mapping': 'fake_bdm'}

    self.msg_runner.build_instances(self.ctxt,
                                    self.my_cell_state, host_sched_kwargs)
    # Our cell was selected.
    self.assertEqual(self.ctxt, call_info['ctxt'])
    self.assertEqual(self.instance_uuids, call_info['instance_uuids'])
    self.assertEqual(self.request_spec['instance_properties']['id'],
                     call_info['instance_properties']['id'])
    self.assertEqual(self.request_spec['instance_type'],
                     call_info['instance_type'])
    self.assertEqual(self.request_spec['image'], call_info['image'])
    self.assertEqual(host_sched_kwargs, call_info['host_sched_kwargs'])

    # Filter args are correct
    expected_filt_props = {'context': self.ctxt,
                           'scheduler': self.scheduler,
                           'routing_path': self.my_cell_state.name,
                           'host_sched_kwargs': host_sched_kwargs,
                           'request_spec': self.request_spec,
                           'instance_type': 'fake_type'}
    self.assertEqual(expected_filt_props, call_info['filt_props'])
    self.assertEqual([FakeFilterClass1, FakeFilterClass2],
                     [obj.__class__ for obj in call_info['filt_objects']])
    self.assertEqual([self.my_cell_state], call_info['filt_cells'])
def test_cells_filter_returning_none(self):
    """A filter returning None short-circuits scheduling entirely."""
    # Re-init our fakes with some filters.
    our_path = 'nova.tests.unit.cells.test_cells_scheduler'
    cls_names = [our_path + '.' + 'FakeFilterClass1',
                 our_path + '.' + 'FakeFilterClass2']
    self.flags(scheduler_filter_classes=cls_names, group='cells')
    self._init_cells_scheduler()

    # Make sure there's no child cells so that we will be
    # selected. Makes stubbing easier.
    self.state_manager.child_cells = {}

    call_info = {'scheduled': False}

    def fake_create_instances_here(ctxt, request_spec):
        # Should not be called
        call_info['scheduled'] = True

    def fake_get_filtered_objs(filter_classes, cells, filt_properties):
        # Should cause scheduling to be skipped. Means that the
        # filter did it.
        return None

    self.stubs.Set(self.scheduler, '_create_instances_here',
                   fake_create_instances_here)
    filter_handler = self.scheduler.filter_handler
    self.stubs.Set(filter_handler, 'get_filtered_objects',
                   fake_get_filtered_objs)

    self.msg_runner.build_instances(self.ctxt,
                                    self.my_cell_state, {})

    self.assertFalse(call_info['scheduled'])
def test_cells_weight_args_correct(self):
# Re-init our fakes with some filters.
our_path = 'nova.tests.unit.cells.test_cells_scheduler'
cls_names = [our_path + '.' + 'FakeWeightClass1',
our_path + '.' + 'FakeWeightClass2']
self.flags(scheduler_weight_classes=cls_names, group='cells')
self._init_cells_scheduler()
# Make sure there's no child cells so that we will be
# selected. Makes stubbing easier.
self.state_manager.child_cells = {}
call_info = {}
def fake_create_instances_here(ctxt, instance_uuids,
instance_properties, instance_type, image, security_groups,
block_device_mapping):
call_info['ctxt'] = ctxt
call_info['instance_uuids'] = instance_uuids
call_info['instance_properties'] = instance_properties
call_info['instance_type'] = instance_type
call_info['image'] = image
call_info['security_groups'] = security_groups
call_info['block_device_mapping'] = block_device_mapping
def fake_rpc_build_instances(ctxt, **host_sched_kwargs):
call_info['host_sched_kwargs'] = host_sched_kwargs
def fake_get_weighed_objs(weighers, cells, filt_properties):
call_info['weighers'] = weighers
call_info['weight_cells'] = cells
call_info['weight_props'] = filt_properties
return [weights.WeightedCell(cells[0], 0.0)]
def fake_build_request_spec(ctxt, image, instances):
request_spec = {
'num_instances': len(instances),
'instance_properties': instances[0],
'image': image,
'instance_type': 'fake_type'}
return request_spec
self.stubs.Set(self.scheduler, '_create_instances_here',
fake_create_instances_here)
self.stubs.Set(scheduler_utils, 'build_request_spec',
fake_build_request_spec)
self.stubs.Set(self.scheduler.compute_task_api,
'build_instances', fake_rpc_build_instances)
weight_handler = self.scheduler.weight_handler
self.stubs.Set(weight_handler, 'get_weighed_objects',
fake_get_weighed_objs)
host_sched_kwargs = {'image': 'fake_image',
'instances': self.instances,
'filter_properties':
{'instance_type': 'fake_type'},
'security_groups': 'fake_sec_groups',
'block_device_mapping': 'fake_bdm'}
self.msg_runner.build_instances(self.ctxt,
self.my_cell_state, host_sched_kwargs)
# Our cell was selected.
self.assertEqual(self.ctxt, call_info['ctxt'])
self.assertEqual(self.instance_uuids, call_info['instance_uuids'])
self.assertEqual(self.request_spec['instance_properties']['id'],
call_info['instance_properties']['id'])
self.assertEqual(self.request_spec['instance_type'],
call_info['instance_type'])
self.assertEqual(self.request_spec['image'], call_info['image'])
self.assertEqual(host_sched_kwargs, call_info['host_sched_kwargs'])
# Weight args are correct
expected_filt_props = {'context': self.ctxt,
'scheduler': self.scheduler,
'routing_path': self.my_cell_state.name,
'host_sched_kwargs': host_sched_kwargs,
'request_spec': self.request_spec,
'instance_type': 'fake_type'}
self.assertEqual(expected_filt_props, call_info['weight_props'])
self.assertEqual([FakeWeightClass1, FakeWeightClass2],
[obj.__class__ for obj in call_info['weighers']])
self.assertEqual([self.my_cell_state], call_info['weight_cells'])
| |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration adding Stripe tax fields to the payments app.

    Adds CurrentSubscription.tax_percent, Invoice.tax_percent and
    Invoice.tax — all nullable DecimalFields.
    """
    def forwards(self, orm):
        """Applies the migration: adds the three tax-related columns."""
        # Adding field 'CurrentSubscription.tax_percent'
        db.add_column(u'payments_currentsubscription', 'tax_percent',
                      self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=2),
                      keep_default=False)
        # Adding field 'Invoice.tax_percent'
        db.add_column(u'payments_invoice', 'tax_percent',
                      self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=2),
                      keep_default=False)
        # Adding field 'Invoice.tax'
        db.add_column(u'payments_invoice', 'tax',
                      self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=9, decimal_places=2),
                      keep_default=False)
    def backwards(self, orm):
        """Reverts the migration: drops the three tax-related columns."""
        # Deleting field 'CurrentSubscription.tax_percent'
        db.delete_column(u'payments_currentsubscription', 'tax_percent')
        # Deleting field 'Invoice.tax_percent'
        db.delete_column(u'payments_invoice', 'tax_percent')
        # Deleting field 'Invoice.tax'
        db.delete_column(u'payments_invoice', 'tax')
    # Frozen ORM snapshot generated by South; it is used to construct the
    # fake `orm` object passed to forwards()/backwards(). Do not edit by
    # hand — regenerate with `schemamigration` instead.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'payments.charge': {
            'Meta': {'object_name': 'Charge'},
            'amount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '9', 'decimal_places': '2'}),
            'amount_refunded': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '9', 'decimal_places': '2'}),
            'captured': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'card_kind': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'card_last_4': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'}),
            'charge_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '10'}),
            'customer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'charges'", 'to': u"orm['payments.Customer']"}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'disputed': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'fee': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '9', 'decimal_places': '2'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'invoice': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'charges'", 'null': 'True', 'to': u"orm['payments.Invoice']"}),
            'paid': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'receipt_sent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'refunded': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'stripe_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
        },
        u'payments.currentsubscription': {
            'Meta': {'object_name': 'CurrentSubscription'},
            'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '2'}),
            'cancel_at_period_end': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'canceled_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '10'}),
            'current_period_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'current_period_start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'customer': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'current_subscription'", 'unique': 'True', 'null': 'True', 'to': u"orm['payments.Customer']"}),
            'ended_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'plan': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'quantity': ('django.db.models.fields.IntegerField', [], {}),
            'start': ('django.db.models.fields.DateTimeField', [], {}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
            'tax_percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '2'}),
            'trial_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'trial_start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
        },
        u'payments.customer': {
            'Meta': {'object_name': 'Customer'},
            'card_fingerprint': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'card_kind': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'card_last_4': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'date_purged': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'stripe_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'null': 'True'})
        },
        u'payments.event': {
            'Meta': {'object_name': 'Event'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'customer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['payments.Customer']", 'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'kind': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
            'livemode': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'stripe_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'valid': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'validated_message': ('jsonfield.fields.JSONField', [], {'null': 'True'}),
            'webhook_message': ('jsonfield.fields.JSONField', [], {'default': '{}'})
        },
        u'payments.eventprocessingexception': {
            'Meta': {'object_name': 'EventProcessingException'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'data': ('django.db.models.fields.TextField', [], {}),
            'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['payments.Event']", 'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            'traceback': ('django.db.models.fields.TextField', [], {})
        },
        u'payments.invoice': {
            'Meta': {'ordering': "['-date']", 'object_name': 'Invoice'},
            'attempted': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'attempts': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
            'charge': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '10'}),
            'customer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'invoices'", 'to': u"orm['payments.Customer']"}),
            'date': ('django.db.models.fields.DateTimeField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'paid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'period_end': ('django.db.models.fields.DateTimeField', [], {}),
            'period_start': ('django.db.models.fields.DateTimeField', [], {}),
            'stripe_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'subtotal': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '2'}),
            'tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '9', 'decimal_places': '2'}),
            'tax_percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '2'}),
            'total': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '2'})
        },
        u'payments.invoiceitem': {
            'Meta': {'object_name': 'InvoiceItem'},
            'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '2'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '10'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'invoice': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': u"orm['payments.Invoice']"}),
            'line_type': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'period_end': ('django.db.models.fields.DateTimeField', [], {}),
            'period_start': ('django.db.models.fields.DateTimeField', [], {}),
            'plan': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'proration': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'quantity': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'stripe_id': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'payments.transfer': {
            'Meta': {'object_name': 'Transfer'},
            'adjustment_count': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'adjustment_fees': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '9', 'decimal_places': '2'}),
            'adjustment_gross': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '9', 'decimal_places': '2'}),
            'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '2'}),
            'charge_count': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'charge_fees': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '9', 'decimal_places': '2'}),
            'charge_gross': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '9', 'decimal_places': '2'}),
            'collected_fee_count': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'collected_fee_gross': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '9', 'decimal_places': '2'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '25'}),
            'date': ('django.db.models.fields.DateTimeField', [], {}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'event': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'transfers'", 'to': u"orm['payments.Event']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'net': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '9', 'decimal_places': '2'}),
            'refund_count': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'refund_fees': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '9', 'decimal_places': '2'}),
            'refund_gross': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '9', 'decimal_places': '2'}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
            'stripe_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'validation_count': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'validation_fees': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '9', 'decimal_places': '2'})
        },
        u'payments.transferchargefee': {
            'Meta': {'object_name': 'TransferChargeFee'},
            'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '2'}),
            'application': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '10'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'kind': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
            'transfer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'charge_fee_details'", 'to': u"orm['payments.Transfer']"})
        }
    }
    complete_apps = ['payments']
| |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for calculations to get interaction answer views.
Calculations are performed on recorded state answers.
NOTE TO DEVELOPERS: To specify calculations desired for an interaction named
<INTERACTION_NAME>, edit
extensions.interactions.<INTERACTION_NAME>.answer_visualizations
This is a list of visualizations, each of which is specified by a dict with keys
'id', 'options' and 'calculation_id'. An example for a single visualization and
calculation may look like this:
answer_visualizations = [{
'id': 'BarChart',
'options': {
'x_axis_label': 'Answer',
'y_axis_label': 'Count',
},
'calculation_id': 'AnswerFrequencies',
}]
"""
import collections
import itertools
import operator
from core.domain import exp_domain
from core.domain import stats_domain
import feconf
import utils
# All classification categories an answer may carry; used by
# TopAnswersByCategorization to decide which answers to bucket.
CLASSIFICATION_CATEGORIES = frozenset([
    exp_domain.EXPLICIT_CLASSIFICATION,
    exp_domain.TRAINING_DATA_CLASSIFICATION,
    exp_domain.STATISTICAL_CLASSIFICATION,
    exp_domain.DEFAULT_OUTCOME_CLASSIFICATION,
])
# Categories whose answers count as "unresolved"; used by
# _get_top_unresolved_answers_by_frequency.
UNRESOLVED_ANSWER_CLASSIFICATION_CATEGORIES = frozenset([
    exp_domain.STATISTICAL_CLASSIFICATION,
    exp_domain.DEFAULT_OUTCOME_CLASSIFICATION,
])
class _HashableAnswer(object):
    """Wraps an answer in an object that can be placed into sets and dicts.

    Equality and hashing are delegated to the hashable representation
    computed by utils.get_hashable_value(), so two wrappers around
    structurally-equal answers compare (and hash) equal.
    """
    def __init__(self, answer):
        # The raw submitted answer, preserved so callers can unwrap it.
        self.answer = answer
        self.hashable_answer = utils.get_hashable_value(answer)
    def __hash__(self):
        return hash(self.hashable_answer)
    def __eq__(self, other):
        if isinstance(other, _HashableAnswer):
            return self.hashable_answer == other.hashable_answer
        return False
    def __ne__(self, other):
        # Python 2 (this file uses dict.iteritems) does not derive != from
        # ==, so without this, != would fall back to identity comparison.
        return not self.__eq__(other)
def _get_top_answers_by_frequency(answers, limit=None):
    """Tallies how often each answer occurs and returns the most frequent.

    This method is run from within the context of a MapReduce job.

    Args:
        answers: iterable(*). The collection of answers to be tallied.
        limit: int or None. The maximum number of answers to return. When
            None, all answers are returned.

    Returns:
        stats_domain.AnswerFrequencyList. A list of the top "limit" answers.
    """
    tally = utils.OrderedCounter(
        _HashableAnswer(answer) for answer in answers)
    occurrences = [
        stats_domain.AnswerOccurrence(wrapped.answer, count)
        for wrapped, count in tally.most_common(n=limit)]
    return stats_domain.AnswerFrequencyList(occurrences)
def _get_top_unresolved_answers_by_frequency(
        answers_with_classification, limit=None):
    """Computes the list of unresolved answers by keeping track of their latest
    classification categorization and then computes the occurrences of each
    unresolved answer, keeping only limit answers, and returns an
    AnswerFrequencyList.

    This method is run from within the context of a MapReduce job.

    Args:
        answers_with_classification: iterable(*). The collection of answers
            with their corresponding classification categorization.
        limit: int or None. The maximum number of answers to return. When None,
            all answers are returned.

    Returns:
        stats_domain.AnswerFrequencyList. A list of the top "limit"
        unresolved answers.
    """
    classification_results_dict = {}
    # The list of answers is sorted according to the time of answer
    # submission, so this loop keeps the *latest* classification
    # categorization seen for each answer while accumulating its total
    # frequency. (The wrapper is built once per answer instead of three
    # times, and the dict is probed once via .get instead of twice.)
    for ans in answers_with_classification:
        wrapped = _HashableAnswer(ans['answer'])
        previous = classification_results_dict.get(wrapped)
        frequency = previous['frequency'] if previous else 0
        classification_results_dict[wrapped] = {
            'classification_categorization': (
                ans['classification_categorization']),
            'frequency': frequency + 1
        }
    # Keep only answers whose latest categorization marks them unresolved.
    unresolved_answers_with_frequency_list = [{
        'answer': ans.answer,
        'frequency': val['frequency']
    } for ans, val in classification_results_dict.iteritems() if val[
        'classification_categorization'] in (
            UNRESOLVED_ANSWER_CLASSIFICATION_CATEGORIES)]
    unresolved_answers_with_frequency_list.sort(
        key=lambda x: x['frequency'], reverse=True)
    return stats_domain.AnswerFrequencyList([
        stats_domain.AnswerOccurrence(item['answer'], item['frequency'])
        for item in unresolved_answers_with_frequency_list[:limit]
    ])
class BaseCalculation(object):
    """Superclass for all calculations used to generate interaction
    answer views.
    """

    @property
    def id(self):
        """Returns the name of the concrete calculation class."""
        return type(self).__name__

    def calculate_from_state_answers_dict(self, state_answers_dict):
        """Performs a calculation on a single StateAnswers entity.

        Runs in the context of a batch MapReduce job. Subclasses must
        override this method.
        """
        raise NotImplementedError(
            'Subclasses of BaseCalculation should implement the '
            'calculate_from_state_answers_dict(state_answers_dict) method.')
class AnswerFrequencies(BaseCalculation):
    """Calculation for answers' frequencies (how often each answer was
    submitted).
    """

    def calculate_from_state_answers_dict(self, state_answers_dict):
        """Returns how often each submitted answer occurs, wrapped in a
        StateAnswersCalcOutput.

        This method is run from within the context of a MapReduce job.
        """
        submitted = state_answers_dict['submitted_answer_list']
        frequencies = _get_top_answers_by_frequency(
            answer_dict['answer'] for answer_dict in submitted)
        return stats_domain.StateAnswersCalcOutput(
            state_answers_dict['exploration_id'],
            state_answers_dict['exploration_version'],
            state_answers_dict['state_name'],
            state_answers_dict['interaction_id'],
            self.id,
            frequencies)
class Top5AnswerFrequencies(BaseCalculation):
    """Calculation for the top 5 answers, by frequency."""

    def calculate_from_state_answers_dict(self, state_answers_dict):
        """Returns the 5 most frequently submitted answers, wrapped in a
        StateAnswersCalcOutput.

        This method is run from within the context of a MapReduce job.
        """
        submitted = state_answers_dict['submitted_answer_list']
        top_answers = _get_top_answers_by_frequency(
            (answer_dict['answer'] for answer_dict in submitted), limit=5)
        return stats_domain.StateAnswersCalcOutput(
            state_answers_dict['exploration_id'],
            state_answers_dict['exploration_version'],
            state_answers_dict['state_name'],
            state_answers_dict['interaction_id'],
            self.id,
            top_answers)
class Top10AnswerFrequencies(BaseCalculation):
    """Calculation for the top 10 answers, by frequency."""

    def calculate_from_state_answers_dict(self, state_answers_dict):
        """Returns the 10 most frequently submitted answers, wrapped in a
        StateAnswersCalcOutput.

        This method is run from within the context of a MapReduce job.
        """
        submitted = state_answers_dict['submitted_answer_list']
        top_answers = _get_top_answers_by_frequency(
            (answer_dict['answer'] for answer_dict in submitted), limit=10)
        return stats_domain.StateAnswersCalcOutput(
            state_answers_dict['exploration_id'],
            state_answers_dict['exploration_version'],
            state_answers_dict['state_name'],
            state_answers_dict['interaction_id'],
            self.id,
            top_answers)
class FrequencyCommonlySubmittedElements(BaseCalculation):
    """Calculation for determining the frequency of commonly submitted
    individual answers among multiple set answers (such as of type
    SetOfUnicodeString).
    """

    def calculate_from_state_answers_dict(self, state_answers_dict):
        """Flattens all submitted set-answers into individual elements and
        returns the 10 most common elements, wrapped in a
        StateAnswersCalcOutput.

        This method is run from within the context of a MapReduce job.
        """
        submitted = state_answers_dict['submitted_answer_list']
        all_elements = itertools.chain.from_iterable(
            answer_dict['answer'] for answer_dict in submitted)
        top_elements = _get_top_answers_by_frequency(all_elements, limit=10)
        return stats_domain.StateAnswersCalcOutput(
            state_answers_dict['exploration_id'],
            state_answers_dict['exploration_version'],
            state_answers_dict['state_name'],
            state_answers_dict['interaction_id'],
            self.id,
            top_elements)
class TopAnswersByCategorization(BaseCalculation):
    """Calculation for the top answers by both frequency and respective
    categorizations. The output from this calculation is one list for each
    classification category, where each list is a ranked list of answers, by
    frequency.
    """
    def calculate_from_state_answers_dict(self, state_answers_dict):
        """Computes the number of occurrences of each answer, split into groups
        based on the number of classification categories.
        This method is run from within the context of a MapReduce job.
        """
        # itertools.groupby only groups *consecutive* entries that share a
        # key; accumulating into a defaultdict below merges non-adjacent
        # runs of the same category, so the input need not be pre-sorted.
        grouped_submitted_answer_dicts = itertools.groupby(
            state_answers_dict['submitted_answer_list'],
            operator.itemgetter('classification_categorization'))
        submitted_answers_by_categorization = collections.defaultdict(list)
        for category, answer_dicts in grouped_submitted_answer_dicts:
            # Answers whose category is not recognized are silently dropped.
            if category in CLASSIFICATION_CATEGORIES:
                submitted_answers_by_categorization[category].extend(
                    d['answer'] for d in answer_dicts)
        # Rank answers by frequency within each category.
        categorized_answer_frequency_lists = (
            stats_domain.CategorizedAnswerFrequencyLists({
                category: _get_top_answers_by_frequency(categorized_answers)
                for category, categorized_answers in
                submitted_answers_by_categorization.iteritems()}))
        return stats_domain.StateAnswersCalcOutput(
            state_answers_dict['exploration_id'],
            state_answers_dict['exploration_version'],
            state_answers_dict['state_name'],
            state_answers_dict['interaction_id'],
            self.id,
            categorized_answer_frequency_lists)
class TopNUnresolvedAnswersByFrequency(BaseCalculation):
    """Calculation for the top unresolved answers by frequency.

    The output from this calculation is a ranked list of unresolved
    answers, in descending order of frequency.
    """

    def calculate_from_state_answers_dict(self, state_answers_dict):
        """Filters unresolved answers and then computes the number of
        occurrences of each unresolved answer.

        This method is run within the context of a MapReduce job.

        Args:
            state_answers_dict: dict. A dict containing state answers and
                exploration information such as:
                * exploration_id: id of the exploration.
                * exploration_version: Specific version of the exploration
                    or VERSION_ALL is used if answers are aggregated across
                    multiple versions.
                * state_name: Name of the state.
                * interaction_id: id of the interaction.
                * submitted_answer_list: A list of submitted answers.
                    NOTE: The answers in this list must be sorted in
                    chronological order of their submission.

        Returns:
            stats_domain.StateAnswersCalcOutput. A calculation output object
            containing the list of top unresolved answers, in descending
            order of frequency (up to at most limit answers).
        """
        answers_with_classification = [{
            'answer': submitted['answer'],
            'classification_categorization': (
                submitted['classification_categorization'])
        } for submitted in state_answers_dict['submitted_answer_list']]
        top_unresolved = _get_top_unresolved_answers_by_frequency(
            answers_with_classification,
            limit=feconf.TOP_UNRESOLVED_ANSWERS_LIMIT)
        return stats_domain.StateAnswersCalcOutput(
            state_answers_dict['exploration_id'],
            state_answers_dict['exploration_version'],
            state_answers_dict['state_name'],
            state_answers_dict['interaction_id'],
            self.id,
            top_unresolved)
| |
# Copyright (c) 2014, Facebook, Inc. All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
#
"""Tools for dynamically compiling and loading thrift code."""
import distutils.spawn
import imp
import os.path
import tempfile
from six import iterkeys, iteritems
from sparts import ctx
from sparts.compat import OrderedDict, check_output
from sparts.fileutils import NamedTemporaryDirectory
# NOTE(review): this public helper shadows the builtin `compile`; renaming
# would break callers, so the name is kept.
def compile(path, root='.', debug=False, **kwargs):
    """Return a compiled thrift file module from `path`

    Additional kwargs may be passed to indicate options to the thrift compiler:

    - new_style [default:True]: Use new-style classes
    - twisted [default:False]: Generated twisted-friendly bindings
    - tornado [default:False]: Generate tornado-friendly bindings
    - utf8strings [default:False]: Use unicode strings instead of native
    - slots [default:True]: Use __slots__ in generated structs
    """
    context = CompileContext(root=root, debug=debug)
    return context.importThrift(path, **kwargs)
def get_executable():
    """Returns the thrift compiler path if found in the PATH, else None"""
    # Prefer the fbthrift-style 'thrift1' binary; fall back to the plain
    # 'thrift' name.
    for candidate in ('thrift1', 'thrift'):
        found = distutils.spawn.find_executable(candidate)
        if found is not None:
            return found
    return None
def require_executable():
    """Returns the thrift compiler path, asserting that it is in the PATH."""
    found = get_executable()
    # NOTE: assert is stripped under -O; callers rely on AssertionError here.
    assert found is not None, 'Unable to find thrift compiler in PATH'
    return found
class CompileContext(object):
    """Drives the thrift compiler to build python bindings on the fly.

    Tracks include directories and dependent thrift payloads, invokes the
    thrift binary into temporary output directories, and imports the
    generated python code without polluting python's module cache.
    """
    def __init__(self, root='.', debug=False):
        self.root = root
        # Resolved path to the thrift compiler; raises if not installed.
        self.thrift_bin = require_executable()
        # OrderedDict used as an ordered set of absolute include dirs.
        self.include_dirs = OrderedDict()
        # basename -> absolute path of on-disk dependency files.
        self.dep_files = {}
        # filename -> literal contents for in-memory dependencies.
        self.dep_contents = {}
        self.debug = debug
        self.addIncludeDir(self.root)

    def makeTemporaryIncludeDir(self):
        """Materialize all registered dependencies into a temp directory."""
        d = NamedTemporaryDirectory(prefix='tsrc_')
        if self.debug:
            # Keep the directory on disk after exit, for inspection.
            d.keep()
        for k, v in iteritems(self.dep_contents):
            d.writefile(k, v)
        for k, v in iteritems(self.dep_files):
            d.symlink(k, v)
        return d

    def makeIncludeArgs(self, temp_include_dir=None):
        """Build the ``-I <dir>`` argument list for the thrift compiler."""
        result = []
        for k in iterkeys(self.include_dirs):
            result += ['-I', k]
        if temp_include_dir is not None:
            result += ['-I', temp_include_dir.name]
        return result

    def getThriftOptions(self, new_style=True, twisted=False, tornado=False,
                         utf8strings=False, slots=True, dynamic=False,
                         dynbase=None, dynexc=None, dynimport=None):
        """Translate keyword options into a thrift ``--gen py:...`` value.

        NOTE(review): the ``assert not tornado`` below makes the tornado
        branch unreachable, so tornado output is effectively unsupported.
        The dynamic/dynbase/dynexc/dynimport parameters are accepted but
        currently ignored (see TODO).
        """
        param = 'py'
        options = []
        if new_style:
            options.append('new_style')
        if twisted:
            options.append('twisted')
        assert not tornado
        if tornado:
            options.append('tornado')
        if utf8strings:
            options.append('utf8strings')
        if slots:
            options.append('slots')
        # TODO: Dynamic import jonx
        if len(options):
            param += ':' + ','.join(options)
        return param

    def addIncludeDir(self, path):
        """Register an existing directory on the compiler include path."""
        assert os.path.exists(path) and os.path.isdir(path)
        self.include_dirs[os.path.abspath(path)] = True

    def addDependentFilePath(self, path):
        """Register an on-disk dependency; its directory joins the include path."""
        assert os.path.exists(path)
        self.dep_files[os.path.basename(path)] = os.path.abspath(path)
        path = os.path.dirname(path) or '.'
        self.addIncludeDir(path)

    def addDependentFileContents(self, name, contents):
        """Register a dependency given directly as file contents."""
        self.dep_contents[name] = contents

    def importThriftStr(self, payload, **kwargs):
        """Compiles a thrift file from string `payload`"""
        with tempfile.NamedTemporaryFile(suffix='.thrift', mode='w') as f:
            if self.debug:
                # Leave the temp file behind for debugging.
                f.delete = False
            f.write(payload)
            f.flush()
            return self.importThrift(f.name, **kwargs)

    def importThrift(self, path, **kwargs):
        """Compiles a .thrift file, importing its contents into its return value"""
        path = os.path.abspath(path)
        assert os.path.exists(path)
        assert os.path.isfile(path)
        srcdir = self.makeTemporaryIncludeDir()
        pathbase = os.path.basename(path)
        srcdir.symlink(pathbase, path)
        # Two generated trees: a non-recursive one that we walk and import,
        # and a recursive one put on sys.path to satisfy cross-file imports.
        outdir = NamedTemporaryDirectory(prefix='to1_')
        outdir_recurse = NamedTemporaryDirectory(prefix='tor_')
        if self.debug:
            outdir.keep()
            outdir_recurse.keep()
        args = [self.thrift_bin] + self.makeIncludeArgs(srcdir) + \
            ["--gen", self.getThriftOptions(**kwargs), '-v',
             "-o", outdir.name, srcdir.join(pathbase)]
        check_output(args)
        args = [self.thrift_bin] + self.makeIncludeArgs(srcdir) + \
            ["--gen", self.getThriftOptions(**kwargs), '-v', '-r',
             "-o", outdir_recurse.name, srcdir.join(pathbase)]
        check_output(args)
        # Prepend output directory to the path
        with ctx.add_path(outdir_recurse.join('gen-py'), 0):
            thriftname = os.path.splitext(pathbase)[0]
            # NOTE(review): `result` is first bound when a ttypes.py is seen;
            # presumably the generator always emits one — confirm.
            for dirpath, dirnames, filenames in os.walk(outdir.join('gen-py')):
                # Emulate relative imports badly
                dirpath = os.path.abspath(outdir.join('gen-py', dirpath))
                with ctx.add_path(dirpath):
                    # Add types to module first
                    if 'ttypes.py' in filenames:
                        ttypes = self.importPython(dirpath + '/ttypes.py')
                        result = ttypes
                        filenames.remove('ttypes.py')
                    # Then constants
                    if 'constants.py' in filenames:
                        result = self.mergeModules(
                            self.importPython(dirpath + '/constants.py'),
                            result)
                        filenames.remove('constants.py')
                    for filename in filenames:
                        # Skip pyremotes
                        if not filename.endswith('.py') or \
                                filename == '__init__.py':
                            continue
                        # Attach services as attributes on the module.
                        svcpath = dirpath + '/' + filename
                        svcname = os.path.splitext(filename)[0]
                        svcmod = self.importPython(svcpath)
                        svcmod.__file__ = os.path.abspath(svcpath)
                        svcmod.__name__ = '%s.%s (generated)' % \
                            (thriftname, svcname)
                        setattr(result, svcname, svcmod)
        assert result is not None, "No files generated by %s" % (path, )
        # Set the __file__ attribute to the .thrift file instead
        # of the dynamically generated jonx
        result.__file__ = os.path.abspath(path)
        result.__name__ = thriftname + " (generated)"
        return result

    def mergeModules(self, module1, module2):
        """Copy every attribute of module2 onto module1 and return it;
        tolerates either argument being None."""
        if module1 is None:
            return module2
        if module2 is None:
            return module1
        for k in dir(module2):
            setattr(module1, k, getattr(module2, k))
        return module1

    def importPython(self, path):
        """Create a new module from code at `path`.
        Does not pollute python's module cache"""
        assert os.path.exists(path)
        # Any special variables we want to include in execution context
        orig_locals = {}
        exec_locals = orig_locals.copy()
        # Keep a copy of the module cache prior to execution
        with ctx.module_snapshot():
            # NOTE(review): execfile exists only on Python 2; this module
            # predates a py3 port.
            execfile(path, exec_locals, exec_locals)
        # Generate a new module object, and assign the modified locals
        # as attributes on it.
        result = imp.new_module(path)
        for k, v in iteritems(exec_locals):
            setattr(result, k, v)
        return result
| |
import numpy as np
from copy import deepcopy
from pycqed.measurement.waveform_control.block import Block
from pycqed.measurement.waveform_control import sequence
from pycqed.measurement.waveform_control import pulsar as ps
from pycqed.measurement.pulse_sequences.single_qubit_tek_seq_elts import \
sweep_pulse_params, add_preparation_pulses, pulse_list_list_seq
from pycqed.measurement.pulse_sequences.multi_qubit_tek_seq_elts import \
generate_mux_ro_pulse_list
import logging
log = logging.getLogger(__name__)
def get_pulse_dict_from_pars(pulse_pars):
    '''
    Build the dictionary of primitive single-qubit pulses from a single
    set of pulse parameters.

    Every entry is a deepcopy of ``pulse_pars``, so mutating the result
    never touches the caller's input dictionary.

    input args:
        pulse_pars: dictionary containing pulse_parameters
    return:
        pulses: dictionary of pulse_pars dictionaries, keyed by
            'I', 'X180', 'mX180', 'X90', 'mX90',
            'Y180', 'mY180', 'Y90', 'mY90'
    '''
    full_amp = pulse_pars['amplitude']
    half_amp = full_amp * pulse_pars['amp90_scale']
    names = ('I', 'X180', 'mX180', 'X90', 'mX90',
             'Y180', 'mY180', 'Y90', 'mY90')
    pulses = {name: deepcopy(pulse_pars) for name in names}
    # Per-pulse amplitude/phase overrides relative to the base parameters.
    overrides = {
        'I': {'amplitude': 0},
        'mX180': {'amplitude': -full_amp},
        'X90': {'amplitude': half_amp},
        'mX90': {'amplitude': -half_amp},
        'Y180': {'phase': 90},
        'mY180': {'phase': 90, 'amplitude': -full_amp},
        'Y90': {'phase': 90, 'amplitude': half_amp},
        'mY90': {'phase': 90, 'amplitude': -half_amp},
    }
    for name, params in overrides.items():
        pulses[name].update(params)
    return pulses
def Ramsey_with_flux_pulse_meas_seq(thetas, qb, X90_separation, verbose=False,
                                    upload=True, return_seq=False,
                                    cal_points=False):
    '''
    Performs a Ramsey with interleaved Flux pulse

    Timings of sequence
           <----- |fluxpulse|
        |X90|  -------------------     |X90|  ---  |RO|
                                   sweep phase

    timing of the flux pulse relative to the center of the first X90 pulse

    Args:
        thetas: numpy array of phase shifts for the second pi/2 pulse
        qb: qubit object (must have the methods get_operation_dict(),
            get_drive_pars() etc.
        X90_separation: float (separation of the two pi/2 pulses for Ramsey
        verbose: bool
        upload: bool
        return_seq: bool

    Returns:
        if return_seq:
          seq: qcodes sequence
          el_list: list of pulse elements
        else:
            seq_name: string
    '''
    # Deliberately disabled: the implementation below predates the current
    # waveform-generation framework and is kept for reference only.
    raise NotImplementedError(
        'Ramsey_with_flux_pulse_meas_seq has not been '
        'converted to the latest waveform generation code and can not be used.')

    # --- unreachable legacy implementation below this line ---
    # NOTE(review): `multi_pulse_elt` and `station` are not defined in this
    # module, so this code would fail with NameError even without the raise.
    qb_name = qb.name
    operation_dict = qb.get_operation_dict()
    pulse_pars = qb.get_drive_pars()
    RO_pars = qb.get_RO_pars()
    seq_name = 'Measurement_Ramsey_sequence_with_Flux_pulse'
    seq = sequence.Sequence(seq_name)
    el_list = []
    pulses = get_pulse_dict_from_pars(pulse_pars)
    flux_pulse = operation_dict["flux "+qb_name]
    # Used for checking dynamic phase compensation
    # if flux_pulse['amplitude'] != 0:
    #     flux_pulse['basis_rotation'] = {qb_name: -80.41028958782647}
    flux_pulse['ref_point'] = 'end'
    X90_2 = deepcopy(pulses['X90'])
    # Delay the second X90 so the pi/2 pulses are X90_separation apart.
    X90_2['pulse_delay'] = X90_separation - flux_pulse['pulse_delay'] \
        - X90_2['nr_sigma']*X90_2['sigma']
    X90_2['ref_point'] = 'start'
    for i, theta in enumerate(thetas):
        X90_2['phase'] = theta*180/np.pi
        if cal_points and (i == (len(thetas)-4) or i == (len(thetas)-3)):
            # Ground-state calibration point: readout only.
            el = multi_pulse_elt(i, station, [RO_pars])
        elif cal_points and (i == (len(thetas)-2) or i == (len(thetas)-1)):
            # Calibration point with the flux pulse amplitude zeroed.
            flux_pulse['amplitude'] = 0
            el = multi_pulse_elt(i, station,
                                 [pulses['X90'], flux_pulse, X90_2, RO_pars])
        else:
            el = multi_pulse_elt(i, station,
                                 [pulses['X90'], flux_pulse, X90_2, RO_pars])
        el_list.append(el)
        seq.append_element(el, trigger_wait=True)
    if upload:
        station.pulsar.program_awgs(seq, *el_list, verbose=verbose)

    if return_seq:
        return seq, el_list
    else:
        return seq_name
def fluxpulse_scope_sequence(
        delays, freqs, qb_name, operation_dict, cz_pulse_name,
        ro_pulse_delay=None, cal_points=None, prep_params=None, upload=True):
    '''
    Performs X180 pulse on top of a fluxpulse

    Timings of sequence

       |          ----------           |X180|  ------------------  |RO|
       |        ---      | --------- fluxpulse ---------- |
                         <-  delay  ->

    :param ro_pulse_delay: Can be 'auto' to start out the readout after
        the end of the flux pulse or a delay in seconds to start a fixed
        amount of time after the drive pulse. If not provided or set to
        None, a default fixed delay of 100e-9 is used.
    '''
    if prep_params is None:
        prep_params = {}
    if ro_pulse_delay is None:
        ro_pulse_delay = 100e-9

    seq_name = 'Fluxpulse_scope_sequence'

    # Pi pulse in its own element; the flux pulse is referenced to its middle.
    ge_pulse = deepcopy(operation_dict['X180 ' + qb_name])
    ge_pulse.update({'name': 'FPS_Pi', 'element_name': 'FPS_Pi_el'})

    flux_pulse = deepcopy(operation_dict[cz_pulse_name])
    flux_pulse.update({'name': 'FPS_Flux',
                       'ref_pulse': 'FPS_Pi',
                       'ref_point': 'middle'})
    # Swept delays are measured from the flux pulse towards the drive pulse.
    flux_pulse_delays = -np.asarray(delays) - flux_pulse.get(
        'buffer_length_start', 0)

    ro_pulse = deepcopy(operation_dict['RO ' + qb_name])
    ro_pulse.update({'name': 'FPS_Ro', 'ref_pulse': 'FPS_Pi'})
    if ro_pulse_delay == 'auto':
        # Start the readout after the latest possible flux pulse end.
        ro_pulse['ref_point'] = 'middle'
        ro_pulse['pulse_delay'] = \
            flux_pulse['pulse_length'] - np.min(delays) + \
            flux_pulse.get('buffer_length_end', 0) + \
            flux_pulse.get('trans_length', 0)
    else:
        ro_pulse['ref_point'] = 'end'
        ro_pulse['pulse_delay'] = ro_pulse_delay

    swept_pulses = sweep_pulse_params(
        [ge_pulse, flux_pulse, ro_pulse],
        {'FPS_Flux.pulse_delay': flux_pulse_delays})
    swept_pulses_with_prep = [
        add_preparation_pulses(p, operation_dict, [qb_name], **prep_params)
        for p in swept_pulses]
    seq = pulse_list_list_seq(swept_pulses_with_prep, seq_name, upload=False)
    if cal_points is not None:
        # add calibration segments
        seq.extend(cal_points.create_segments(operation_dict, **prep_params))
    seq.repeat_ro(f"RO {qb_name}", operation_dict)

    log.debug(seq)
    if upload:
        ps.Pulsar.get_instance().program_awgs(seq)

    return seq, np.arange(seq.n_acq_elements()), freqs
def fluxpulse_amplitude_sequence(amplitudes,
                                 freqs,
                                 qb_name,
                                 operation_dict,
                                 cz_pulse_name,
                                 delay=None,
                                 cal_points=None,
                                 prep_params=None,
                                 upload=True):
    '''
    Performs X180 pulse on top of a fluxpulse

    Timings of sequence

       |          ----------           |X180|  --------------  |RO|
       |        ---      | --------- fluxpulse ---------- |
    '''
    prep_params = {} if prep_params is None else prep_params

    seq_name = 'Fluxpulse_amplitude_sequence'

    ge_pulse = deepcopy(operation_dict['X180 ' + qb_name])
    ge_pulse.update({'name': 'FPA_Pi', 'element_name': 'FPA_Pi_el'})

    flux_pulse = deepcopy(operation_dict[cz_pulse_name])
    flux_pulse.update({'name': 'FPA_Flux',
                       'ref_pulse': 'FPA_Pi',
                       'ref_point': 'middle'})
    if delay is None:
        # Default: center the drive pulse on the flux pulse.
        delay = flux_pulse['pulse_length'] / 2
    flux_pulse['pulse_delay'] = \
        -flux_pulse.get('buffer_length_start', 0) - delay

    ro_pulse = deepcopy(operation_dict['RO ' + qb_name])
    ro_pulse.update({'name': 'FPA_Ro',
                     'ref_pulse': 'FPA_Pi',
                     'ref_point': 'middle',
                     'pulse_delay': flux_pulse['pulse_length'] - delay +
                                    flux_pulse.get('buffer_length_end', 0) +
                                    flux_pulse.get('trans_length', 0)})

    swept_pulses = sweep_pulse_params([ge_pulse, flux_pulse, ro_pulse],
                                      {'FPA_Flux.amplitude': amplitudes})
    swept_pulses_with_prep = [
        add_preparation_pulses(p, operation_dict, [qb_name], **prep_params)
        for p in swept_pulses]
    seq = pulse_list_list_seq(swept_pulses_with_prep, seq_name, upload=False)
    if cal_points is not None:
        # add calibration segments
        seq.extend(cal_points.create_segments(operation_dict, **prep_params))

    log.debug(seq)
    if upload:
        ps.Pulsar.get_instance().program_awgs(seq)

    return seq, np.arange(seq.n_acq_elements()), freqs
def T2_freq_sweep_seq(amplitudes,
                      qb_name,
                      operation_dict,
                      cz_pulse_name,
                      flux_lengths,
                      phases,
                      cal_points=None,
                      upload=True):
    '''
    Ramsey-style T2 measurement at a flux-shifted qubit frequency:
    X90 -- flux pulse (swept amplitude and length) -- X90 (swept phase) -- RO.
    '''
    n_amp = len(amplitudes)
    n_len = len(flux_lengths)
    n_phase = len(phases)
    # Flatten the (amplitude, length, phase) sweep grid, amplitude slowest.
    amplitudes = np.repeat(amplitudes, n_len * n_phase)
    flux_lengths = np.tile(np.repeat(flux_lengths, n_phase), n_amp)
    phases = np.tile(phases, n_len * n_amp)

    seq_name = 'T2_freq_sweep_seq'

    first_x90 = deepcopy(operation_dict['X90 ' + qb_name])
    first_x90.update({'name': 'DF_X90', 'element_name': 'DF_X90_el'})

    flux_pulse = deepcopy(operation_dict[cz_pulse_name])
    flux_pulse.update({'name': 'DF_Flux',
                       'ref_pulse': 'DF_X90',
                       'ref_point': 'end',
                       'pulse_delay': 0})  # -flux_pulse.get('buffer_length_start', 0)

    second_x90 = deepcopy(operation_dict['X90 ' + qb_name])
    second_x90.update({'name': 'DF_X90_2',
                       'ref_pulse': 'DF_Flux',
                       'ref_point': 'end',
                       'pulse_delay': 0,
                       'element_name': 'DF_X90_el'})

    ro_pulse = deepcopy(operation_dict['RO ' + qb_name])
    ro_pulse.update({'name': 'DF_Ro',
                     'ref_pulse': 'DF_X90_2',
                     'ref_point': 'end',
                     'pulse_delay': 0})

    swept_pulses = sweep_pulse_params(
        [first_x90, flux_pulse, second_x90, ro_pulse],
        {'DF_Flux.amplitude': amplitudes,
         'DF_Flux.pulse_length': flux_lengths,
         'DF_X90_2.phase': phases})
    seq = pulse_list_list_seq(swept_pulses, seq_name, upload=False)
    if cal_points is not None:
        # add calibration segments
        seq.extend(cal_points.create_segments(operation_dict))
    seq.repeat_ro('RO ' + qb_name, operation_dict)

    if upload:
        ps.Pulsar.get_instance().program_awgs(seq)

    return seq, np.arange(seq.n_acq_elements())
def T1_freq_sweep_seq(amplitudes,
                      qb_name,
                      operation_dict,
                      cz_pulse_name,
                      flux_lengths,
                      cal_points=None,
                      upload=True,
                      prep_params=None):
    '''
    Performs a X180 pulse before changing the qubit frequency with the flux

    Timings of sequence

       |  ---|X180|  ------------------------------|RO|
       |  --------| --------- fluxpulse ---------- |
    '''
    if prep_params is None:
        prep_params = {}
    # Flatten the (amplitude, flux_length) sweep grid, amplitude slowest.
    n_amp = len(amplitudes)
    amplitudes = np.repeat(amplitudes, len(flux_lengths))
    flux_lengths = np.tile(flux_lengths, n_amp)

    seq_name = 'T1_freq_sweep_sequence'

    ge_pulse = deepcopy(operation_dict['X180 ' + qb_name])
    ge_pulse.update({'name': 'DF_Pi', 'element_name': 'DF_Pi_el'})

    flux_pulse = deepcopy(operation_dict[cz_pulse_name])
    flux_pulse.update({'name': 'DF_Flux',
                       'ref_pulse': 'DF_Pi',
                       'ref_point': 'end',
                       'pulse_delay': 0})  # -flux_pulse.get('buffer_length_start', 0)

    ro_pulse = deepcopy(operation_dict['RO ' + qb_name])
    ro_pulse.update({'name': 'DF_Ro',
                     'ref_pulse': 'DF_Flux',
                     'ref_point': 'end',
                     'pulse_delay': flux_pulse.get('buffer_length_end', 0)})

    swept_pulses = sweep_pulse_params(
        [ge_pulse, flux_pulse, ro_pulse],
        {'DF_Flux.amplitude': amplitudes,
         'DF_Flux.pulse_length': flux_lengths})
    swept_pulses_with_prep = [
        add_preparation_pulses(p, operation_dict, [qb_name], **prep_params)
        for p in swept_pulses]
    seq = pulse_list_list_seq(swept_pulses_with_prep, seq_name, upload=False)
    if cal_points is not None:
        # add calibration segments
        seq.extend(cal_points.create_segments(operation_dict, **prep_params))
    seq.repeat_ro('RO ' + qb_name, operation_dict)

    if upload:
        ps.Pulsar.get_instance().program_awgs(seq)

    return seq, np.arange(seq.n_acq_elements())
def add_suffix(operation_list, suffix):
    """Return a new list with ``suffix`` appended to every operation name."""
    return list(map(lambda op: op + suffix, operation_list))
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2014, Mario Vilas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Wrapper for dbghelp.dll in ctypes.
"""
__revision__ = "$Id$"
from winappdbg.win32.defines import *
from winappdbg.win32.version import *
from winappdbg.win32.kernel32 import *
# DbgHelp versions and features list:
# http://msdn.microsoft.com/en-us/library/windows/desktop/ms679294(v=vs.85).aspx
#------------------------------------------------------------------------------
# Tries to load the newest version of dbghelp.dll if available.
def _load_latest_dbghelp_dll():
    """Try to load the newest dbghelp.dll from an installed "Debugging
    Tools for Windows", falling back silently to the system default.

    Picks the x86 or x64 toolkit directory based on the process/OS
    architecture (``arch`` / ``wow64`` from winappdbg.win32.version).
    """
    from os import getenv
    from os.path import join, exists
    program_files_location = getenv("ProgramFiles")
    if not program_files_location:
        # BUGFIX: use a raw string so the backslash can never be taken as
        # an escape sequence ("\P" only worked by accident and raises
        # SyntaxWarning/DeprecationWarning on modern Pythons).
        program_files_location = r"C:\Program Files"

    program_files_x86_location = getenv("ProgramFiles(x86)")

    if arch == ARCH_AMD64:
        if wow64:
            # 32-bit python on 64-bit Windows: prefer the x86 tools.
            pathname = join(
                program_files_x86_location or program_files_location,
                "Debugging Tools for Windows (x86)",
                "dbghelp.dll")
        else:
            pathname = join(
                program_files_location,
                "Debugging Tools for Windows (x64)",
                "dbghelp.dll")
    elif arch == ARCH_I386:
        pathname = join(
            program_files_location,
            "Debugging Tools for Windows (x86)",
            "dbghelp.dll")
    else:
        pathname = None

    if pathname and exists(pathname):
        try:
            _dbghelp = ctypes.windll.LoadLibrary(pathname)
            ctypes.windll.dbghelp = _dbghelp
        except Exception:
            # Best effort only: fall back to whatever dbghelp.dll the
            # system loader resolves.
            pass

_load_latest_dbghelp_dll()
# Recover the old binding of the "os" symbol.
# XXX FIXME not sure if I really need to do this!
##from version import os
#------------------------------------------------------------------------------
#==============================================================================
# This is used later on to calculate the list of exported symbols.
# `_all` is assigned twice on purpose: binding it to None first puts the
# name "_all" itself into vars(), so it is excluded from the exported
# symbol list computed later (presumably by set difference — the tail of
# the module is outside this view).
_all = None
_all = set(vars().keys())
#==============================================================================
# SymGetHomeDirectory "type" values
hdBase = 0
hdSym = 1
hdSrc = 2

# UNDNAME_* flags controlling C++ symbol undecoration
# (used with UnDecorateSymbolName and friends).
UNDNAME_32_BIT_DECODE = 0x0800
UNDNAME_COMPLETE = 0x0000
UNDNAME_NAME_ONLY = 0x1000
UNDNAME_NO_ACCESS_SPECIFIERS = 0x0080
UNDNAME_NO_ALLOCATION_LANGUAGE = 0x0010
UNDNAME_NO_ALLOCATION_MODEL = 0x0008
UNDNAME_NO_ARGUMENTS = 0x2000
UNDNAME_NO_CV_THISTYPE = 0x0040
UNDNAME_NO_FUNCTION_RETURNS = 0x0004
UNDNAME_NO_LEADING_UNDERSCORES = 0x0001
UNDNAME_NO_MEMBER_TYPE = 0x0200
UNDNAME_NO_MS_KEYWORDS = 0x0002
UNDNAME_NO_MS_THISTYPE = 0x0020
UNDNAME_NO_RETURN_UDT_MODEL = 0x0400
UNDNAME_NO_SPECIAL_SYMS = 0x4000
UNDNAME_NO_THISTYPE = 0x0060
UNDNAME_NO_THROW_SIGNATURES = 0x0100
#--- IMAGEHLP_MODULE structure and related ------------------------------------
# SYMOPT_* bit flags: the options mask passed to SymSetOptions and
# returned by SymGetOptions (see the wrappers below).
SYMOPT_ALLOW_ABSOLUTE_SYMBOLS = 0x00000800
SYMOPT_ALLOW_ZERO_ADDRESS = 0x01000000
SYMOPT_AUTO_PUBLICS = 0x00010000
SYMOPT_CASE_INSENSITIVE = 0x00000001
SYMOPT_DEBUG = 0x80000000
SYMOPT_DEFERRED_LOADS = 0x00000004
SYMOPT_DISABLE_SYMSRV_AUTODETECT = 0x02000000
SYMOPT_EXACT_SYMBOLS = 0x00000400
SYMOPT_FAIL_CRITICAL_ERRORS = 0x00000200
SYMOPT_FAVOR_COMPRESSED = 0x00800000
SYMOPT_FLAT_DIRECTORY = 0x00400000
SYMOPT_IGNORE_CVREC = 0x00000080
SYMOPT_IGNORE_IMAGEDIR = 0x00200000
SYMOPT_IGNORE_NT_SYMPATH = 0x00001000
SYMOPT_INCLUDE_32BIT_MODULES = 0x00002000
SYMOPT_LOAD_ANYTHING = 0x00000040
SYMOPT_LOAD_LINES = 0x00000010
SYMOPT_NO_CPP = 0x00000008
SYMOPT_NO_IMAGE_SEARCH = 0x00020000
SYMOPT_NO_PROMPTS = 0x00080000
SYMOPT_NO_PUBLICS = 0x00008000
SYMOPT_NO_UNQUALIFIED_LOADS = 0x00000100
SYMOPT_OVERWRITE = 0x00100000
SYMOPT_PUBLICS_ONLY = 0x00004000
SYMOPT_SECURE = 0x00040000
SYMOPT_UNDNAME = 0x00000002
##SSRVOPT_DWORD
##SSRVOPT_DWORDPTR
##SSRVOPT_GUIDPTR
##
##SSRVOPT_CALLBACK
##SSRVOPT_DOWNSTREAM_STORE
##SSRVOPT_FLAT_DEFAULT_STORE
##SSRVOPT_FAVOR_COMPRESSED
##SSRVOPT_NOCOPY
##SSRVOPT_OVERWRITE
##SSRVOPT_PARAMTYPE
##SSRVOPT_PARENTWIN
##SSRVOPT_PROXY
##SSRVOPT_RESET
##SSRVOPT_SECURE
##SSRVOPT_SETCONTEXT
##SSRVOPT_TRACE
##SSRVOPT_UNATTENDED
# typedef enum
# {
# SymNone = 0,
# SymCoff,
# SymCv,
# SymPdb,
# SymExport,
# SymDeferred,
# SymSym,
# SymDia,
# SymVirtual,
# NumSymTypes
# } SYM_TYPE;
# Numeric values of the SYM_TYPE enumeration shown in the typedef comment
# above; reported in the SymType field of the IMAGEHLP_MODULE* structures.
SymNone = 0
SymCoff = 1
SymCv = 2
SymPdb = 3
SymExport = 4
SymDeferred = 5
SymSym = 6
SymDia = 7
SymVirtual = 8
NumSymTypes = 9
# typedef struct _IMAGEHLP_MODULE64 {
# DWORD SizeOfStruct;
# DWORD64 BaseOfImage;
# DWORD ImageSize;
# DWORD TimeDateStamp;
# DWORD CheckSum;
# DWORD NumSyms;
# SYM_TYPE SymType;
# TCHAR ModuleName[32];
# TCHAR ImageName[256];
# TCHAR LoadedImageName[256];
# TCHAR LoadedPdbName[256];
# DWORD CVSig;
# TCHAR CVData[MAX_PATH*3];
# DWORD PdbSig;
# GUID PdbSig70;
# DWORD PdbAge;
# BOOL PdbUnmatched;
# BOOL DbgUnmatched;
# BOOL LineNumbers;
# BOOL GlobalSymbols;
# BOOL TypeInfo;
# BOOL SourceIndexed;
# BOOL Publics;
# } IMAGEHLP_MODULE64, *PIMAGEHLP_MODULE64;
class IMAGEHLP_MODULE (Structure):
    """IMAGEHLP_MODULE: ANSI strings, 32-bit base address.

    SizeOfStruct must be set to sizeof(IMAGEHLP_MODULE) before passing
    the structure to SymGetModuleInfo (see SymGetModuleInfoA below).
    """
    _fields_ = [
        ("SizeOfStruct",    DWORD),
        ("BaseOfImage",     DWORD),
        ("ImageSize",       DWORD),
        ("TimeDateStamp",   DWORD),
        ("CheckSum",        DWORD),
        ("NumSyms",         DWORD),
        ("SymType",         DWORD),         # SYM_TYPE
        ("ModuleName",      CHAR * 32),
        ("ImageName",       CHAR * 256),
        ("LoadedImageName", CHAR * 256),
    ]
PIMAGEHLP_MODULE = POINTER(IMAGEHLP_MODULE)
class IMAGEHLP_MODULE64 (Structure):
    """IMAGEHLP_MODULE64: ANSI strings, 64-bit base address, extended
    PDB/CodeView fields (matches the C typedef in the comment above).

    SizeOfStruct must be set to sizeof(IMAGEHLP_MODULE64) before passing
    the structure to SymGetModuleInfo64.
    """
    _fields_ = [
        ("SizeOfStruct",    DWORD),
        ("BaseOfImage",     DWORD64),
        ("ImageSize",       DWORD),
        ("TimeDateStamp",   DWORD),
        ("CheckSum",        DWORD),
        ("NumSyms",         DWORD),
        ("SymType",         DWORD),         # SYM_TYPE
        ("ModuleName",      CHAR * 32),
        ("ImageName",       CHAR * 256),
        ("LoadedImageName", CHAR * 256),
        ("LoadedPdbName",   CHAR * 256),
        ("CVSig",           DWORD),
        ("CVData",          CHAR * (MAX_PATH * 3)),
        ("PdbSig",          DWORD),
        ("PdbSig70",        GUID),
        ("PdbAge",          DWORD),
        ("PdbUnmatched",    BOOL),
        ("DbgUnmatched",    BOOL),
        ("LineNumbers",     BOOL),
        ("GlobalSymbols",   BOOL),
        ("TypeInfo",        BOOL),
        ("SourceIndexed",   BOOL),
        ("Publics",         BOOL),
    ]
PIMAGEHLP_MODULE64 = POINTER(IMAGEHLP_MODULE64)
class IMAGEHLP_MODULEW (Structure):
    """IMAGEHLP_MODULEW: Unicode strings, 32-bit base address.

    SizeOfStruct must be set to sizeof(IMAGEHLP_MODULEW) before passing
    the structure to SymGetModuleInfoW.
    """
    _fields_ = [
        ("SizeOfStruct",    DWORD),
        ("BaseOfImage",     DWORD),
        ("ImageSize",       DWORD),
        ("TimeDateStamp",   DWORD),
        ("CheckSum",        DWORD),
        ("NumSyms",         DWORD),
        ("SymType",         DWORD),         # SYM_TYPE
        ("ModuleName",      WCHAR * 32),
        ("ImageName",       WCHAR * 256),
        ("LoadedImageName", WCHAR * 256),
    ]
PIMAGEHLP_MODULEW = POINTER(IMAGEHLP_MODULEW)
class IMAGEHLP_MODULEW64 (Structure):
    """IMAGEHLP_MODULEW64: Unicode strings, 64-bit base address, extended
    PDB/CodeView fields.

    SizeOfStruct must be set to sizeof(IMAGEHLP_MODULEW64) before the
    SymGetModuleInfo64W call.
    """
    _fields_ = [
        ("SizeOfStruct",    DWORD),
        ("BaseOfImage",     DWORD64),
        ("ImageSize",       DWORD),
        ("TimeDateStamp",   DWORD),
        ("CheckSum",        DWORD),
        ("NumSyms",         DWORD),
        ("SymType",         DWORD),         # SYM_TYPE
        ("ModuleName",      WCHAR * 32),
        ("ImageName",       WCHAR * 256),
        ("LoadedImageName", WCHAR * 256),
        ("LoadedPdbName",   WCHAR * 256),
        ("CVSig",           DWORD),
        ("CVData",          WCHAR * (MAX_PATH * 3)),
        ("PdbSig",          DWORD),
        ("PdbSig70",        GUID),
        ("PdbAge",          DWORD),
        ("PdbUnmatched",    BOOL),
        ("DbgUnmatched",    BOOL),
        ("LineNumbers",     BOOL),
        ("GlobalSymbols",   BOOL),
        ("TypeInfo",        BOOL),
        ("SourceIndexed",   BOOL),
        ("Publics",         BOOL),
    ]
PIMAGEHLP_MODULEW64 = POINTER(IMAGEHLP_MODULEW64)
#--- dbghelp.dll --------------------------------------------------------------
# XXX the ANSI versions of these functions don't end in "A" as expected!
# BOOL WINAPI MakeSureDirectoryPathExists(
# _In_ PCSTR DirPath
# );
def MakeSureDirectoryPathExistsA(DirPath):
    """Create every missing directory in DirPath (ANSI); raises on failure."""
    fn = windll.dbghelp.MakeSureDirectoryPathExists
    fn.argtypes = [LPSTR]
    fn.restype = bool
    fn.errcheck = RaiseIfZero
    return fn(DirPath)

MakeSureDirectoryPathExistsW = MakeWideVersion(MakeSureDirectoryPathExistsA)
MakeSureDirectoryPathExists = GuessStringType(MakeSureDirectoryPathExistsA, MakeSureDirectoryPathExistsW)
# BOOL WINAPI SymInitialize(
# __in HANDLE hProcess,
# __in_opt PCTSTR UserSearchPath,
# __in BOOL fInvadeProcess
# );
def SymInitializeA(hProcess, UserSearchPath = None, fInvadeProcess = False):
    """Initialize the dbghelp symbol handler for hProcess (ANSI)."""
    fn = windll.dbghelp.SymInitialize
    fn.argtypes = [HANDLE, LPSTR, BOOL]
    fn.restype = bool
    fn.errcheck = RaiseIfZero
    # Collapse empty search paths to NULL so dbghelp uses its defaults.
    fn(hProcess, UserSearchPath or None, fInvadeProcess)

SymInitializeW = MakeWideVersion(SymInitializeA)
SymInitialize = GuessStringType(SymInitializeA, SymInitializeW)
# BOOL WINAPI SymCleanup(
# __in HANDLE hProcess
# );
def SymCleanup(hProcess):
    """Release all symbol-handler resources associated with hProcess."""
    fn = windll.dbghelp.SymCleanup
    fn.argtypes = [HANDLE]
    fn.restype = bool
    fn.errcheck = RaiseIfZero  # raises on FALSE
    fn(hProcess)
# BOOL WINAPI SymRefreshModuleList(
# __in HANDLE hProcess
# );
def SymRefreshModuleList(hProcess):
    """Refresh the symbol handler's module list for hProcess."""
    fn = windll.dbghelp.SymRefreshModuleList
    fn.argtypes = [HANDLE]
    fn.restype = bool
    fn.errcheck = RaiseIfZero  # raises on FALSE
    fn(hProcess)
# BOOL WINAPI SymSetParentWindow(
# __in HWND hwnd
# );
def SymSetParentWindow(hwnd):
    """Set the window dbghelp uses as parent for any UI it shows."""
    fn = windll.dbghelp.SymSetParentWindow
    fn.argtypes = [HWND]
    fn.restype = bool
    fn.errcheck = RaiseIfZero  # raises on FALSE
    fn(hwnd)
# DWORD WINAPI SymSetOptions(
# __in DWORD SymOptions
# );
def SymSetOptions(SymOptions):
    """Set the SYMOPT_* options mask for the symbol handler.

    Returns:
        DWORD: the options mask in effect after the call, as documented
        for SymSetOptions on MSDN. (The original wrapper discarded this
        value; returning it is backward compatible with callers that
        ignore the result.)
    """
    _SymSetOptions = windll.dbghelp.SymSetOptions
    _SymSetOptions.argtypes = [DWORD]
    _SymSetOptions.restype = DWORD
    _SymSetOptions.errcheck = RaiseIfZero
    return _SymSetOptions(SymOptions)
# DWORD WINAPI SymGetOptions(void);
def SymGetOptions():
    """Return the current SYMOPT_* options mask as a DWORD."""
    fn = windll.dbghelp.SymGetOptions
    fn.argtypes = []
    fn.restype = DWORD
    return fn()
# DWORD WINAPI SymLoadModule(
# __in HANDLE hProcess,
# __in_opt HANDLE hFile,
# __in_opt PCSTR ImageName,
# __in_opt PCSTR ModuleName,
# __in DWORD BaseOfDll,
# __in DWORD SizeOfDll
# );
def SymLoadModuleA(hProcess, hFile = None, ImageName = None, ModuleName = None, BaseOfDll = None, SizeOfDll = None):
    """Load symbol information for a module (ANSI, 32-bit base address).

    Returns the module base address; raises WinError if the call failed
    (a zero return with the last error still set to ERROR_SUCCESS means
    the module was already loaded and is not an error).
    """
    fn = windll.dbghelp.SymLoadModule
    fn.argtypes = [HANDLE, HANDLE, LPSTR, LPSTR, DWORD, DWORD]
    fn.restype = DWORD
    # Falsy arguments collapse to the API's "not supplied" values.
    ImageName = ImageName or None
    ModuleName = ModuleName or None
    BaseOfDll = BaseOfDll or 0
    SizeOfDll = SizeOfDll or 0
    SetLastError(ERROR_SUCCESS)
    lpBaseAddress = fn(hProcess, hFile, ImageName, ModuleName, BaseOfDll, SizeOfDll)
    if lpBaseAddress == NULL:
        dwErrorCode = GetLastError()
        if dwErrorCode != ERROR_SUCCESS:
            raise ctypes.WinError(dwErrorCode)
    return lpBaseAddress

SymLoadModuleW = MakeWideVersion(SymLoadModuleA)
SymLoadModule = GuessStringType(SymLoadModuleA, SymLoadModuleW)
# DWORD64 WINAPI SymLoadModule64(
# __in HANDLE hProcess,
# __in_opt HANDLE hFile,
# __in_opt PCSTR ImageName,
# __in_opt PCSTR ModuleName,
# __in DWORD64 BaseOfDll,
# __in DWORD SizeOfDll
# );
def SymLoadModule64A(hProcess, hFile = None, ImageName = None, ModuleName = None, BaseOfDll = None, SizeOfDll = None):
    """Load symbol information for a module (ANSI, 64-bit base address).

    Returns the module base address; raises WinError if the call failed
    (a zero return with the last error still set to ERROR_SUCCESS means
    the module was already loaded and is not an error).
    """
    fn = windll.dbghelp.SymLoadModule64
    fn.argtypes = [HANDLE, HANDLE, LPSTR, LPSTR, DWORD64, DWORD]
    fn.restype = DWORD64
    # Falsy arguments collapse to the API's "not supplied" values.
    ImageName = ImageName or None
    ModuleName = ModuleName or None
    BaseOfDll = BaseOfDll or 0
    SizeOfDll = SizeOfDll or 0
    SetLastError(ERROR_SUCCESS)
    lpBaseAddress = fn(hProcess, hFile, ImageName, ModuleName, BaseOfDll, SizeOfDll)
    if lpBaseAddress == NULL:
        dwErrorCode = GetLastError()
        if dwErrorCode != ERROR_SUCCESS:
            raise ctypes.WinError(dwErrorCode)
    return lpBaseAddress

SymLoadModule64W = MakeWideVersion(SymLoadModule64A)
SymLoadModule64 = GuessStringType(SymLoadModule64A, SymLoadModule64W)
# BOOL WINAPI SymUnloadModule(
# __in HANDLE hProcess,
# __in DWORD BaseOfDll
# );
def SymUnloadModule(hProcess, BaseOfDll):
    """Unload symbol information for the module at BaseOfDll (32-bit)."""
    fn = windll.dbghelp.SymUnloadModule
    fn.argtypes = [HANDLE, DWORD]
    fn.restype = bool
    fn.errcheck = RaiseIfZero  # raises on FALSE
    fn(hProcess, BaseOfDll)
# BOOL WINAPI SymUnloadModule64(
# __in HANDLE hProcess,
# __in DWORD64 BaseOfDll
# );
def SymUnloadModule64(hProcess, BaseOfDll):
    """Unload symbol information for the module at BaseOfDll (64-bit)."""
    fn = windll.dbghelp.SymUnloadModule64
    fn.argtypes = [HANDLE, DWORD64]
    fn.restype = bool
    fn.errcheck = RaiseIfZero  # raises on FALSE
    fn(hProcess, BaseOfDll)
# BOOL WINAPI SymGetModuleInfo(
# __in HANDLE hProcess,
# __in DWORD dwAddr,
# __out PIMAGEHLP_MODULE ModuleInfo
# );
def SymGetModuleInfoA(hProcess, dwAddr):
    """Return an IMAGEHLP_MODULE describing the module containing dwAddr (ANSI)."""
    fn = windll.dbghelp.SymGetModuleInfo
    fn.argtypes = [HANDLE, DWORD, PIMAGEHLP_MODULE]
    fn.restype = bool
    fn.errcheck = RaiseIfZero
    info = IMAGEHLP_MODULE()
    info.SizeOfStruct = sizeof(info)  # required by the API before the call
    fn(hProcess, dwAddr, byref(info))
    return info
def SymGetModuleInfoW(hProcess, dwAddr):
    """Return an IMAGEHLP_MODULEW describing the module containing dwAddr (Unicode)."""
    fn = windll.dbghelp.SymGetModuleInfoW
    fn.argtypes = [HANDLE, DWORD, PIMAGEHLP_MODULEW]
    fn.restype = bool
    fn.errcheck = RaiseIfZero
    info = IMAGEHLP_MODULEW()
    info.SizeOfStruct = sizeof(info)  # required by the API before the call
    fn(hProcess, dwAddr, byref(info))
    return info

SymGetModuleInfo = GuessStringType(SymGetModuleInfoA, SymGetModuleInfoW)
# BOOL WINAPI SymGetModuleInfo64(
# __in HANDLE hProcess,
# __in DWORD64 dwAddr,
# __out PIMAGEHLP_MODULE64 ModuleInfo
# );
def SymGetModuleInfo64A(hProcess, dwAddr):
    """Return an IMAGEHLP_MODULE64 describing the module containing dwAddr (ANSI)."""
    fn = windll.dbghelp.SymGetModuleInfo64
    fn.argtypes = [HANDLE, DWORD64, PIMAGEHLP_MODULE64]
    fn.restype = bool
    fn.errcheck = RaiseIfZero
    info = IMAGEHLP_MODULE64()
    info.SizeOfStruct = sizeof(info)  # required by the API before the call
    fn(hProcess, dwAddr, byref(info))
    return info
def SymGetModuleInfo64W(hProcess, dwAddr):
    """Return an IMAGEHLP_MODULEW64 describing the module containing
    dwAddr (Unicode, 64-bit base address).

    BUGFIX: this wrapper previously referenced the nonexistent names
    IMAGEHLP_MODULE64W / PIMAGEHLP_MODULE64W — the types defined above
    are IMAGEHLP_MODULEW64 / PIMAGEHLP_MODULEW64 — so every call died
    with a NameError before reaching dbghelp.
    """
    _SymGetModuleInfo64W = windll.dbghelp.SymGetModuleInfo64W
    _SymGetModuleInfo64W.argtypes = [HANDLE, DWORD64, PIMAGEHLP_MODULEW64]
    _SymGetModuleInfo64W.restype = bool
    _SymGetModuleInfo64W.errcheck = RaiseIfZero
    ModuleInfo = IMAGEHLP_MODULEW64()
    ModuleInfo.SizeOfStruct = sizeof(ModuleInfo)  # required before the call
    _SymGetModuleInfo64W(hProcess, dwAddr, byref(ModuleInfo))
    return ModuleInfo

SymGetModuleInfo64 = GuessStringType(SymGetModuleInfo64A, SymGetModuleInfo64W)
# BOOL CALLBACK SymEnumerateModulesProc(
# __in PCTSTR ModuleName,
# __in DWORD BaseOfDll,
# __in_opt PVOID UserContext
# );
# ctypes prototypes for SymEnumerateModulesProc callbacks: the callback
# receives the module name, its base address (DWORD) and the
# caller-supplied context pointer.
PSYM_ENUMMODULES_CALLBACK = WINFUNCTYPE(BOOL, LPSTR, DWORD, PVOID)
PSYM_ENUMMODULES_CALLBACKW = WINFUNCTYPE(BOOL, LPWSTR, DWORD, PVOID)

# BOOL CALLBACK SymEnumerateModulesProc64(
#   __in      PCTSTR ModuleName,
#   __in      DWORD64 BaseOfDll,
#   __in_opt  PVOID UserContext
# );
# 64-bit variants: the module base address is a DWORD64.
PSYM_ENUMMODULES_CALLBACK64 = WINFUNCTYPE(BOOL, LPSTR, DWORD64, PVOID)
PSYM_ENUMMODULES_CALLBACKW64 = WINFUNCTYPE(BOOL, LPWSTR, DWORD64, PVOID)
# BOOL WINAPI SymEnumerateModules(
# __in HANDLE hProcess,
# __in PSYM_ENUMMODULES_CALLBACK EnumModulesCallback,
# __in_opt PVOID UserContext
# );
def SymEnumerateModulesA(hProcess, EnumModulesCallback, UserContext = None):
    """Enumerate loaded modules (ANSI), calling EnumModulesCallback per module."""
    fn = windll.dbghelp.SymEnumerateModules
    fn.argtypes = [HANDLE, PSYM_ENUMMODULES_CALLBACK, PVOID]
    fn.restype = bool
    fn.errcheck = RaiseIfZero
    callback = PSYM_ENUMMODULES_CALLBACK(EnumModulesCallback)
    if UserContext:
        context = ctypes.pointer(UserContext)
    else:
        context = LPVOID(NULL)
    fn(hProcess, callback, context)
def SymEnumerateModulesW(hProcess, EnumModulesCallback, UserContext = None):
    """Enumerate loaded modules (Unicode) via dbghelp!SymEnumerateModulesW,
    invoking EnumModulesCallback once per module.

    UserContext, if given, is passed to the callback by pointer;
    otherwise a NULL pointer is passed.  Raises on failure.
    """
    fn = windll.dbghelp.SymEnumerateModulesW
    fn.argtypes = [HANDLE, PSYM_ENUMMODULES_CALLBACKW, PVOID]
    fn.restype  = bool
    fn.errcheck = RaiseIfZero
    callback = PSYM_ENUMMODULES_CALLBACKW(EnumModulesCallback)
    context = ctypes.pointer(UserContext) if UserContext else LPVOID(NULL)
    fn(hProcess, callback, context)
# Dispatcher that picks the ANSI or Wide variant based on argument types.
SymEnumerateModules = GuessStringType(SymEnumerateModulesA, SymEnumerateModulesW)
# BOOL WINAPI SymEnumerateModules64(
#   __in HANDLE hProcess,
#   __in PSYM_ENUMMODULES_CALLBACK64 EnumModulesCallback,
#   __in_opt PVOID UserContext
# );
def SymEnumerateModules64A(hProcess, EnumModulesCallback, UserContext = None):
    """Enumerate loaded modules (ANSI, 64-bit bases) via
    dbghelp!SymEnumerateModules64, invoking EnumModulesCallback per module.

    UserContext, if given, is passed to the callback by pointer;
    otherwise a NULL pointer is passed.  Raises on failure.
    """
    fn = windll.dbghelp.SymEnumerateModules64
    fn.argtypes = [HANDLE, PSYM_ENUMMODULES_CALLBACK64, PVOID]
    fn.restype  = bool
    fn.errcheck = RaiseIfZero
    callback = PSYM_ENUMMODULES_CALLBACK64(EnumModulesCallback)
    context = ctypes.pointer(UserContext) if UserContext else LPVOID(NULL)
    fn(hProcess, callback, context)
def SymEnumerateModules64W(hProcess, EnumModulesCallback, UserContext = None):
    """Enumerate loaded modules (Unicode, 64-bit bases) via
    dbghelp!SymEnumerateModules64W, invoking EnumModulesCallback per module.

    UserContext, if given, is passed to the callback by pointer;
    otherwise a NULL pointer is passed.  Raises on failure.
    """
    _SymEnumerateModules64W = windll.dbghelp.SymEnumerateModules64W
    # BUGFIX: the prototype defined above is PSYM_ENUMMODULES_CALLBACKW64,
    # not PSYM_ENUMMODULES_CALLBACK64W (which would raise NameError here).
    _SymEnumerateModules64W.argtypes = [HANDLE, PSYM_ENUMMODULES_CALLBACKW64, PVOID]
    _SymEnumerateModules64W.restype = bool
    _SymEnumerateModules64W.errcheck = RaiseIfZero
    EnumModulesCallback = PSYM_ENUMMODULES_CALLBACKW64(EnumModulesCallback)
    if UserContext:
        UserContext = ctypes.pointer(UserContext)
    else:
        UserContext = LPVOID(NULL)
    _SymEnumerateModules64W(hProcess, EnumModulesCallback, UserContext)
# Dispatcher that picks the ANSI or Wide variant based on argument types.
SymEnumerateModules64 = GuessStringType(SymEnumerateModules64A, SymEnumerateModules64W)
# Callback prototypes for SymEnumerateSymbols (32-bit addresses).
# BOOL CALLBACK SymEnumerateSymbolsProc(
#   __in PCTSTR SymbolName,
#   __in DWORD SymbolAddress,
#   __in ULONG SymbolSize,
#   __in_opt PVOID UserContext
# );
PSYM_ENUMSYMBOLS_CALLBACK = WINFUNCTYPE(BOOL, LPSTR, DWORD, ULONG, PVOID)
PSYM_ENUMSYMBOLS_CALLBACKW = WINFUNCTYPE(BOOL, LPWSTR, DWORD, ULONG, PVOID)
# Callback prototypes for SymEnumerateSymbols64 (64-bit addresses).
# BOOL CALLBACK SymEnumerateSymbolsProc64(
#   __in PCTSTR SymbolName,
#   __in DWORD64 SymbolAddress,
#   __in ULONG SymbolSize,
#   __in_opt PVOID UserContext
# );
PSYM_ENUMSYMBOLS_CALLBACK64 = WINFUNCTYPE(BOOL, LPSTR, DWORD64, ULONG, PVOID)
PSYM_ENUMSYMBOLS_CALLBACKW64 = WINFUNCTYPE(BOOL, LPWSTR, DWORD64, ULONG, PVOID)
# BOOL WINAPI SymEnumerateSymbols(
#   __in HANDLE hProcess,
#   __in ULONG BaseOfDll,
#   __in PSYM_ENUMSYMBOLS_CALLBACK EnumSymbolsCallback,
#   __in_opt PVOID UserContext
# );
def SymEnumerateSymbolsA(hProcess, BaseOfDll, EnumSymbolsCallback, UserContext = None):
    """Enumerate the symbols of a module (ANSI) via
    dbghelp!SymEnumerateSymbols, invoking EnumSymbolsCallback per symbol.

    UserContext, if given, is passed to the callback by pointer;
    otherwise a NULL pointer is passed.  Raises on failure.
    """
    fn = windll.dbghelp.SymEnumerateSymbols
    fn.argtypes = [HANDLE, ULONG, PSYM_ENUMSYMBOLS_CALLBACK, PVOID]
    fn.restype  = bool
    fn.errcheck = RaiseIfZero
    callback = PSYM_ENUMSYMBOLS_CALLBACK(EnumSymbolsCallback)
    context = ctypes.pointer(UserContext) if UserContext else LPVOID(NULL)
    fn(hProcess, BaseOfDll, callback, context)
def SymEnumerateSymbolsW(hProcess, BaseOfDll, EnumSymbolsCallback, UserContext = None):
    """Enumerate the symbols of a module (Unicode) via
    dbghelp!SymEnumerateSymbolsW, invoking EnumSymbolsCallback per symbol.

    UserContext, if given, is passed to the callback by pointer;
    otherwise a NULL pointer is passed.  Raises on failure.
    """
    fn = windll.dbghelp.SymEnumerateSymbolsW
    fn.argtypes = [HANDLE, ULONG, PSYM_ENUMSYMBOLS_CALLBACKW, PVOID]
    fn.restype  = bool
    fn.errcheck = RaiseIfZero
    callback = PSYM_ENUMSYMBOLS_CALLBACKW(EnumSymbolsCallback)
    context = ctypes.pointer(UserContext) if UserContext else LPVOID(NULL)
    fn(hProcess, BaseOfDll, callback, context)
# Dispatcher that picks the ANSI or Wide variant based on argument types.
SymEnumerateSymbols = GuessStringType(SymEnumerateSymbolsA, SymEnumerateSymbolsW)
# BOOL WINAPI SymEnumerateSymbols64(
#   __in HANDLE hProcess,
#   __in ULONG64 BaseOfDll,
#   __in PSYM_ENUMSYMBOLS_CALLBACK64 EnumSymbolsCallback,
#   __in_opt PVOID UserContext
# );
def SymEnumerateSymbols64A(hProcess, BaseOfDll, EnumSymbolsCallback, UserContext = None):
    """Enumerate the symbols of a module (ANSI, 64-bit addresses) via
    dbghelp!SymEnumerateSymbols64, invoking EnumSymbolsCallback per symbol.

    UserContext, if given, is passed to the callback by pointer;
    otherwise a NULL pointer is passed.  Raises on failure.
    """
    fn = windll.dbghelp.SymEnumerateSymbols64
    fn.argtypes = [HANDLE, ULONG64, PSYM_ENUMSYMBOLS_CALLBACK64, PVOID]
    fn.restype  = bool
    fn.errcheck = RaiseIfZero
    callback = PSYM_ENUMSYMBOLS_CALLBACK64(EnumSymbolsCallback)
    context = ctypes.pointer(UserContext) if UserContext else LPVOID(NULL)
    fn(hProcess, BaseOfDll, callback, context)
def SymEnumerateSymbols64W(hProcess, BaseOfDll, EnumSymbolsCallback, UserContext = None):
    """Enumerate the symbols of a module (Unicode, 64-bit addresses) via
    dbghelp!SymEnumerateSymbols64W, invoking EnumSymbolsCallback per symbol.

    UserContext, if given, is passed to the callback by pointer;
    otherwise a NULL pointer is passed.  Raises on failure.
    """
    _SymEnumerateSymbols64W = windll.dbghelp.SymEnumerateSymbols64W
    # BUGFIX: the prototype defined above is PSYM_ENUMSYMBOLS_CALLBACKW64,
    # not PSYM_ENUMSYMBOLS_CALLBACK64W (which would raise NameError here).
    _SymEnumerateSymbols64W.argtypes = [HANDLE, ULONG64, PSYM_ENUMSYMBOLS_CALLBACKW64, PVOID]
    _SymEnumerateSymbols64W.restype = bool
    _SymEnumerateSymbols64W.errcheck = RaiseIfZero
    EnumSymbolsCallback = PSYM_ENUMSYMBOLS_CALLBACKW64(EnumSymbolsCallback)
    if UserContext:
        UserContext = ctypes.pointer(UserContext)
    else:
        UserContext = LPVOID(NULL)
    _SymEnumerateSymbols64W(hProcess, BaseOfDll, EnumSymbolsCallback, UserContext)
# Dispatcher that picks the ANSI or Wide variant based on argument types.
SymEnumerateSymbols64 = GuessStringType(SymEnumerateSymbols64A, SymEnumerateSymbols64W)
# DWORD WINAPI UnDecorateSymbolName(
#   __in PCTSTR DecoratedName,
#   __out PTSTR UnDecoratedName,
#   __in DWORD UndecoratedLength,
#   __in DWORD Flags
# );
def UnDecorateSymbolNameA(DecoratedName, Flags = UNDNAME_COMPLETE):
    """Undecorate (demangle) a C++ symbol name (ANSI) via
    dbghelp!UnDecorateSymbolName and return the plain string.

    Calls the API twice: once with a NULL buffer to learn the required
    length, then again with a buffer of that size.  Raises on failure.
    """
    fn = windll.dbghelp.UnDecorateSymbolName
    fn.argtypes = [LPSTR, LPSTR, DWORD, DWORD]
    fn.restype  = DWORD
    fn.errcheck = RaiseIfZero
    needed = fn(DecoratedName, None, 0, Flags)
    buf = ctypes.create_string_buffer('', needed + 1)
    fn(DecoratedName, buf, needed, Flags)
    return buf.value
def UnDecorateSymbolNameW(DecoratedName, Flags = UNDNAME_COMPLETE):
    """Undecorate (demangle) a C++ symbol name (Unicode) via
    dbghelp!UnDecorateSymbolNameW and return the plain string.

    Calls the API twice: once with a NULL buffer to learn the required
    length, then again with a buffer of that size.  Raises on failure.
    """
    fn = windll.dbghelp.UnDecorateSymbolNameW
    fn.argtypes = [LPWSTR, LPWSTR, DWORD, DWORD]
    fn.restype  = DWORD
    fn.errcheck = RaiseIfZero
    needed = fn(DecoratedName, None, 0, Flags)
    buf = ctypes.create_unicode_buffer(u'', needed + 1)
    fn(DecoratedName, buf, needed, Flags)
    return buf.value
# Dispatcher that picks the ANSI or Wide variant based on argument types.
UnDecorateSymbolName = GuessStringType(UnDecorateSymbolNameA, UnDecorateSymbolNameW)
# BOOL WINAPI SymGetSearchPath(
#   __in HANDLE hProcess,
#   __out PTSTR SearchPath,
#   __in DWORD SearchPathLength
# );
def SymGetSearchPathA(hProcess):
    """Return the current symbol search path (ANSI) via
    dbghelp!SymGetSearchPath.  Raises on failure.

    Note: the buffer is capped at MAX_PATH characters.
    """
    fn = windll.dbghelp.SymGetSearchPath
    fn.argtypes = [HANDLE, LPSTR, DWORD]
    fn.restype  = bool
    fn.errcheck = RaiseIfZero
    buf = ctypes.create_string_buffer("", MAX_PATH)
    fn(hProcess, buf, MAX_PATH)
    return buf.value
def SymGetSearchPathW(hProcess):
    """Return the current symbol search path (Unicode) via
    dbghelp!SymGetSearchPathW.  Raises on failure.

    Note: the buffer is capped at MAX_PATH characters.
    """
    fn = windll.dbghelp.SymGetSearchPathW
    fn.argtypes = [HANDLE, LPWSTR, DWORD]
    fn.restype  = bool
    fn.errcheck = RaiseIfZero
    buf = ctypes.create_unicode_buffer(u"", MAX_PATH)
    fn(hProcess, buf, MAX_PATH)
    return buf.value
# Dispatcher that picks the ANSI or Wide variant based on argument types.
SymGetSearchPath = GuessStringType(SymGetSearchPathA, SymGetSearchPathW)
# BOOL WINAPI SymSetSearchPath(
#   __in HANDLE hProcess,
#   __in_opt PCTSTR SearchPath
# );
def SymSetSearchPathA(hProcess, SearchPath = None):
    """Set the symbol search path (ANSI) via dbghelp!SymSetSearchPath.

    An empty or None SearchPath resets to the default path (NULL is
    passed to the API).  Raises on failure.
    """
    fn = windll.dbghelp.SymSetSearchPath
    fn.argtypes = [HANDLE, LPSTR]
    fn.restype  = bool
    fn.errcheck = RaiseIfZero
    fn(hProcess, SearchPath if SearchPath else None)
def SymSetSearchPathW(hProcess, SearchPath = None):
    """Set the symbol search path (Unicode) via dbghelp!SymSetSearchPathW.

    An empty or None SearchPath resets to the default path (NULL is
    passed to the API).  Raises on failure.
    """
    fn = windll.dbghelp.SymSetSearchPathW
    fn.argtypes = [HANDLE, LPWSTR]
    fn.restype  = bool
    fn.errcheck = RaiseIfZero
    fn(hProcess, SearchPath if SearchPath else None)
# Dispatcher that picks the ANSI or Wide variant based on argument types.
SymSetSearchPath = GuessStringType(SymSetSearchPathA, SymSetSearchPathW)
# PTCHAR WINAPI SymGetHomeDirectory(
#   __in DWORD type,
#   __out PTSTR dir,
#   __in size_t size
# );
def SymGetHomeDirectoryA(type):
    """Return a dbghelp home directory (ANSI) via
    dbghelp!SymGetHomeDirectoryA.  Raises on failure.

    Note: the buffer is capped at MAX_PATH characters.
    """
    fn = windll.dbghelp.SymGetHomeDirectoryA
    fn.argtypes = [DWORD, LPSTR, SIZE_T]
    fn.restype  = LPSTR
    fn.errcheck = RaiseIfZero
    buf = ctypes.create_string_buffer("", MAX_PATH)
    fn(type, buf, MAX_PATH)
    return buf.value
def SymGetHomeDirectoryW(type):
    """Return a dbghelp home directory (Unicode) via
    dbghelp!SymGetHomeDirectoryW.  Raises on failure.

    Note: the buffer is capped at MAX_PATH characters.
    """
    fn = windll.dbghelp.SymGetHomeDirectoryW
    fn.argtypes = [DWORD, LPWSTR, SIZE_T]
    fn.restype  = LPWSTR
    fn.errcheck = RaiseIfZero
    buf = ctypes.create_unicode_buffer(u"", MAX_PATH)
    fn(type, buf, MAX_PATH)
    return buf.value
# Dispatcher that picks the ANSI or Wide variant based on argument types.
SymGetHomeDirectory = GuessStringType(SymGetHomeDirectoryA, SymGetHomeDirectoryW)
# PTCHAR WINAPI SymSetHomeDirectory(
#   __in HANDLE hProcess,
#   __in_opt PCTSTR dir
# );
def SymSetHomeDirectoryA(hProcess, dir = None):
    """Set the dbghelp home directory (ANSI) via
    dbghelp!SymSetHomeDirectoryA.

    An empty or None dir resets to the default (NULL is passed to the
    API).  Returns the directory that was passed in.  Raises on failure.
    """
    fn = windll.dbghelp.SymSetHomeDirectoryA
    fn.argtypes = [HANDLE, LPSTR]
    fn.restype  = LPSTR
    fn.errcheck = RaiseIfZero
    directory = dir if dir else None
    fn(hProcess, directory)
    return directory
def SymSetHomeDirectoryW(hProcess, dir = None):
    """Set the dbghelp home directory (Unicode) via
    dbghelp!SymSetHomeDirectoryW.

    An empty or None dir resets to the default (NULL is passed to the
    API).  Returns the directory that was passed in.  Raises on failure.
    """
    fn = windll.dbghelp.SymSetHomeDirectoryW
    fn.argtypes = [HANDLE, LPWSTR]
    fn.restype  = LPWSTR
    fn.errcheck = RaiseIfZero
    directory = dir if dir else None
    fn(hProcess, directory)
    return directory
# Dispatcher that picks the ANSI or Wide variant based on argument types.
SymSetHomeDirectory = GuessStringType(SymSetHomeDirectoryA, SymSetHomeDirectoryW)
#--- DbgHelp 5+ support, patch by Neitsa --------------------------------------
# XXX TODO
# + use the GuessStringType decorator for ANSI/Wide versions
# + replace hardcoded struct sizes with sizeof() calls
# + StackWalk64 should raise on error, but something has to be done about it
#   not setting the last error code (maybe we should call SetLastError
#   ourselves with a default error code?)
# /Mario
# Maximum length of a symbol name, used to size the Name buffers below.
MAX_SYM_NAME = 2000
class SYM_INFO(Structure):
    # ANSI counterpart of the Win32 SYMBOL_INFO structure.  The Name field
    # is declared with a fixed MAX_SYM_NAME buffer instead of the C
    # variable-length "CHAR Name[1]" trailer; callers still set
    # SizeOfStruct to the fixed C header size (88), not sizeof(SYM_INFO).
    # Field order and types mirror the C layout -- do not reorder.
    _fields_ = [
        ("SizeOfStruct",    ULONG),
        ("TypeIndex",       ULONG),
        ("Reserved",        ULONG64 * 2),
        ("Index",           ULONG),
        ("Size",            ULONG),
        ("ModBase",         ULONG64),
        ("Flags",           ULONG),
        ("Value",           ULONG64),
        ("Address",         ULONG64),
        ("Register",        ULONG),
        ("Scope",           ULONG),
        ("Tag",             ULONG),
        ("NameLen",         ULONG),
        ("MaxNameLen",      ULONG),
        ("Name",            CHAR * (MAX_SYM_NAME + 1)),
    ]
PSYM_INFO = POINTER(SYM_INFO)
class SYM_INFOW(Structure):
    # Unicode counterpart of SYM_INFO (SYMBOL_INFOW); identical layout
    # except the Name buffer holds WCHARs.  Field order mirrors the C
    # layout -- do not reorder.
    _fields_ = [
        ("SizeOfStruct",    ULONG),
        ("TypeIndex",       ULONG),
        ("Reserved",        ULONG64 * 2),
        ("Index",           ULONG),
        ("Size",            ULONG),
        ("ModBase",         ULONG64),
        ("Flags",           ULONG),
        ("Value",           ULONG64),
        ("Address",         ULONG64),
        ("Register",        ULONG),
        ("Scope",           ULONG),
        ("Tag",             ULONG),
        ("NameLen",         ULONG),
        ("MaxNameLen",      ULONG),
        ("Name",            WCHAR * (MAX_SYM_NAME + 1)),
    ]
PSYM_INFOW = POINTER(SYM_INFOW)
#===============================================================================
# BOOL WINAPI SymFromName(
# __in HANDLE hProcess,
# __in PCTSTR Name,
# __inout PSYMBOL_INFO Symbol
# );
#===============================================================================
def SymFromName(hProcess, Name):
    """Look up a symbol by name (ANSI) via dbghelp!SymFromName and return
    the populated SYM_INFO structure.  Raises on failure.
    """
    fn = windll.dbghelp.SymFromName
    fn.argtypes = [HANDLE, LPSTR, PSYM_INFO]
    fn.restype  = bool
    fn.errcheck = RaiseIfZero
    info = SYM_INFO()
    info.SizeOfStruct = 88  # *don't modify*: sizeof(SYMBOL_INFO) in C.
    info.MaxNameLen = MAX_SYM_NAME
    fn(hProcess, Name, byref(info))
    return info
def SymFromNameW(hProcess, Name):
    """Look up a symbol by name (Unicode) via dbghelp!SymFromNameW and
    return the populated SYM_INFOW structure.  Raises on failure.
    """
    fn = windll.dbghelp.SymFromNameW
    fn.argtypes = [HANDLE, LPWSTR, PSYM_INFOW]
    fn.restype  = bool
    fn.errcheck = RaiseIfZero
    info = SYM_INFOW()
    info.SizeOfStruct = 88  # *don't modify*: sizeof(SYMBOL_INFOW) in C.
    info.MaxNameLen = MAX_SYM_NAME
    fn(hProcess, Name, byref(info))
    return info
#===============================================================================
# BOOL WINAPI SymFromAddr(
# __in HANDLE hProcess,
# __in DWORD64 Address,
# __out_opt PDWORD64 Displacement,
# __inout PSYMBOL_INFO Symbol
# );
#===============================================================================
def SymFromAddr(hProcess, Address):
    """Look up the symbol containing an address (ANSI) via
    dbghelp!SymFromAddr.

    Returns a (displacement, SYM_INFO) tuple, where displacement is the
    offset of Address from the symbol's start.  Raises on failure.
    """
    fn = windll.dbghelp.SymFromAddr
    fn.argtypes = [HANDLE, DWORD64, PDWORD64, PSYM_INFO]
    fn.restype  = bool
    fn.errcheck = RaiseIfZero
    info = SYM_INFO()
    info.SizeOfStruct = 88  # *don't modify*: sizeof(SYMBOL_INFO) in C.
    info.MaxNameLen = MAX_SYM_NAME
    disp = DWORD64(0)
    fn(hProcess, Address, byref(disp), byref(info))
    return (disp.value, info)
def SymFromAddrW(hProcess, Address):
    """Look up the symbol containing an address (Unicode) via
    dbghelp!SymFromAddrW.

    Returns a (displacement, SYM_INFOW) tuple, where displacement is the
    offset of Address from the symbol's start.  Raises on failure.
    """
    fn = windll.dbghelp.SymFromAddrW
    fn.argtypes = [HANDLE, DWORD64, PDWORD64, PSYM_INFOW]
    fn.restype  = bool
    fn.errcheck = RaiseIfZero
    info = SYM_INFOW()
    info.SizeOfStruct = 88  # *don't modify*: sizeof(SYMBOL_INFOW) in C.
    info.MaxNameLen = MAX_SYM_NAME
    disp = DWORD64(0)
    fn(hProcess, Address, byref(disp), byref(info))
    return (disp.value, info)
#===============================================================================
# typedef struct _IMAGEHLP_SYMBOL64 {
# DWORD SizeOfStruct;
# DWORD64 Address;
# DWORD Size;
# DWORD Flags;
# DWORD MaxNameLength;
# CHAR Name[1];
# } IMAGEHLP_SYMBOL64, *PIMAGEHLP_SYMBOL64;
#===============================================================================
class IMAGEHLP_SYMBOL64 (Structure):
    # ANSI IMAGEHLP_SYMBOL64; the Name field is a fixed MAX_SYM_NAME
    # buffer instead of the C variable-length "CHAR Name[1]" trailer.
    # Field order mirrors the C layout -- do not reorder.
    _fields_ = [
        ("SizeOfStruct",    DWORD),
        ("Address",         DWORD64),
        ("Size",            DWORD),
        ("Flags",           DWORD),
        ("MaxNameLength",   DWORD),
        ("Name",            CHAR * (MAX_SYM_NAME + 1)),
    ]
PIMAGEHLP_SYMBOL64 = POINTER(IMAGEHLP_SYMBOL64)
#===============================================================================
# typedef struct _IMAGEHLP_SYMBOLW64 {
# DWORD SizeOfStruct;
# DWORD64 Address;
# DWORD Size;
# DWORD Flags;
# DWORD MaxNameLength;
# WCHAR Name[1];
# } IMAGEHLP_SYMBOLW64, *PIMAGEHLP_SYMBOLW64;
#===============================================================================
class IMAGEHLP_SYMBOLW64 (Structure):
    # Unicode IMAGEHLP_SYMBOLW64; identical layout to IMAGEHLP_SYMBOL64
    # except the Name buffer holds WCHARs.  Do not reorder fields.
    _fields_ = [
        ("SizeOfStruct",    DWORD),
        ("Address",         DWORD64),
        ("Size",            DWORD),
        ("Flags",           DWORD),
        ("MaxNameLength",   DWORD),
        ("Name",            WCHAR * (MAX_SYM_NAME + 1)),
    ]
PIMAGEHLP_SYMBOLW64 = POINTER(IMAGEHLP_SYMBOLW64)
#===============================================================================
# BOOL WINAPI SymGetSymFromAddr64(
# __in HANDLE hProcess,
# __in DWORD64 Address,
# __out_opt PDWORD64 Displacement,
# __inout PIMAGEHLP_SYMBOL64 Symbol
# );
#===============================================================================
def SymGetSymFromAddr64(hProcess, Address):
    """Look up the symbol containing an address via the legacy
    dbghelp!SymGetSymFromAddr64 API.

    Returns a (displacement, IMAGEHLP_SYMBOL64) tuple.  Raises on failure.
    """
    _SymGetSymFromAddr64 = windll.dbghelp.SymGetSymFromAddr64
    _SymGetSymFromAddr64.argtypes = [HANDLE, DWORD64, PDWORD64, PIMAGEHLP_SYMBOL64]
    _SymGetSymFromAddr64.restype = bool
    _SymGetSymFromAddr64.errcheck = RaiseIfZero
    imagehlp_symbol64 = IMAGEHLP_SYMBOL64()
    imagehlp_symbol64.SizeOfStruct = 32 # *don't modify*: sizeof(IMAGEHLP_SYMBOL64) in C.
    # BUGFIX: the structure field is MaxNameLength, not MaxNameLen.
    # Assigning MaxNameLen merely created a Python attribute and left the
    # real field at 0, so the API would truncate every symbol name.
    imagehlp_symbol64.MaxNameLength = MAX_SYM_NAME
    Displacement = DWORD64(0)
    _SymGetSymFromAddr64(hProcess, Address, byref(Displacement), byref(imagehlp_symbol64))
    return (Displacement.value, imagehlp_symbol64)
#TODO: check for the 'W' version of SymGetSymFromAddr64()
#===============================================================================
# typedef struct API_VERSION {
# USHORT MajorVersion;
# USHORT MinorVersion;
# USHORT Revision;
# USHORT Reserved;
# } API_VERSION, *LPAPI_VERSION;
#===============================================================================
class API_VERSION (Structure):
    # Mirrors the dbghelp API_VERSION structure returned by
    # ImagehlpApiVersion(Ex).  Do not reorder fields.
    _fields_ = [
        ("MajorVersion",    USHORT),
        ("MinorVersion",    USHORT),
        ("Revision",        USHORT),
        ("Reserved",        USHORT),
    ]
PAPI_VERSION = POINTER(API_VERSION)
# The Win32 headers name the pointer type LPAPI_VERSION; keep both aliases.
LPAPI_VERSION = PAPI_VERSION
#===============================================================================
# LPAPI_VERSION WINAPI ImagehlpApiVersion(void);
#===============================================================================
def ImagehlpApiVersion():
    """Return the API_VERSION structure describing the loaded dbghelp.dll,
    via dbghelp!ImagehlpApiVersion."""
    fn = windll.dbghelp.ImagehlpApiVersion
    fn.restype = LPAPI_VERSION
    return fn().contents
#===============================================================================
# LPAPI_VERSION WINAPI ImagehlpApiVersionEx(
# __in LPAPI_VERSION AppVersion
# );
#===============================================================================
def ImagehlpApiVersionEx(MajorVersion, MinorVersion, Revision):
    """Report the version the application expects to dbghelp and return
    the version dbghelp actually provides (dbghelp!ImagehlpApiVersionEx)."""
    fn = windll.dbghelp.ImagehlpApiVersionEx
    fn.argtypes = [LPAPI_VERSION]
    fn.restype  = LPAPI_VERSION
    requested = API_VERSION(MajorVersion, MinorVersion, Revision, 0)
    provided = fn(byref(requested))
    return provided.contents
#===============================================================================
# typedef enum {
# AddrMode1616,
# AddrMode1632,
# AddrModeReal,
# AddrModeFlat
# } ADDRESS_MODE;
#===============================================================================
# Values of the ADDRESS_MODE enumeration (see comment above).
AddrMode1616 = 0
AddrMode1632 = 1
AddrModeReal = 2
AddrModeFlat = 3
ADDRESS_MODE = DWORD  #needed for the size of an ADDRESS_MODE (see ADDRESS64)
#===============================================================================
# typedef struct _tagADDRESS64 {
# DWORD64 Offset;
# WORD Segment;
# ADDRESS_MODE Mode;
# } ADDRESS64, *LPADDRESS64;
#===============================================================================
class ADDRESS64 (Structure):
    # Mirrors the _tagADDRESS64 structure used by StackWalk64.
    _fields_ = [
        ("Offset",      DWORD64),
        ("Segment",     WORD),
        ("Mode",        ADDRESS_MODE),  # it's a member of the ADDRESS_MODE enum.
    ]
LPADDRESS64 = POINTER(ADDRESS64)
#===============================================================================
# typedef struct _KDHELP64 {
# DWORD64 Thread;
# DWORD ThCallbackStack;
# DWORD ThCallbackBStore;
# DWORD NextCallback;
# DWORD FramePointer;
# DWORD64 KiCallUserMode;
# DWORD64 KeUserCallbackDispatcher;
# DWORD64 SystemRangeStart;
# DWORD64 KiUserExceptionDispatcher;
# DWORD64 StackBase;
# DWORD64 StackLimit;
# DWORD64 Reserved[5];
# } KDHELP64, *PKDHELP64;
#===============================================================================
class KDHELP64 (Structure):
    # Mirrors the _KDHELP64 structure (kernel debugging helper data
    # embedded in STACKFRAME64).  Do not reorder fields.
    _fields_ = [
        ("Thread",                      DWORD64),
        ("ThCallbackStack",             DWORD),
        ("ThCallbackBStore",            DWORD),
        ("NextCallback",                DWORD),
        ("FramePointer",                DWORD),
        ("KiCallUserMode",              DWORD64),
        ("KeUserCallbackDispatcher",    DWORD64),
        ("SystemRangeStart",            DWORD64),
        ("KiUserExceptionDispatcher",   DWORD64),
        ("StackBase",                   DWORD64),
        ("StackLimit",                  DWORD64),
        ("Reserved",                    DWORD64 * 5),
    ]
PKDHELP64 = POINTER(KDHELP64)
#===============================================================================
# typedef struct _tagSTACKFRAME64 {
# ADDRESS64 AddrPC;
# ADDRESS64 AddrReturn;
# ADDRESS64 AddrFrame;
# ADDRESS64 AddrStack;
# ADDRESS64 AddrBStore;
# PVOID FuncTableEntry;
# DWORD64 Params[4];
# BOOL Far;
# BOOL Virtual;
# DWORD64 Reserved[3];
# KDHELP64 KdHelp;
# } STACKFRAME64, *LPSTACKFRAME64;
#===============================================================================
class STACKFRAME64(Structure):
    # Mirrors the _tagSTACKFRAME64 structure passed in/out of StackWalk64.
    # Do not reorder fields.
    _fields_ = [
        ("AddrPC",          ADDRESS64),
        ("AddrReturn",      ADDRESS64),
        ("AddrFrame",       ADDRESS64),
        ("AddrStack",       ADDRESS64),
        ("AddrBStore",      ADDRESS64),
        ("FuncTableEntry",  PVOID),
        ("Params",          DWORD64 * 4),
        ("Far",             BOOL),
        ("Virtual",         BOOL),
        ("Reserved",        DWORD64 * 3),
        ("KdHelp",          KDHELP64),
    ]
LPSTACKFRAME64 = POINTER(STACKFRAME64)
#===============================================================================
# BOOL CALLBACK ReadProcessMemoryProc64(
# __in HANDLE hProcess,
# __in DWORD64 lpBaseAddress,
# __out PVOID lpBuffer,
# __in DWORD nSize,
# __out LPDWORD lpNumberOfBytesRead
# );
#===============================================================================
PREAD_PROCESS_MEMORY_ROUTINE64 = WINFUNCTYPE(BOOL, HANDLE, DWORD64, PVOID, DWORD, LPDWORD)
#===============================================================================
# PVOID CALLBACK FunctionTableAccessProc64(
#   __in HANDLE hProcess,
#   __in DWORD64 AddrBase
# );
#===============================================================================
PFUNCTION_TABLE_ACCESS_ROUTINE64 = WINFUNCTYPE(PVOID, HANDLE, DWORD64)
#===============================================================================
# DWORD64 CALLBACK GetModuleBaseProc64(
#   __in HANDLE hProcess,
#   __in DWORD64 Address
# );
#===============================================================================
PGET_MODULE_BASE_ROUTINE64 = WINFUNCTYPE(DWORD64, HANDLE, DWORD64)
#===============================================================================
# DWORD64 CALLBACK TranslateAddressProc64(
#   __in HANDLE hProcess,
#   __in DWORD64 Address
# );
# (The original comment here duplicated GetModuleBaseProc64 by mistake;
# this prototype is the address-translation callback of StackWalk64.)
#===============================================================================
PTRANSLATE_ADDRESS_ROUTINE64 = WINFUNCTYPE(DWORD64, HANDLE, DWORD64)
# Valid machine types for StackWalk64 function
IMAGE_FILE_MACHINE_I386     = 0x014c    #Intel x86
IMAGE_FILE_MACHINE_IA64     = 0x0200    #Intel Itanium Processor Family (IPF)
IMAGE_FILE_MACHINE_AMD64    = 0x8664    #x64 (AMD64 or EM64T)
#===============================================================================
# BOOL WINAPI StackWalk64(
# __in DWORD MachineType,
# __in HANDLE hProcess,
# __in HANDLE hThread,
# __inout LPSTACKFRAME64 StackFrame,
# __inout PVOID ContextRecord,
# __in_opt PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine,
# __in_opt PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
# __in_opt PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine,
# __in_opt PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress
# );
#===============================================================================
def StackWalk64(MachineType, hProcess, hThread, StackFrame,
                ContextRecord = None, ReadMemoryRoutine = None,
                FunctionTableAccessRoutine = None, GetModuleBaseRoutine = None,
                TranslateAddress = None):
    """Walk one stack frame of the given thread via dbghelp!StackWalk64.

    StackFrame is updated in place.  If ContextRecord is None the thread
    context is fetched with GetThreadContext.  Optional callbacks are
    wrapped in their WINFUNCTYPE prototypes; missing ones become NULL
    function pointers.

    Returns the raw BOOL result instead of raising: StackWalk64
    *doesn't* set the last error [GetLastError()] reliably, so
    RaiseIfZero cannot be used here.
    """
    _StackWalk64 = windll.dbghelp.StackWalk64
    _StackWalk64.argtypes = [DWORD, HANDLE, HANDLE, LPSTACKFRAME64, PVOID,
                             PREAD_PROCESS_MEMORY_ROUTINE64,
                             PFUNCTION_TABLE_ACCESS_ROUTINE64,
                             PGET_MODULE_BASE_ROUTINE64,
                             PTRANSLATE_ADDRESS_ROUTINE64]
    _StackWalk64.restype = bool

    def _wrap(routine, prototype):
        # Wrap a Python callable in its callback prototype, or build a
        # NULL function pointer when no callback was supplied.
        if routine:
            return prototype(routine)
        return ctypes.cast(None, prototype)

    pReadMemoryRoutine = _wrap(ReadMemoryRoutine,
                               PREAD_PROCESS_MEMORY_ROUTINE64)
    pFunctionTableAccessRoutine = _wrap(FunctionTableAccessRoutine,
                                        PFUNCTION_TABLE_ACCESS_ROUTINE64)
    pGetModuleBaseRoutine = _wrap(GetModuleBaseRoutine,
                                  PGET_MODULE_BASE_ROUTINE64)
    pTranslateAddress = _wrap(TranslateAddress,
                              PTRANSLATE_ADDRESS_ROUTINE64)

    if ContextRecord is None:
        ContextRecord = GetThreadContext(hThread, raw=True)
    pContextRecord = PCONTEXT(ContextRecord)

    return _StackWalk64(MachineType, hProcess, hThread, byref(StackFrame),
                        pContextRecord, pReadMemoryRoutine,
                        pFunctionTableAccessRoutine, pGetModuleBaseRoutine,
                        pTranslateAddress)
#==============================================================================
# This calculates the list of exported symbols.
# _all holds the names that existed before this module's definitions began;
# everything added since (minus private names) becomes the public API.
_all = set(vars().keys()).difference(_all)
__all__ = [_x for _x in _all if not _x.startswith('_')]
__all__.sort()
#==============================================================================
| |
#!/usr/bin/env python
#
# Copyright (c) 2016 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys, os, logging, collections, struct, json, threading, glob
import atexit
logging.basicConfig(level=logging.DEBUG)
import vpp_api
def eprint(*args, **kwargs):
    """Write diagnostic output to the standard error stream.

    Accepts the same positional/keyword arguments as print().
    """
    print(*args, file=sys.stderr, **kwargs)
def vpp_atexit(self):
    """atexit handler: drop the VPP connection at interpreter shutdown.

    Does nothing when the client is already disconnected; otherwise
    announces the cleanup on stderr and calls disconnect() so VPP can
    reclaim its message rings.
    """
    if not self.connected:
        return
    eprint('Cleaning up VPP on exit')
    self.disconnect()
class VPP():
"""VPP interface.
This class provides the APIs to VPP. The APIs are loaded
from provided .api.json files and makes functions accordingly.
These functions are documented in the VPP .api files, as they
are dynamically created.
Additionally, VPP can send callback messages; this class
provides a means to register a callback function to receive
these messages in a background thread.
"""
    def __init__(self, apifiles = None, testmode = False):
        """Create a VPP API object.

        apifiles is a list of files containing API
        descriptions that will be loaded - methods will be
        dynamically created reflecting these APIs.  If not
        provided this will load the API files from VPP's
        default install location.

        testmode, when true, suppresses the "no messages loaded"
        sanity check so the class can be unit-tested without any
        .api.json files present.
        """
        self.messages = {}
        self.id_names = []
        self.id_msgdef = []
        self.buffersize = 10000
        self.connected = False
        # VPP message header: 16-bit message id + 32-bit context, big-endian.
        self.header = struct.Struct('>HI')
        self.results_lock = threading.Lock()
        self.results = {}
        self.timeout = 5
        self.apifiles = []
        if not apifiles:
            # Pick up API definitions from default directory
            apifiles = glob.glob('/usr/share/vpp/api/*.api.json')
        for file in apifiles:
            with open(file) as apidef_file:
                api = json.load(apidef_file)
                # Types must be registered before the messages that use them.
                for t in api['types']:
                    self.add_type(t[0], t[1:])
                for m in api['messages']:
                    self.add_message(m[0], m[1:])
        self.apifiles = apifiles
        # Basic sanity check
        if len(self.messages) == 0 and not testmode:
            raise ValueError(1, 'Missing JSON message definitions')
        # Make sure we allow VPP to clean up the message rings.
        atexit.register(vpp_atexit, self)
    class ContextId(object):
        """Thread-safe provider of unique context IDs."""
        def __init__(self):
            # Monotonically increasing counter, guarded by a lock.
            self.context = 0
            self.lock = threading.Lock()
        def __call__(self):
            """Get a new unique (or, at least, not recently used) context."""
            with self.lock:
                self.context += 1
                return self.context
    # Shared across all VPP instances: context IDs are process-global.
    get_context = ContextId()
    def status(self):
        """Debug function: report current VPP API status to stdout."""
        print('Connected') if self.connected else print('Not Connected')
        print('Read API definitions from', ', '.join(self.apifiles))
    def __struct (self, t, n = None, e = -1, vl = None):
        """Create a packing structure for a message field.

        t  - field type name ('u8', 'u32', ... or a registered compound type)
        n  - field name (unused here; kept for signature parity with the
             JSON field tuples that are splatted into this method)
        e  - array length: -1 scalar, 0 old-style variable array, >0 fixed
        vl - name of the field holding the length of a variable-length array

        Returns either a struct.Struct (scalar / fixed byte array), a
        [length, packer] list (arrays), or a callable (compound types).
        """
        base_types = { 'u8' : 'B',
                       'u16' : 'H',
                       'u32' : 'I',
                       'i32' : 'i',
                       'u64' : 'Q',
                       'f64' : 'd',
                       }
        pack = None
        if t in base_types:
            pack = base_types[t]
            if not vl:
                if e > 0 and t == 'u8':
                    # Fixed byte array
                    return struct.Struct('>' + str(e) + 's')
                if e > 0:
                    # Fixed array of base type
                    return [e, struct.Struct('>' + base_types[t])]
                elif e == 0:
                    # Old style variable array
                    return [-1, struct.Struct('>' + base_types[t])]
            else:
                # Variable length array
                return [vl, struct.Struct('>s')] if t == 'u8' else \
                    [vl, struct.Struct('>' + base_types[t])]
            return struct.Struct('>' + base_types[t])
        if t in self.messages:
            ### Return a list in case of array ###
            if e > 0 and not vl:
                return [e, lambda self, encode, buf, offset, args: (
                    self.__struct_type(encode, self.messages[t], buf, offset,
                                       args))]
            if vl:
                return [vl, lambda self, encode, buf, offset, args: (
                    self.__struct_type(encode, self.messages[t], buf, offset,
                                       args))]
            elif e == 0:
                # Old style VLA
                raise NotImplementedError(1, 'No support for compound types ' + t)
            # Scalar compound type.
            return lambda self, encode, buf, offset, args: (
                self.__struct_type(encode, self.messages[t], buf, offset, args)
            )
        raise ValueError(1, 'Invalid message type: ' + t)
    def __struct_type(self, encode, msgdef, buf, offset, kwargs):
        """Get a message packer or unpacker.

        Dispatches to the encode or decode implementation; encode is a
        boolean selecting the direction.
        """
        if encode:
            return self.__struct_type_encode(msgdef, buf, offset, kwargs)
        else:
            return self.__struct_type_decode(msgdef, buf, offset)
    def __struct_type_encode(self, msgdef, buf, offset, kwargs):
        """Pack kwargs into buf at offset according to msgdef.

        Returns the number of bytes written.  Fields absent from kwargs
        are left zeroed (the buffer is pre-zeroed by the caller).
        """
        off = offset
        size = 0
        # Reject unknown field names up front.
        for k in kwargs:
            if k not in msgdef['args']:
                raise ValueError(1, 'Invalid field-name in message call ' + k)
        # Fields are packed in JSON declaration order (OrderedDict).
        for k,v in msgdef['args'].iteritems():
            off += size
            if k in kwargs:
                if type(v) is list:
                    if callable(v[1]):
                        # Array of compound type; v[0] is the count or the
                        # name of the field holding the count.
                        e = kwargs[v[0]] if v[0] in kwargs else v[0]
                        size = 0
                        for i in range(e):
                            size += v[1](self, True, buf, off + size,
                                         kwargs[k][i])
                    else:
                        # Array of base type; length from the length field
                        # if supplied, else from the value itself.
                        if v[0] in kwargs:
                            l = kwargs[v[0]]
                        else:
                            l = len(kwargs[k])
                        if v[1].size == 1:
                            # Byte array: copy in one slice assignment.
                            buf[off:off + l] = bytearray(kwargs[k])
                            size = l
                        else:
                            size = 0
                            for i in kwargs[k]:
                                v[1].pack_into(buf, off + size, i)
                                size += v[1].size
                else:
                    if callable(v):
                        # Scalar compound type.
                        size = v(self, True, buf, off, kwargs[k])
                    else:
                        v.pack_into(buf, off, kwargs[k])
                        size = v.size
            else:
                # Missing field: skip its fixed size (0 for arrays).
                size = v.size if not type(v) is list else 0
        return off + size - offset
    def __getitem__(self, name):
        """Return the message definition for name, or None if unknown."""
        if name in self.messages:
            return self.messages[name]
        return None
    def encode(self, msgdef, kwargs):
        """Pack kwargs per msgdef and return the encoded bytes."""
        # Make suitably large buffer
        buf = bytearray(self.buffersize)
        offset = 0
        size = self.__struct_type(True, msgdef, buf, offset, kwargs)
        return buf[:offset + size]
    def decode(self, msgdef, buf):
        """Unpack buf per msgdef; returns the message's named tuple."""
        return self.__struct_type(False, msgdef, buf, 0, None)[1]
    def __struct_type_decode(self, msgdef, buf, offset):
        """Unpack one message from buf at offset according to msgdef.

        Returns (bytes_consumed, named_tuple_of_field_values).  For
        variable-length arrays, v[2] holds the relative index of the
        already-decoded field that carries the element count.
        """
        res = []
        off = offset
        size = 0
        for k,v in msgdef['args'].iteritems():
            off += size
            if type(v) is list:
                lst = []
                if callable(v[1]):                  # compound type
                    size = 0
                    if v[0] in msgdef['args']:      # vla
                        e = res[v[2]]
                    else:                           # fixed array
                        e = v[0]
                    res.append(lst)
                    for i in range(e):
                        (s,l) = v[1](self, False, buf, off + size, None)
                        lst.append(l)
                        size += s
                    continue
                if v[1].size == 1:
                    # Byte array: old-style VLA consumes the buffer tail,
                    # otherwise the length comes from a previous field.
                    if type(v[0]) is int:
                        size = len(buf) - off
                    else:
                        size = res[v[2]]
                    res.append(buf[off:off + size])
                else:
                    e = v[0] if type(v[0]) is int else res[v[2]]
                    if e == -1:
                        # Old-style VLA of a base type: fill to end of buffer.
                        e = (len(buf) - off) / v[1].size
                    lst = []
                    res.append(lst)
                    size = 0
                    for i in range(e):
                        lst.append(v[1].unpack_from(buf, off + size)[0])
                        size += v[1].size
            else:
                if callable(v):
                    # Scalar compound type.
                    (s,l) = v(self, False, buf, off, None)
                    res.append(l)
                    size += s
                else:
                    res.append(v.unpack_from(buf, off)[0])
                    size = v.size
        return off + size - offset, msgdef['return_tuple']._make(res)
    def ret_tup(self, name):
        """Return the named-tuple factory for a message, or None."""
        if name in self.messages and 'return_tuple' in self.messages[name]:
            return self.messages[name]['return_tuple']
        return None
    def add_message(self, name, msgdef):
        """Register a message from its JSON definition.

        msgdef is the list of field tuples (type, name[, length[, length
        field]]) optionally followed by a {'crc': ...} dict.  Builds the
        per-field packers and the result named tuple, stores everything
        in self.messages[name] and returns that entry.
        """
        if name in self.messages:
            raise ValueError('Duplicate message name: ' + name)
        args = collections.OrderedDict()
        argtypes = collections.OrderedDict()
        fields = []
        msg = {}
        for i, f in enumerate(msgdef):
            if type(f) is dict and 'crc' in f:
                msg['crc'] = f['crc']
                continue
            field_type = f[0]
            field_name = f[1]
            # A zero-length (old style) VLA is only legal as the last field
            # (the CRC dict occupies the final msgdef slot).
            if len(f) == 3 and f[2] == 0 and i != len(msgdef) - 2:
                raise ValueError('Variable Length Array must be last: ' + name)
            args[field_name] = self.__struct(*f)
            argtypes[field_name] = field_type
            if len(f) == 4: # Find offset to # elements field
                args[field_name].append(args.keys().index(f[3]) - i)
            fields.append(field_name)
        msg['return_tuple'] = collections.namedtuple(name, fields,
                                                     rename = True)
        self.messages[name] = msg
        self.messages[name]['args'] = args
        self.messages[name]['argtypes'] = argtypes
        return self.messages[name]
    def add_type(self, name, typedef):
        """Register a compound type; stored under the vl_api_..._t name."""
        return self.add_message('vl_api_' + name + '_t', typedef)
    def make_function(self, name, i, msgdef, multipart, async):
        """Build the bound API-call function for message *name* (type index *i*).

        NOTE: 'async' as an identifier is Python 2 only (reserved word
        from Python 3.7).
        """
        if (async):
            f = lambda **kwargs: (self._call_vpp_async(i, msgdef, **kwargs))
        else:
            f = lambda **kwargs: (self._call_vpp(i, msgdef, multipart, **kwargs))
        args = self.messages[name]['args']
        argtypes = self.messages[name]['argtypes']
        # Expose the message name and its argument list for introspection.
        f.__name__ = str(name)
        f.__doc__ = ", ".join(["%s %s" % (argtypes[k], k) for k in args.keys()])
        return f
    def _register_functions(self, async=False):
        """Create one callable attribute per message known both locally and
        to the connected VPP, after verifying definition CRCs match.
        """
        self.id_names = [None] * (self.vpp_dictionary_maxid + 1)
        self.id_msgdef = [None] * (self.vpp_dictionary_maxid + 1)
        for name, msgdef in self.messages.iteritems():
            if name in self.vpp_dictionary:
                # Reject messages whose definition drifted from the server's.
                if self.messages[name]['crc'] != self.vpp_dictionary[name]['crc']:
                    raise ValueError(3, 'Failed CRC checksum ' + name +
                                     ' ' + self.messages[name]['crc'] +
                                     ' ' + self.vpp_dictionary[name]['crc'])
                i = self.vpp_dictionary[name]['id']
                self.id_msgdef[i] = msgdef
                self.id_names[i] = name
                # '_dump' messages produce multipart reply streams.
                multipart = True if name.find('_dump') > 0 else False
                setattr(self, name, self.make_function(name, i, msgdef, multipart, async))
    def _write (self, buf):
        """Send a binary-packed message to VPP.

        Raises IOError if the client is not connected.
        """
        if not self.connected:
            raise IOError(1, 'Not connected')
        return vpp_api.write(str(buf))
    def _load_dictionary(self):
        """Fetch the message-name -> {id, crc} table from the connected VPP."""
        self.vpp_dictionary = {}
        self.vpp_dictionary_maxid = 0
        d = vpp_api.msg_table()
        if not d:
            raise IOError(3, 'Cannot get VPP API dictionary')
        for i,n in d:
            # Table entries are '<name>_<crc>'; split off the trailing crc.
            name, crc = n.rsplit('_', 1)
            crc = '0x' + crc
            self.vpp_dictionary[name] = { 'id' : i, 'crc' : crc }
            self.vpp_dictionary_maxid = max(self.vpp_dictionary_maxid, i)
    def connect(self, name, chroot_prefix = None, async = False):
        """Attach to VPP.
        name - the name of the client.
        chroot_prefix - if VPP is chroot'ed, the prefix of the jail
        async - if true, messages are sent without waiting for a reply
        """
        # The handler chosen here runs on the background RX thread.
        msg_handler = self.msg_handler_sync if not async else self.msg_handler_async
        if chroot_prefix is not None:
            rv = vpp_api.connect(name, msg_handler, chroot_prefix)
        else:
            rv = vpp_api.connect(name, msg_handler)
        if rv != 0:
            raise IOError(2, 'Connect failed')
        self.connected = True
        self._load_dictionary()
        self._register_functions(async=async)
        # Initialise control ping
        # (its reply marks the end of multipart responses, see _call_vpp)
        self.control_ping_index = self.vpp_dictionary['control_ping']['id']
        self.control_ping_msgdef = self.messages['control_ping']
def disconnect(self):
"""Detach from VPP."""
rv = vpp_api.disconnect()
self.connected = False
return rv
def results_wait(self, context):
"""In a sync call, wait for the reply
The context ID is used to pair reply to request.
"""
# Results is filled by the background callback. It will
# raise the event when the context receives a response.
# Given there are two threads we have to be careful with the
# use of results and the structures under it, hence the lock.
with self.results_lock:
result = self.results[context]
ev = result['e']
timed_out = not ev.wait(self.timeout)
if timed_out:
raise IOError(3, 'Waiting for reply timed out')
else:
with self.results_lock:
result = self.results[context]
del self.results[context]
return result['r']
def results_prepare(self, context, multi=False):
"""Prep for receiving a result in response to a request msg
context - unique context number sent in request and
returned in reply or replies
multi - true if we expect multiple messages from this
reply.
"""
# The event is used to indicate that all results are in
new_result = {
'e': threading.Event(),
}
if multi:
# Make it clear to the BG thread it's going to see several
# messages; messages are stored in a results array
new_result['m'] = True
new_result['r'] = []
new_result['e'].clear()
# Put the prepped result structure into results, at which point
# the bg thread can also access it (hence the thread lock)
with self.results_lock:
self.results[context] = new_result
    def msg_handler_sync(self, msg):
        """Process an incoming message from VPP in sync mode.

        Runs on the background RX thread.  The message may be a reply
        (routed to the waiting caller via self.results) or an async
        notification (fed to the registered event callback).
        """
        r = self.decode_incoming_msg(msg)
        if r is None:
            return
        # If we have a context, then use the context to find any
        # request waiting for a reply
        context = 0
        if hasattr(r, 'context') and r.context > 0:
            context = r.context
        msgname = type(r).__name__
        if context == 0:
            # No context -> async notification that we feed to the callback
            if self.event_callback:
                self.event_callback(msgname, r)
        else:
            # Context -> use the results structure (carefully) to find
            # who we're responding to and return the message to that
            # thread
            with self.results_lock:
                if context not in self.results:
                    eprint('Not expecting results for this context', context, r)
                else:
                    result = self.results[context]
                    #
                    # Collect results until control ping
                    #
                    if msgname == 'control_ping_reply':
                        # End of a multipart
                        result['e'].set()
                    elif 'm' in self.results[context]:
                        # One element in a multipart
                        result['r'].append(r)
                    else:
                        # All of a single result
                        result['r'] = r
                        result['e'].set()
    def decode_incoming_msg(self, msg):
        """Decode a raw VPP message into a named tuple.

        Returns None for empty input or for the internal
        'rx_thread_exit' sentinel; raises IOError when the message id
        has no registered definition.
        """
        if not msg:
            eprint('vpp_api.read failed')
            return
        # The first header field is the message-table index.
        i, ci = self.header.unpack_from(msg, 0)
        if self.id_names[i] == 'rx_thread_exit':
            return
        #
        # Decode message and returns a tuple.
        #
        msgdef = self.id_msgdef[i]
        if not msgdef:
            raise IOError(2, 'Reply message undefined')
        r = self.decode(msgdef, msg)
        return r
def msg_handler_async(self, msg):
"""Process a message from VPP in async mode.
In async mode, all messages are returned to the callback.
"""
r = self.decode_incoming_msg(msg)
if r is None:
return
msgname = type(r).__name__
if self.event_callback:
self.event_callback(msgname, r)
    def _control_ping(self, context):
        """Send a ping command."""
        # The ping's reply is used to detect the end of a multipart
        # response stream (see msg_handler_sync).
        self._call_vpp_async(self.control_ping_index,
                             self.control_ping_msgdef,
                             context=context)
def _call_vpp(self, i, msgdef, multipart, **kwargs):
"""Given a message, send the message and await a reply.
msgdef - the message packing definition
i - the message type index
multipart - True if the message returns multiple
messages in return.
context - context number - chosen at random if not
supplied.
The remainder of the kwargs are the arguments to the API call.
The return value is the message or message array containing
the response. It will raise an IOError exception if there was
no response within the timeout window.
"""
# We need a context if not supplied, in order to get the
# response
context = kwargs.get('context', self.get_context())
kwargs['context'] = context
# Set up to receive a response
self.results_prepare(context, multi=multipart)
# Output the message
self._call_vpp_async(i, msgdef, **kwargs)
if multipart:
# Send a ping after the request - we use its response
# to detect that we have seen all results.
self._control_ping(context)
# Block until we get a reply.
r = self.results_wait(context)
return r
def _call_vpp_async(self, i, msgdef, **kwargs):
"""Given a message, send the message and await a reply.
msgdef - the message packing definition
i - the message type index
context - context number - chosen at random if not
supplied.
The remainder of the kwargs are the arguments to the API call.
"""
if not 'context' in kwargs:
context = self.get_context()
kwargs['context'] = context
else:
context = kwargs['context']
kwargs['_vl_msg_id'] = i
b = self.encode(msgdef, kwargs)
self._write(b)
    def register_event_callback(self, callback):
        """Register a callback for async messages.
        This will be called for async notifications in sync mode,
        and all messages in async mode.  In sync mode, replies to
        requests will not come here.
        callback is a fn(msg_type_name, msg_type) that will be
        called when a message comes in.  While this function is
        executing, note that (a) you are in a background thread and
        may wish to use threading.Lock to protect your datastructures,
        and (b) message processing from VPP will stop (so if you take
        a long while about it you may provoke reply timeouts or cause
        VPP to fill the RX buffer).  Passing None will disable the
        callback.
        """
        # Read by msg_handler_sync / msg_handler_async on the RX thread.
        self.event_callback = callback
| |
import logging
from decimal import Decimal
from functools import wraps
from typing import TYPE_CHECKING, Callable, List

from django import forms

from ..extensions.manager import get_extensions_manager
from ..payment.interface import TokenConfig
from . import GatewayError, PaymentError, TransactionKind
from .models import Payment, Transaction
from .utils import (
    clean_authorize,
    clean_capture,
    create_payment_information,
    create_transaction,
    gateway_postprocess,
    update_card_details,
    validate_gateway_response,
)
if TYPE_CHECKING:
    # flake8: noqa
    from ..payment.interface import CustomerSource

logger = logging.getLogger(__name__)

# User-facing fallback messages for gateway failures.
ERROR_MSG = "Oops! Something went wrong."
GENERIC_TRANSACTION_ERROR = "Transaction was unsuccessful"
def raise_payment_error(fn: Callable) -> Callable:
    """Decorator: raise PaymentError when the wrapped call reports failure.

    The wrapped function must return an object with ``is_success`` and
    ``error`` attributes (a Transaction).  Uses functools.wraps so the
    wrapped function keeps its name/docstring (the original lost them).
    """

    @wraps(fn)
    def wrapped(*args, **kwargs):
        result = fn(*args, **kwargs)
        if not result.is_success:
            raise PaymentError(result.error or GENERIC_TRANSACTION_ERROR)
        return result

    return wrapped
def payment_postprocess(fn: Callable) -> Callable:
    """Decorator: run gateway_postprocess on the transaction the wrapped
    function returns, then pass the transaction through.

    Uses functools.wraps so the wrapped function keeps its metadata.
    """

    @wraps(fn)
    def wrapped(*args, **kwargs):
        txn = fn(*args, **kwargs)
        gateway_postprocess(txn, txn.payment)
        return txn

    return wrapped
def require_active_payment(fn: Callable) -> Callable:
def wrapped(payment: Payment, *args, **kwargs):
if not payment.is_active:
raise PaymentError("This payment is no longer active.")
return fn(payment, *args, **kwargs)
return wrapped
@payment_postprocess
@raise_payment_error
@require_active_payment
def process_payment(
    payment: Payment, token: str, store_source: bool = False
) -> Transaction:
    """Run the gateway's one-step payment flow; record a CAPTURE transaction."""
    manager = get_extensions_manager()
    payment_info = create_payment_information(
        payment=payment, payment_token=token, store_source=store_source
    )
    gateway_response, error = _fetch_gateway_response(
        manager.process_payment, payment.gateway, payment_info
    )
    return create_transaction(
        payment=payment,
        kind=TransactionKind.CAPTURE,
        payment_information=payment_info,
        error_msg=error,
        gateway_response=gateway_response,
    )
@payment_postprocess
@raise_payment_error
@require_active_payment
def authorize(payment: Payment, token: str, store_source: bool = False) -> Transaction:
    """Authorize the payment via its gateway; record an AUTH transaction."""
    manager = get_extensions_manager()
    clean_authorize(payment)
    payment_info = create_payment_information(
        payment=payment, payment_token=token, store_source=store_source
    )
    gateway_response, error = _fetch_gateway_response(
        manager.authorize_payment, payment.gateway, payment_info
    )
    return create_transaction(
        payment=payment,
        kind=TransactionKind.AUTH,
        payment_information=payment_info,
        error_msg=error,
        gateway_response=gateway_response,
    )
@payment_postprocess
@raise_payment_error
@require_active_payment
def capture(
    payment: Payment, amount: Decimal = None, store_source: bool = False
) -> Transaction:
    """Capture a previously authorized payment; record a CAPTURE transaction.

    amount defaults to the payment's outstanding charge amount.
    """
    plugin_manager = get_extensions_manager()
    if amount is None:
        amount = payment.get_charge_amount()
    clean_capture(payment, Decimal(amount))
    token = _get_past_transaction_token(payment, TransactionKind.AUTH)
    payment_data = create_payment_information(
        payment=payment, payment_token=token, amount=amount, store_source=store_source
    )
    response, error = _fetch_gateway_response(
        plugin_manager.capture_payment, payment.gateway, payment_data
    )
    # BUG FIX: _fetch_gateway_response returns response=None when the
    # gateway call fails, so guard before dereferencing card_info.
    if response and response.card_info:
        update_card_details(payment, response)
    return create_transaction(
        payment=payment,
        kind=TransactionKind.CAPTURE,
        payment_information=payment_data,
        error_msg=error,
        gateway_response=response,
    )
@payment_postprocess
@raise_payment_error
@require_active_payment
def refund(payment: Payment, amount: Decimal = None) -> Transaction:
    """Refund up to the captured amount; record a REFUND transaction."""
    manager = get_extensions_manager()
    if amount is None:
        amount = payment.captured_amount
    _validate_refund_amount(payment, amount)
    if not payment.can_refund():
        raise PaymentError("This payment cannot be refunded.")
    token = _get_past_transaction_token(payment, TransactionKind.CAPTURE)
    payment_info = create_payment_information(
        payment=payment, payment_token=token, amount=amount
    )
    gateway_response, error = _fetch_gateway_response(
        manager.refund_payment, payment.gateway, payment_info
    )
    return create_transaction(
        payment=payment,
        kind=TransactionKind.REFUND,
        payment_information=payment_info,
        error_msg=error,
        gateway_response=gateway_response,
    )
@payment_postprocess
@raise_payment_error
@require_active_payment
def void(payment: Payment) -> Transaction:
    """Void the payment's authorization; record a VOID transaction."""
    manager = get_extensions_manager()
    token = _get_past_transaction_token(payment, TransactionKind.AUTH)
    payment_info = create_payment_information(payment=payment, payment_token=token)
    gateway_response, error = _fetch_gateway_response(
        manager.void_payment, payment.gateway, payment_info
    )
    return create_transaction(
        payment=payment,
        kind=TransactionKind.VOID,
        payment_information=payment_info,
        error_msg=error,
        gateway_response=gateway_response,
    )
@payment_postprocess
@raise_payment_error
@require_active_payment
def confirm(payment: Payment) -> Transaction:
    """Confirm a pending payment; record a CONFIRM transaction."""
    manager = get_extensions_manager()
    token = _get_past_transaction_token(payment, TransactionKind.AUTH)
    payment_info = create_payment_information(payment=payment, payment_token=token)
    gateway_response, error = _fetch_gateway_response(
        manager.confirm_payment, payment.gateway, payment_info
    )
    return create_transaction(
        payment=payment,
        kind=TransactionKind.CONFIRM,
        payment_information=payment_info,
        error_msg=error,
        gateway_response=gateway_response,
    )
def list_payment_sources(gateway: str, customer_id: str) -> List["CustomerSource"]:
    """Return the stored payment sources for *customer_id* on *gateway*."""
    manager = get_extensions_manager()
    return manager.list_payment_sources(gateway, customer_id)
def get_client_token(gateway: str, customer_id: str = None) -> str:
    """Fetch a client-side token from *gateway*, optionally tied to a customer."""
    config = TokenConfig(customer_id=customer_id)
    return get_extensions_manager().get_client_token(gateway, config)
def list_gateways() -> List[dict]:
    """Return the configured payment gateways."""
    manager = get_extensions_manager()
    return manager.list_payment_gateways()
def _fetch_gateway_response(fn, *args, **kwargs):
    """Call a gateway function and validate its response.

    Returns (response, None) on success, (None, ERROR_MSG) on any failure.
    """
    try:
        response = fn(*args, **kwargs)
        validate_gateway_response(response)
        return response, None
    except GatewayError:
        logger.exception("Gateway response validation failed!")
    except Exception:
        logger.exception("Error encountered while executing payment gateway.")
    return None, ERROR_MSG
def _get_past_transaction_token(payment: Payment, kind: TransactionKind):
txn = payment.transactions.filter(kind=kind, is_success=True).first()
if txn is None:
raise PaymentError(f"Cannot find successful {kind} transaction")
return txn.token
def _validate_refund_amount(payment: Payment, amount: Decimal):
if amount <= 0:
raise PaymentError("Amount should be a positive number.")
if amount > payment.captured_amount:
raise PaymentError("Cannot refund more than captured")
| |
#
## pick a sequence of length L that maximizes the % of kmers it covers
## a few strategies
## 1. pick window that maximizes the number of unique kmers
## 2. pick smallest collection of windows that maximizes number of unique kmers
## 3. starting at position 1, extend window until capture all kmers.
## Then degrade beginning removing kmers until it shrinks set, then stop.
## This will give shortest window inside (but not nec including) start and end points.
## Can start next search with the next position inside what was captured.
## Can also take current window and define the smallest set of 2 windows inside that captures all kmers.
from collections import defaultdict, deque
from itertools import product
from Bio import SeqIO
import numpy as np
import matplotlib.pyplot as plt
import sys
from copy import deepcopy
def get_all_kmers(k):
    """Return all 4**k DNA k-mers over ACGT, in itertools.product order.

    Generalized from the original hard-coded branches for k in 1..6:
    product("ACGT", repeat=k) produces the identical ordering for those
    values and works for any positive k.
    """
    assert k > 0
    return [''.join(bases) for bases in product("ACGT", repeat=k)]
def generate_kmer_count_dict(k):
    """Return a dict mapping every possible k-mer to a zero count."""
    return {kmer: 0 for kmer in get_all_kmers(k)}
def uniq_kmers_in_seq(seq, k):
    """Return the set of distinct k-mers occurring in *seq*."""
    return set(seq[i:i + k] for i in range(len(seq) - k + 1))
def num_uniq_kmers_in_seq(seq,k):
    """Count the distinct k-mers in *seq* (helper inlined for self-containment)."""
    return len({seq[i:i + k] for i in range(len(seq) - k + 1)})
def kmer_counts_in_seq(seq, k):
    """Return a defaultdict(int) of k-mer -> occurrence count in *seq*."""
    counts = defaultdict(int)
    last_start = len(seq) - k
    for start in range(last_start + 1):
        counts[seq[start:start + k]] += 1
    return counts
def kmer_counts_in_seqs(seqlist, k):
    """Aggregate k-mer occurrence counts across every sequence in *seqlist*."""
    counts = defaultdict(int)
    for s in seqlist:
        n_windows = len(s) - k + 1
        for start in range(n_windows):
            counts[s[start:start + k]] += 1
    return counts
def update_kmer_counts(kmers, seq, k):
    '''Add seq's k-mer occurrences into the kmers mapping (kmer:count); return it.'''
    for pos in range(len(seq) - k + 1):
        kmers[seq[pos:pos + k]] += 1
    return kmers
def kmer_counts_in_fastx(k, filename, fastx="fasta",revcomp=False, verbose=True):
    """Count k-mers over every record in a fasta/fastq file.

    revcomp - also count the reverse-complement strand of each record.
    """
    counts = generate_kmer_count_dict(k)
    for record in SeqIO.parse(filename, fastx):
        if verbose:
            sys.stderr.write(record.name + "\n")
        if record is None:
            continue
        counts = update_kmer_counts(counts, str(record.seq), k)
        if revcomp:
            counts = update_kmer_counts(counts, str(record.reverse_complement().seq), k)
    return counts
def kmer_hist(kmercounts):
    """Histogram of coverage: maps a count value -> number of k-mers with it."""
    histogram = defaultdict(int)
    for cov in kmercounts.values():
        histogram[cov] += 1
    return histogram
def print_hist(hist):
    # Print a coverage histogram as two columns, sorted by coverage.
    # NOTE: Python 2 print statements -- this module is Python 2 only.
    print "Coverage", "Num_kmers"
    for key in sorted(hist.keys()):
        print key, hist[key]
def plot_hist(hist):
    """Bar-plot a coverage histogram (coverage on x, number of k-mers on y)."""
    data = defaultdict(list)
    for cov in sorted(hist.keys()):
        data['cov'].append(cov)
        data['numkmers'].append(hist[cov])
    # NOTE(review): 'left=' is the pre-matplotlib-2.0 name for bar()'s x
    # argument -- confirm against the pinned matplotlib version.
    plt.bar(left=data['cov'], height=data['numkmers'], width=1.0)
    plt.show()
def plot_kmer_counts(kmercounts):
    '''Bar-plot per-kmer counts (alphabetical order), highlighting and
    annotating the least- and most-frequent k-mers.'''
    data = defaultdict(list)
    numKmers = len(kmercounts)
    mincount = min(kmercounts.values())
    maxcount = max(kmercounts.values())
    i=0
    for kmer in sorted(kmercounts.keys()):
        i += 1
        data['kmers'].append(kmer)
        data['counts'].append(kmercounts[kmer])
        # Track positions of the extreme counts for the overlay bars.
        if kmercounts[kmer] == mincount:
            data['minspots'].append(i)
            data['minkmers'].append(kmer)
        elif kmercounts[kmer] == maxcount:
            data['maxspots'].append(i)
            data['maxkmers'].append(kmer)
    # NOTE(review): 'left=' is the pre-matplotlib-2.0 bar() x argument.
    plt.bar(left=range(1,numKmers+1), height=data['counts'], width=1.0)
    plt.bar(left=data['minspots'], height=[mincount]*len(data['minspots']), color="r", width=5.0)
    plt.bar(left=data['maxspots'], height=[maxcount]*len(data['maxspots']), color="b", width=10.0)
    # Random jitter keeps overlapping labels readable.
    for i in range(len(data['minspots'])):
        plt.annotate(data['minkmers'][i], (data['minspots'][i],mincount+np.random.randint(-3,4)+np.random.rand()*np.random.choice([-1,1])), color="g", size='x-small')
    for i in range(len(data['maxspots'])):
        plt.annotate(data['maxkmers'][i], (data['maxspots'][i],maxcount+np.random.randint(-2,2)+np.random.rand()*np.random.choice([-1,1])), color="g", size='x-small')
    plt.show()
def uniq_kmers_in_windows_seq(seq, k, window, step):
    """Map each window start -> set of distinct k-mers in seq[start:start+window].

    Window 0 is always included; later starts advance by *step*.
    """
    perwindow = defaultdict(set)
    perwindow[0] = uniq_kmers_in_seq(seq[0:window], k)
    for start in range(1, len(seq) - window + 1, step):
        perwindow[start] = uniq_kmers_in_seq(seq[start:start + window], k)
    return perwindow
def num_uniq_kmers_in_windows_seq(seq, k, window, step):
    """Sliding-window count of distinct k-mers at each window start.

    Uses an incremental update: drop the k-mer leaving on the left and
    add the one entering on the right.  NOTE(review): the update moves
    by exactly one position per iteration, so with step > 1 the counts
    drift -- confirm this is only called with step == 1.
    """
    windowcounts = defaultdict(int)
    currentkmerdict = kmer_counts_in_seq(seq[:window], k)
    # The comprehension's k shadows the k-mer size only inside itself.
    windowcounts[0] = len({k:v for k, v in currentkmerdict.items() if v})
    for i in range(1,len(seq)-window+1, step):
        currentkmerdict[seq[(i-1):(i-1+k)]] -= 1
        currentkmerdict[seq[(i+window-k):(i+window)]] += 1
        windowcounts[i] = len({k:v for k, v in currentkmerdict.items() if v})
    return windowcounts
## RETIRED -- delete if all works fine with newer fxn below moving forward
##def find_window_with_max_uniq_kmers(seq, k, window, step, fastx="fasta",revcomp=False,verbose=False):
## currentkmerdict = kmer_counts_in_seq(seq[:window], k)
## if revcomp:
## pass
## windowcount = len({k:v for k, v in currentkmerdict.items() if v})
## maxcount = windowcount
## maxseqs = [seq[:window]]
## for i in range(1,len(seq)-window+1, step):
## if currentkmerdict[seq[(i-1):(i-1+k)]] == 1:
## windowcount -= 1
## currentkmerdict[seq[(i-1):(i-1+k)]] -= 1
## if currentkmerdict[seq[(i+window-k):(i+window)]] == 0:
## windowcount += 1
## currentkmerdict[seq[(i+window-k):(i+window)]] += 1
## if windowcount > maxcount:
## maxcount = windowcount
## maxseqs = [seq[i:i+window]]
## elif windowcount == maxcount:
## maxseqs.append(seq[i:i+window])
## if verbose:
## sys.stderr.write( maxcount + "\n" )
## return maxcount, maxseqs
def rc(seq):
    """Return the reverse complement of a DNA string.

    Matches the original behavior: input is upper-cased first and any
    non-ACGT character is silently dropped.
    """
    complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
    return ''.join(complement.get(base, '') for base in seq.upper()[::-1])
def find_window_with_max_uniq_kmers(fa, k, window, step, fastx="fasta",revcomp=False,verbose=False):
    """Slide a window over record *fa* and return (maxcount, maxseqs):
    the highest distinct-k-mer count seen and every window sequence
    achieving it.  With revcomp=True both strands contribute k-mers.

    BUG FIX: the verbose branch concatenated an int with a str
    (maxcount + "\\n"), raising TypeError; it now stringifies first.
    """
    ## Use seq object (fa) instead of seq string
    seq = str(fa.seq)
    seqlist = [str(fa.seq[:window])]
    if revcomp:
        rc_seq = str(fa.reverse_complement().seq)
        seqlist.append( rc_seq[-window:] )
    currentkmerdict = kmer_counts_in_seqs(seqlist, k)
    windowcount = len({k:v for k, v in currentkmerdict.items() if v})
    maxcount = windowcount
    maxseqs = [seq[:window]]
    for i in range(1,len(seq)-window+1, step):
        currentkmerdict[seq[(i-1):(i-1+k)]] -= 1 ##subtract out kmer that is no longer in window at left boundary
        if revcomp: ## 1 is a special case where (-i-k+1):(-i+1) does not work -- it is (-x:-0)
            if i == 1:
                currentkmerdict[rc_seq[(-i-k+1):]] -= 1 ##subtract RC
            else:
                currentkmerdict[rc_seq[(-i-k+1):(-i+1)]] -= 1 ##subtract RC
        if currentkmerdict[seq[(i-1):(i-1+k)]] == 0: ## if recently passed kmer at left boundary had only 1 count in last window (now 0), subtract 1 from the count of unique kmers in this window
            windowcount -= 1
            if revcomp: ## a kmer and its revcomp always have the same count
                windowcount -= 1
        currentkmerdict[seq[(i+window-k):(i+window)]] += 1 ## add
        if revcomp:
            currentkmerdict[rc_seq[(-i-window):(-i-window+k)]] += 1 ## add
        if currentkmerdict[seq[(i+window-k):(i+window)]] == 1: ## new kmer at right side was absent from last window: diversity +1
            windowcount += 1
            if revcomp:
                windowcount += 1
        if windowcount > maxcount:
            maxcount = windowcount
            maxseqs = [seq[i:i+window]]
        elif windowcount == maxcount:
            maxseqs.append(seq[i:i+window])
        if verbose:
            sys.stderr.write(str(maxcount) + "\n")
    return maxcount, maxseqs
def find_window_with_max_uniq_kmers_in_fastx(k, window, step, filename, fastx="fasta",revcomp=False,verbose=False):
    """Scan every record in a fastx file for the most k-mer-diverse window.

    Returns (record names, window sequences, best count); ties across
    records are all kept.
    """
    best_count = 0
    best_seqs = []
    best_names = []
    for record in SeqIO.parse(filename, fastx):
        if verbose:
            sys.stderr.write(record.name + "\n")
        if record is None:
            continue
        count, seqs = find_window_with_max_uniq_kmers(record, k, window, step, fastx, revcomp, verbose)
        if count > best_count:
            best_count = count
            best_seqs = seqs
            best_names = [str(record.name)] * len(seqs)
        elif count == best_count:
            best_seqs += seqs
            best_names += [str(record.name)] * len(seqs)
    return best_names, best_seqs, best_count
def find_pair_of_seqs_with_max_uniq_kmers(seqs, k):
    """Return (pairs, count): every index pair (i, j), i <= j, whose combined
    k-mer sets are the largest, plus that union size.

    seqs is a list of strings; the per-sequence k-mer extraction is inlined.
    """
    kmersets = [
        {s[pos:pos + k] for pos in range(len(s) - k + 1)} for s in seqs
    ]
    best = 0
    bestpairs = []
    for i in range(len(seqs)):
        for j in range(i, len(seqs)):
            union_size = len(kmersets[i] | kmersets[j])
            if union_size > best:
                best = union_size
                bestpairs = [(i, j)]
            elif union_size == best:
                bestpairs.append((i, j))
    return bestpairs, best
def find_windowpairs_with_all_uniq_kmers_in_fastx():
    # TODO: not implemented -- placeholder so imports/callers do not break.
    pass
def find_window_containing_these_kmers(seq, k, window, step, kmers=set([]), fastx="fasta",revcomp=False,verbose=False,optimizediversity=True):
    ## if optimizediversity==True, it will return window containing those kmers + max amount of unique kmers
    ## else returns first window with those kmers
    ## Returns (count, seq(s)) on success; (0, []) when no window holds
    ## every requested kmer.  NOTE(review): the incremental update below
    ## moves one position per iteration, so step > 1 would drift --
    ## confirm callers use step == 1.
    kmers = list(set(kmers))
    numkmers = len(kmers)
    currentkmerdict = kmer_counts_in_seq(seq[:window], k)
    windowcount = len({k:v for k, v in currentkmerdict.items() if v})
    maxcount = windowcount
    maxseqs = [seq[:window]]
    matchfound = False
    for i in range(1, len(seq)-window+1, step):
        # Slide the window: retire the leftmost kmer, admit the rightmost.
        if currentkmerdict[seq[(i-1):(i-1+k)]] == 1:
            windowcount -= 1
        currentkmerdict[seq[(i-1):(i-1+k)]] -= 1
        if currentkmerdict[seq[(i+window-k):(i+window)]] == 0:
            windowcount += 1
        currentkmerdict[seq[(i+window-k):(i+window)]] += 1
        # Does the current window contain every requested kmer?
        nkmer_currwin = 0
        for kmer in kmers:
            if currentkmerdict[kmer] > 0:
                nkmer_currwin += 1
        if nkmer_currwin == numkmers:
            matchfound=True
            if optimizediversity and windowcount > maxcount:
                maxcount = windowcount
                maxseqs = [seq[i:i+window]]
            elif optimizediversity and windowcount == maxcount:
                maxseqs.append(seq[i:i+window])
            elif not optimizediversity:
                # First satisfying window wins.
                return windowcount, seq[i:i+window]
    if matchfound:
        return maxcount, maxseqs
    else:
        return 0, []
def find_window_containing_these_kmers_in_fastx(k, window, step, filename, kmers=set([]), fastx="fasta",revcomp=False,verbose=False,optimizediversity=True):
    """Run find_window_containing_these_kmers over every record of a fastx
    file.  Returns (record names, window seqs, count); ([], [], 0) when no
    record has a window containing all of *kmers*.
    """
    maxcount = 0
    maxseqs = []
    max_fas = []
    kmers = list(set(kmers))
    numkmers = len(kmers)
    matchfound=False
    for fa in SeqIO.parse(filename, fastx):
        if verbose:
            sys.stderr.write( fa.name + "\n")
        if fa is not None:
            fa_maxcount, fa_maxseqs = find_window_containing_these_kmers(str(fa.seq), k, window, step, kmers=kmers, fastx="fasta",revcomp=revcomp,verbose=verbose,optimizediversity=optimizediversity)
            if fa_maxseqs:
                matchfound = True
                if optimizediversity and fa_maxcount > maxcount:
                    maxcount = fa_maxcount
                    maxseqs = fa_maxseqs
                    max_fas = [str(fa.name)]*len(fa_maxseqs)
                elif optimizediversity and fa_maxcount == maxcount:
                    maxseqs += fa_maxseqs
                    max_fas += [str(fa.name)]*len(fa_maxseqs)
                elif not optimizediversity: ## these are not actually maxs -- they are just values to a window that had first successful/complete union
                    return [str(fa.name)]*len(fa_maxseqs), fa_maxseqs, fa_maxcount
    if matchfound:
        return max_fas, maxseqs, maxcount
    else:
        return [], [], 0
def uniq_kmer_in_fastx(k, filename, fastx="fasta",revcomp=False,verbose=True):
    """Collect the set of distinct k-mers seen across a fastx file,
    stopping early once all 4**k possible k-mers have been observed.

    BUG FIXES: (1) the revcomp branch called set.add() with a *set*
    argument, which raises TypeError (sets are unhashable) -- it now
    uses update(); (2) a 'kmderdone' typo meant the early-exit flag was
    never set, so the loop always read the whole file.
    """
    kmers = set([])
    total = 4**k
    kmerdone = False
    for fa in SeqIO.parse(filename, fastx):
        if verbose:
            sys.stderr.write( fa.name + "\n")
        if fa is not None:
            seqkmers = uniq_kmers_in_seq(str(fa.seq), k)
            if revcomp:
                seqkmers.update(uniq_kmers_in_seq(str(fa.reverse_complement().seq), k))
            if verbose:
                sys.stderr.write(str(len(seqkmers)) + " kmers found.\n")
            kmers.update(seqkmers)
            kmerdone = (len(kmers) == total)
            if kmerdone:
                break
        if verbose:
            sys.stderr.write("Up to " + str(len(kmers)) + " kmers\n\n")
    return kmers
def num_uniq_kmer_in_fastx(k, filename, fastx="fasta",revcomp=False, verbose=False):
    """Number of distinct k-mers present in the fastx file."""
    found = uniq_kmer_in_fastx(k, filename, fastx=fastx, revcomp=revcomp, verbose=verbose)
    return len(found)
def extend_until_all_kmers_captured(seq, k, breakin2=False, start=0):
    """Grow a window from *start* until it contains all 4**k k-mers, then
    trim redundant sequence from the left.

    Returns (start, end) coordinates, or with breakin2=True a 4-tuple
    (start1, end1, start2, end2) describing up to two sub-windows that
    jointly capture all k-mers; None values signal failure.
    """
    total = len(get_all_kmers(k))
    seqlen = len(seq)
    kmers = defaultdict(int)
    i = start-1
    end = i+k
    # Extend the right edge until every possible k-mer has been seen.
    while len(kmers.keys()) != total and end <= seqlen-k:
        i+=1
        end+=1
        kmers[seq[i:end]] += 1
    ## check
    if len(kmers.keys()) != total:
        if breakin2:
            return None,None,None,None
        else:
            return None,None
    ## else all kmers were found, begin trimming
    #trim beginning: drop leading kmers whose counts stay positive.
    while start < end:
        if kmers[seq[start:start+k]] - 1 <= 0:
            break
        else:
            kmers[seq[start:start+k]] -= 1
            start += 1
    if start >= end:
        if breakin2:
            return None,None,None,None
        else:
            return None,None
    if not breakin2:
        return start,end
    else:
        ## the first and last kmer are necessary as defined by the processes that found them
        ## however, we may be able to shrink the amount of sequence by extending inward from both sides
        ## and stopping when all kmers are found
        kmers = defaultdict(int)
        alternate = 0
        end1 = start
        start2 = end-k
        # Walk the two edges toward each other, keeping only positions
        # that contribute a new k-mer.
        while len(kmers.keys()) != total and end1 <= start2:
            if kmers[seq[end1:end1+k]] == 0:
                kmers[seq[end1:end1+k]] += 1
                end1 += 1
            elif kmers[seq[start2:start2+k]] == 0:
                kmers[seq[start2:start2+k]] += 1
                start2 -= 1
            else:
                end1+=1
                start2-=1
        if end1 >= start2:
            return start, end, None, None
        else:
            return start, end1, start2+1, end
def extend_and_repeat(seq, k, breakin2=False,prefix="",suffix="", verbose=True):
    """Repeatedly call extend_until_all_kmers_captured along *seq*,
    printing each captured group in fasta form (Python 2 print
    statements) and returning (minseqs, minlen) for the shortest group.

    NOTE(review): 'mincoords' is recorded but never returned or read --
    looks vestigial; confirm before relying on it.
    """
    seqlen = len(seq)
    end2 = 0
    group = 1
    minlen = float("inf")
    minseqs = []
    while end2 < seqlen-k:
        if breakin2:
            start1, end1, start2, end2 = extend_until_all_kmers_captured(seq, k, breakin2=breakin2, start=end2)
            if start1 == None:
                break
            if verbose:
                print ">"+prefix+"group"+str(group)+"-1"+suffix
                print seq[start1:end1]
            if start2 is not None and end2 is not None:
                if verbose:
                    print ">"+prefix+"group"+str(group)+"-2"+suffix
                    print seq[start2:end2]
                if (end1-start1)+(end2-start2) < minlen:
                    minlen = (end1-start1)+(end2-start2)
                    mincoords = [start1, end1, start2, end2]
                    minseqs = [seq[start1:end1], seq[start2:end2]]
            else:
                # Single-window result; resume the scan after it.
                end2 = end1
                if (end1-start1) < minlen:
                    minlen = end1-start1
                    mincoords = [start1, end1, None, None]
                    minseqs = [seq[start1:end1]]
        else:
            start1, end2 = extend_until_all_kmers_captured(seq, k, breakin2=breakin2, start=end2)
            if start1 == None:
                break
            if verbose:
                print ">"+prefix+"group"+str(group)
                print seq[start1:end2]
            if (end2-start1) < minlen:
                minlen = end2-start1
                mincoords = [start1, end2, None, None]
                minseqs = [seq[start1:end2]]
        group += 1
    return minseqs, minlen
def extend_and_repeat_in_fastx(k, filename, fastx="fasta", breakin2=False, revcomp=False, verbose=False):
    """Run extend_and_repeat over each record and keep the shortest solution.

    Returns (minseqs, minlen, minfastaentry).  BUG FIX: the result
    variables are now initialised up front, so an empty file (or one
    where no record yields a finite solution) returns ([], inf, None)
    instead of raising NameError; the unused kmers/total/kmerdone
    locals were dropped.
    """
    minlen = float("inf")
    minseqs = []
    minfastaentry = None
    for fa in SeqIO.parse(filename, fastx):
        if fa is not None:
            prefix = str(fa.name) + "_"
            fa_minseqs, fa_minlen = extend_and_repeat(str(fa.seq), k, breakin2=breakin2, prefix=prefix, suffix="", verbose=verbose)
            sys.stderr.write( prefix + str(fa_minlen) + "\n")
            if fa_minlen < minlen:
                sys.stderr.write( "^^^^min^^^^\n" )
                minfastaentry = str(fa.name)
                minlen = fa_minlen
                minseqs = fa_minseqs
    return minseqs, minlen, minfastaentry
def randomly_sample_window(seqlens, window, numseqs=None):
    """Pick a random (sequence name, window start) for a window of length
    *window*.

    seqlens - dict of sequence name -> sequence length
    numseqs - how many (sorted) sequences to choose among; defaults to all

    BUG FIX: the default branch referenced an undefined name 'seqnames'
    (NameError); it now counts the keys of seqlens.
    """
    if numseqs is None:
        numseqs = len(seqlens)
    name = sorted(seqlens.keys())[np.random.randint(numseqs)]
    start = np.random.randint(seqlens[name] - window)
    return name, start
def read_in_fastx(filename,fastx="fasta",verbose=False):
    """Load a fastx file into two dicts: name -> sequence and name -> length."""
    sequences = {}
    lengths = {}
    for record in SeqIO.parse(filename, fastx):
        if verbose:
            sys.stderr.write(record.name + "\n")
        if record is None:
            continue
        s = str(record.seq)
        sequences[str(record.name)] = s
        lengths[str(record.name)] = len(s)
    return sequences, lengths
## converges faster when both sequences are changed each time -- see second version below this one
##def find_n_pairs_that_have_complete_union_sets(N, k, window, filename, fastx="fasta", revcomp=False, verbose=False):
## seqome, seqlens = read_in_fastx(filename,fastx,verbose)
## numseqs = len(seqlens.keys())
## pairseqs = {}
## pairseqnames = {}
## allkmers = set(get_all_kmers(k))
## n = 0
## while n != N:
## n += 1
## print n
## name1, start1 = randomly_sample_window(seqlens, window, numseqs)
## seq1 = seqome[name1][start1:start1+window]
## kmers1 = uniq_kmers_in_seq(seq1,k)
## neededkmers = allkmers.difference(kmers1)
## kmers2 = set([])
## i=0
## while len(neededkmers.difference(kmers2)) != 0:
## i+=1
## print n,i
## name2, start2 = randomly_sample_window(seqlens, window, numseqs)
## seq2 = seqome[name1][start1:start1+window]
## kmers2 = uniq_kmers_in_seq(seq2,k)
## pairseqnames[n] = [name1, name2]
## pairseqs[n] = [seq1, seq2]
## return pairseqnames, pairseqs
def find_n_pairs_that_have_complete_union_sets(N, k, window, filename, fastx="fasta", revcomp=False, verbose=False):
    """Randomly sample window pairs until N pairs are found whose combined
    k-mer sets cover all 4**k k-mers.

    Returns (pairseqnames, pairseqs): dicts keyed 1..N holding
    [name1, name2] and [seq1, seq2] respectively.

    BUG FIX: seq2 was re-sliced from (name1, start1), so both windows of
    a "pair" were identical and a second window was never actually
    examined; it now slices from (name2, start2).
    """
    seqome, seqlens = read_in_fastx(filename,fastx,verbose)
    numseqs = len(seqlens.keys())
    pairseqs = {}
    pairseqnames = {}
    allkmers = set(get_all_kmers(k))
    n = 0
    while n != N:
        n += 1
        sys.stderr.write( str(n) + "\n" )
        i=0
        # Sentinel forces at least one sampling iteration.
        neededkmers = set(["initiating"])
        kmers2 = set([])
        while len(neededkmers.difference(kmers2)) != 0:
            i+=1
            sys.stderr.write( str(n) + " " + str(i) + "\n" )
            name1, start1 = randomly_sample_window(seqlens, window, numseqs)
            seq1 = seqome[name1][start1:start1+window]
            kmers1 = uniq_kmers_in_seq(seq1,k)
            neededkmers = allkmers.difference(kmers1)
            name2, start2 = randomly_sample_window(seqlens, window, numseqs)
            seq2 = seqome[name2][start2:start2+window]
            kmers2 = uniq_kmers_in_seq(seq2,k)
        pairseqnames[n] = [name1, name2]
        pairseqs[n] = [seq1, seq2]
    return pairseqnames, pairseqs
class Cluster(object):
    """A contiguous [start, end) interval on a parent sequence together with
    the set of unique k-mers credited to it."""
    def __init__(self, start, end, k, uniqkmers=None):
        # start/end are 0-based coordinates on the parent sequence.
        self.start = start
        self.end = end
        self.k = k
        # BUG FIX: the default used to be a shared mutable set([]) -- every
        # cluster built without an explicit set aliased the same object.
        if uniqkmers is None:
            uniqkmers = set([])
        assert type(uniqkmers) == set
        self.uniqkmers = uniqkmers
    def update_start(self, newstart):
        self.start = newstart
    def update_end(self, newend):
        self.end = newend
    def update_uniqkmers(self, newset):
        # Accepts a set/list of kmers, or a single (int) kmer.
        if type(newset) == set or type(newset) == list:
            self.uniqkmers = self.uniqkmers.union(newset)
        elif type(newset) == int:
            # BUG FIX: set.add() returns None; the old code assigned that
            # None back to self.uniqkmers, destroying the set.
            self.uniqkmers.add(newset)
    def overlaps(self, other):
        """True if `other` (a Cluster) overlaps this cluster's interval."""
        if self.start <= other.start <= self.end:
            return True
        if self.start <= other.end <= self.end:
            return True
        return False
    def find_nearest_kmer_from_set(self, target_kmer_set, seqindex):
        """Scan outward from both cluster edges for the nearest position whose
        kmer belongs to target_kmer_set.

        seqindex maps position -> kmer on the parent sequence.
        Returns ("near_start"|"near_end", position, distance).
        """
        maxpos = max(seqindex.keys())  # hoisted: invariant over the scan
        near_start = self.start
        # Bounds are checked BEFORE indexing so we never look up position -1
        # (the old order polluted/raised on the defaultdict at the boundary).
        while near_start >= 0 and seqindex[near_start] not in target_kmer_set:
            near_start -= 1
        near_end = self.end - self.k + 1
        while near_end <= maxpos and seqindex[near_end] not in target_kmer_set:
            near_end += 1
        d1 = abs(self.start - near_start)
        d2 = abs(near_end - self.end)
        # NOTE(review): if a scan runs off either end of the sequence, the
        # out-of-range position (-1 or maxpos+1) is still returned as a hit;
        # callers appear to rely on the distance comparison to avoid it --
        # confirm.
        if near_start != self.start and d1 < d2:
            return "near_start", near_start, d1
        elif near_end != self.end - self.k + 1 and d2 < d1:
            return "near_end", near_end, d2
        elif d1 == d2:
            # Tie: prefer the side whose newly covered span holds more
            # distinct kmers.
            kmerstart = set([])
            kmerend = set([])
            startrange = self.get_startrange(near_start)
            endrange = self.get_endrange(near_end)
            # NOTE(review): these ranges generally differ (d1 vs d2-1
            # elements), so this assert looks fragile -- confirm intent.
            assert startrange == endrange
            for i in startrange:
                kmerstart.add(seqindex[i])
            for i in endrange:
                kmerend.add(seqindex[i])
            if len(kmerstart) >= len(kmerend):
                return "near_start", near_start, d1
            else:
                return "near_end", near_end, d2
        else:  # catch-all: pick a side at random
            if np.random.binomial(1, 0.5):
                return "near_start", near_start, d1
            else:
                return "near_end", near_end, d2
    def grow_cluster(self, newlimit, seqindex, returnNewKmers=True):
        """Extend the cluster out to position newlimit (on either side),
        absorbing the kmers of the newly covered positions."""
        newkmers = set([])
        if newlimit < self.start:
            startrange = self.get_startrange(newlimit)
            self.update_start(newlimit)
            for i in startrange:
                newkmers.add(seqindex[i])
        elif newlimit > self.end:
            endrange = self.get_endrange(newlimit)
            self.update_end(newlimit)
            for i in endrange:
                newkmers.add(seqindex[i])
        # else: newlimit falls inside the cluster; nothing grows.
        self.uniqkmers = self.uniqkmers.union(newkmers)
        if returnNewKmers:
            return newkmers
    def get_startrange(self, new_start):
        # positions newly covered when the start moves left to new_start
        return range(new_start, self.start)
    def get_endrange(self, new_end):
        # kmer start positions newly covered when the end moves right
        return range(self.end - self.k + 2, new_end - self.k + 1)
    def merge_with(self, other, seqindex):
        ''' seqindex has pos:kmer pairs from the parent sequence'''
        ## merged interval spans both clusters
        self.start = min(self.start, other.start)
        # BUG FIX: the old code took max(self.start, other.end), which could
        # SHRINK the merged cluster; the union end is the max of both ends.
        self.end = max(self.end, other.end)
        ## merge kmers already credited to each cluster
        self.uniqkmers = self.uniqkmers.union(other.uniqkmers)
        ## for non-overlapping clusters, intervening kmers are added as well
        for i in range(self.start, self.end - self.k + 1):
            self.uniqkmers.add(seqindex[i])
    def get_start(self):
        return self.start
    def get_end(self):
        return self.end
    def get_uniqkmers(self):
        return self.uniqkmers
    def __str__(self):
        return str(self.start) + "\t" + str(self.end)
class Cluster_Set(object):
    """A collection of Cluster objects over one parent sequence.

    Tracks the union of unique k-mers captured by all member clusters and
    supports merging overlapping clusters, growing clusters toward nearby
    target k-mers, and joining the closest pair of clusters.
    """
    def __init__(self, target_num_uniq_kmers, k, parent_sequence_index):
        ''' parent sequence is the sequence that all clusters added to cluster set are indexed on
            parent_sequence_index is a dict with pos:kmer pairs'''
        self.num_clusters = 0
        self.clusters = {}        # cluster id (int, 1-based) -> Cluster
        self.uniqkmers = set([])  # union of kmers over all clusters
        self.numuniq = None       # cached count; refreshed by update_numuniq()
        self.target = target_num_uniq_kmers
        self.k = k
        self.seqindex = parent_sequence_index
        self.closest = None       # (key_a, key_b) of closest cluster pair
        self.mindist = float('inf')
        self.distances = None     # key_a -> {key_b: dist}, only for key_a < key_b
    def add(self, cluster):
        self.num_clusters += 1
        self.clusters[self.num_clusters] = cluster
        # BUG FIX: set.union() returns a NEW set; the old code discarded the
        # result, so self.uniqkmers never grew and the kmer accounting was
        # permanently broken.
        self.uniqkmers = self.uniqkmers.union(cluster.get_uniqkmers())
    def get_num_clust(self):
        return self.num_clusters
    def get_target_num_kmer(self):
        return self.target
    def get_uniqkmers(self):
        return self.uniqkmers
    def get_num_uniq(self):
        return len(self.uniqkmers)
    def get_cluster_list(self):
        return sorted(self.clusters.keys())
    def print_distances(self):
        if self.distances is None:
            print(self.distances)
        else:
            cluster_list = self.get_cluster_list()
            for i in cluster_list:
                for j in cluster_list:
                    if i == j:
                        continue
                    a = min(i, j)
                    b = max(i, j)
                    # BUG FIX: distances are stored under [min][max] only;
                    # the old code indexed [i][j] and raised KeyError for
                    # any i >= j.
                    print(str(i) + "-->" + str(j) + ":\t" + str(self.distances[a][b]))
    def merge_overlapping_clusters(self):
        initial_clusts = deque(sorted(self.clusters.keys()))
        while initial_clusts:
            a = initial_clusts.popleft()
            if a in self.clusters:  # skip clusters already merged away
                self.__merge_a_with_others__(a, sorted(self.clusters.keys()))
    def __merge_a_with_others__(self, a, others):
        """Merge into cluster `a` every cluster in `others` overlapping it."""
        toremove = []
        for b in others:
            if b != a and self.clusters[a].overlaps(self.clusters[b]):
                self.clusters[a].merge_with(self.clusters[b], self.seqindex)
                toremove.append(b)
        for b in toremove:
            self.clusters.pop(b)
            self.num_clusters -= 1
            if self.distances is not None:
                # NOTE(review): this drops b's row but entries [x][b] for
                # x < b stay stale until the next full recalculation.
                self.distances.pop(b)
        if toremove:
            # keep the set-level kmer accounting in sync with the merged
            # cluster (merge_with may have absorbed intervening kmers)
            self.uniqkmers = self.uniqkmers.union(self.clusters[a].get_uniqkmers())
    def update_numuniq(self):
        self.numuniq = len(self.uniqkmers)
    def target_reached(self):
        return self.numuniq == self.target
    def find_cluster_with_closest_nearby_target_kmer(self, target_kmer_set, seqindex):
        """Return {cluster_key: [near, newlimit, dist]} for exactly one
        cluster whose nearest target kmer is closest; ties broken at random."""
        mindist = float('inf')
        minclusts = {}
        for i in self.get_cluster_list():
            near, newlimit, dist = self.clusters[i].find_nearest_kmer_from_set(target_kmer_set, seqindex)
            if dist < mindist:
                mindist = dist
                minclusts = {i: [near, newlimit, dist]}
            elif dist == mindist:
                minclusts[i] = [near, newlimit, dist]
        if len(minclusts) > 1:
            # Python-3 compatibility: dict.keys() is not indexable, so build
            # a list before picking a random tied cluster.
            keys = sorted(minclusts.keys())
            randclustname = keys[np.random.randint(0, len(keys))]
            minclusts = {randclustname: minclusts[randclustname]}
        return minclusts
    def grow_cluster(self, clusterinfo, seqindex):
        ''' cluster info is minclust dict with single k:v pair output from find_cluster_with_closest_nearby_target_kmer'''
        # Python-3 compatibility: dict.keys() is not indexable.
        a = list(clusterinfo.keys())[0]
        newlimit = clusterinfo[a][1]
        newkmers = self.clusters[a].grow_cluster(newlimit, seqindex, returnNewKmers=True)
        self.uniqkmers = self.uniqkmers.union(newkmers)
        self.update_numuniq()
        ## growing may have made this cluster overlap its neighbours
        self.__merge_a_with_others__(a, others=sorted(self.clusters.keys()))
    def grow_cluster_with_closest_nearby_target_kmer(self, target_kmer_set, seqindex):
        minclusts = self.find_cluster_with_closest_nearby_target_kmer(target_kmer_set, seqindex)
        self.grow_cluster(minclusts, seqindex)
    def join_closest_clusters(self):
        """Merge the two clusters that are currently closest together.
        Intended for reducing the cluster count after kmer-capture growth."""
        if self.closest is None:
            self.calculate_cluster_distances()
        a, b = self.closest
        # merge b into a, then remove b and its distance row
        self.clusters[a].merge_with(self.clusters[b], self.seqindex)
        self.clusters.pop(b)
        self.num_clusters -= 1
        self.distances.pop(b)
        # recalculate distances from a only
        cluster_list = self.get_cluster_list()
        i = cluster_list.index(a)
        self.calculate_distances_for(i, cluster_list)
    def calculate_cluster_distances(self):
        ''' for each pair, take the minimum distance between cluster1 start/end
        and cluster2 start/end; resets and recalculates self.distances entirely'''
        cluster_list = self.get_cluster_list()
        self.distances = {}
        for i in range(len(cluster_list)):
            self.distances[cluster_list[i]] = {}
            self.calculate_distances_for(i, cluster_list)
    def calculate_distances_for(self, i, cluster_list):
        a = self.clusters[cluster_list[i]]  # hoisted: invariant over j
        for j in range(i + 1, len(cluster_list)):
            b = self.clusters[cluster_list[j]]
            ij_dist = min(abs(a.get_start() - b.get_start()),
                          abs(a.get_start() - b.get_end()),
                          abs(a.get_end() - b.get_start()),
                          abs(a.get_end() - b.get_end()))
            if ij_dist < self.mindist:
                self.mindist = ij_dist
                # BUG FIX: store cluster KEYS, not positions in cluster_list;
                # join_closest_clusters() indexes self.clusters with these.
                self.closest = (cluster_list[i], cluster_list[j])
            self.distances[cluster_list[i]][cluster_list[j]] = ij_dist
    def get_dist_between(self, i, j):
        if self.distances is None:
            self.calculate_cluster_distances()
        a = min(i, j)
        b = max(i, j)
        return self.distances[a][b]
    def get_subsequences_from_clusters(self, sequence):
        """Slice the cluster intervals out of `sequence`."""
        seqs = []
        for c in self.clusters.keys():
            seqs.append(sequence[self.clusters[c].get_start():self.clusters[c].get_end()])
        return seqs
    def __str__(self):
        msg = ''
        for c in sorted(self.clusters.keys()):
            msg += str(c) + "\t" + self.clusters[c].__str__() + "\n"
        return msg
def seq_clusters(seq, k):
    """Seed clusters at singleton k-mers of `seq` and grow them until the
    cluster set jointly contains every k-mer present in the sequence.

    Prints progress and the final cluster subsequences; returns None.
    """
    ## find position of all kmers
    kpos = defaultdict(list)  # kmer -> list of start positions
    index = defaultdict(str)  # position -> kmer (parent sequence index)
    for i in range(len(seq) - k + 1):
        kpos[seq[i:i + k]].append(i)
        index[i] = seq[i:i + k]
    max_num_uniq_kmers = len(kpos.keys())
    if max_num_uniq_kmers != 4 ** k:
        sys.stderr.write("This sequence does not contain all " + str(4 ** k) + " " + str(k) + "-mers: it has " + str(max_num_uniq_kmers) + " unique " + str(k) + "-mers.\n")
    ## seed cluster spots are kmers represented exactly once
    kset = set([kmer for kmer in kpos if len(kpos[kmer]) == 1])
    kcomp = set(kpos.keys()).difference(kset)  ## multi-copy kmers still needed
    cset = Cluster_Set(target_num_uniq_kmers=max_num_uniq_kmers, k=k, parent_sequence_index=index)
    # BUG FIX: removed dead "kseed=list(kset)[0]" -- it was immediately
    # overwritten by the loop and crashed on sequences with no singleton kmer.
    for kseed in kset:
        assert len(kpos[kseed]) == 1
        cset.add(Cluster(start=kpos[kseed][0], end=kpos[kseed][0] + k, k=k, uniqkmers=set([kseed])))
    cset.merge_overlapping_clusters()
    cset.update_numuniq()
    i = 0
    while cset.get_num_uniq() != cset.get_target_num_kmer():
        i += 1
        # Progress report; restored the every-100th-iteration throttle that
        # the original left commented out ("if i: #%100 == 0:").
        if i % 100 == 0:
            print("Iter: " + str(i))
            print(str(cset.get_num_uniq()) + " " + str(cset.get_target_num_kmer()) + " " + str(cset.get_num_clust()))
            print(cset.get_cluster_list())
            print("")
        # BUG FIX: the original passed an undefined name `kmer` here
        # (NameError). The multi-copy kmers (kcomp, flagged "need these"
        # above) are the targets the clusters must grow toward.
        cset.grow_cluster_with_closest_nearby_target_kmer(kcomp, index)
    print(cset.get_subsequences_from_clusters(seq))
## THERE IS SOME ACCOUNTING THAT IS NOT WORKING.....
def num_uniq_from_2_dict(d1, d2):
    """Count keys whose count exceeds 1 in either of two count dicts.

    BUG FIX: the original indexed BOTH dicts for every key in the union of
    their key sets, raising KeyError for any key present in only one dict;
    use .get(key, 0) instead.
    NOTE(review): the `> 1` threshold looks suspicious for a presence test
    (which would be `> 0`) -- preserved as-is; confirm against callers.
    """
    allkeys = set(d1.keys()) | set(d2.keys())
    total_uniq = 0
    for key in allkeys:
        if d1.get(key, 0) > 1 or d2.get(key, 0) > 1:
            total_uniq += 1
    return total_uniq
## super slow
def window_pair_matrix1(filename, k=5, window=1000, step=1, outprefix="window_pair_matrix_out", verbose=False):
    """Group sliding windows by exact kmer spectrum and report pairs of
    spectra whose union contains all 4**k kmers.

    Writes a summary to <outprefix>.txt. Windows that share an identical
    kmer-count spectrum are grouped under one spectrum number, so only
    distinct spectra are compared pairwise.
    """
    specnum = 0
    windownum = 0
    specdict = {}     # spectrum number -> kmer count dict
    windows = {}      # spectrum number -> list of window numbers
    firstwindow = {}  # first window number of each sequence -> seq name
    sumlen = 0
    def _matching_spectrum(currset):
        # Return the existing spectrum number identical to currset, or None.
        for key in specdict.keys():
            if currset == specdict[key]:
                return key
        return None
    for fa in SeqIO.parse(filename, "fasta"):
        if verbose >= 1:
            sys.stderr.write(fa.name + "\n")
        ## initialize current sequence
        sumlen += len(str(fa.seq))
        windownum += 1
        if verbose >= 2:
            print("Windownum " + str(windownum))
        firstwindow[windownum] = fa.name
        currset = kmer_counts_in_seqs([str(fa.seq)[:window]], k)
        same_set = _matching_spectrum(currset)
        if same_set is not None:
            windows[same_set].append(windownum)
        else:
            specnum += 1
            specdict[specnum] = deepcopy(currset)
            windows[specnum] = [windownum]
        ## slide the window along the current sequence
        for i in range(step, len(str(fa.seq)) - window, step):
            windownum += 1
            if verbose >= 2:
                print("Windownum " + str(windownum))
            # drop the kmer leaving on the left, add the one entering on the right
            currset[str(fa.seq)[i - 1:i - 1 + k]] -= 1
            currset[str(fa.seq)[i + window - k:i + window]] += 1
            same_set = _matching_spectrum(currset)
            if same_set is not None:
                windows[same_set].append(windownum)
            else:
                specnum += 1
                specdict[specnum] = deepcopy(currset)
                windows[specnum] = [windownum]
    ## find pairs of spectrums that jointly cover all 4**k kmers
    if verbose >= 1:
        print("comparing spectrum pairs")
    complete = 4 ** k
    num_complete_pairs = 0
    num_complete_window_pairs = 0
    pairs = []
    allspecs = sorted(specdict.keys())
    usefulspecs = set([])
    for x in range(len(allspecs)):
        # BUG FIX: the original inner loop iterated allspecs[1:], which
        # compared spectra with themselves and re-tested earlier pairs;
        # enumerate each unordered pair (i, j), i < j, exactly once.
        for y in range(x + 1, len(allspecs)):
            i = allspecs[x]
            j = allspecs[y]
            if num_uniq_from_2_dict(specdict[i], specdict[j]) == complete:
                pairs.append((i, j))
                num_complete_window_pairs += len(windows[i]) * len(windows[j])
                num_complete_pairs += 1
                usefulspecs.add(i)
                usefulspecs.add(j)
    ## write out the summary (with-block also fixes the leaked file handle)
    with open(outprefix + ".txt", 'w') as f:
        f.write("K = " + str(k) + "\n")
        f.write("Window size = " + str(window) + "\n")
        f.write("Step size = " + str(step) + "\n")
        f.write("Number sequences searched = " + str(len(firstwindow.keys())) + "\n")
        f.write("Total sequence length searched = " + str(sumlen) + "\n")
        f.write("Total number of windows searched = " + str(windownum) + "\n")
        f.write("Number kmer spectrums found = " + str(specnum) + "\n")
        f.write("Number of distinct kmer spectrum pairs that complete the spectrum = " + str(num_complete_pairs) + "\n")
        # BUG FIX: this line previously reported num_complete_pairs again
        # instead of the window-pair count.
        f.write("Number of window pairs with complete kmer spectrums = " + str(num_complete_window_pairs) + "\n")
        if num_complete_pairs == 0:
            f.write("No window pairing with these parameters gives complete spectrum\n")
        else:
            f.write("Window pairings were found with these parameters that have complete spectrum\n")
            f.write("The following spectrum pairings give complete spectrum\n")
            for pair in pairs:
                f.write(str(pair) + "\n")
            f.write("The following windows belong to specified spectrum number\n")
            for specnum in sorted(list(usefulspecs)):
                f.write(str(specnum) + ": " + str(windows[specnum]) + "\n")
    if verbose:
        sys.stderr.write("Done! \n")
## doesnt really limit search much...
def window_pair_matrix2(filename, k=5, window=1000, step=1, other_threshold=False, outprefix="window_pair_matrix_out", verbose=False, N=1000):
    """Record the unique-kmer count of every sliding window, then report
    window pairs whose summed counts reach a 'complete spectrum' threshold.

    The threshold defaults to 2*4**k - 1; other_threshold > 1 is taken
    verbatim, and a fraction in (0, 1] scales the default. Candidate pairs
    are written to <outprefix>.txt for later verification (the count sum is
    a necessary, not sufficient, condition for a complete union).
    """
    if not other_threshold:
        threshold = 2 * 4 ** k - 1
    elif other_threshold > 1:
        threshold = other_threshold
    elif other_threshold > 0:
        # fractions in (0, 1] scale the default threshold
        threshold = (2 * 4 ** k - 1) * other_threshold
    else:
        # BUG FIX: negative values previously fell through every branch and
        # caused an UnboundLocalError later; fail fast with a clear message.
        raise ValueError("other_threshold must be positive, got %r" % (other_threshold,))
    windownum = 0
    windows = {}      # window number -> number of uniq kmers in that window
    firstwindow = {}  # first window number of each sequence -> seq name
    sumlen = 0
    for fa in SeqIO.parse(filename, "fasta"):
        if verbose >= 1:
            sys.stderr.write(fa.name + "\n")
        ## initialize current sequence
        sumlen += len(str(fa.seq))
        windownum += 1
        if verbose >= 2:
            print("Windownum " + str(windownum))
        firstwindow[windownum] = fa.name
        currset = kmer_counts_in_seqs([str(fa.seq)[:window]], k)
        windows[windownum] = len(currset.keys())
        ## slide the window along the current sequence
        for i in range(step, len(str(fa.seq)) - window, step):
            windownum += 1
            if verbose >= 2 and windownum % N == 0:
                print("Windownum " + str(windownum))
            leaving = str(fa.seq)[i - 1:i - 1 + k]
            currset[str(fa.seq)[i + window - k:i + window]] += 1
            currset[leaving] -= 1
            if currset[leaving] == 0:
                # drop exhausted kmers so len(currset) == number of uniq kmers
                currset.pop(leaving)
            windows[windownum] = len(currset.keys())
    ## some book-keeping
    if verbose >= 1:
        print("comparing spectrum pair counts")
    num_complete_pairs = 0
    pairs = set([])
    ## make numpy array of per-window uniq-kmer counts, ordered by window
    wincounts = np.array([windows[e] for e in sorted(windows.keys())])
    numrow = len(wincounts)
    # BUG FIX: oldcols was referenced before assignment on the first pass.
    oldcols = set([])
    ## find pairs of windows whose summed counts reach the threshold
    for i in range(numrow):
        winsums = wincounts[i] + wincounts
        cols = np.nonzero(np.greater_equal(winsums, threshold).astype(int))[0]
        if verbose >= 2 and i % N == 0:
            print("row i = " + str(i) + " of " + str(numrow))
            print(str(cols) + " " + str(len(cols)) + " " + str(cols.shape))
        for j in cols:
            if j > i and j not in oldcols:  ## limit near-identical adjacent-window hits
                pairs.add((i, j))
                num_complete_pairs += 1
        oldcols = set(cols)
        if verbose >= 2 and i % N == 0:
            print("num complete " + str(num_complete_pairs))
            print("num pairs " + str(len(pairs)))
    ## write out the summary (with-block also closes the file reliably)
    with open(outprefix + ".txt", 'w') as f:
        f.write("K = " + str(k) + "\n")
        f.write("Window size = " + str(window) + "\n")
        f.write("Step size = " + str(step) + "\n")
        f.write("Number sequences searched = " + str(len(firstwindow.keys())) + "\n")
        f.write("Total sequence length searched = " + str(sumlen) + "\n")
        f.write("Total number of windows searched = " + str(windownum) + "\n")
        f.write("Threshold used to be considered 'complete' = " + str(threshold) + "\n")
        f.write("Number of window pairs with complete kmer spectrums = " + str(num_complete_pairs) + "\n")
        if num_complete_pairs == 0:
            f.write("No window pairing with these parameters gives complete spectrum\n")
        else:
            f.write("Window pairings were found with these parameters that have complete spectrum\n")
            f.write("The following window pairings give complete spectrum\n")
            for pair in pairs:
                f.write(str(pair) + "\n")
    if verbose:
        sys.stderr.write("Done! \n")
def run(parser, args):
    """Command-line entry point: dispatch on the parsed argparse namespace.

    Branches (mutually exclusive, checked in order):
      - args.minseq:  placeholder, currently a no-op.
      - args.fasta:   all remaining modes operate on this fasta file, using
        kmer size args.kmersize; sub-modes select the analysis:
        test / hist-plotting / extend / fixed-window searches (with
        combine, union, findNunions, cluster variants) / exhaustive1 /
        exhaustive2.

    Results are printed to stdout (fasta records or reports); the
    exhaustive modes write a summary file instead.
    """
    if args.minseq:
        pass
    elif args.fasta:
        if args.test:
            # Count unique kmers in the file and, for small k, list which of
            # the 4**k possible kmers are missing.
            uniqkmers = uniq_kmer_in_fastx(args.kmersize, args.fasta, fastx="fasta",revcomp=args.revcomp, verbose=True)
            ans = len(uniqkmers)
            print "This file has", ans, "unique kmers."
            print "Compare this to number of all possible:", 4**args.kmersize
            if args.kmersize > 0 and args.kmersize < 7:
                allkmers = set(get_all_kmers(args.kmersize))
                missingkmers = allkmers.difference(uniqkmers)
                if missingkmers:
                    print "The following kmers are missing:"
                    for kmer in missingkmers:
                        print kmer
        elif args.hist or args.plotkmercounts or args.plothist:
            # Kmer count histogram: print and/or plot.
            kmercounts = kmer_counts_in_fastx(args.kmersize,args.fasta, fastx="fasta",revcomp=False, verbose=True)
            if args.hist or args.plothist:
                hist = kmer_hist(kmercounts)
                if args.hist and not args.plothist:
                    print_hist(hist)
                    print
                elif args.plothist:
                    plot_hist(hist)
                print "Number kmers queried:", sum(hist.values())
            if args.plotkmercounts:
                plot_kmer_counts(kmercounts)
        elif args.extend:
            # Emit the extended/repeat-broken minimal sequences as fasta.
            minseqs, minlen, minfastaentry = extend_and_repeat_in_fastx(k=args.kmersize, filename=args.fasta, fastx="fasta", breakin2=args.divide, revcomp=args.revcomp, verbose=False)
            i = 1
            for seq in minseqs:
                print ">"+minfastaentry+"_"+str(i)
                print seq
                i+=1
        elif args.fixed: ##get windows of size w with max number of unique kmers of size k
            max_fas, maxseqs, maxcount = find_window_with_max_uniq_kmers_in_fastx(k=args.kmersize, window=args.fixed, step=args.step, filename=args.fasta, fastx="fasta",revcomp=args.revcomp,verbose=False)
            if args.combine: ## taking set of windows that maximize number of unique kmers (greedy step), see if any 2 windows complement each other -- union option is better for this
                if maxcount != 4**args.kmersize:
                    maxpairs, maxcount = find_pair_of_seqs_with_max_uniq_kmers(maxseqs, args.kmersize)
                # NOTE(review): maxpairs is only assigned in the branch above;
                # if maxcount already equals 4**k the loop below raises
                # NameError -- confirm intended flow.
                maxpairseqs = []
                maxpairfas = []
                pairnum = 1
                for pair in maxpairs:
                    print ">"+max_fas[pair[0]]+"-seq-"+str(pair[0])+"-"+str(maxcount)+"unique-kmers_pairnumber-"+str(pairnum)+"-partner-1"
                    print maxseqs[pair[0]]
                    print ">"+max_fas[pair[1]]+"-seq-"+str(pair[1])+"-"+str(maxcount)+"unique-kmers_pairnumber-"+str(pairnum)+"-partner-2"
                    print maxseqs[pair[1]]
                    pairnum += 1
            elif args.union: ## taking set of windows that maximize number of unique kmers (greedy step), find windows that complement each to give full set of unique kmers
                allkmers = set(get_all_kmers(args.kmersize))
                for i in range(len(maxseqs)):
                    neededkmers = allkmers.difference(uniq_kmers_in_seq(maxseqs[i], k=args.kmersize))
                    fas, seqs, count = find_window_containing_these_kmers_in_fastx(k=args.kmersize, window=args.fixed, step=args.step, filename=args.fasta, kmers=neededkmers, fastx="fasta",revcomp=False,verbose=False,optimizediversity=True)
##                    print neededkmers
##                    print len(neededkmers)
##                    print
                    for j in range(len(seqs)):
                        print ">"+max_fas[i]+"-"+str(maxcount)+"unique-kmers-union-group"+str(i)+"-"+str(j)
                        print maxseqs[i]
                        print ">"+fas[j]+"-seq-"+str(count)+"unique-kmers-union-group"+str(i)+"-"+str(j)
                        print seqs[j]
            elif args.findNunions: ## I don't think this was ever made functional -- John 10/6/2015
                pairseqnames, pairseqs = find_n_pairs_that_have_complete_union_sets(N=args.findNunions, k=args.kmersize, window=args.fixed, filename=args.fasta, fastx="fasta", revcomp=False, verbose=False)
                for i in sorted(pairseqs.keys()):
                    print ">pair-"+str(i)+"-a-"+pairseqnames[i][0]
                    print pairseqs[i][0]
                    print ">pair-"+str(i)+"-b-"+pairseqnames[i][1]
                    print pairseqs[i][1]
            elif args.cluster:
                print "Max number unique kmers in these sequences is:", maxcount
                for seq in maxseqs:
                    # NOTE(review): seq_clusters prints its own output and
                    # appears to return None, so this also prints "None" --
                    # confirm intended.
                    print seq_clusters(seq, args.kmersize)
            else:
                for i in range(len(maxseqs)):
                    print ">"+max_fas[i]+"-seq-"+str(i)+"-"+str(maxcount)+"unique-kmers"
                    print maxseqs[i]
        elif args.exhaustive1:
            window_pair_matrix1(filename=args.fasta, k=args.kmersize, window=args.exhaustive1, step=1, outprefix="window_pair_matrix_out", verbose=args.verbose)
        elif args.exhaustive2:
            window_pair_matrix2(filename=args.fasta, k=args.kmersize, window=args.exhaustive2, step=1, other_threshold=args.threshold, outprefix="window_pair_matrix_out", verbose=args.verbose)
            ## only works with step size 1 for now....
| |
# -*- coding: utf-8 -*-
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# google-cloud-notebooks documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))

# For plugins that can not read conf.py.
# See also: https://github.com/docascode/sphinx-docfx-yaml/issues/85
sys.path.insert(0, os.path.abspath("."))

__version__ = ""

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.5.5"

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.autosummary",
    "sphinx.ext.intersphinx",
    "sphinx.ext.coverage",
    "sphinx.ext.doctest",
    "sphinx.ext.napoleon",
    "sphinx.ext.todo",
    "sphinx.ext.viewcode",
    "recommonmark",
]

# autodoc/autosummary flags
autoclass_content = "both"
autodoc_default_options = {"members": True}
autosummary_generate = True

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]

# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The root toctree document.
root_doc = "index"

# General information about the project.
project = "google-cloud-notebooks"
copyright = "2019, Google"
author = "Google APIs"

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = ".".join(release.split(".")[0:2])

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# FIX: `language = None` is rejected by Sphinx >= 5 ("Invalid configuration
# value found: 'language = None'"); use the explicit default instead.
language = "en"

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
    "_build",
    "**/.nox/**/*",
    "samples/AUTHORING_GUIDE.md",
    "samples/CONTRIBUTING.md",
    "samples/snippets/README.rst",
]

# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    "description": "Google Cloud Client Libraries for google-cloud-notebooks",
    "github_user": "googleapis",
    "github_repo": "python-notebooks",
    "github_banner": True,
    "font_family": "'Roboto', Georgia, sans",
    "head_font_family": "'Roboto', Georgia, serif",
    "code_font_family": "'Roboto Mono', 'Consolas', monospace",
}

# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}

# If false, no module index is generated.
# html_domain_indices = True

# If false, no index is generated.
# html_use_index = True

# If true, the index is split into individual pages for each letter.
# html_split_index = False

# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None

# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'

# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}

# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'

# Output file base name for HTML help builder.
htmlhelp_basename = "google-cloud-notebooks-doc"

# -- Options for warnings ------------------------------------------------------

suppress_warnings = [
    # Temporarily suppress this to avoid "more than one target found for
    # cross-reference" warning, which are intractable for us to avoid while in
    # a mono-repo.
    # See https://github.com/sphinx-doc/sphinx/blob
    # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
    "ref.python"
]

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
    # Latex figure (float) alignment
    #'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (
        root_doc,
        "google-cloud-notebooks.tex",
        "google-cloud-notebooks Documentation",
        author,
        "manual",
    )
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False

# If true, show page references after internal links.
# latex_show_pagerefs = False

# If true, show URL addresses after external links.
# latex_show_urls = False

# Documents to append as an appendix to all manuals.
# latex_appendices = []

# If false, no module index is generated.
# latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (
        root_doc,
        "google-cloud-notebooks",
        "google-cloud-notebooks Documentation",
        [author],
        1,
    )
]

# If true, show URL addresses after external links.
# man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (
        root_doc,
        "google-cloud-notebooks",
        "google-cloud-notebooks Documentation",
        author,
        "google-cloud-notebooks",
        "google-cloud-notebooks Library",
        "APIs",
    )
]

# Documents to append as an appendix to all manuals.
# texinfo_appendices = []

# If false, no module index is generated.
# texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
    "python": ("https://python.readthedocs.org/en/latest/", None),
    "google-auth": ("https://googleapis.dev/python/google-auth/latest/", None),
    "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None,),
    "grpc": ("https://grpc.github.io/grpc/python/", None),
    "proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None),
    "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None),
}

# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
| |
# Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for Oracle's ZFSSA Cinder volume driver."""
from datetime import date
import errno
import json
import math
import mock
from oslo_utils import units
import six
from cinder import context
from cinder import exception
from cinder.image import image_utils
from cinder import test
from cinder.tests.unit import fake_utils
from cinder.tests.unit import utils
from cinder.volume import configuration as conf
from cinder.volume import driver
from cinder.volume.drivers import nfs as nfsdriver
from cinder.volume.drivers import remotefs
from cinder.volume.drivers.zfssa import restclient as client
from cinder.volume.drivers.zfssa import webdavclient
from cinder.volume.drivers.zfssa import zfssaiscsi as iscsi
from cinder.volume.drivers.zfssa import zfssanfs
from cinder.volume.drivers.zfssa import zfssarest as rest
# Share-level defaults used by the NFS driver tests.
nfs_logbias = 'latency'
nfs_compression = 'off'
# Project/directory name used for the driver's local image cache.
zfssa_cache_dir = 'os-cinder-cache'

# Image fixture without a 'virtual_size' key.
no_virtsize_img = {
    'id': 'no_virtsize_img_id1234',
    'size': 654321,
    'updated_at': date(2015, 1, 1),
}

# Image smaller than the 3 GB test volume (clone succeeds).
small_img = {
    'id': '1e5177e7-95e5-4a0f-b170-e45f4b469f6a',
    'size': 654321,
    'virtual_size': 2361393152,
    'updated_at': date(2015, 1, 1),
}

# Image larger than the 3 GB test volume (clone must be refused).
large_img = {
    'id': 'large_id5678',
    'size': 50000000,
    'virtual_size': 11806965760,
    'updated_at': date(2015, 2, 2),
}

# Fake volume-type extra specs passed through to clone/create calls.
fakespecs = {
    'prop1': 'prop1_val',
    'prop2': 'prop2_val',
}

# Cache-volume properties derived from small_img (iSCSI driver tests).
small_img_props = {
    'size': 3,
}

# Cache-volume properties derived from small_img (NFS driver tests).
img_props_nfs = {
    'image_id': small_img['id'],
    'updated_at': small_img['updated_at'].isoformat(),
    'size': 3,
    'name': '%(dir)s/os-cache-vol-%(name)s' % ({'dir': zfssa_cache_dir,
                                                'name': small_img['id']}),
    'id': small_img['id']
}

# Opaque placeholders; the driver only passes these through.
fakecontext = 'fakecontext'
img_service = 'fakeimgservice'
img_location = 'fakeimglocation'
class ImgInfo(object):
    """Minimal stand-in for image metadata exposing only ``virtual_size``."""

    def __init__(self, vsize):
        # The driver code under test reads nothing else from image info.
        self.virtual_size = vsize
class FakeResponse(object):
    """Tiny fake of a REST response carrying a status code and a payload."""

    def __init__(self, statuscode, data='data'):
        self.status = statuscode
        self.data = data
class FakeSSL(object):
    """Fake ``ssl`` module replacement used to bypass certificate checks."""

    def _create_unverified_context(self):
        # Mirrors ssl._create_unverified_context but returns a sentinel.
        return 'fakecontext'
class TestZFSSAISCSIDriver(test.TestCase):
test_vol = {
'name': 'cindervol',
'size': 3,
'id': 1,
'provider_location': 'fake_location 1 2',
'provider_auth': 'fake_auth user pass',
}
test_vol2 = {
'name': 'cindervol2',
'size': 5,
'id': 2,
'provider_location': 'fake_location 3 4',
'provider_auth': 'fake_auth user pass',
}
test_snap = {
'name': 'cindersnap',
'volume_name': test_vol['name']
}
test_vol_snap = {
'name': 'cindersnapvol',
'size': test_vol['size']
}
    def __init__(self, method):
        """Plain pass-through constructor required by the test framework."""
        super(TestZFSSAISCSIDriver, self).__init__(method)
    @mock.patch.object(iscsi, 'factory_zfssa')
    def setUp(self, _factory_zfssa):
        """Build an iSCSI driver wired to a MagicMock ZFSSAApi backend."""
        super(TestZFSSAISCSIDriver, self).setUp()
        self._create_fake_config()
        # Every REST call the driver makes goes to this spec'd mock.
        _factory_zfssa.return_value = mock.MagicMock(spec=rest.ZFSSAApi)
        iscsi.ZFSSAISCSIDriver._execute = fake_utils.fake_execute
        self.drv = iscsi.ZFSSAISCSIDriver(configuration=self.configuration)
        self.drv.do_setup({})
    def _create_fake_config(self):
        """Populate ``self.configuration`` with a complete fake driver config."""
        self.configuration = mock.Mock(spec=conf.Configuration)
        self.configuration.san_ip = '1.1.1.1'
        self.configuration.san_login = 'user'
        self.configuration.san_password = 'passwd'
        self.configuration.zfssa_pool = 'pool'
        self.configuration.zfssa_project = 'project'
        self.configuration.zfssa_lun_volblocksize = '8k'
        self.configuration.zfssa_lun_sparse = 'false'
        self.configuration.zfssa_lun_logbias = 'latency'
        self.configuration.zfssa_lun_compression = 'off'
        self.configuration.zfssa_initiator_group = 'test-init-grp1'
        # Two comma-separated IQNs (old-style initiator configuration).
        self.configuration.zfssa_initiator = \
            'iqn.1-0.org.deb:01:d7, iqn.1-0.org.deb:01:d9'
        self.configuration.zfssa_initiator_user = ''
        self.configuration.zfssa_initiator_password = ''
        # New-style JSON-ish initiator config mapping groups to IQN entries.
        self.configuration.zfssa_initiator_config = "{'test-init-grp1':[{'iqn':\
            'iqn.1-0.org.deb:01:d7','user':'','password':''}],'test-init-grp\
            2':[{'iqn':'iqn.1-0.org.deb:01:d9','user':'','password':''}]}"
        self.configuration.zfssa_target_group = 'test-target-grp1'
        self.configuration.zfssa_target_user = ''
        self.configuration.zfssa_target_password = ''
        self.configuration.zfssa_target_portal = '1.1.1.1:3260'
        self.configuration.zfssa_target_interfaces = 'e1000g0'
        self.configuration.zfssa_rest_timeout = 60
        self.configuration.volume_backend_name = 'fake_zfssa'
        self.configuration.zfssa_enable_local_cache = True
        self.configuration.zfssa_cache_project = zfssa_cache_dir
        # safe_get() falls through to plain attribute access on this mock.
        self.configuration.safe_get = self.fake_safe_get
        self.configuration.zfssa_replication_ip = '1.1.1.1'
        self.configuration.zfssa_manage_policy = 'loose'
    def _util_migrate_volume_exceptions(self):
        """Prime source/target ZFSSA mocks so migrate_volume() can succeed."""
        self.drv.zfssa.get_lun.return_value = (
            {'targetgroup': 'test-target-grp1'})
        # Source and target appliances report the same ASN, and the target's
        # replication address matches the configured replication ip.
        self.drv.zfssa.get_asn.return_value = (
            '9a2b5a0f-e3af-6d14-9578-8825f229dc89')
        self.drv.tgt_zfssa.get_asn.return_value = (
            '9a2b5a0f-e3af-6d14-9578-8825f229dc89')
        targets = {'targets': [{'hostname': '2.2.2.2',
                                'address': '2.2.2.2:216',
                                'label': '2.2.2.2',
                                'asn':
                                '9a2b5a0f-e3af-6d14-9578-8825f229dc89'}]}
        self.drv.zfssa.get_replication_targets.return_value = targets
        self.drv.zfssa.edit_inherit_replication_flag.return_value = {}
        self.drv.zfssa.create_replication_action.return_value = 'action-123'
        self.drv.zfssa.send_repl_update.return_value = True
    @mock.patch.object(iscsi.LOG, 'warning')
    @mock.patch.object(iscsi.LOG, 'error')
    @mock.patch.object(iscsi, 'factory_zfssa')
    def test_parse_initiator_config(self, _factory_zfssa, elog, wlog):
        """Test the parsing of the old style initiator config variables."""
        lcfg = self.configuration
        with mock.patch.object(lcfg, 'zfssa_initiator_config', ''):
            # Test empty zfssa_initiator_group
            with mock.patch.object(lcfg, 'zfssa_initiator_group', ''):
                self.assertRaises(exception.InvalidConfigurationValue,
                                  self.drv.do_setup, {})
            # Test empty zfssa_initiator with zfssa_initiator_group set to
            # a value other than "default"
            with mock.patch.object(lcfg, 'zfssa_initiator', ''):
                self.assertRaises(exception.InvalidConfigurationValue,
                                  self.drv.do_setup, {})
            # Test zfssa_initiator_group set to 'default' with non-empty
            # zfssa_initiator: setup succeeds but logs a warning.
            with mock.patch.object(lcfg, 'zfssa_initiator_group', 'default'):
                self.drv.do_setup({})
                wlog.assert_called_with(mock.ANY,
                                        {'inigrp': lcfg.zfssa_initiator_group,
                                         'ini': lcfg.zfssa_initiator})
def test_migrate_volume(self):
self._util_migrate_volume_exceptions()
volume = self.test_vol
volume.update({'host': 'fake_host',
'status': 'available',
'name': 'vol-1',
'source_volid': self.test_vol['id']})
loc_info = '2.2.2.2:fake_auth:pool2:project2:test-target-grp1:2.2.2.2'
host = {'host': 'stack@zfssa_iscsi#fake_zfssa',
'capabilities': {'vendor_name': 'Oracle',
'storage_protocol': 'iSCSI',
'location_info': loc_info}}
ctxt = context.get_admin_context()
# Test the normal case
result = self.drv.migrate_volume(ctxt, volume, host)
self.assertEqual((True, None), result)
# Test when volume status is not available
volume['status'] = 'in-use'
result = self.drv.migrate_volume(ctxt, volume, host)
self.assertEqual((False, None), result)
volume['status'] = 'available'
# Test when vendor is not Oracle
host['capabilities']['vendor_name'] = 'elcarO'
result = self.drv.migrate_volume(ctxt, volume, host)
self.assertEqual((False, None), result)
host['capabilities']['vendor_name'] = 'Oracle'
# Test when storage protocol is not iSCSI
host['capabilities']['storage_protocol'] = 'not_iSCSI'
result = self.drv.migrate_volume(ctxt, volume, host)
self.assertEqual((False, None), result)
host['capabilities']['storage_protocol'] = 'iSCSI'
# Test when location_info is incorrect
host['capabilities']['location_info'] = ''
self.assertEqual((False, None), result)
host['capabilities']['location_info'] = loc_info
# Test if replication ip and replication target's address dont match
invalid_loc_info = (
'2.2.2.2:fake_auth:pool2:project2:test-target-grp1:9.9.9.9')
host['capabilities']['location_info'] = invalid_loc_info
result = self.drv.migrate_volume(ctxt, volume, host)
self.assertEqual((False, None), result)
host['capabilities']['location_info'] = loc_info
# Test if no targets are returned
self.drv.zfssa.get_replication_targets.return_value = {'targets': []}
result = self.drv.migrate_volume(ctxt, volume, host)
self.assertEqual((False, None), result)
    def test_migrate_volume_uninherit_exception(self):
        """A failure to clear the inherit-replication flag must propagate."""
        self._util_migrate_volume_exceptions()
        volume = self.test_vol
        volume.update({'host': 'fake_host',
                       'status': 'available',
                       'name': 'vol-1',
                       'source_volid': self.test_vol['id']})
        loc_info = '2.2.2.2:fake_auth:pool2:project2:test-target-grp1:2.2.2.2'
        host = {'host': 'stack@zfssa_iscsi#fake_zfssa',
                'capabilities': {'vendor_name': 'Oracle',
                                 'storage_protocol': 'iSCSI',
                                 'location_info': loc_info}}
        ctxt = context.get_admin_context()
        self.drv.zfssa.edit_inherit_replication_flag.side_effect = (
            exception.VolumeBackendAPIException(data='uniherit ex'))
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.drv.migrate_volume, ctxt, volume, host)
    def test_migrate_volume_create_action_exception(self):
        """A failure to create the replication action must propagate."""
        self._util_migrate_volume_exceptions()
        volume = self.test_vol
        volume.update({'host': 'fake_host',
                       'status': 'available',
                       'name': 'vol-1',
                       'source_volid': self.test_vol['id']})
        loc_info = '2.2.2.2:fake_auth:pool2:project2:test-target-grp1:2.2.2.2'
        host = {'host': 'stack@zfssa_iscsi#fake_zfssa',
                'capabilities': {'vendor_name': 'Oracle',
                                 'storage_protocol': 'iSCSI',
                                 'location_info': loc_info}}
        ctxt = context.get_admin_context()
        self.drv.zfssa.create_replication_action.side_effect = (
            exception.VolumeBackendAPIException(data=
                                                'failed to create action'))
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.drv.migrate_volume, ctxt, volume, host)
    def test_migrate_volume_send_update_exception(self):
        """A failure to send the replication update must propagate."""
        self._util_migrate_volume_exceptions()
        volume = self.test_vol
        volume.update({'host': 'fake_host',
                       'status': 'available',
                       'name': 'vol-1',
                       'source_volid': self.test_vol['id']})
        loc_info = '2.2.2.2:fake_auth:pool2:project2:test-target-grp1:2.2.2.2'
        host = {'host': 'stack@zfssa_iscsi#fake_zfssa',
                'capabilities': {'vendor_name': 'Oracle',
                                 'storage_protocol': 'iSCSI',
                                 'location_info': loc_info}}
        ctxt = context.get_admin_context()
        self.drv.zfssa.send_repl_update.side_effect = (
            exception.VolumeBackendAPIException(data='failed to send update'))
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.drv.migrate_volume, ctxt, volume, host)
    def test_migrate_volume_sever_repl_exception(self):
        """A failure to sever replication on the target must propagate."""
        self._util_migrate_volume_exceptions()
        volume = self.test_vol
        volume.update({'host': 'fake_host',
                       'status': 'available',
                       'name': 'vol-1',
                       'source_volid': self.test_vol['id']})
        loc_info = '2.2.2.2:fake_auth:pool2:project2:test-target-grp1:2.2.2.2'
        host = {'host': 'stack@zfssa_iscsi#fake_zfssa',
                'capabilities': {'vendor_name': 'Oracle',
                                 'storage_protocol': 'iSCSI',
                                 'location_info': loc_info}}
        ctxt = context.get_admin_context()
        self.drv.tgt_zfssa.sever_replication.side_effect = (
            exception.VolumeBackendAPIException(data=
                                                'failed to sever replication'))
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.drv.migrate_volume, ctxt, volume, host)
    def test_create_delete_volume(self):
        """create_volume()/delete_volume() issue the expected REST calls."""
        self.drv.zfssa.get_lun.return_value = {'guid':
                                               '00000000000000000000000000000',
                                               'number': 0,
                                               'initiatorgroup': 'default',
                                               'size': 1,
                                               'nodestroy': False}
        lcfg = self.configuration
        self.drv.create_volume(self.test_vol)
        # LUN size is expressed in gigabytes ('<n>g').
        self.drv.zfssa.create_lun.assert_called_once_with(
            lcfg.zfssa_pool,
            lcfg.zfssa_project,
            self.test_vol['name'],
            six.text_type(self.test_vol['size']) + 'g',
            lcfg.zfssa_target_group,
            mock.ANY)
        self.drv.delete_volume(self.test_vol)
        self.drv.zfssa.get_lun.assert_called_once_with(lcfg.zfssa_pool,
                                                       lcfg.zfssa_project,
                                                       self.test_vol['name'])
        self.drv.zfssa.delete_lun.assert_called_once_with(
            pool=lcfg.zfssa_pool,
            project=lcfg.zfssa_project,
            lun=self.test_vol['name'])
    def test_delete_volume_with_missing_lun(self):
        """Deleting an already-gone LUN is a no-op, not an error."""
        self.drv.zfssa.get_lun.side_effect = exception.VolumeNotFound(
            volume_id=self.test_vol['name'])
        self.drv.delete_volume(self.test_vol)
        self.drv.zfssa.delete_lun.assert_not_called()
    def test_delete_volume_backend_fail(self):
        """Backend API errors during delete_volume() must propagate."""
        self.drv.zfssa.get_lun.side_effect = \
            exception.VolumeBackendAPIException(data='fakemsg')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.drv.delete_volume,
                          self.test_vol)
    @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_check_origin')
    def test_delete_cache_volume(self, _check_origin):
        """Deleting a cache-cloned volume triggers an origin check."""
        lcfg = self.configuration
        lun2del = {
            'guid': '00000000000000000000000000000',
            'number': 0,
            'initiatorgroup': 'default',
            'size': 1,
            'nodestroy': False,
            # 'origin' marks the LUN as a clone of a cache volume snapshot.
            'origin': {
                'project': lcfg.zfssa_cache_project,
                'snapshot': 'image-%s' % small_img['id'],
                'share': 'os-cache-vol-%s' % small_img['id'],
            }
        }
        self.drv.zfssa.get_lun.return_value = lun2del
        self.drv.delete_volume(self.test_vol)
        self.drv._check_origin.assert_called_once_with(lun2del,
                                                       self.test_vol['name'])
    def test_check_origin(self):
        """_check_origin() deletes the cache LUN once its last clone is gone."""
        lcfg = self.configuration
        lun2del = {
            'guid': '00000000000000000000000000000',
            'number': 0,
            'initiatorgroup': 'default',
            'size': 1,
            'nodestroy': False,
            'origin': {
                'project': lcfg.zfssa_cache_project,
                'snapshot': 'image-%s' % small_img['id'],
                'share': 'os-cache-vol-%s' % small_img['id'],
            }
        }
        cache = lun2del['origin']
        # Zero remaining clones -> the cache share itself is removed.
        self.drv.zfssa.num_clones.return_value = 0
        self.drv._check_origin(lun2del, 'volname')
        self.drv.zfssa.delete_lun.assert_called_once_with(
            lcfg.zfssa_pool,
            lcfg.zfssa_cache_project,
            cache['share'])
    def test_create_delete_snapshot(self):
        """create_snapshot()/delete_snapshot() issue the expected REST calls."""
        # No clones reference the snapshot, so deletion is allowed.
        self.drv.zfssa.num_clones.return_value = 0
        lcfg = self.configuration
        self.drv.create_snapshot(self.test_snap)
        self.drv.zfssa.create_snapshot.assert_called_once_with(
            lcfg.zfssa_pool,
            lcfg.zfssa_project,
            self.test_snap['volume_name'],
            self.test_snap['name'])
        self.drv.delete_snapshot(self.test_snap)
        self.drv.zfssa.delete_snapshot.assert_called_once_with(
            lcfg.zfssa_pool,
            lcfg.zfssa_project,
            self.test_snap['volume_name'],
            self.test_snap['name'])
    def test_create_volume_from_snapshot(self):
        """Creating a volume from a snapshot clones it with managed specs."""
        lcfg = self.configuration
        self.drv.zfssa.get_lun.return_value = self.test_vol
        self.drv.create_snapshot(self.test_snap)
        self.drv.zfssa.create_snapshot.assert_called_once_with(
            lcfg.zfssa_pool,
            lcfg.zfssa_project,
            self.test_snap['volume_name'],
            self.test_snap['name'])
        self.drv.create_volume_from_snapshot(self.test_vol_snap,
                                             self.test_snap)
        # The clone carries the volume-type specs plus the managed marker.
        specs = self.drv._get_voltype_specs(self.test_vol)
        specs.update({'custom:cinder_managed': True})
        self.drv.zfssa.clone_snapshot.assert_called_once_with(
            lcfg.zfssa_pool,
            lcfg.zfssa_project,
            self.test_snap['volume_name'],
            self.test_snap['name'],
            lcfg.zfssa_project,
            self.test_vol_snap['name'],
            specs)
    def test_create_larger_volume_from_snapshot(self):
        """Cloning into a larger volume still goes through clone_snapshot."""
        lcfg = self.configuration
        self.drv.zfssa.get_lun.return_value = self.test_vol
        self.drv.create_snapshot(self.test_snap)
        self.drv.zfssa.create_snapshot.assert_called_once_with(
            lcfg.zfssa_pool,
            lcfg.zfssa_project,
            self.test_snap['volume_name'],
            self.test_snap['name'])
        # use the larger test volume
        self.drv.create_volume_from_snapshot(self.test_vol2,
                                             self.test_snap)
        specs = self.drv._get_voltype_specs(self.test_vol)
        specs.update({'custom:cinder_managed': True})
        self.drv.zfssa.clone_snapshot.assert_called_once_with(
            lcfg.zfssa_pool,
            lcfg.zfssa_project,
            self.test_snap['volume_name'],
            self.test_snap['name'],
            lcfg.zfssa_project,
            self.test_vol2['name'],
            specs)
    @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_get_provider_info')
    def test_volume_attach_detach(self, _get_provider_info):
        """initialize/terminate_connection manage the LUN's initiator groups."""
        lcfg = self.configuration
        test_target_iqn = 'iqn.1986-03.com.sun:02:00000-aaaa-bbbb-cccc-ddddd'
        self.drv._get_provider_info.return_value = {
            'provider_location': '%s %s' % (lcfg.zfssa_target_portal,
                                            test_target_iqn)
        }

        # Map each known IQN to its initiator group, as the appliance would.
        def side_effect_get_initiator_initiatorgroup(arg):
            return [{
                'iqn.1-0.org.deb:01:d7': 'test-init-grp1',
                'iqn.1-0.org.deb:01:d9': 'test-init-grp2',
            }[arg]]

        self.drv.zfssa.get_initiator_initiatorgroup.side_effect = (
            side_effect_get_initiator_initiatorgroup)
        initiator = 'iqn.1-0.org.deb:01:d7'
        initiator_group = 'test-init-grp1'
        lu_number = '246'
        # get_lun is consulted once before and twice after the mapping.
        self.drv.zfssa.get_lun.side_effect = iter([
            {'initiatorgroup': [], 'number': []},
            {'initiatorgroup': [initiator_group], 'number': [lu_number]},
            {'initiatorgroup': [initiator_group], 'number': [lu_number]},
        ])
        connector = dict(initiator=initiator)
        props = self.drv.initialize_connection(self.test_vol, connector)
        self.drv._get_provider_info.assert_called_once_with()
        self.assertEqual('iscsi', props['driver_volume_type'])
        self.assertEqual(self.test_vol['id'], props['data']['volume_id'])
        self.assertEqual(lcfg.zfssa_target_portal,
                         props['data']['target_portal'])
        self.assertEqual(test_target_iqn, props['data']['target_iqn'])
        # target_lun must come back as an int, not the string from the mock.
        self.assertEqual(int(lu_number), props['data']['target_lun'])
        self.assertFalse(props['data']['target_discovered'])
        self.drv.zfssa.set_lun_initiatorgroup.assert_called_with(
            lcfg.zfssa_pool,
            lcfg.zfssa_project,
            self.test_vol['name'],
            [initiator_group])
        self.drv.terminate_connection(self.test_vol, connector)
        # Detach leaves the LUN presented to no initiator groups.
        self.drv.zfssa.set_lun_initiatorgroup.assert_called_with(
            lcfg.zfssa_pool,
            lcfg.zfssa_project,
            self.test_vol['name'],
            [])
    @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_get_provider_info')
    def test_volume_attach_detach_live_migration(self, _get_provider_info):
        """During live migration the LUN is presented to both nodes at once."""
        lcfg = self.configuration
        test_target_iqn = 'iqn.1986-03.com.sun:02:00000-aaaa-bbbb-cccc-ddddd'
        self.drv._get_provider_info.return_value = {
            'provider_location': '%s %s' % (lcfg.zfssa_target_portal,
                                            test_target_iqn)
        }

        def side_effect_get_initiator_initiatorgroup(arg):
            return [{
                'iqn.1-0.org.deb:01:d7': 'test-init-grp1',
                'iqn.1-0.org.deb:01:d9': 'test-init-grp2',
            }[arg]]

        self.drv.zfssa.get_initiator_initiatorgroup.side_effect = (
            side_effect_get_initiator_initiatorgroup)
        src_initiator = 'iqn.1-0.org.deb:01:d7'
        src_initiator_group = 'test-init-grp1'
        src_connector = dict(initiator=src_initiator)
        src_lu_number = '123'
        dst_initiator = 'iqn.1-0.org.deb:01:d9'
        dst_initiator_group = 'test-init-grp2'
        dst_connector = dict(initiator=dst_initiator)
        dst_lu_number = '456'
        # In the beginning, the LUN is already presented to the source
        # node. During initialize_connection(), and at the beginning of
        # terminate_connection(), it's presented to both nodes.
        self.drv.zfssa.get_lun.side_effect = iter([
            {'initiatorgroup': [src_initiator_group],
             'number': [src_lu_number]},
            {'initiatorgroup': [dst_initiator_group, src_initiator_group],
             'number': [dst_lu_number, src_lu_number]},
            {'initiatorgroup': [dst_initiator_group, src_initiator_group],
             'number': [dst_lu_number, src_lu_number]},
        ])
        # Before migration, the volume gets connected to the destination
        # node (whilst still connected to the source node), so it should
        # be presented to the initiator groups for both
        props = self.drv.initialize_connection(self.test_vol, dst_connector)
        self.drv.zfssa.set_lun_initiatorgroup.assert_called_with(
            lcfg.zfssa_pool,
            lcfg.zfssa_project,
            self.test_vol['name'],
            [src_initiator_group, dst_initiator_group])
        # LU number must be an int -
        # https://bugs.launchpad.net/cinder/+bug/1538582
        # and must be the LU number for the destination node's
        # initiatorgroup (where the connection was just initialized)
        self.assertEqual(int(dst_lu_number), props['data']['target_lun'])
        # After migration, the volume gets detached from the source node
        # so it should be present to only the destination node
        self.drv.terminate_connection(self.test_vol, src_connector)
        self.drv.zfssa.set_lun_initiatorgroup.assert_called_with(
            lcfg.zfssa_pool,
            lcfg.zfssa_project,
            self.test_vol['name'],
            [dst_initiator_group])
    def test_volume_attach_detach_negative(self):
        """An unknown initiator makes initialize_connection() fail."""
        self.drv.zfssa.get_initiator_initiatorgroup.return_value = []
        connector = dict(initiator='iqn.1-0.org.deb:01:d7')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.drv.initialize_connection,
                          self.test_vol,
                          connector)
    def test_get_volume_stats(self):
        """get_volume_stats() converts backend stats into the cinder schema."""
        # (available, total) in bytes -> reported in whole GiB below.
        self.drv.zfssa.get_project_stats.return_value = 2 * units.Gi,\
            3 * units.Gi
        self.drv.zfssa.get_pool_details.return_value = \
            {"profile": "mirror:log_stripe"}
        lcfg = self.configuration
        stats = self.drv.get_volume_stats(refresh=True)
        self.drv.zfssa.get_project_stats.assert_called_once_with(
            lcfg.zfssa_pool,
            lcfg.zfssa_project)
        self.drv.zfssa.get_pool_details.assert_called_once_with(
            lcfg.zfssa_pool)
        self.assertEqual('Oracle', stats['vendor_name'])
        self.assertEqual(self.configuration.volume_backend_name,
                         stats['volume_backend_name'])
        self.assertEqual(self.drv.VERSION, stats['driver_version'])
        self.assertEqual(self.drv.protocol, stats['storage_protocol'])
        self.assertEqual(0, stats['reserved_percentage'])
        self.assertFalse(stats['QoS_support'])
        self.assertEqual(3, stats['total_capacity_gb'])
        self.assertEqual(2, stats['free_capacity_gb'])
        self.assertEqual('mirror:log_stripe', stats['zfssa_poolprofile'])
        self.assertEqual('8k', stats['zfssa_volblocksize'])
        self.assertEqual('false', stats['zfssa_sparse'])
        self.assertEqual('off', stats['zfssa_compression'])
        self.assertEqual('latency', stats['zfssa_logbias'])
        # A different pool profile must be reflected on refresh.
        self.drv.zfssa.get_pool_details.return_value = {"profile": "raidz2"}
        stats = self.drv.get_volume_stats(refresh=True)
        self.assertEqual('raidz2', stats['zfssa_poolprofile'])
def test_extend_volume(self):
lcfg = self.configuration
self.drv.extend_volume(self.test_vol, 3)
self.drv.zfssa.set_lun_props.assert_called_once_with(
lcfg.zfssa_pool,
lcfg.zfssa_project,
self.test_vol['name'],
volsize= 3 * units.Gi)
    @mock.patch('cinder.volume.volume_types.get_volume_type_extra_specs')
    def test_get_voltype_specs(self, get_volume_type_extra_specs):
        """Extra specs override config defaults; missing ones fall back."""
        volume_type_id = mock.sentinel.volume_type_id
        volume = {'volume_type_id': volume_type_id}
        get_volume_type_extra_specs.return_value = {
            'zfssa:volblocksize': '128k',
            'zfssa:compression': 'gzip'
        }
        ret = self.drv._get_voltype_specs(volume)
        self.assertEqual('128k', ret.get('volblocksize'))
        self.assertEqual(self.configuration.zfssa_lun_sparse,
                         ret.get('sparse'))
        self.assertEqual('gzip', ret.get('compression'))
        self.assertEqual(self.configuration.zfssa_lun_logbias,
                         ret.get('logbias'))
def fake_safe_get(self, value):
try:
val = getattr(self.configuration, value)
except AttributeError:
val = None
return val
    @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_verify_cache_volume')
    def test_clone_image_negative(self, _verify_cache_volume):
        """clone_image() returns (None, False) whenever cloning is refused."""
        # Disabling local cache feature:
        self.configuration.zfssa_enable_local_cache = False
        self.assertEqual((None, False),
                         self.drv.clone_image(fakecontext, self.test_vol,
                                              img_location,
                                              small_img,
                                              img_service))
        self.configuration.zfssa_enable_local_cache = True
        # Creating a volume smaller than image:
        self.assertEqual((None, False),
                         self.drv.clone_image(fakecontext, self.test_vol,
                                              img_location,
                                              large_img,
                                              img_service))
        # Creating a volume equal as image:
        eq_img = large_img.copy()
        eq_img['virtual_size'] = self.test_vol['size'] * units.Gi
        self.assertEqual((None, False),
                         self.drv.clone_image(fakecontext, self.test_vol,
                                              img_location,
                                              eq_img,
                                              img_service))
        # Exception raised in _verify_cache_image
        self.drv._verify_cache_volume.side_effect = (
            exception.VolumeBackendAPIException('fakeerror'))
        self.assertEqual((None, False),
                         self.drv.clone_image(fakecontext, self.test_vol,
                                              img_location,
                                              small_img,
                                              img_service))
    @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_get_voltype_specs')
    @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_verify_cache_volume')
    @mock.patch.object(iscsi.ZFSSAISCSIDriver, 'extend_volume')
    def test_clone_image(self, _extend_vol, _verify_cache, _get_specs):
        """clone_image() clones the cache snapshot and extends the volume."""
        lcfg = self.configuration
        cache_vol = 'volume-os-cache-vol-%s' % small_img['id']
        cache_snap = 'image-%s' % small_img['id']
        self.drv._get_voltype_specs.return_value = fakespecs.copy()
        self.drv._verify_cache_volume.return_value = cache_vol, cache_snap
        model, cloned = self.drv.clone_image(fakecontext, self.test_vol2,
                                             img_location,
                                             small_img,
                                             img_service)
        # NOTE(review): `specs = fakespecs` aliases the module-level dict, so
        # the update() below mutates fakespecs for every later test.  The
        # subsequent _verify_cache_volume assertion appears to rely on that
        # mutation, so it is left as-is — confirm against the driver before
        # changing to fakespecs.copy().
        specs = fakespecs
        specs.update({'custom:cinder_managed': True})
        self.drv._verify_cache_volume.assert_called_once_with(fakecontext,
                                                              small_img,
                                                              img_service,
                                                              fakespecs,
                                                              small_img_props)
        self.drv.zfssa.clone_snapshot.assert_called_once_with(
            lcfg.zfssa_pool,
            lcfg.zfssa_cache_project,
            cache_vol,
            cache_snap,
            lcfg.zfssa_project,
            self.test_vol2['name'],
            specs)
        self.drv.extend_volume.assert_called_once_with(self.test_vol2,
                                                       self.test_vol2['size'])
    @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_create_cache_volume')
    def test_verify_cache_vol_no_cache_vol(self, _create_cache_vol):
        """A missing cache LUN causes the cache volume to be (re)created."""
        vol_name = 'os-cache-vol-%s' % small_img['id']
        self.drv.zfssa.get_lun.side_effect = exception.VolumeNotFound(
            volume_id=vol_name)
        self.drv._verify_cache_volume(fakecontext, small_img,
                                      img_service, fakespecs, small_img_props)
        self.drv._create_cache_volume.assert_called_once_with(fakecontext,
                                                              small_img,
                                                              img_service,
                                                              fakespecs,
                                                              small_img_props)
    def test_verify_cache_vol_no_cache_snap(self):
        """A cache LUN without its image snapshot is a backend error."""
        snap_name = 'image-%s' % small_img['id']
        self.drv.zfssa.get_lun_snapshot.side_effect = (
            exception.SnapshotNotFound(snapshot_id=snap_name))
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.drv._verify_cache_volume,
                          fakecontext,
                          small_img,
                          img_service,
                          fakespecs,
                          small_img_props)
    def test_verify_cache_vol_stale_vol(self):
        """A stale cache volume that still has clones cannot be replaced."""
        self.drv.zfssa.get_lun_snapshot.return_value = {'numclones': 5}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.drv._verify_cache_volume,
                          fakecontext,
                          small_img,
                          img_service,
                          fakespecs,
                          small_img_props)
    @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_create_cache_volume')
    def test_verify_cache_vol_updated_vol(self, _create_cache_vol):
        """An out-of-date, clone-free cache volume is deleted and rebuilt."""
        lcfg = self.configuration
        updated_vol = {
            'updated_at': date(3000, 12, 12),
            'image_id': 'updated_id',
        }
        cachevol_name = 'os-cache-vol-%s' % small_img['id']
        self.drv.zfssa.get_lun.return_value = updated_vol
        # numclones == 0 makes the stale cache volume safe to delete.
        self.drv.zfssa.get_lun_snapshot.return_value = {'numclones': 0}
        self.drv._verify_cache_volume(fakecontext, small_img,
                                      img_service, fakespecs, small_img_props)
        self.drv.zfssa.delete_lun.assert_called_once_with(
            lcfg.zfssa_pool,
            lcfg.zfssa_cache_project,
            cachevol_name)
        self.drv._create_cache_volume.assert_called_once_with(fakecontext,
                                                              small_img,
                                                              img_service,
                                                              fakespecs,
                                                              small_img_props)
    @mock.patch.object(driver.BaseVD, 'copy_image_to_volume')
    def test_create_cache_volume(self, _copy_image):
        """_create_cache_volume() creates, fills and snapshots the cache LUN."""
        lcfg = self.configuration
        virtual_size = int(small_img['virtual_size'])
        # The LUN is sized up to the next whole gigabyte.
        volsize = math.ceil(float(virtual_size) / units.Gi)
        lunsize = "%sg" % six.text_type(int(volsize))
        volname = 'os-cache-vol-%s' % small_img['id']
        snapname = 'image-%s' % small_img['id']
        cachevol_props = {
            'cache_name': volname,
            'snap_name': snapname,
        }
        cachevol_props.update(small_img_props)
        cache_vol = {
            'name': volname,
            'id': small_img['id'],
            'size': volsize,
        }
        # Image identity is stamped on the LUN as custom properties.
        lun_props = {
            'custom:image_id': small_img['id'],
            'custom:updated_at': (
                six.text_type(small_img['updated_at'].isoformat())),
        }
        lun_props.update(fakespecs)
        self.drv._create_cache_volume(fakecontext,
                                      small_img,
                                      img_service,
                                      fakespecs,
                                      cachevol_props)
        self.drv.zfssa.create_lun.assert_called_once_with(
            lcfg.zfssa_pool,
            lcfg.zfssa_cache_project,
            cache_vol['name'],
            lunsize,
            lcfg.zfssa_target_group,
            lun_props)
        _copy_image.assert_called_once_with(fakecontext,
                                            cache_vol,
                                            img_service,
                                            small_img['id'])
        self.drv.zfssa.create_snapshot.assert_called_once_with(
            lcfg.zfssa_pool,
            lcfg.zfssa_cache_project,
            cache_vol['name'],
            snapname)
    def test_create_cache_vol_negative(self):
        """On failure, _create_cache_volume() cleans up the partial LUN."""
        lcfg = self.configuration
        volname = 'os-cache-vol-%s' % small_img['id']
        snapname = 'image-%s' % small_img['id']
        cachevol_props = {
            'cache_name': volname,
            'snap_name': snapname,
        }
        # NOTE(review): this updates with the full image dict (small_img),
        # unlike the other cache tests which use small_img_props — confirm
        # this is intentional.
        cachevol_props.update(small_img)
        self.drv.zfssa.get_lun.side_effect = exception.VolumeNotFound(
            volume_id=volname)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.drv._create_cache_volume,
                          fakecontext,
                          small_img,
                          img_service,
                          fakespecs,
                          cachevol_props)
        self.drv.zfssa.delete_lun.assert_called_once_with(
            lcfg.zfssa_pool,
            lcfg.zfssa_cache_project,
            volname)
@mock.patch.object(iscsi.ZFSSAISCSIDriver, '_get_existing_vol')
@mock.patch.object(iscsi.ZFSSAISCSIDriver, '_verify_volume_to_manage')
def test_volume_manage(self, _get_existing_vol, _verify_volume_to_manage):
lcfg = self.configuration
lcfg.zfssa_manage_policy = 'loose'
test_vol = self.test_vol
self.drv._get_existing_vol.return_value = test_vol
self.drv._verify_volume_to_manage.return_value = None
self.drv.zfssa.set_lun_props.return_value = True
self.assertIsNone(self.drv.manage_existing({'name': 'volume-123'},
{'source-name':
'volume-567'}))
self.drv._get_existing_vol.assert_called_once_with({'source-name':
'volume-567'})
self.drv._verify_volume_to_manage.assert_called_once_with(test_vol)
self.drv.zfssa.set_lun_props.assert_called_once_with(
lcfg.zfssa_pool,
lcfg.zfssa_project,
test_vol['name'],
name='volume-123',
schema={"custom:cinder_managed": True})
# Case when zfssa_manage_policy is 'loose' and 'cinder_managed' is
# set to true.
test_vol.update({'cinder_managed': False})
self.assertIsNone(self.drv.manage_existing({'name': 'volume-123'},
{'source-name':
'volume-567'}))
# Another case is when the zfssa_manage_policy is set to 'strict'
lcfg.zfssa_manage_policy = 'strict'
test_vol.update({'cinder_managed': False})
self.assertIsNone(self.drv.manage_existing({'name': 'volume-123'},
{'source-name':
'volume-567'}))
    def test_volume_manage_negative(self):
        """manage_existing() rejects unmanageable or badly-referenced LUNs."""
        lcfg = self.configuration
        lcfg.zfssa_manage_policy = 'strict'
        test_vol = self.test_vol
        # Strict policy requires the cinder_managed property to exist.
        if 'cinder_managed' in test_vol:
            del test_vol['cinder_managed']
        self.drv.zfssa.get_lun.return_value = test_vol
        self.assertRaises(exception.InvalidInput,
                          self.drv.manage_existing, {'name': 'cindervol'},
                          {'source-name': 'volume-567'})
        # An already-managed LUN cannot be managed twice.
        test_vol.update({'cinder_managed': True})
        self.drv.zfssa.get_lun.return_value = test_vol
        self.assertRaises(exception.ManageExistingAlreadyManaged,
                          self.drv.manage_existing, {'name': 'cindervol'},
                          {'source-name': 'volume-567'})
        # Only 'source-name' references are accepted, under either policy.
        test_vol.update({'cinder_managed': False})
        self.drv.zfssa.get_lun.return_value = test_vol
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.drv.manage_existing, {'name': 'cindervol'},
                          {'source-id': 'volume-567'})
        lcfg.zfssa_manage_policy = 'loose'
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.drv.manage_existing, {'name': 'cindervol'},
                          {'source-id': 'volume-567'})
    @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_verify_volume_to_manage')
    def test_volume_manage_negative_api_exception(self,
                                                  _verify_volume_to_manage):
        """Backend errors while renaming the LUN must propagate."""
        lcfg = self.configuration
        lcfg.zfssa_manage_policy = 'loose'
        self.drv.zfssa.get_lun.return_value = self.test_vol
        self.drv._verify_volume_to_manage.return_value = None
        self.drv.zfssa.set_lun_props.side_effect = \
            exception.VolumeBackendAPIException(data='fake exception')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.drv.manage_existing, {'name': 'volume-123'},
                          {'source-name': 'volume-567'})
    def test_volume_unmanage(self):
        """unmanage() renames the LUN and clears the cinder-managed flag."""
        lcfg = self.configuration
        self.drv.zfssa.set_lun_props.return_value = True
        self.assertIsNone(self.drv.unmanage({'name': 'volume-123'}))
        self.drv.zfssa.set_lun_props.assert_called_once_with(
            lcfg.zfssa_pool,
            lcfg.zfssa_project,
            'volume-123',
            name='unmanaged-volume-123',
            schema={"custom:cinder_managed": False})
    def test_volume_unmanage_negative(self):
        """Backend errors during unmanage() must propagate."""
        self.drv.zfssa.set_lun_props.side_effect = \
            exception.VolumeBackendAPIException(data='fake exception')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.drv.unmanage, {'name': 'volume-123'})
@mock.patch.object(iscsi.ZFSSAISCSIDriver, '_get_existing_vol')
def test_manage_existing_get_size(self, _get_existing_vol):
test_vol = self.test_vol
test_vol['size'] = 3 * units.Gi
self.drv._get_existing_vol.return_value = test_vol
self.assertEqual(3, self.drv.manage_existing_get_size(
{'name': 'volume-123'},
{'source-name': 'volume-567'}))
    @mock.patch.object(iscsi.ZFSSAISCSIDriver, '_get_existing_vol')
    def test_manage_existing_get_size_negative(self, _get_existing_vol):
        """A missing source LUN makes manage_existing_get_size() raise."""
        self.drv._get_existing_vol.side_effect = \
            exception.VolumeNotFound(volume_id='123')
        self.assertRaises(exception.VolumeNotFound,
                          self.drv.manage_existing_get_size,
                          {'name': 'volume-123'},
                          {'source-name': 'volume-567'})
class TestZFSSANFSDriver(test.TestCase):
test_vol = {
'name': 'test-vol',
'id': '1',
'size': 3,
'provider_location':
'fakelocation',
}
test_snap = {
'name': 'cindersnap',
'volume_name': test_vol['name'],
'volume_size': test_vol['size']
}
test_vol_snap = {
'name': 'cindersnapvol',
'size': test_vol['size']
}
    def __init__(self, method):
        """Plain pass-through constructor required by the test framework."""
        super(TestZFSSANFSDriver, self).__init__(method)
    @mock.patch.object(zfssanfs, 'factory_zfssa')
    def setUp(self, _factory_zfssa):
        """Build an NFS driver wired to a MagicMock ZFSSANfsApi backend."""
        super(TestZFSSANFSDriver, self).setUp()
        self._create_fake_config()
        _factory_zfssa.return_value = mock.MagicMock(spec=rest.ZFSSANfsApi)
        self.drv = zfssanfs.ZFSSANFSDriver(configuration=self.configuration)
        self.drv._execute = fake_utils.fake_execute
        self.drv.do_setup({})
        self.drv.mount_path = 'fake_mount_path'
        self.context = context.get_admin_context()
def _create_fake_config(self):
self.configuration = mock.Mock(spec=conf.Configuration)
self.configuration.reserved_percentage = 0
self.configuration.max_over_subscription_ratio = 20.0
self.configuration.san_ip = '1.1.1.1'
self.configuration.san_login = 'user'
self.configuration.san_password = 'passwd'
self.configuration.zfssa_data_ip = '2.2.2.2'
self.configuration.zfssa_https_port = '443'
self.configuration.zfssa_nfs_pool = 'pool'
self.configuration.zfssa_nfs_project = 'nfs_project'
self.configuration.zfssa_nfs_share = 'nfs_share'
self.configuration.zfssa_nfs_share_logbias = nfs_logbias
self.configuration.zfssa_nfs_share_compression = nfs_compression
self.configuration.zfssa_nfs_mount_options = ''
self.configuration.zfssa_rest_timeout = '30'
self.configuration.zfssa_enable_local_cache = True
self.configuration.zfssa_cache_directory = zfssa_cache_dir
self.configuration.nfs_sparsed_volumes = 'true'
self.configuration.nfs_mount_point_base = '$state_path/mnt'
self.configuration.nfs_mount_options = None
self.configuration.zfssa_manage_policy = 'strict'
def test_setup_nfs_client(self):
mock_execute = self.mock_object(self.drv, '_execute',
side_effect= OSError(errno.ENOENT,
'No such file or '
'directory.'))
self.assertRaises(exception.NfsException, self.drv.do_setup,
self.context)
mock_execute.assert_has_calls(
[mock.call('mount.nfs',
check_exit_code=False,
run_as_root=True),
mock.call('/usr/sbin/mount',
check_exit_code=False,
run_as_root=True)])
def test_migrate_volume(self):
self.drv.zfssa.get_asn.return_value = (
'9a2b5a0f-e3af-6d14-9578-8825f229dc89')
volume = self.test_vol
volume.update({'host': 'fake_host',
'status': 'available',
'name': 'vol-1',
'source_volid': self.test_vol['id']})
loc_info = '9a2b5a0f-e3af-6d14-9578-8825f229dc89:nfs_share'
host = {'host': 'stack@zfssa_nfs#fake_zfssa',
'capabilities': {'vendor_name': 'Oracle',
'storage_protocol': 'nfs',
'location_info': loc_info}}
ctxt = context.get_admin_context()
# Test Normal case
result = self.drv.migrate_volume(ctxt, volume, host)
self.assertEqual((True, None), result)
# Test when volume status is not available
volume['status'] = 'in-use'
result = self.drv.migrate_volume(ctxt, volume, host)
self.assertEqual((False, None), result)
volume['status'] = 'available'
# Test when Vendor is not Oracle
host['capabilities']['vendor_name'] = 'elcarO'
result = self.drv.migrate_volume(ctxt, volume, host)
self.assertEqual((False, None), result)
host['capabilities']['vendor_name'] = 'Oracle'
# Test when storage protocol is not iSCSI
host['capabilities']['storage_protocol'] = 'not_nfs'
result = self.drv.migrate_volume(ctxt, volume, host)
self.assertEqual((False, None), result)
host['capabilities']['storage_protocol'] = 'nfs'
# Test for exceptions
host['capabilities']['location_info'] = ''
result = self.drv.migrate_volume(ctxt, volume, host)
self.assertEqual((False, None), result)
host['capabilities']['location_info'] = loc_info
# Test case when source and target asn dont match
invalid_loc_info = (
'fake_asn*https://2.2.2.2:/shares/export/nfs_share*nfs_share')
host['capabilities']['location_info'] = invalid_loc_info
result = self.drv.migrate_volume(ctxt, volume, host)
self.assertEqual((False, None), result)
# Test case when source and target shares names are different
invalid_loc_info = (
'9a2b5a0f-e3af-6d14-9578-8825f229dc89*' +
'https://tgt:/shares/export/nfs_share*nfs_share_1')
host['capabilities']['location_info'] = invalid_loc_info
result = self.drv.migrate_volume(ctxt, volume, host)
self.assertEqual((False, None), result)
def test_create_delete_snapshot(self):
lcfg = self.configuration
self.drv.create_snapshot(self.test_snap)
self.drv.zfssa.create_snapshot.assert_called_once_with(
lcfg.zfssa_nfs_pool,
lcfg.zfssa_nfs_project,
lcfg.zfssa_nfs_share,
mock.ANY)
self.drv.zfssa.create_snapshot_of_volume_file.assert_called_once_with(
src_file=mock.ANY,
dst_file=self.test_snap['name'])
self.drv.delete_snapshot(self.test_snap)
self.drv.zfssa.delete_snapshot_of_volume_file.assert_called_with(
src_file=self.test_snap['name'])
def test_create_volume_from_snapshot(self):
self.drv.create_snapshot(self.test_snap)
with mock.patch.object(self.drv, '_ensure_shares_mounted'):
self.drv.create_volume_from_snapshot(self.test_vol_snap,
self.test_snap,
method='COPY')
self.drv.zfssa.create_volume_from_snapshot_file.\
assert_called_once_with(src_file=self.test_snap['name'],
dst_file=self.test_vol_snap['name'],
method='COPY')
def test_get_volume_stats(self):
lcfg = self.configuration
self.drv._mounted_shares = ['nfs_share']
with mock.patch.object(self.drv, '_ensure_shares_mounted'):
with mock.patch.object(self.drv, '_get_share_capacity_info') as \
mock_get_share_capacity_info:
mock_get_share_capacity_info.return_value = (1073741824,
9663676416)
self.drv.zfssa.get_pool_details.return_value = \
{"profile": "mirror:log_stripe"}
self.drv.zfssa.get_share.return_value = {"compression": "lzjb",
"encryption": "off",
"logbias": "latency"}
stats = self.drv.get_volume_stats(refresh=True)
self.drv.zfssa.get_pool_details.assert_called_once_with(
lcfg.zfssa_nfs_pool)
self.drv.zfssa.get_share.assert_called_with(
lcfg.zfssa_nfs_pool, lcfg.zfssa_nfs_project,
lcfg.zfssa_nfs_share)
self.assertEqual(1, stats['free_capacity_gb'])
self.assertEqual(10, stats['total_capacity_gb'])
self.assertEqual('mirror:log_stripe',
stats['zfssa_poolprofile'])
self.assertEqual('lzjb', stats['zfssa_compression'])
self.assertEqual('true', stats['zfssa_sparse'])
self.assertEqual('off', stats['zfssa_encryption'])
self.assertEqual('latency', stats['zfssa_logbias'])
self.drv.zfssa.get_pool_details.return_value = \
{"profile": "mirror3"}
stats = self.drv.get_volume_stats(refresh=True)
self.assertEqual('mirror3', stats['zfssa_poolprofile'])
@mock.patch.object(nfsdriver.NfsDriver, 'delete_volume')
@mock.patch.object(zfssanfs.ZFSSANFSDriver, '_check_origin')
def test_delete_volume(self, _check_origin, _delete_vol):
self.drv.zfssa.get_volume.side_effect = self._get_volume_side_effect
test_vol = zfssanfs.Volume()
test_vol._name_id = small_img['id']
test_vol.size = 3
test_vol.provider_location = 'fakelocation'
self.drv.delete_volume(test_vol)
_delete_vol.assert_called_once_with(test_vol)
self.drv._check_origin.assert_called_once_with(img_props_nfs['name'])
def _get_volume_side_effect(self, *args, **kwargs):
lcfg = self.configuration
volname = six.text_type(args[0])
if volname.startswith(lcfg.zfssa_cache_directory):
return {'numclones': 0}
else:
return {'origin': img_props_nfs['name']}
def test_check_origin(self):
self.drv.zfssa.get_volume.side_effect = self._get_volume_side_effect
self.drv._check_origin(img_props_nfs['name'])
self.drv.zfssa.delete_file.assert_called_once_with(
img_props_nfs['name'])
@mock.patch.object(image_utils, 'qemu_img_info')
@mock.patch.object(image_utils.TemporaryImages, 'fetch')
@mock.patch.object(zfssanfs.ZFSSANFSDriver, '_verify_cache_volume')
@mock.patch.object(zfssanfs.ZFSSANFSDriver, 'create_cloned_volume')
def test_clone_image_negative(self, _create_clone, _verify_cache_volume,
_fetch, _info):
_fetch.return_value = mock.MagicMock(spec=utils.get_file_spec())
_info.return_value = ImgInfo(small_img['virtual_size'])
# Disabling local cache feature:
self.configuration.zfssa_enable_local_cache = False
self.assertEqual((None, False),
self.drv.clone_image(fakecontext, self.test_vol,
img_location,
small_img,
img_service))
self.configuration.zfssa_enable_local_cache = True
# Creating a volume smaller than image:
_info.return_value = ImgInfo(large_img['virtual_size'])
self.assertEqual((None, False),
self.drv.clone_image(fakecontext, self.test_vol,
img_location,
large_img,
img_service))
# Exception raised in _verify_cache_image
_info.return_value = ImgInfo(small_img['virtual_size'])
self.drv._verify_cache_volume.side_effect = (
exception.VolumeBackendAPIException('fakeerror'))
self.assertEqual((None, False),
self.drv.clone_image(fakecontext, self.test_vol,
img_location,
small_img,
img_service))
@mock.patch.object(image_utils, 'qemu_img_info')
@mock.patch.object(image_utils.TemporaryImages, 'fetch')
@mock.patch.object(zfssanfs.ZFSSANFSDriver, 'create_cloned_volume')
@mock.patch.object(zfssanfs.ZFSSANFSDriver, '_verify_cache_volume')
@mock.patch.object(zfssanfs.ZFSSANFSDriver, 'extend_volume')
def test_clone_image(self, _extend_vol, _verify_cache, _create_clone,
_fetch, _info):
_fetch.return_value = mock.MagicMock(spec=utils.get_file_spec())
_info.return_value = ImgInfo(small_img['virtual_size'])
self.drv._verify_cache_volume.return_value = \
'volume-' + img_props_nfs['id']
prov_loc = {'provider_location': self.test_vol['provider_location']}
self.drv.create_cloned_volume.return_value = prov_loc
self.assertEqual((prov_loc, True),
self.drv.clone_image(fakecontext, self.test_vol,
img_location,
small_img,
img_service))
img_props = {}
img_props['id'] = img_props_nfs['image_id']
img_props['image_id'] = img_props_nfs['image_id']
img_props['updated_at'] = img_props_nfs['updated_at']
img_props['size'] = img_props_nfs['size']
self.drv._verify_cache_volume.assert_called_once_with(fakecontext,
small_img,
img_service,
img_props)
cache_vol = {
'name': self.drv._verify_cache_volume.return_value,
'size': 3,
'id': small_img['id'],
}
self.drv.create_cloned_volume.assert_called_once_with(self.test_vol,
cache_vol)
@mock.patch.object(zfssanfs.ZFSSANFSDriver, '_create_cache_volume')
def test_verify_cache_vol_no_cache_vol(self, _create_cache_vol):
self.drv.zfssa.get_volume.side_effect = exception.VolumeNotFound(
volume_id=img_props_nfs['name'])
self.drv._verify_cache_volume(fakecontext, small_img,
img_service, img_props_nfs)
self.drv._create_cache_volume.assert_called_once_with(fakecontext,
small_img,
img_service,
img_props_nfs)
def test_verify_cache_vol_stale_vol(self):
self.drv.zfssa.get_volume.return_value = {
'numclones': 5,
'updated_at': small_img['updated_at'].isoformat(),
'image_id': 'wrong_id',
}
self.assertRaises(exception.VolumeBackendAPIException,
self.drv._verify_cache_volume,
fakecontext,
small_img,
img_service,
img_props_nfs)
@mock.patch.object(zfssanfs.ZFSSANFSDriver, '_create_cache_volume')
@mock.patch.object(nfsdriver.NfsDriver, 'delete_volume')
def test_verify_cache_vol_updated_vol(self, _del_vol, _create_cache_vol):
updated_vol = {
'updated_at': date(3000, 12, 12),
'image_id': 'updated_id',
'numclones': 0,
}
self.drv.zfssa.get_volume.return_value = updated_vol
self.drv._verify_cache_volume(fakecontext, small_img,
img_service, img_props_nfs)
self.drv._create_cache_volume.assert_called_once_with(fakecontext,
small_img,
img_service,
img_props_nfs)
@mock.patch.object(remotefs.RemoteFSDriver, 'copy_image_to_volume')
@mock.patch.object(nfsdriver.NfsDriver, 'create_volume')
def test_create_cache_volume(self, _create_vol, _copy_image):
self.drv.zfssa.webdavclient = mock.Mock()
self.drv._create_cache_volume(fakecontext,
small_img,
img_service,
img_props_nfs)
self.assertEqual(1, _create_vol.call_count)
self.assertEqual(1, _copy_image.call_count)
def test_create_cache_vol_negative(self):
self.drv.zfssa.get_lun.side_effect = (
exception.VolumeBackendAPIException)
self.assertRaises(exception.VolumeBackendAPIException,
self.drv._create_cache_volume,
fakecontext,
small_img,
img_service,
img_props_nfs)
self.drv.zfssa.delete_file.assert_called_once_with(
'os-cinder-cache/volume-' + img_props_nfs['id'])
def test_volume_manage(self):
lcfg = self.configuration
lcfg.zfssa_manage_policy = 'loose'
test_vol = self.test_vol
self.drv.zfssa.get_volume.return_value = test_vol
self.drv.zfssa.rename_volume.return_value = None
self.drv.zfssa.set_file_props.return_value = None
self.drv.mount_path = lcfg.zfssa_data_ip + ':' + 'fake_mountpoint'
self.assertEqual({'provider_location': self.drv.mount_path},
self.drv.manage_existing({'name': 'volume-123'},
{'source-name':
'volume-567'}))
self.drv.zfssa.get_volume.assert_called_once_with('volume-567')
self.drv.zfssa.rename_volume.assert_called_once_with('volume-567',
'volume-123')
self.drv.zfssa.set_file_props.assert_called_once_with(
'volume-123', {'cinder_managed': 'True'})
# Test when 'zfssa_manage_policy' is set to 'strict'.
lcfg.zfssa_manage_policy = 'strict'
test_vol.update({'cinder_managed': 'False'})
self.drv.zfssa.get_volume.return_value = test_vol
self.assertEqual({'provider_location': self.drv.mount_path},
self.drv.manage_existing({'name': 'volume-123'},
{'source-name':
'volume-567'}))
def test_volume_manage_negative_no_source_name(self):
self.assertRaises(exception.ManageExistingInvalidReference,
self.drv.manage_existing,
{'name': 'volume-123'},
{'source-id': 'volume-567'})
def test_volume_manage_negative_backend_exception(self):
self.drv.zfssa.get_volume.side_effect = \
exception.VolumeNotFound(volume_id='volume-567')
self.assertRaises(exception.InvalidInput,
self.drv.manage_existing,
{'name': 'volume-123'},
{'source-name': 'volume-567'})
def test_volume_manage_negative_verify_fail(self):
lcfg = self.configuration
lcfg.zfssa_manage_policy = 'strict'
test_vol = self.test_vol
test_vol['cinder_managed'] = ''
self.drv.zfssa.get_volume.return_value = test_vol
self.assertRaises(exception.InvalidInput,
self.drv.manage_existing,
{'name': 'volume-123'},
{'source-name': 'volume-567'})
test_vol.update({'cinder_managed': 'True'})
self.drv.zfssa.get_volume.return_value = test_vol
self.assertRaises(exception.ManageExistingAlreadyManaged,
self.drv.manage_existing,
{'name': 'volume-123'},
{'source-name': 'volume-567'})
@mock.patch.object(zfssanfs.ZFSSANFSDriver, '_verify_volume_to_manage')
def test_volume_manage_negative_rename_fail(self,
_verify_volume_to_manage):
test_vol = self.test_vol
test_vol.update({'cinder_managed': 'False'})
self.drv.zfssa.get_volume.return_value = test_vol
self.drv._verify_volume_to_manage.return_value = None
self.drv.zfssa.rename_volume.side_effect = \
exception.VolumeBackendAPIException(data="fake exception")
self.assertRaises(exception.VolumeBackendAPIException,
self.drv.manage_existing, {'name': 'volume-123'},
{'source-name': 'volume-567'})
@mock.patch.object(zfssanfs.ZFSSANFSDriver, '_verify_volume_to_manage')
def test_volume_manage_negative_set_prop_fail(self,
_verify_volume_to_manage):
test_vol = self.test_vol
test_vol.update({'cinder_managed': 'False'})
self.drv.zfssa.get_volume.return_value = test_vol
self.drv._verify_volume_to_manage.return_value = None
self.drv.zfssa.rename_volume.return_value = None
self.drv.zfssa.set_file_props.side_effect = \
exception.VolumeBackendAPIException(data="fake exception")
self.assertRaises(exception.VolumeBackendAPIException,
self.drv.manage_existing, {'name': 'volume-123'},
{'source-name': 'volume-567'})
def test_volume_unmanage(self):
test_vol = self.test_vol
test_vol.update({'cinder_managed': 'True'})
self.drv.zfssa.rename_volume.return_value = None
self.drv.zfssa.set_file_props.return_value = None
self.assertIsNone(self.drv.unmanage(test_vol))
new_vol_name = 'unmanaged-' + test_vol['name']
self.drv.zfssa.rename_volume.assert_called_once_with(test_vol['name'],
new_vol_name)
self.drv.zfssa.set_file_props.assert_called_once_with(
new_vol_name, {'cinder_managed': 'False'})
def test_volume_unmanage_negative_rename_fail(self):
test_vol = self.test_vol
test_vol.update({'cinder_managed': 'True'})
self.drv.zfssa.rename_volume.side_effect = \
exception.VolumeBackendAPIException(data="fake exception")
self.drv.zfssa.set_file_props.return_value = None
self.assertRaises(exception.VolumeBackendAPIException,
self.drv.unmanage, test_vol)
def test_volume_unmanage_negative_set_prop_fail(self):
test_vol = self.test_vol
test_vol.update({'cinder_managed': 'True'})
self.drv.zfssa.rename_volume.return_value = None
self.drv.zfssa.set_file_props.side_effect = \
exception.VolumeBackendAPIException(data="fake exception")
self.assertRaises(exception.VolumeBackendAPIException,
self.drv.unmanage, test_vol)
@mock.patch.object(zfssanfs.ZFSSANFSDriver, '_get_mount_point_for_share')
def test_manage_existing_get_size(self, _get_mount_point_for_share):
self.drv._get_mount_point_for_share.return_value = \
'/fake/mnt/fake_share/'
self.drv._mounted_shares = []
self.drv._mounted_shares.append('fake_share')
file = mock.Mock(st_size=123 * units.Gi)
with mock.patch('os.path.isfile', return_value=True):
with mock.patch('os.stat', return_value=file):
self.assertEqual(float(file.st_size / units.Gi),
self.drv.manage_existing_get_size(
{'name': 'volume-123'},
{'source-name': 'volume-567'}))
@mock.patch.object(zfssanfs.ZFSSANFSDriver, '_get_mount_point_for_share')
def test_manage_existing_get_size_negative(self,
_get_mount_point_for_share):
self.drv._get_mount_point_for_share.return_value = \
'/fake/mnt/fake_share/'
self.drv._mounted_shares = []
self.drv._mounted_shares.append('fake_share')
with mock.patch('os.path.isfile', return_value=True):
with mock.patch('os.stat', side_effect=OSError):
self.assertRaises(exception.VolumeBackendAPIException,
self.drv.manage_existing_get_size,
{'name': 'volume-123'},
{'source-name': 'volume-567'})
class TestZFSSAApi(test.TestCase):
    """Unit tests for the ZFSSA iSCSI REST API wrapper.

    ``factory_restclient`` is patched so ``self.zfssa.rclient`` is a
    MagicMock speccing ``client.RestClientURL``; tests feed canned
    FakeResponse objects and assert on the REST paths/payloads used.
    """

    @mock.patch.object(rest, 'factory_restclient')
    def setUp(self, _restclient):
        super(TestZFSSAApi, self).setUp()
        self.host = 'fakehost'
        self.user = 'fakeuser'
        self.url = None
        self.pool = 'fakepool'
        self.project = 'fakeproject'
        self.vol = 'fakevol'
        self.snap = 'fakesnapshot'
        self.clone = 'fakeclone'
        self.targetalias = 'fakealias'
        _restclient.return_value = mock.MagicMock(spec=client.RestClientURL)
        self.zfssa = rest.ZFSSAApi()
        self.zfssa.set_host('fakehost')
        self.pool_url = '/api/storage/v1/pools/'

    def _create_response(self, status, data='data'):
        """Build a FakeResponse with the given HTTP status and body."""
        response = FakeResponse(status, data)
        return response

    def test_create_project(self):
        """create_project() probes the project URL before creating."""
        self.zfssa.rclient.get.return_value = self._create_response(
            client.Status.OK)
        self.zfssa.create_project(self.pool, self.project)
        expected_svc = self.pool_url + self.pool + '/projects/' + self.project
        self.zfssa.rclient.get.assert_called_with(expected_svc)

    def test_create_initiator(self):
        """create_initiator() looks up the initiator by alias."""
        self.zfssa.rclient.get.return_value = self._create_response(
            client.Status.OK)
        initiator = 'iqn.1986-03.com.sun:02:00000-aaaa-bbbb-cccc-ddddd'
        alias = 'init-group'
        self.zfssa.create_initiator(initiator, alias)
        self.zfssa.rclient.get.assert_called_with(
            '/api/san/v1/iscsi/initiators/alias=' + alias)

    def test_create_target(self):
        """create_target() POSTs a new target when the alias is absent."""
        self.zfssa.rclient.get.return_value = self._create_response(
            client.Status.NOT_FOUND)
        ret_val = json.dumps(
            {'target': {'iqn':
                        'iqn.1986-03.com.sun:02:00000-aaaa-bbbb-cccc-ddddd'}})
        self.zfssa.rclient.post.return_value = self._create_response(
            client.Status.CREATED, ret_val)
        alias = 'tgt-group'
        self.zfssa.create_target(alias)
        self.zfssa.rclient.post.assert_called_with('/api/san/v1/iscsi/targets',
                                                   {'alias': alias})

    def test_get_target(self):
        """get_target() returns the target's IQN."""
        ret_val = json.dumps(
            {'target': {'href': 'fake_href',
                        'alias': 'tgt-group',
                        'iqn':
                        'iqn.1986-03.com.sun:02:00000-aaaa-bbbb-cccc-ddddd',
                        'targetchapuser': '',
                        'targetchapsecret': '',
                        'interfaces': ['nge0']}})
        self.zfssa.rclient.get.return_value = self._create_response(
            client.Status.OK, ret_val)
        ret = self.zfssa.get_target('tgt-group')
        self.zfssa.rclient.get.assert_called_once_with(
            '/api/san/v1/iscsi/targets/alias=tgt-group')
        self.assertEqual('iqn.1986-03.com.sun:02:00000-aaaa-bbbb-cccc-ddddd',
                         ret)

    def test_verify_pool(self):
        """verify_pool() GETs the pool URL."""
        self.zfssa.rclient.get.return_value = self._create_response(
            client.Status.OK)
        self.zfssa.verify_pool(self.pool)
        self.zfssa.rclient.get.assert_called_with(self.pool_url + self.pool)

    def test_verify_project(self):
        """verify_project() raises when the project is not found."""
        self.zfssa.rclient.get.return_value = self._create_response(
            client.Status.NOT_FOUND)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.zfssa.verify_project,
                          self.pool,
                          self.project)

    def test_verify_initiator(self):
        """verify_initiator() GETs the initiator resource."""
        self.zfssa.rclient.get.return_value = self._create_response(
            client.Status.OK)
        self.zfssa.verify_initiator('iqn.1-0.org.deb:01:d7')
        self.zfssa.rclient.get.assert_called_with(
            '/api/san/v1/iscsi/initiators/iqn.1-0.org.deb:01:d7')

    def test_verify_target(self):
        """verify_target() raises on a non-OK response."""
        self.zfssa.rclient.get.return_value = self._create_response(
            client.Status.BAD_REQUEST)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.zfssa.verify_target,
                          self.targetalias)

    def test_create_delete_lun(self):
        """LUN create/delete hit the project's /luns endpoints."""
        arg = json.dumps({'name': self.vol,
                          'initiatorgroup': 'com.sun.ms.vss.hg.maskAll'})
        self.zfssa.rclient.post.return_value = self._create_response(
            client.Status.CREATED, data=arg)
        self.zfssa.create_lun(self.pool, self.project, self.vol, 1, 'tgt-grp',
                              None)
        expected_arg = {'name': self.vol,
                        'volsize': 1,
                        'targetgroup': 'tgt-grp',
                        'initiatorgroup': 'com.sun.ms.vss.hg.maskAll'}
        self.zfssa.rclient.post.assert_called_with(
            self.pool_url + self.pool + '/projects/' + self.project + '/luns',
            expected_arg)

        self.zfssa.rclient.delete.return_value = self._create_response(
            client.Status.NO_CONTENT)
        self.zfssa.delete_lun(self.pool, self.project, self.vol)
        self.zfssa.rclient.delete.assert_called_with(
            self.pool_url + self.pool + '/projects/' + self.project +
            '/luns/' + self.vol)

    def test_create_delete_snapshot(self):
        """Snapshot create/delete hit the LUN's /snapshots endpoints."""
        self.zfssa.rclient.post.return_value = self._create_response(
            client.Status.CREATED)
        self.zfssa.create_snapshot(self.pool,
                                   self.project,
                                   self.vol,
                                   self.snap)
        expected_arg = {'name': self.snap}
        self.zfssa.rclient.post.assert_called_with(
            self.pool_url + self.pool + '/projects/' + self.project +
            '/luns/' + self.vol + '/snapshots', expected_arg)

        self.zfssa.rclient.delete.return_value = self._create_response(
            client.Status.NO_CONTENT)
        self.zfssa.delete_snapshot(self.pool,
                                   self.project,
                                   self.vol,
                                   self.snap)
        self.zfssa.rclient.delete.assert_called_with(
            self.pool_url + self.pool + '/projects/' + self.project +
            '/luns/' + self.vol + '/snapshots/' + self.snap)

    def test_clone_snapshot(self):
        """clone_snapshot() PUTs a non-destructive clone request."""
        self.zfssa.rclient.put.return_value = self._create_response(
            client.Status.CREATED)
        self.zfssa.clone_snapshot(self.pool,
                                  self.project,
                                  self.vol,
                                  self.snap,
                                  self.project,
                                  self.clone,
                                  None)
        expected_svc = '/api/storage/v1/pools/' + self.pool + '/projects/' + \
            self.project + '/luns/' + self.vol + '/snapshots/' + self.snap + \
            '/clone'
        expected_arg = {'project': self.project,
                        'share': self.clone,
                        'nodestroy': True}
        self.zfssa.rclient.put.assert_called_with(expected_svc, expected_arg)

    def test_get_project_stats(self):
        """get_project_stats() GETs the project and raises on NOT_FOUND."""
        ret_val = json.dumps({"project": {"name": self.project,
                                          "space_available": 15754895360,
                                          "space_total": 25754895360,
                                          "dedup": False,
                                          "logbias": "latency",
                                          "encryption": "off"}})
        self.zfssa.rclient.get.return_value = self._create_response(
            client.Status.OK, ret_val)
        self.zfssa.get_project_stats(self.pool, self.project)
        expected_svc = '/api/storage/v1/pools/' + self.pool + '/projects/' + \
            self.project
        self.zfssa.rclient.get.assert_called_with(expected_svc)

        self.zfssa.rclient.get.return_value = self._create_response(
            client.Status.NOT_FOUND)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.zfssa.get_project_stats,
                          self.pool,
                          self.project)
class TestZFSSANfsApi(test.TestCase):
    """Unit tests for the ZFSSA NFS REST API wrapper.

    ``factory_restclient`` is patched so ``self.zfssa.rclient`` is a
    MagicMock; tests feed canned FakeResponse objects and assert on the
    REST paths/payloads used.
    """

    @mock.patch.object(rest, 'factory_restclient')
    def setUp(self, _restclient):
        super(TestZFSSANfsApi, self).setUp()
        self.host = 'fakehost'
        self.user = 'fakeuser'
        self.url = None
        self.pool = 'fakepool'
        self.project = 'fakeproject'
        self.share = 'fakeshare'
        self.snap = 'fakesnapshot'
        self.targetalias = 'fakealias'
        _restclient.return_value = mock.MagicMock(spec=client.RestClientURL)
        # NOTE(review): this mock is never attached to self.zfssa (e.g.
        # self.zfssa.webdavclient) -- confirm whether the WebDAV tests
        # below were meant to use it.
        self.webdavclient = mock.MagicMock(spec=webdavclient.ZFSSAWebDAVClient)
        self.zfssa = rest.ZFSSANfsApi()
        self.zfssa.set_host('fakehost')
        self.pool_url = '/api/storage/v1/pools/'

    def _create_response(self, status, data='data'):
        """Build a FakeResponse with the given HTTP status and body."""
        response = FakeResponse(status, data)
        return response

    def test_verify_share(self):
        """verify_share() GETs the filesystem resource."""
        self.zfssa.rclient.get.return_value = self._create_response(
            client.Status.OK)
        self.zfssa.verify_share(self.pool, self.project, self.share)
        self.zfssa.rclient.get.assert_called_with(self.pool_url + self.pool +
                                                  '/projects/' + self.project +
                                                  '/filesystems/' + self.share)

    def test_create_delete_snapshot(self):
        """Snapshot create/delete hit the share's /snapshots endpoints."""
        self.zfssa.rclient.post.return_value = self._create_response(
            client.Status.CREATED)
        self.zfssa.create_snapshot(self.pool,
                                   self.project,
                                   self.share,
                                   self.snap)
        expected_arg = {'name': self.snap}
        self.zfssa.rclient.post.assert_called_with(
            self.pool_url + self.pool + '/projects/' + self.project +
            '/filesystems/' + self.share + '/snapshots', expected_arg)

        self.zfssa.rclient.delete.return_value = self._create_response(
            client.Status.NO_CONTENT)
        self.zfssa.delete_snapshot(self.pool,
                                   self.project,
                                   self.share,
                                   self.snap)
        self.zfssa.rclient.delete.assert_called_with(
            self.pool_url + self.pool + '/projects/' + self.project +
            '/filesystems/' + self.share + '/snapshots/' + self.snap)

    # NOTE(review): missing the ``test_`` prefix, so the runner never
    # executes this method.  Enabling it is not a pure rename: after the
    # second request() call the assert_called_once_with() below would
    # fail, and self.zfssa.webdavclient is never wired to the mock built
    # in setUp.  Fix those together when turning this into a real test.
    def create_delete_snapshot_of_volume_file(self):
        src_file = "fake_src_file"
        dst_file = "fake_dst_file"
        self.zfssa.create_snapshot_of_volume_file(src_file=src_file,
                                                  dst_file=dst_file)
        self.zfssa.webdavclient.request.assert_called_once_with(
            src_file=src_file,
            dst_file=dst_file,
            method='COPY')
        self.zfssa.delete_snapshot_of_volume_file(src_file=src_file)
        self.zfssa.webdavclient.request.assert_called_once_with(
            src_file=src_file, method='DELETE')

    def test_get_share(self):
        """get_share() returns the decoded 'filesystem' payload."""
        ret_val = json.dumps({'filesystem': 'test_fs'})
        self.zfssa.rclient.get.return_value = self._create_response(
            client.Status.OK, ret_val)
        ret = self.zfssa.get_share(self.pool, self.project, self.share)
        self.zfssa.rclient.get.assert_called_with(self.pool_url + self.pool +
                                                  '/projects/' + self.project +
                                                  '/filesystems/' + self.share)
        self.assertEqual('test_fs', ret)

    def test_create_share(self):
        """create_share() raises when the POST is rejected."""
        self.zfssa.rclient.get.return_value = self._create_response(
            client.Status.NOT_FOUND)
        self.zfssa.rclient.post.return_value = self._create_response(
            client.Status.BAD_REQUEST)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.zfssa.create_share,
                          self.pool,
                          self.project,
                          self.share,
                          {})

    @mock.patch.object(rest.ZFSSANfsApi, '_change_service_state')
    @mock.patch.object(rest.ZFSSANfsApi, 'verify_service')
    def test_enable_disable_modify_service(self,
                                           verify_service,
                                           _change_service_state):
        """Service enable/disable/modify delegate to the REST client."""
        self.zfssa.enable_service('http')
        self.zfssa._change_service_state.assert_called_with(
            'http', state='enable')
        self.zfssa.verify_service.assert_called_with('http')

        self.zfssa.disable_service('http')
        self.zfssa._change_service_state.assert_called_with(
            'http', state='disable')
        self.zfssa.verify_service.assert_called_with('http', status='offline')

        ret_val = json.dumps({'service': {
            "href": "/api/service/v1/services/http",
            "<status>": "online",
            "require_login": False,
            "protocols": "http/https",
            "listen_port": 81,
            "https_port": 443}})
        self.zfssa.rclient.put.return_value = self._create_response(
            client.Status.ACCEPTED, ret_val)
        args = {'listen_port': 81}
        self.zfssa.modify_service('http', args)
        # NOTE(review): ``called_with`` is not a Mock assertion -- it just
        # creates a child mock and verifies nothing.  This was presumably
        # meant to be assert_called_with; confirm before changing.
        self.zfssa.rclient.put.called_with('/api/service/v1/services/http',
                                           args)
class TestRestClientURL(test.TestCase):
    """Unit tests for the low-level REST client URL wrapper.

    Each HTTP verb helper is expected to delegate to request() with the
    matching method string; request() itself is tested against patched
    urllib machinery.
    """

    def setUp(self):
        super(TestRestClientURL, self).setUp()
        self.timeout = 60
        self.url = '1.1.1.1'
        self.client = client.RestClientURL(self.url, timeout=self.timeout)

    @mock.patch.object(client.RestClientURL, 'request')
    def test_post(self, _request):
        """post() delegates to request(path, 'POST', body)."""
        path = '/api/storage/v1/pools'
        body = {'name': 'fakepool'}
        self.client.post(path, body=body)
        self.client.request.assert_called_with(path, 'POST', body)

    @mock.patch.object(client.RestClientURL, 'request')
    def test_get(self, _request):
        """get() delegates to request(path, 'GET')."""
        path = '/api/storage/v1/pools'
        self.client.get(path)
        self.client.request.assert_called_with(path, 'GET')

    @mock.patch.object(client.RestClientURL, 'request')
    def test_put(self, _request):
        """put() delegates to request(path, 'PUT', body)."""
        path = '/api/storage/v1/pools'
        body = {'name': 'fakepool'}
        self.client.put(path, body=body)
        self.client.request.assert_called_with(path, 'PUT', body)

    @mock.patch.object(client.RestClientURL, 'request')
    def test_delete(self, _request):
        """delete() delegates to request(path, 'DELETE')."""
        path = '/api/storage/v1/pools'
        self.client.delete(path)
        self.client.request.assert_called_with(path, 'DELETE')

    @mock.patch.object(client.RestClientURL, 'request')
    def test_head(self, _request):
        """head() delegates to request(path, 'HEAD')."""
        path = '/api/storage/v1/pools'
        self.client.head(path)
        self.client.request.assert_called_with(path, 'HEAD')

    @mock.patch.object(client, 'RestResult')
    @mock.patch.object(client.urllib.request, 'Request')
    @mock.patch.object(client.urllib.request, 'urlopen')
    def test_request(self, _urlopen, _Request, _RestResult):
        """request() builds a urllib Request and wraps it in RestResult."""
        path = '/api/storage/v1/pools'
        _urlopen.return_value = mock.Mock()
        self.client.request(path, mock.ANY)
        _Request.assert_called_with(self.url + path, None, self.client.headers)
        self.assertEqual(1, _urlopen.call_count)
        _RestResult.assert_called_with(response=mock.ANY)

    @mock.patch.object(client, 'RestResult')
    @mock.patch.object(client.urllib.request, 'Request')
    @mock.patch.object(client.urllib.request, 'urlopen')
    @mock.patch.object(client, 'ssl', new_callable=FakeSSL)
    def test_ssl_with_context(self, _ssl, _urlopen, _Request, _RestResult):
        """Test PEP476 certificate opt_out fix. """
        path = '/api/storage/v1/pools'
        _urlopen.return_value = mock.Mock()
        self.client.request(path, mock.ANY)
        # FakeSSL supplies 'fakecontext', proving the context is passed.
        _urlopen.assert_called_once_with(mock.ANY,
                                        timeout=self.timeout,
                                        context='fakecontext')

    @mock.patch.object(client, 'RestResult')
    @mock.patch.object(client.urllib.request, 'Request')
    @mock.patch.object(client.urllib.request, 'urlopen')
    @mock.patch.object(client, 'ssl', new_callable=object)
    def test_ssl_no_context(self, _ssl, _urlopen, _Request, _RestResult):
        """Verify the PEP476 fix backward compatibility. """
        path = '/api/storage/v1/pools'
        _urlopen.return_value = mock.Mock()
        self.client.request(path, mock.ANY)
        # Plain 'ssl' object without create_default_context: no context kwarg.
        _urlopen.assert_called_once_with(mock.ANY, timeout=self.timeout)
# =========================================================================
# Copyright 2012-present Yunify, Inc.
# -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
import json
from qingcloud.iaas.errors import InvalidRouterStatic
class RouterStaticFactory(object):
    """Factory for building router static rule objects.

    ``STATIC_MAPPER`` (defined elsewhere in this module) maps each
    ``TYPE_*`` constant to the concrete ``_RouterStatic`` subclass.
    """

    TYPE_PORT_FORWARDING = 1
    TYPE_VPN = 2
    TYPE_TUNNEL = 4
    TYPE_FILTERING = 5

    PPTP_DEFAULT_CONNS = 100

    @classmethod
    def create(cls, static_type, router_static_id='', **kw):
        """Create a router static rule of the given type."""
        if static_type not in STATIC_MAPPER:
            raise InvalidRouterStatic('invalid static type[%s]' % static_type)

        rule_cls = STATIC_MAPPER[static_type]
        rule = rule_cls(**rule_cls.extract(kw))
        rule.router_static_id = router_static_id
        return rule

    @classmethod
    def create_from_string(cls, string):
        """Create rule(s) from a JSON string: dict -> one rule,
        list -> list of rules, anything else -> None."""
        payload = json.loads(string)
        if isinstance(payload, list):
            return [cls.create(**entry) for entry in payload]
        if isinstance(payload, dict):
            return cls.create(**payload)
class _RouterStatic(object):
""" _RouterStatic is used to define static rule in router.
"""
router_static_id = None
static_type = None
def __repr__(self):
return '<%s>%s' % (self.__class__.__name__, self.to_json())
@staticmethod
def extract(kw):
raise NotImplementedError
def extra_props(self):
raise NotImplementedError
def to_json(self):
props = {
'router_static_id': self.router_static_id,
'static_type': self.static_type,
}
props.update(self.extra_props())
return props
class _StaticForPortForwarding(_RouterStatic):
    """Port-forwarding rule: forwards a router port to an inner ip:port."""

    static_type = RouterStaticFactory.TYPE_PORT_FORWARDING

    # Wire format <-> constructor argument mapping (valN keys).
    _VAL_KEYS = (('val1', 'src_port'), ('val2', 'dst_ip'),
                 ('val3', 'dst_port'), ('val4', 'protocol'))

    def __init__(self, src_port, dst_ip, dst_port, protocol='tcp',
                 router_static_name='', **kw):
        super(_StaticForPortForwarding, self).__init__()
        self.router_static_name = router_static_name
        self.src_port = src_port
        self.dst_ip = dst_ip
        self.dst_port = dst_port
        self.protocol = protocol

    @staticmethod
    def extract(kw):
        """Rename the generic valN keys to named constructor arguments."""
        for wire_key, arg_name in _StaticForPortForwarding._VAL_KEYS:
            if wire_key in kw:
                kw[arg_name] = kw.pop(wire_key)
        return kw

    def extra_props(self):
        return {
            'router_static_name': self.router_static_name,
            'val1': self.src_port,
            'val2': self.dst_ip,
            'val3': self.dst_port,
            'val4': self.protocol,
        }
class _StaticForVPN(_RouterStatic):
    """VPN rule wrapping either an OpenVPN or a PPTP configuration."""

    class OpenVPN(object):
        """OpenVPN service settings."""

        def __init__(self, ip_network, serv_port='1194', serv_protocol='udp',
                     **kw):
            self.serv_port = serv_port
            self.serv_protocol = serv_protocol
            self.ip_network = ip_network

        def extra_props(self):
            return {
                'val1': 'openvpn',
                'val2': self.serv_port,
                'val3': self.serv_protocol,
                'val4': self.ip_network,
            }

    class PPTP(object):
        """PPTP service settings."""

        def __init__(self, usr, pwd, ip_network,
                     max_conn_cnt=RouterStaticFactory.PPTP_DEFAULT_CONNS, **kw):
            self.usr = usr
            self.pwd = pwd
            self.max_conn_cnt = max_conn_cnt
            self.ip_network = ip_network

        def extra_props(self):
            return {
                'val1': 'pptp',
                'val2': '%s:%s' % (self.usr, self.pwd),
                'val3': self.max_conn_cnt,
                'val4': self.ip_network,
            }

    static_type = RouterStaticFactory.TYPE_VPN

    def __init__(self, vpn_type='', **kw):
        super(_StaticForVPN, self).__init__()
        # Fall back to the raw 'val1' slot when vpn_type is not given.
        kind = vpn_type or kw.get('val1')
        if kind == 'openvpn':
            self.inst = _StaticForVPN.OpenVPN(**kw)
        elif kind == 'pptp':
            self.inst = _StaticForVPN.PPTP(**kw)
        else:
            raise InvalidRouterStatic('unsupported vpn type[%s]' % kind)

    @staticmethod
    def extract(kw):
        """Rename generic ``valN`` slots according to the flavor in 'val1'."""
        kind = kw.get('val1')
        if kind == 'openvpn':
            for slot, name in (('val2', 'serv_port'),
                               ('val3', 'serv_protocol'),
                               ('val4', 'ip_network')):
                if slot in kw:
                    kw[name] = kw.pop(slot)
        elif kind == 'pptp':
            if 'entry_set' in kw:
                # Only the user name comes back from the API; the password
                # is not recoverable, so it is left blank.
                kw['usr'] = kw['entry_set'][0]['val1']
                kw['pwd'] = ''
            if 'val3' in kw:
                kw['max_conn_cnt'] = kw.pop('val3')
            if 'val4' in kw:
                kw['ip_network'] = kw.pop('val4')
        return kw

    def extra_props(self):
        return self.inst.extra_props()
class _StaticForTunnel(_RouterStatic):
    """Tunnel rule binding a vxnet to one or more tunnel entries."""

    static_type = RouterStaticFactory.TYPE_TUNNEL

    def __init__(self, vxnet_id, tunnel_entries, **kw):
        """
        @param tunnel_entries: [(tunnel_type, ip_network, key), ...]
        """
        super(_StaticForTunnel, self).__init__()
        self.vxnet_id = vxnet_id
        self.tunnel_entries = tunnel_entries

    @staticmethod
    def extract(kw):
        """Parse the 'val1' slot ("type|net|key;...") into entry tuples."""
        if 'val1' in kw:
            raw_entries = kw.pop('val1').split(';')
            kw['tunnel_entries'] = [tuple(item.split('|'))
                                    for item in raw_entries]
        return kw

    def extra_props(self):
        return {
            'vxnet_id': self.vxnet_id,
            'val1': ';'.join('%s|%s|%s' % entry
                             for entry in self.tunnel_entries),
        }
class _StaticForFiltering(_RouterStatic):
    """Filtering rule matching source/destination and applying an action."""

    static_type = RouterStaticFactory.TYPE_FILTERING

    # Generic API value slot -> descriptive constructor argument name.
    _FIELDS = (('val1', 'src_ip'), ('val2', 'src_port'),
               ('val3', 'dst_ip'), ('val4', 'dst_port'),
               ('val5', 'priority'), ('val6', 'action'))

    def __init__(self, router_static_name='', src_ip='', src_port='',
                 dst_ip='', dst_port='', priority='1', action='', **kw):
        super(_StaticForFiltering, self).__init__()
        self.router_static_name = router_static_name
        self.src_ip = src_ip
        self.src_port = src_port
        self.dst_ip = dst_ip
        self.dst_port = dst_port
        self.priority = priority
        self.action = action

    @staticmethod
    def extract(kw):
        """Rename generic ``valN`` keys to their descriptive equivalents."""
        for slot, name in _StaticForFiltering._FIELDS:
            if slot in kw:
                kw[name] = kw.pop(slot)
        return kw

    def extra_props(self):
        return {
            'router_static_name': self.router_static_name,
            'val1': self.src_ip,
            'val2': self.src_port,
            'val3': self.dst_ip,
            'val4': self.dst_port,
            'val5': self.priority,
            'val6': self.action,
        }
# Dispatch table: static type code -> concrete rule class.  Consulted by
# RouterStaticFactory.create to pick the implementation for a given type.
STATIC_MAPPER = {
    RouterStaticFactory.TYPE_PORT_FORWARDING: _StaticForPortForwarding,
    RouterStaticFactory.TYPE_VPN: _StaticForVPN,
    RouterStaticFactory.TYPE_TUNNEL: _StaticForTunnel,
    RouterStaticFactory.TYPE_FILTERING: _StaticForFiltering,
}
| |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'OrganizationMember.counter'
db.delete_column(u'sentry_organizationmember', 'counter')
def backwards(self, orm):
# Adding field 'OrganizationMember.counter'
db.add_column(u'sentry_organizationmember', 'counter',
self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(null=True, blank=True),
keep_default=False)
models = {
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.apitoken': {
'Meta': {'object_name': 'ApiToken'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True'}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'token': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']"}),
'actor_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authenticator': {
'Meta': {'object_name': 'Authenticator', 'db_table': "'auth_authenticator'"},
'config': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'last_used_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'object_name': 'AuthProvider'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2016, 8, 31, 0, 0)', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'upstream_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'sentry.broadcastseen': {
'Meta': {'unique_together': "(('broadcast', 'user'),)", 'object_name': 'BroadcastSeen'},
'broadcast': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Broadcast']"}),
'date_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.counter': {
'Meta': {'object_name': 'Counter', 'db_table': "'sentry_projectcounter'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.dsymbundle': {
'Meta': {'object_name': 'DSymBundle'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymObject']"}),
'sdk': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymSDK']"})
},
'sentry.dsymobject': {
'Meta': {'object_name': 'DSymObject'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_path': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'db_index': 'True'}),
'vmaddr': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'vmsize': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'})
},
'sentry.dsymsdk': {
'Meta': {'object_name': 'DSymSDK', 'index_together': "[('version_major', 'version_minor', 'version_patchlevel', 'version_build')]"},
'dsym_type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'sdk_name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'version_build': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'version_major': ('django.db.models.fields.IntegerField', [], {}),
'version_minor': ('django.db.models.fields.IntegerField', [], {}),
'version_patchlevel': ('django.db.models.fields.IntegerField', [], {})
},
'sentry.dsymsymbol': {
'Meta': {'unique_together': "[('object', 'address')]", 'object_name': 'DSymSymbol'},
'address': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymObject']"}),
'symbol': ('django.db.models.fields.TextField', [], {})
},
'sentry.environment': {
'Meta': {'unique_together': "(('project_id', 'name'),)", 'object_name': 'Environment'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.event': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group_id', 'datetime'),)"},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventtag': {
'Meta': {'unique_together': "(('event_id', 'key_id', 'value_id'),)", 'object_name': 'EventTag', 'index_together': "(('project_id', 'key_id', 'value_id'), ('group_id', 'key_id', 'value_id'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {'unique_together': "(('project', 'ident'), ('project', 'hash'))", 'object_name': 'EventUser', 'index_together': "(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'})
},
'sentry.file': {
'Meta': {'object_name': 'File'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'legacy_blob'", 'null': 'True', 'to': "orm['sentry.FileBlob']"}),
'blobs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.FileBlob']", 'through': "orm['sentry.FileBlobIndex']", 'symmetrical': 'False'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'headers': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.fileblob': {
'Meta': {'object_name': 'FileBlob'},
'checksum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'})
},
'sentry.fileblobindex': {
'Meta': {'unique_together': "(('file', 'blob', 'offset'),)", 'object_name': 'FileBlobIndex'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.globaldsymfile': {
'Meta': {'object_name': 'GlobalDSymFile'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'short_id'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'", 'index_together': "(('project', 'first_release'),)"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'short_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupemailthread': {
'Meta': {'unique_together': "(('email', 'group'), ('email', 'msgid'))", 'object_name': 'GroupEmailThread'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'msgid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Project']"})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {'object_name': 'GroupRedirect'},
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'unique': 'True'})
},
'sentry.grouprelease': {
'Meta': {'unique_together': "(('group_id', 'release_id', 'environment'),)", 'object_name': 'GroupRelease'},
'environment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.groupresolution': {
'Meta': {'object_name': 'GroupResolution'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.groupsnooze': {
'Meta': {'object_name': 'GroupSnooze'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'until': ('django.db.models.fields.DateTimeField', [], {})
},
'sentry.groupsubscription': {
'Meta': {'unique_together': "(('group', 'user'),)", 'object_name': 'GroupSubscription'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Project']"}),
'reason': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('group', 'key', 'value'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'", 'index_together': "(('project', 'key', 'value'),)"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationaccessrequest': {
'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.organizationmemberteam': {
'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationonboardingtask': {
'Meta': {'unique_together': "(('organization', 'task'),)", 'object_name': 'OrganizationOnboardingTask'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.organizationoption': {
'Meta': {'unique_together': "(('organization', 'key'),)", 'object_name': 'OrganizationOption', 'db_table': "'sentry_organizationoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'first_event': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'forced_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.projectbookmark': {
'Meta': {'unique_together': "(('project_id', 'user'),)", 'object_name': 'ProjectBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.projectdsymfile': {
'Meta': {'unique_together': "(('project', 'uuid'),)", 'object_name': 'ProjectDSymFile'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.projectplatform': {
'Meta': {'unique_together': "(('project_id', 'platform'),)", 'object_name': 'ProjectPlatform'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.release': {
'Meta': {'unique_together': "(('project', 'version'),)", 'object_name': 'Release'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_released': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.releaseenvironment': {
'Meta': {'unique_together': "(('project_id', 'release_id', 'environment_id'),)", 'object_name': 'ReleaseEnvironment', 'db_table': "'sentry_environmentrelease'"},
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.releasefile': {
'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile'},
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.savedsearch': {
'Meta': {'unique_together': "(('project', 'name'),)", 'object_name': 'SavedSearch'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'SavedSearchUserDefault', 'db_table': "'sentry_savedsearch_userdefault'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'savedsearch': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.SavedSearch']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_password_expired': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_password_change': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_column': "'first_name'", 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useravatar': {
'Meta': {'object_name': 'UserAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.User']"})
},
'sentry.useremail': {
'Meta': {'unique_together': "(('user', 'email'),)", 'object_name': 'UserEmail'},
'date_hash_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'emails'", 'to': "orm['sentry.User']"}),
'validation_hash': ('django.db.models.fields.CharField', [], {'max_length': '32'})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'UserReport', 'index_together': "(('project', 'event_id'), ('project', 'date_added'))"},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
}
}
complete_apps = ['sentry']
| |
# -*- coding: utf8 -*-
"""
.. module:: lesscpy.scripts.compiler
CSS/LESSCSS run script
http://lesscss.org/#docs
Copyright (c)
See LICENSE for details
.. moduleauthor:: Johann T. Mariusson <jtm@robot.is>
"""
from __future__ import print_function

import argparse
import copy
import errno
import glob
import os
import sys
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
from lesscpy.lessc import parser
from lesscpy.lessc import lexer
from lesscpy.lessc import formatter
VERSION_STR = 'Lesscpy compiler 0.9h'
def ldirectory(inpath, outpath, args, scope):
    """Compile every *.less file found in a directory.

    Args:
        inpath (str): Path to compile
        outpath (str): Output directory
        args (object): Argparse Object
        scope (Scope): Scope object or None
    """
    yacctab = 'yacctab' if args.debug else None
    if not outpath:
        sys.exit("Compile directory option needs -o ...")
    # Create the output directory on demand (skipped on dry runs).
    if not os.path.isdir(outpath):
        if args.verbose:
            print("Creating '%s'" % outpath, file=sys.stderr)
        if not args.dry_run:
            os.mkdir(outpath)
    fmt = formatter.Formatter(args)
    for src in glob.glob(os.path.join(inpath, '*.less')):
        base = os.path.splitext(os.path.basename(src))[0]
        suffix = '.min' if args.min_ending else ''
        target = "%s/%s%s.css" % (outpath, base, suffix)
        # Recompile when forced, when no css exists yet, or when the
        # .less source is newer than its generated .css file.
        if not args.force and os.path.exists(target):
            recompile = os.path.getmtime(target) < os.path.getmtime(src)
        else:
            recompile = True
        if recompile:
            print('%s -> %s' % (src, target))
            lessp = parser.LessParser(
                yacc_debug=(args.debug),
                lex_optimize=True,
                yacc_optimize=(not args.debug),
                scope=scope,
                tabfile=yacctab,
                verbose=args.verbose)
            lessp.parse(filename=src, debuglevel=0)
            css = fmt.format(lessp)
            if not args.dry_run:
                with open(target, 'w') as out:
                    out.write(css)
        elif args.verbose:
            print('skipping %s, not modified' % src, file=sys.stderr)
        sys.stdout.flush()
    if args.recurse:
        # Descend into visible subdirectories, mirroring the tree under
        # outpath; the output directory itself is never descended into.
        for entry in os.listdir(inpath):
            sub = os.path.join(inpath, entry)
            if (os.path.isdir(sub) and not entry.startswith('.')
                    and not entry == outpath):
                ldirectory(sub, os.path.join(outpath, entry), args, scope)
def run():
    """Run compiler.

    Parses the command line and dispatches to one of three modes:
    lex-only token dumps (-L), whole-directory compilation (when the
    target is a directory), or single-file compilation with an optional
    output path.
    """
    aparse = argparse.ArgumentParser(
        description='LessCss Compiler', epilog='<< jtm@robot.is @_o >>')
    aparse.add_argument(
        '-v', '--version', action='version', version=VERSION_STR)
    aparse.add_argument(
        '-I',
        '--include',
        action="store",
        type=str,
        help="Included less-files (comma separated)")
    aparse.add_argument(
        '-V',
        '--verbose',
        action="store_true",
        default=False,
        help="Verbose mode")
    aparse.add_argument(
        '-C',
        '--dont_create_dirs',
        action="store_true",
        default=False,
        help="Creates directories when outputing files (lessc non-compatible)")
    fgroup = aparse.add_argument_group('Formatting options')
    fgroup.add_argument(
        '-x',
        '--minify',
        action="store_true",
        default=False,
        help="Minify output")
    fgroup.add_argument(
        '-X',
        '--xminify',
        action="store_true",
        default=False,
        help="Minify output, no end of block newlines")
    fgroup.add_argument('-t', '--tabs', help="Use tabs", action="store_true")
    fgroup.add_argument(
        '-s',
        '--spaces',
        help="Number of startline spaces (default 2)",
        default=2)
    dgroup = aparse.add_argument_group(
        'Directory options', 'Compiles all *.less files in directory that '
        'have a newer timestamp than it\'s css file.')
    dgroup.add_argument('-o', '--out', action="store", help="Output directory")
    dgroup.add_argument(
        '-r',
        '--recurse',
        action="store_true",
        help="Recursive into subdirectorys")
    dgroup.add_argument(
        '-f',
        '--force',
        action="store_true",
        help="Force recompile on all files")
    dgroup.add_argument(
        '-m',
        '--min-ending',
        action="store_true",
        default=False,
        help="Add '.min' into output filename. eg, name.min.css")
    dgroup.add_argument(
        '-D',
        '--dry-run',
        action="store_true",
        default=False,
        help="Dry run, do not write files")
    group = aparse.add_argument_group('Debugging')
    group.add_argument(
        '-g',
        '--debug',
        action="store_true",
        default=False,
        help="Debugging information")
    group.add_argument(
        '-S',
        '--scopemap',
        action="store_true",
        default=False,
        help="Scopemap")
    group.add_argument(
        '-L',
        '--lex-only',
        action="store_true",
        default=False,
        help="Run lexer on target")
    group.add_argument(
        '-N',
        '--no-css',
        action="store_true",
        default=False,
        help="No css output")
    aparse.add_argument('target', help="less file or directory")
    aparse.add_argument('output', nargs='?', help="output file path")
    args = aparse.parse_args()
    try:
        #
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        #
        if args.lex_only:
            lex = lexer.LessLexer()
            ll = lex.file(args.target)
            while True:
                tok = ll.token()
                if not tok:
                    break
                if hasattr(tok,
                           "lexer"):  # literals don't have the lexer attribute
                    print(tok, "State:", tok.lexer.lexstate)
                else:
                    print(tok)
            print('EOF')
            sys.exit()
        #
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        #
        yacctab = 'yacctab' if args.debug else None
        scope = None
        if args.include:
            # Pre-parse each include file so its definitions are visible
            # (as a merged scope) when the real target is parsed.
            for u in args.include.split(','):
                if os.path.exists(u):
                    p = parser.LessParser(
                        yacc_debug=(args.debug),
                        lex_optimize=True,
                        yacc_optimize=(not args.debug),
                        tabfile=yacctab,
                        verbose=args.verbose)
                    p.parse(filename=u, debuglevel=args.debug)
                    if not scope:
                        scope = p.scope
                    else:
                        scope.update(p.scope)
                else:
                    sys.exit('included file `%s` not found ...' % u)
            sys.stdout.flush()
        p = None
        f = formatter.Formatter(args)
        if not os.path.exists(args.target):
            sys.exit("Target not found '%s' ..." % args.target)
        if os.path.isdir(args.target):
            ldirectory(args.target, args.out, args, scope)
            if args.dry_run:
                print('Dry run, nothing done.', file=sys.stderr)
        else:
            p = parser.LessParser(
                yacc_debug=(args.debug),
                lex_optimize=True,
                yacc_optimize=(not args.debug),
                scope=copy.deepcopy(scope),
                verbose=args.verbose)
            p.parse(filename=args.target, debuglevel=args.debug)
            if args.scopemap:
                args.no_css = True
                p.scopemap()
        if not args.no_css and p:
            out = f.format(p)
            if args.output:
                outdir = os.path.dirname(args.output)
                # Only try to create a parent directory when the output
                # path actually has one: os.makedirs('') raises OSError
                # for a bare filename like "style.css".
                if not args.dont_create_dirs and outdir \
                        and not os.path.exists(outdir):
                    try:
                        os.makedirs(outdir)
                    except OSError as exc:  # Guard against race condition
                        if exc.errno != errno.EEXIST:
                            raise
                # Named out_fd so it does not shadow the formatter `f`.
                with open(args.output, "w") as out_fd:
                    out_fd.write(out)
            else:
                print(out)
    except (KeyboardInterrupt, SystemExit, IOError):
        sys.exit('\nAborting...')
| |
#!/usr/bin/env python
# May 12, 2004 -- make alarms to DN if no EU conversion
#
tool_version = "1.0"
_debug = False
import sys, re, os, os.path
import getopt
from string import *
import time, datetime
apids = (20, 21)
def read_gina_file(filename):
    """Read a GINA export file and return its records as field lists.

    A record may span several physical lines; it ends at the first line
    that makes the accumulated text contain the '|||' end-of-record
    marker.  Each completed record is split on the '||' field separator.

    Uses a context manager so the file is closed even on error, and the
    str.find/str.split methods instead of the deprecated (Python-2-only)
    string-module functions the original relied on.
    """
    rows = []
    with open(filename, "r") as fd:
        record = ""
        for ln in fd:
            record += ln
            if record.find('|||') != -1:
                rows.append(record.split('||'))
                record = ""
    return rows
# Channel lookup tables, populated by parse_tlm_update() from TLM_UPDATES.csv.
tlm_name_to_chan = {}  # telemetry name -> channel id
chan_to_tlm_name = {}  # channel id -> telemetry name
chan_to_title = {}  # channel id -> human-readable title
def parse_tlm_update():
    """Load TLM_UPDATES.csv and fill the module-level channel lookup maps.

    Expects comma-separated rows of:
    name, old_chan, chan, old_title, title, subs, tlm_id
    (a header row is skipped).  NOTE(review): the naive comma split means
    the last field keeps its trailing newline and quoted commas would
    break the unpack -- assumes the export never contains embedded commas.
    """
    fd = open("TLM_UPDATES.csv", "r")
    ln = fd.readline() # get rid of title line
    for ln in fd:
        (name, old_ch, chan, old_title, title, subs, tlm_id) = split(ln, ',')
        if _debug: print "TLM Updates: name %s chan %s title %s subs %s" % (name,chan,title,subs)
        tlm_name_to_chan[name] = chan
        chan_to_tlm_name[chan] = name
        chan_to_title[chan] = title
    fd.close()
# Populate the channel lookup maps before any other table is processed.
parse_tlm_update()
# names of packets, referenced by apid
packet_names = {};
rows = []
rows = read_gina_file("tlm/OSCPacketNames")
# Each OSCPacketNames record carries the packet name in field 1 and its
# apid in field 2.
for row in rows:
    packet_names[row[2]] = row[1];
    if _debug: print "packet_names apid %s name %s" % (row[2], row[1])
class tlm_item:
    """One telemetry item (channel) assembled from the GINA export tables.

    Fields are filled incrementally by the module-level loops below:
    identity from TelemetryItemDefinition, state conversions from
    TelemetryStateConversions, alarm limits from TelemetryLimits,
    DN-to-EU polynomials from TelemetryAnalogConversions, and packet
    placement (apid / offsets) from TelemetryMaps.
    """
    def __init__(self):
        # Identity (from TelemetryItemDefinition).
        self.tlm_id = ""
        self.chid = ""
        self.subs = ""
        self.name = ""
        self.title = ""
        self.bitlength = ""
        self.data_type = ""
        self.eu_units = ""
        self.description = ""
        self.discussion = ""
        # Discrete state conversion (from TelemetryStateConversions).
        self.has_states = False
        self.state_dict = {}
        self.state_list = []
        # Alarm limits (from TelemetryLimits).
        self.has_alarms = False
        self.red_high = ""
        self.red_low = ""
        self.yellow_high = ""
        self.yellow_low = ""
        # DN-to-EU polynomial conversion (from TelemetryAnalogConversions).
        self.has_dn_to_eu = False
        self.low_dn = ""
        self.high_dn = ""
        self.poly = []
        # Packet placement (from TelemetryMaps / MapsDefinition).
        self.apid = ""
        self.apid_name = ""
        self.map_id = ""
        self.offset_byte = 0
        self.offset_bit = 0
    def dump(self):
        """Print a debug summary of everything known about this item."""
        print "%s id is channel %s subs %s name %s" % (self.tlm_id, self.chid, self.subs, self.name)
        print ".... %s bit length, %s data type" % (self.bitlength, self.data_type)
        if self.has_states:
            for st in self.state_list:
                print "......state %s => %s" % (st, self.state_dict[st])
        if self.has_alarms:
            print "...... red %s %s yellow %s %s" % (self.red_low, self.red_high, self.yellow_low,
                                                     self.yellow_high)
        if self.has_dn_to_eu:
            print "......%s %s %s %s %s %s %s" % (self.poly[0], self.poly[1], self.poly[2],
                                                  self.poly[3], self.poly[4], self.poly[5], self.poly[6])
        print "....apid %s name %s map_id %s offset %d bit %d" % (self.apid, self.apid_name, self.map_id,
                                                                  self.offset_byte, self.offset_bit)
# Build the master table of telemetry items (tlm_id -> tlm_item) from the
# TelemetryItemDefinition export, and collect the distinct subsystem names.
tlm_items = {}
subs_list = []
rows = []
rows = read_gina_file("tlm/TelemetryItemDefinition");
for row in rows:
    tlm_id = row[0]
    tlm = tlm_item()
    tlm.tlm_id = tlm_id
    tlm.subs = row[1]
    # Record each subsystem name the first time it is seen.
    if tlm.subs in subs_list:
        pass
    else:
        subs_list.append(tlm.subs)
    tlm.name = row[2]
    tlm.chid = tlm_name_to_chan[row[2]]
    if tlm.chid:
        tlm.title = chan_to_title[tlm.chid]
    else:
        print "Unknown channel id for name %s" % tlm.name;
    tlm.bitlength = row[3]
    bl = atoi(tlm.bitlength)
    # Items wider than 256 bits are dropped entirely.
    if bl > 256:
        continue
    tlm.osc_data_type = row[4]
    dt = row[4]
    # Map the one-letter OSC data-type code to the decom type name.
    # P/M/X are treated as plain unsigned; T is reported as invalid.
    if dt == 'F':
        tlm.data_type = 'FLOAT'
    elif dt == 'I':
        tlm.data_type = 'SIGNED'
    elif dt == 'U':
        tlm.data_type = 'UNSIGNED'
    elif dt == 'D':
        tlm.data_type = 'STATUS'
    elif dt == 'C':
        tlm.data_type = 'ASCII'
    elif dt == 'P' or dt == 'M' or dt == 'X':
        tlm.data_type = 'UNSIGNED'
    elif dt == 'T':
        print "%s tlm_id %s is an invalid floating point" % (tlm.chid, tlm.tlm_id)
    else:
        print "data type %s %s tlm_id %s is an unknown data type" % (dt, tlm.chid, tlm.tlm_id)
    tlm.eu_units = row[6]
    tlm.description = row[8]
    tlm.discussion = row[9]
    if _debug: tlm.dump()
    tlm_items[tlm.tlm_id] = tlm
# Attach discrete state conversions (state label -> eval string) to the
# telemetry items referenced by TelemetryStateConversions.
rows = []
rows = read_gina_file("tlm/TelemetryStateConversions");
for row in rows:
    tlm_id = row[0]
    eval_str = row[1]
    state = row[2]
    tlm = tlm_items[tlm_id]  # KeyError here means an unknown tlm_id in the export
    tlm.has_states = True
    tlm.state_dict[state] = eval_str
    tlm.state_list.append(state)
    if _debug: tlm.dump()
# Attach red/yellow alarm limits (and a delta field) to the telemetry
# items referenced by TelemetryLimits.
rows = []
rows = read_gina_file("tlm/TelemetryLimits")
for row in rows:
    tlm_id = row[0]
    tlm = tlm_items[tlm_id]
    tlm.has_alarms = True
    tlm.red_high = row[1]
    tlm.red_low = row[2]
    tlm.yellow_high = row[3];
    tlm.yellow_low = row[4];
    tlm.delta = row[5];
    if _debug: tlm.dump()
# Attach DN-to-EU analog conversions to their telemetry items: a DN range
# (fields 3-4) plus seven polynomial coefficients (fields 5-11).
rows = []
rows = read_gina_file("tlm/TelemetryAnalogConversions");
for row in rows:
    tlm_id = row[0]
    tlm = tlm_items[tlm_id]
    tlm.has_dn_to_eu = True
    tlm.low_dn = row[3]
    tlm.high_dn = row[4]
    tlm.poly.append(row[5])
    tlm.poly.append(row[6])
    tlm.poly.append(row[7])
    tlm.poly.append(row[8])
    tlm.poly.append(row[9])
    tlm.poly.append(row[10])
    tlm.poly.append(row[11])
    if _debug: tlm.dump()
# Bidirectional map-id <-> apid lookups built from the MapsDefinition export
# (field 0 is the map id, field 1 the apid).
mapid_to_apid = {}
apid_to_mapid = {}
rows = []
rows = read_gina_file("tlm/MapsDefinition")
for row in rows:
    mapid, apid = row[0], row[1]
    mapid_to_apid[mapid] = apid
    apid_to_mapid[apid] = mapid
class mapid_list:
    """Container for the ordered telemetry ids belonging to one MAP id."""
    def __init__(self):
        # Telemetry ids are appended in the order they appear in the map file.
        self.tlm_id_list = []
# Group telemetry ids by the MAP id that carries them.
# NOTE: dict.has_key() makes this file Python 2 only.
mapid_to_tlmid_list = {}
rows = []
rows = read_gina_file("tlm/TelemetryMaps")
for row in rows:
    mapid = row[0]
    tlmid = row[1]
    if mapid_to_tlmid_list.has_key(mapid):
        mapid_to_tlmid_list[mapid].tlm_id_list.append(tlmid)
    else:
        mapid_to_tlmid_list[mapid] = mapid_list()
        mapid_to_tlmid_list[mapid].tlm_id_list.append(tlmid)
# Walk each map in telemetry-id order, assigning every item its APID,
# APID name, map id, and running byte/bit offset within the packet.
# Offsets accumulate across item bit lengths (Python 2 integer division).
for mapid in mapid_to_tlmid_list.keys():
    tlmidlist = mapid_to_tlmid_list[mapid]
    apid = mapid_to_apid[mapid]
    apid_name = packet_names[apid]
    off_byte = 0
    off_bit = 0
    bits = 0
    for tlm_id in tlmidlist.tlm_id_list:
        if tlm_items.has_key(tlm_id):
            tlm = tlm_items[tlm_id]
            tlm.apid = apid
            tlm.apid_name = apid_name
            tlm.map_id = mapid
            tlm.offset_byte = off_byte
            tlm.offset_bit = off_bit
            bits += atoi(tlm.bitlength)
            off_byte = bits / 8
            off_bit = bits % 8
            if _debug: tlm.dump()
def tlmsort(left, right):
    """cmp-style comparator ordering telemetry ids by their channel id."""
    lchid = tlm_items[left].chid
    rchid = tlm_items[right].chid
    # Classic three-way compare: 1 / -1 / 0.
    return (lchid > rchid) - (lchid < rchid)
valid_bit_lengths = (8, 16, 32, 64)
def build_java_decom(fd, apid):
    # Emit one Java method, extractEhaFrom_<apid>, that walks the packet
    # layout for this APID's map and extracts each channel value.
    # Byte-aligned fields use doChan(); odd bit widths use doChanBits()
    # and only advance `off` once the accumulated bits reach a whole field.
    # NOTE(review): the "off += %d" written for the bit-packed case has no
    # trailing Java semicolon, unlike the byte-aligned case -- confirm the
    # generated Java compiles.
    fd.write("""
    private ArrayList<AbstractChannelValue> extractEhaFrom_%d(PacketMessage pm) {
        ArrayList<AbstractChannelValue> chanValList = new ArrayList<AbstractChannelValue>(1024);
        int off = 0;
    """ % apid)
    apidstr = "%d" % apid
    mapid = apid_to_mapid[apidstr]
    start_bit = 0
    tlen = 0
    tlm_list = mapid_to_tlmid_list[mapid]
    for tlm_id in tlm_list.tlm_id_list:
        tlm = tlm_items[tlm_id]
        blen = atoi(tlm.bitlength)
        if blen in valid_bit_lengths:
            fd.write("    chanValList.add(doChan(\"%s\", pkt, off)); off += %d;\n" %
                     (tlm.chid, blen / 8))
            start_bit = 0
            tlen = 0
        else:
            fd.write("chanValList.add(doChanBits(\"%s\", pkt, off, %d, %d));\n" % (tlm.chid, start_bit, blen))
            start_bit += blen
            tlen += blen
            if tlen in valid_bit_lengths:
                fd.write("    off += %d\n" % (tlen/8))
                start_bit = 0
                tlen = 0
    fd.write("}\n")
java_header = """
package jpl.gds.dawn.eha.mission;
import java.util.ArrayList;
import jpl.gds.core.eha.channel.dictionary.ChannelDefinition;
import jpl.gds.core.eha.channel.dictionary.ChannelDefinitionTable;
import jpl.gds.core.eha.channel.dictionary.ChannelId;
import jpl.gds.core.eha.channel.value.ASCIIChannelValue;
import jpl.gds.core.eha.channel.value.AbstractChannelValue;
import jpl.gds.core.eha.channel.value.IntegerChannelValue;
import jpl.gds.core.eha.mission.EhaMissionAdapter;
import jpl.gds.core.log.TraceManager;
import jpl.gds.core.packetextract.PacketMessage;
import jpl.gds.core.util.GDR;
/**
*
* DaweEhaAdapter extracts EHA from packets a
an instrument on the Lunar Reconnaissance Orbiter mission. It should not be used with other missions.
*
* @author Jesse Wright
*
*/
public class DlreEhaAdapter extends EhaMissionAdapter
{
private final ChannelDefinitionTable channelDict;
/**
* Creates an instance of GenericEhaAdapter.
*/
public DlreEhaAdapter()
{
super();
this.channelDict = ChannelDefinitionTable.getInstance();
}
private AbstractChannelValue doChan(String chanStr, byte[] pkt,int off)
{
ChannelId id = ChannelId.create(chanStr);
ChannelDefinition chanDef = this.channelDict.getDefinitionFromChannelId(id);
AbstractChannelValue chanVal = createChannelValueFromBytes(chanDef,pkt,off);
return(chanVal);
}
private AbstractChannelValue doChanBits(String chanStr, byte[] pkt,int off,int startBit, int bitLength)
{
ChannelId id = ChannelId.create(chanStr);
ChannelDefinition chanDef = this.channelDict.getDefinitionFromChannelId(id);
int val = GDR.get_u16(pkt, off, startBit, bitLength);
IntegerChannelValue chanVal = new IntegerChannelValue(val);
chanVal.setChanDef(chanDef);
return(chanVal);
}
private AbstractChannelValue doString(String chanStr, String valStr)
{
ChannelId id = ChannelId.create(chanStr);
ChannelDefinition chanDef = this.channelDict.getDefinitionFromChannelId(id);
int i = 0;
byte[] buff = new byte[valStr.length()+1];
for(; i < valStr.length(); i++)
{
buff[i] = (byte)valStr.charAt(i);
}
buff[i] = 0x00;
AbstractChannelValue chanVal = new ASCIIChannelValue(GDR.stringValue(buff,0,buff.length));
chanVal.setChanDef(chanDef);
return(chanVal);
}
"""
if __name__ == "__main__":
    # Emit the complete Java EHA adapter: static header, one per-APID
    # extractor method, then a dispatch method switching on the packet APID.
    fd = open("DawnEhaAdapter.java", "w")
    fd.write(java_header)
    for apid in apids:
        build_java_decom(fd, apid)
    # trailer
    fd.write("""
    @Override
    public ArrayList<AbstractChannelValue> extractEha(PacketMessage pm)
    {
        ArrayList<AbstractChannelValue> chanValList = null;
        switch(pm.getPktInfo().apid) {
    """)
    for apid in apids:
        fd.write("        case %d:\n" % apid)
        # BUG FIX: build_java_decom generates methods named
        # extractEhaFrom_<apid>(PacketMessage pm); the old trailer emitted
        # "chanValList = extract_%d;", referencing a nonexistent member and
        # never passing the packet.
        fd.write("            chanValList = extractEhaFrom_%d(pm);\n" % apid)
        fd.write("            break;\n")
    fd.write("""
        default:
            return(null);
        }
        return(chanValList);
    }
}
""")
    fd.close()
| |
"""Support to send and receive Telegram messages."""
from functools import partial
import importlib
import io
from ipaddress import ip_network
import logging
import requests
from requests.auth import HTTPBasicAuth, HTTPDigestAuth
from telegram import (
Bot,
InlineKeyboardButton,
InlineKeyboardMarkup,
ReplyKeyboardMarkup,
ReplyKeyboardRemove,
)
from telegram.error import TelegramError
from telegram.parsemode import ParseMode
from telegram.utils.request import Request
import voluptuous as vol
from homeassistant.const import (
ATTR_COMMAND,
ATTR_LATITUDE,
ATTR_LONGITUDE,
CONF_API_KEY,
CONF_PLATFORM,
CONF_TIMEOUT,
CONF_URL,
HTTP_DIGEST_AUTHENTICATION,
)
from homeassistant.exceptions import TemplateError
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
ATTR_DATA = "data"
ATTR_MESSAGE = "message"
ATTR_TITLE = "title"
ATTR_ARGS = "args"
ATTR_AUTHENTICATION = "authentication"
ATTR_CALLBACK_QUERY = "callback_query"
ATTR_CALLBACK_QUERY_ID = "callback_query_id"
ATTR_CAPTION = "caption"
ATTR_CHAT_ID = "chat_id"
ATTR_CHAT_INSTANCE = "chat_instance"
ATTR_DISABLE_NOTIF = "disable_notification"
ATTR_DISABLE_WEB_PREV = "disable_web_page_preview"
ATTR_EDITED_MSG = "edited_message"
ATTR_FILE = "file"
ATTR_FROM_FIRST = "from_first"
ATTR_FROM_LAST = "from_last"
ATTR_KEYBOARD = "keyboard"
ATTR_KEYBOARD_INLINE = "inline_keyboard"
ATTR_MESSAGEID = "message_id"
ATTR_MSG = "message"
ATTR_MSGID = "id"
ATTR_PARSER = "parse_mode"
ATTR_PASSWORD = "password"
ATTR_REPLY_TO_MSGID = "reply_to_message_id"
ATTR_REPLYMARKUP = "reply_markup"
ATTR_SHOW_ALERT = "show_alert"
ATTR_TARGET = "target"
ATTR_TEXT = "text"
ATTR_URL = "url"
ATTR_USER_ID = "user_id"
ATTR_USERNAME = "username"
ATTR_VERIFY_SSL = "verify_ssl"
CONF_ALLOWED_CHAT_IDS = "allowed_chat_ids"
CONF_PROXY_URL = "proxy_url"
CONF_PROXY_PARAMS = "proxy_params"
CONF_TRUSTED_NETWORKS = "trusted_networks"
DOMAIN = "telegram_bot"
SERVICE_SEND_MESSAGE = "send_message"
SERVICE_SEND_PHOTO = "send_photo"
SERVICE_SEND_STICKER = "send_sticker"
SERVICE_SEND_VIDEO = "send_video"
SERVICE_SEND_DOCUMENT = "send_document"
SERVICE_SEND_LOCATION = "send_location"
SERVICE_EDIT_MESSAGE = "edit_message"
SERVICE_EDIT_CAPTION = "edit_caption"
SERVICE_EDIT_REPLYMARKUP = "edit_replymarkup"
SERVICE_ANSWER_CALLBACK_QUERY = "answer_callback_query"
SERVICE_DELETE_MESSAGE = "delete_message"
SERVICE_LEAVE_CHAT = "leave_chat"
EVENT_TELEGRAM_CALLBACK = "telegram_callback"
EVENT_TELEGRAM_COMMAND = "telegram_command"
EVENT_TELEGRAM_TEXT = "telegram_text"
PARSER_HTML = "html"
PARSER_MD = "markdown"
DEFAULT_TRUSTED_NETWORKS = [ip_network("149.154.160.0/20"), ip_network("91.108.4.0/22")]
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.All(
cv.ensure_list,
[
vol.Schema(
{
vol.Required(CONF_PLATFORM): vol.In(
("broadcast", "polling", "webhooks")
),
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_ALLOWED_CHAT_IDS): vol.All(
cv.ensure_list, [vol.Coerce(int)]
),
vol.Optional(ATTR_PARSER, default=PARSER_MD): cv.string,
vol.Optional(CONF_PROXY_URL): cv.string,
vol.Optional(CONF_PROXY_PARAMS): dict,
# webhooks
vol.Optional(CONF_URL): cv.url,
vol.Optional(
CONF_TRUSTED_NETWORKS, default=DEFAULT_TRUSTED_NETWORKS
): vol.All(cv.ensure_list, [ip_network]),
}
)
],
)
},
extra=vol.ALLOW_EXTRA,
)
BASE_SERVICE_SCHEMA = vol.Schema(
{
vol.Optional(ATTR_TARGET): vol.All(cv.ensure_list, [vol.Coerce(int)]),
vol.Optional(ATTR_PARSER): cv.string,
vol.Optional(ATTR_DISABLE_NOTIF): cv.boolean,
vol.Optional(ATTR_DISABLE_WEB_PREV): cv.boolean,
vol.Optional(ATTR_KEYBOARD): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(ATTR_KEYBOARD_INLINE): cv.ensure_list,
vol.Optional(CONF_TIMEOUT): vol.Coerce(float),
},
extra=vol.ALLOW_EXTRA,
)
SERVICE_SCHEMA_SEND_MESSAGE = BASE_SERVICE_SCHEMA.extend(
{vol.Required(ATTR_MESSAGE): cv.template, vol.Optional(ATTR_TITLE): cv.template}
)
SERVICE_SCHEMA_SEND_FILE = BASE_SERVICE_SCHEMA.extend(
{
vol.Optional(ATTR_URL): cv.template,
vol.Optional(ATTR_FILE): cv.template,
vol.Optional(ATTR_CAPTION): cv.template,
vol.Optional(ATTR_USERNAME): cv.string,
vol.Optional(ATTR_PASSWORD): cv.string,
vol.Optional(ATTR_AUTHENTICATION): cv.string,
vol.Optional(ATTR_VERIFY_SSL): cv.boolean,
}
)
SERVICE_SCHEMA_SEND_LOCATION = BASE_SERVICE_SCHEMA.extend(
{
vol.Required(ATTR_LONGITUDE): cv.template,
vol.Required(ATTR_LATITUDE): cv.template,
}
)
SERVICE_SCHEMA_EDIT_MESSAGE = SERVICE_SCHEMA_SEND_MESSAGE.extend(
{
vol.Required(ATTR_MESSAGEID): vol.Any(
cv.positive_int, vol.All(cv.string, "last")
),
vol.Required(ATTR_CHAT_ID): vol.Coerce(int),
}
)
SERVICE_SCHEMA_EDIT_CAPTION = vol.Schema(
{
vol.Required(ATTR_MESSAGEID): vol.Any(
cv.positive_int, vol.All(cv.string, "last")
),
vol.Required(ATTR_CHAT_ID): vol.Coerce(int),
vol.Required(ATTR_CAPTION): cv.template,
vol.Optional(ATTR_KEYBOARD_INLINE): cv.ensure_list,
},
extra=vol.ALLOW_EXTRA,
)
SERVICE_SCHEMA_EDIT_REPLYMARKUP = vol.Schema(
{
vol.Required(ATTR_MESSAGEID): vol.Any(
cv.positive_int, vol.All(cv.string, "last")
),
vol.Required(ATTR_CHAT_ID): vol.Coerce(int),
vol.Required(ATTR_KEYBOARD_INLINE): cv.ensure_list,
},
extra=vol.ALLOW_EXTRA,
)
SERVICE_SCHEMA_ANSWER_CALLBACK_QUERY = vol.Schema(
{
vol.Required(ATTR_MESSAGE): cv.template,
vol.Required(ATTR_CALLBACK_QUERY_ID): vol.Coerce(int),
vol.Optional(ATTR_SHOW_ALERT): cv.boolean,
},
extra=vol.ALLOW_EXTRA,
)
SERVICE_SCHEMA_DELETE_MESSAGE = vol.Schema(
{
vol.Required(ATTR_CHAT_ID): vol.Coerce(int),
vol.Required(ATTR_MESSAGEID): vol.Any(
cv.positive_int, vol.All(cv.string, "last")
),
},
extra=vol.ALLOW_EXTRA,
)
SERVICE_SCHEMA_LEAVE_CHAT = vol.Schema({vol.Required(ATTR_CHAT_ID): vol.Coerce(int)})
SERVICE_MAP = {
SERVICE_SEND_MESSAGE: SERVICE_SCHEMA_SEND_MESSAGE,
SERVICE_SEND_PHOTO: SERVICE_SCHEMA_SEND_FILE,
SERVICE_SEND_STICKER: SERVICE_SCHEMA_SEND_FILE,
SERVICE_SEND_VIDEO: SERVICE_SCHEMA_SEND_FILE,
SERVICE_SEND_DOCUMENT: SERVICE_SCHEMA_SEND_FILE,
SERVICE_SEND_LOCATION: SERVICE_SCHEMA_SEND_LOCATION,
SERVICE_EDIT_MESSAGE: SERVICE_SCHEMA_EDIT_MESSAGE,
SERVICE_EDIT_CAPTION: SERVICE_SCHEMA_EDIT_CAPTION,
SERVICE_EDIT_REPLYMARKUP: SERVICE_SCHEMA_EDIT_REPLYMARKUP,
SERVICE_ANSWER_CALLBACK_QUERY: SERVICE_SCHEMA_ANSWER_CALLBACK_QUERY,
SERVICE_DELETE_MESSAGE: SERVICE_SCHEMA_DELETE_MESSAGE,
SERVICE_LEAVE_CHAT: SERVICE_SCHEMA_LEAVE_CHAT,
}
def load_data(
    hass,
    url=None,
    filepath=None,
    username=None,
    password=None,
    authentication=None,
    num_retries=5,
    verify_ssl=None,
):
    """Load data into ByteIO/File container from a source.

    Exactly one of `url` or `filepath` should be given; returns None when
    nothing could be loaded.
    """
    try:
        if url is not None:
            # Load data from URL
            request_kwargs = {"timeout": 15}
            if username is not None and password is not None:
                if authentication == HTTP_DIGEST_AUTHENTICATION:
                    auth = HTTPDigestAuth(username, password)
                else:
                    auth = HTTPBasicAuth(username, password)
                request_kwargs["auth"] = auth
            if verify_ssl is not None:
                request_kwargs["verify"] = verify_ssl
            for retry_num in range(num_retries):
                req = requests.get(url, **request_kwargs)
                if req.ok:
                    data = io.BytesIO(req.content)
                    if data.read():
                        # Rewind after the emptiness probe so callers read
                        # from the start.
                        data.seek(0)
                        data.name = url
                        return data
                    _LOGGER.warning("Empty data (retry #%s) in %s)", retry_num + 1, url)
                else:
                    _LOGGER.warning(
                        "Status code %s (retry #%s) loading %s",
                        req.status_code,
                        retry_num + 1,
                        url,
                    )
            _LOGGER.warning("Can't load data in %s after %s retries", url, num_retries)
        elif filepath is not None:
            if hass.config.is_allowed_path(filepath):
                return open(filepath, "rb")
            _LOGGER.warning("'%s' are not secure to load data from!", filepath)
        else:
            _LOGGER.warning("Can't load data. No data found in params!")
    except (OSError, TypeError) as error:
        _LOGGER.error("Can't load data into ByteIO: %s", error)
    return None
async def async_setup(hass, config):
    """Set up the Telegram bot component.

    For every configured platform (broadcast/polling/webhooks): import and
    set up the platform module, build a Bot, and register one service per
    SERVICE_MAP entry, all dispatched through async_send_telegram_message.
    """
    if not config[DOMAIN]:
        return False
    for p_config in config[DOMAIN]:
        p_type = p_config.get(CONF_PLATFORM)
        platform = importlib.import_module(
            ".{}".format(p_config[CONF_PLATFORM]), __name__
        )
        _LOGGER.info("Setting up %s.%s", DOMAIN, p_type)
        try:
            receiver_service = await platform.async_setup_platform(hass, p_config)
            if receiver_service is False:
                _LOGGER.error("Failed to initialize Telegram bot %s", p_type)
                return False
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception("Error setting up platform %s", p_type)
            return False
        bot = initialize_bot(p_config)
        notify_service = TelegramNotificationService(
            hass, bot, p_config.get(CONF_ALLOWED_CHAT_IDS), p_config.get(ATTR_PARSER)
        )
        async def async_send_telegram_message(service):
            """Handle sending Telegram Bot message service calls."""
            def _render_template_attr(data, attribute):
                # Render template-typed attributes in place; plain scalars
                # (float/int/str) pass through untouched.
                attribute_templ = data.get(attribute)
                if attribute_templ:
                    if any(
                        [isinstance(attribute_templ, vtype) for vtype in [float, int, str]]
                    ):
                        data[attribute] = attribute_templ
                    else:
                        attribute_templ.hass = hass
                        try:
                            data[attribute] = attribute_templ.async_render()
                        except TemplateError as exc:
                            _LOGGER.error(
                                "TemplateError in %s: %s -> %s",
                                attribute,
                                attribute_templ.template,
                                exc,
                            )
                            # Fall back to the raw template text on error.
                            data[attribute] = attribute_templ.template
            msgtype = service.service
            kwargs = dict(service.data)
            for attribute in [
                ATTR_MESSAGE,
                ATTR_TITLE,
                ATTR_URL,
                ATTR_FILE,
                ATTR_CAPTION,
                ATTR_LONGITUDE,
                ATTR_LATITUDE,
            ]:
                _render_template_attr(kwargs, attribute)
            _LOGGER.debug("New telegram message %s: %s", msgtype, kwargs)
            if msgtype == SERVICE_SEND_MESSAGE:
                await hass.async_add_job(partial(notify_service.send_message, **kwargs))
            elif msgtype in [
                SERVICE_SEND_PHOTO,
                SERVICE_SEND_STICKER,
                SERVICE_SEND_VIDEO,
                SERVICE_SEND_DOCUMENT,
            ]:
                await hass.async_add_job(
                    partial(notify_service.send_file, msgtype, **kwargs)
                )
            elif msgtype == SERVICE_SEND_LOCATION:
                await hass.async_add_job(partial(notify_service.send_location, **kwargs))
            elif msgtype == SERVICE_ANSWER_CALLBACK_QUERY:
                await hass.async_add_job(
                    partial(notify_service.answer_callback_query, **kwargs)
                )
            elif msgtype == SERVICE_DELETE_MESSAGE:
                await hass.async_add_job(partial(notify_service.delete_message, **kwargs))
            else:
                # All remaining services are the edit_* family.
                await hass.async_add_job(
                    partial(notify_service.edit_message, msgtype, **kwargs)
                )
        # Register notification services
        for service_notif, schema in SERVICE_MAP.items():
            hass.services.async_register(
                DOMAIN, service_notif, async_send_telegram_message, schema=schema
            )
    return True
def initialize_bot(p_config):
    """Initialize telegram bot with proxy support."""
    proxy_url = p_config.get(CONF_PROXY_URL)
    request_kwargs = {"con_pool_size": 8}
    if proxy_url is not None:
        # Route all API traffic through the configured proxy.
        request_kwargs["proxy_url"] = proxy_url
        request_kwargs["urllib3_proxy_kwargs"] = p_config.get(CONF_PROXY_PARAMS)
    request = Request(**request_kwargs)
    return Bot(token=p_config.get(CONF_API_KEY), request=request)
class TelegramNotificationService:
    """Implement the notification services for the Telegram Bot domain."""
    def __init__(self, hass, bot, allowed_chat_ids, parser):
        """Initialize the service."""
        self.allowed_chat_ids = allowed_chat_ids
        # The first allowed chat id acts as the implicit default target.
        self._default_user = self.allowed_chat_ids[0]
        # Last message id sent per chat, so callers can reference "last".
        self._last_message_id = {user: None for user in self.allowed_chat_ids}
        self._parsers = {PARSER_HTML: ParseMode.HTML, PARSER_MD: ParseMode.MARKDOWN}
        self._parse_mode = self._parsers.get(parser)
        self.bot = bot
        self.hass = hass
    def _get_msg_ids(self, msg_data, chat_id):
        """Get the message id to edit.
        This can be one of (message_id, inline_message_id) from a msg dict,
        returning a tuple.
        **You can use 'last' as message_id** to edit
        the message last sent in the chat_id.
        """
        message_id = inline_message_id = None
        if ATTR_MESSAGEID in msg_data:
            message_id = msg_data[ATTR_MESSAGEID]
            if (
                isinstance(message_id, str)
                and (message_id == "last")
                and (self._last_message_id[chat_id] is not None)
            ):
                message_id = self._last_message_id[chat_id]
        else:
            inline_message_id = msg_data["inline_message_id"]
        return message_id, inline_message_id
    def _get_target_chat_ids(self, target):
        """Validate chat_id targets or return default target (first).
        :param target: optional list of integers ([12234, -12345])
        :return list of chat_id targets (integers)
        """
        if target is not None:
            if isinstance(target, int):
                target = [target]
            chat_ids = [t for t in target if t in self.allowed_chat_ids]
            if chat_ids:
                return chat_ids
            _LOGGER.warning(
                "Disallowed targets: %s, using default: %s", target, self._default_user
            )
        return [self._default_user]
    def _get_msg_kwargs(self, data):
        """Get parameters in message data kwargs."""
        def _make_row_inline_keyboard(row_keyboard):
            """Make a list of InlineKeyboardButtons.
            It can accept:
              - a list of tuples like:
                `[(text_b1, data_callback_b1),
                (text_b2, data_callback_b2), ...]
              - a string like: `/cmd1, /cmd2, /cmd3`
              - or a string like: `text_b1:/cmd1, text_b2:/cmd2`
            """
            buttons = []
            if isinstance(row_keyboard, str):
                for key in row_keyboard.split(","):
                    if ":/" in key:
                        # commands like: 'Label:/cmd' become ('Label', '/cmd')
                        label = key.split(":/")[0]
                        command = key[len(label) + 1 :]
                        buttons.append(
                            InlineKeyboardButton(label, callback_data=command)
                        )
                    else:
                        # commands like: '/cmd' become ('CMD', '/cmd')
                        label = key.strip()[1:].upper()
                        buttons.append(InlineKeyboardButton(label, callback_data=key))
            elif isinstance(row_keyboard, list):
                for entry in row_keyboard:
                    text_btn, data_btn = entry
                    buttons.append(
                        InlineKeyboardButton(text_btn, callback_data=data_btn)
                    )
            else:
                raise ValueError(str(row_keyboard))
            return buttons
        # Defaults
        params = {
            ATTR_PARSER: self._parse_mode,
            ATTR_DISABLE_NOTIF: False,
            ATTR_DISABLE_WEB_PREV: None,
            ATTR_REPLY_TO_MSGID: None,
            ATTR_REPLYMARKUP: None,
            CONF_TIMEOUT: None,
        }
        if data is not None:
            if ATTR_PARSER in data:
                params[ATTR_PARSER] = self._parsers.get(
                    data[ATTR_PARSER], self._parse_mode
                )
            if CONF_TIMEOUT in data:
                params[CONF_TIMEOUT] = data[CONF_TIMEOUT]
            if ATTR_DISABLE_NOTIF in data:
                params[ATTR_DISABLE_NOTIF] = data[ATTR_DISABLE_NOTIF]
            if ATTR_DISABLE_WEB_PREV in data:
                params[ATTR_DISABLE_WEB_PREV] = data[ATTR_DISABLE_WEB_PREV]
            if ATTR_REPLY_TO_MSGID in data:
                params[ATTR_REPLY_TO_MSGID] = data[ATTR_REPLY_TO_MSGID]
            # Keyboards:
            if ATTR_KEYBOARD in data:
                keys = data.get(ATTR_KEYBOARD)
                keys = keys if isinstance(keys, list) else [keys]
                if keys:
                    params[ATTR_REPLYMARKUP] = ReplyKeyboardMarkup(
                        [[key.strip() for key in row.split(",")] for row in keys]
                    )
                else:
                    # An empty keyboard removes the custom keyboard.
                    params[ATTR_REPLYMARKUP] = ReplyKeyboardRemove(True)
            elif ATTR_KEYBOARD_INLINE in data:
                keys = data.get(ATTR_KEYBOARD_INLINE)
                keys = keys if isinstance(keys, list) else [keys]
                params[ATTR_REPLYMARKUP] = InlineKeyboardMarkup(
                    [_make_row_inline_keyboard(row) for row in keys]
                )
        return params
    def _send_msg(self, func_send, msg_error, *args_msg, **kwargs_msg):
        """Send one message.

        Remembers the returned message id per chat (for 'last' references)
        and logs instead of raising on TelegramError.
        """
        try:
            out = func_send(*args_msg, **kwargs_msg)
            if not isinstance(out, bool) and hasattr(out, ATTR_MESSAGEID):
                chat_id = out.chat_id
                self._last_message_id[chat_id] = out[ATTR_MESSAGEID]
                _LOGGER.debug(
                    "Last message ID: %s (from chat_id %s)",
                    self._last_message_id,
                    chat_id,
                )
            elif not isinstance(out, bool):
                _LOGGER.warning(
                    "Update last message: out_type:%s, out=%s", type(out), out
                )
            return out
        except TelegramError as exc:
            _LOGGER.error(
                "%s: %s. Args: %s, kwargs: %s", msg_error, exc, args_msg, kwargs_msg
            )
    def send_message(self, message="", target=None, **kwargs):
        """Send a message to one or multiple pre-allowed chat IDs."""
        title = kwargs.get(ATTR_TITLE)
        text = f"{title}\n{message}" if title else message
        params = self._get_msg_kwargs(kwargs)
        for chat_id in self._get_target_chat_ids(target):
            _LOGGER.debug("Send message in chat ID %s with params: %s", chat_id, params)
            self._send_msg(
                self.bot.sendMessage, "Error sending message", chat_id, text, **params
            )
    def delete_message(self, chat_id=None, **kwargs):
        """Delete a previously sent message."""
        chat_id = self._get_target_chat_ids(chat_id)[0]
        message_id, _ = self._get_msg_ids(kwargs, chat_id)
        _LOGGER.debug("Delete message %s in chat ID %s", message_id, chat_id)
        deleted = self._send_msg(
            self.bot.deleteMessage, "Error deleting message", chat_id, message_id
        )
        # reduce message_id anyway:
        if self._last_message_id[chat_id] is not None:
            # change last msg_id for deque(n_msgs)?
            self._last_message_id[chat_id] -= 1
        return deleted
    def edit_message(self, type_edit, chat_id=None, **kwargs):
        """Edit a previously sent message (text, caption, or reply markup)."""
        chat_id = self._get_target_chat_ids(chat_id)[0]
        message_id, inline_message_id = self._get_msg_ids(kwargs, chat_id)
        params = self._get_msg_kwargs(kwargs)
        _LOGGER.debug(
            "Edit message %s in chat ID %s with params: %s",
            message_id or inline_message_id,
            chat_id,
            params,
        )
        if type_edit == SERVICE_EDIT_MESSAGE:
            message = kwargs.get(ATTR_MESSAGE)
            title = kwargs.get(ATTR_TITLE)
            text = f"{title}\n{message}" if title else message
            _LOGGER.debug(
                "Editing message with ID %s.", message_id or inline_message_id
            )
            return self._send_msg(
                self.bot.editMessageText,
                "Error editing text message",
                text,
                chat_id=chat_id,
                message_id=message_id,
                inline_message_id=inline_message_id,
                **params,
            )
        if type_edit == SERVICE_EDIT_CAPTION:
            func_send = self.bot.editMessageCaption
            params[ATTR_CAPTION] = kwargs.get(ATTR_CAPTION)
        else:
            func_send = self.bot.editMessageReplyMarkup
        return self._send_msg(
            func_send,
            "Error editing message attributes",
            chat_id=chat_id,
            message_id=message_id,
            inline_message_id=inline_message_id,
            **params,
        )
    def answer_callback_query(
        self, message, callback_query_id, show_alert=False, **kwargs
    ):
        """Answer a callback originated with a press in an inline keyboard."""
        params = self._get_msg_kwargs(kwargs)
        _LOGGER.debug(
            "Answer callback query with callback ID %s: %s, " "alert: %s.",
            callback_query_id,
            message,
            show_alert,
        )
        self._send_msg(
            self.bot.answerCallbackQuery,
            "Error sending answer callback query",
            callback_query_id,
            text=message,
            show_alert=show_alert,
            **params,
        )
    def send_file(self, file_type=SERVICE_SEND_PHOTO, target=None, **kwargs):
        """Send a photo, sticker, video, or document."""
        params = self._get_msg_kwargs(kwargs)
        caption = kwargs.get(ATTR_CAPTION)
        func_send = {
            SERVICE_SEND_PHOTO: self.bot.sendPhoto,
            SERVICE_SEND_STICKER: self.bot.sendSticker,
            SERVICE_SEND_VIDEO: self.bot.sendVideo,
            SERVICE_SEND_DOCUMENT: self.bot.sendDocument,
        }.get(file_type)
        file_content = load_data(
            self.hass,
            url=kwargs.get(ATTR_URL),
            filepath=kwargs.get(ATTR_FILE),
            username=kwargs.get(ATTR_USERNAME),
            password=kwargs.get(ATTR_PASSWORD),
            authentication=kwargs.get(ATTR_AUTHENTICATION),
            verify_ssl=kwargs.get(ATTR_VERIFY_SSL),
        )
        if file_content:
            for chat_id in self._get_target_chat_ids(target):
                _LOGGER.debug("Send file to chat ID %s. Caption: %s.", chat_id, caption)
                self._send_msg(
                    func_send,
                    "Error sending file",
                    chat_id,
                    file_content,
                    caption=caption,
                    **params,
                )
                # Rewind so the same buffer can be sent to the next target.
                file_content.seek(0)
        else:
            _LOGGER.error("Can't send file with kwargs: %s", kwargs)
    def send_location(self, latitude, longitude, target=None, **kwargs):
        """Send a location."""
        latitude = float(latitude)
        longitude = float(longitude)
        params = self._get_msg_kwargs(kwargs)
        for chat_id in self._get_target_chat_ids(target):
            _LOGGER.debug(
                "Send location %s/%s to chat ID %s.", latitude, longitude, chat_id
            )
            self._send_msg(
                self.bot.sendLocation,
                "Error sending location",
                chat_id=chat_id,
                latitude=latitude,
                longitude=longitude,
                **params,
            )
    def leave_chat(self, chat_id=None):
        """Remove bot from chat."""
        chat_id = self._get_target_chat_ids(chat_id)[0]
        _LOGGER.debug("Leave from chat ID %s", chat_id)
        leaved = self._send_msg(self.bot.leaveChat, "Error leaving chat", chat_id)
        return leaved
class BaseTelegramBotEntity:
    """The base class for the telegram bot."""
    def __init__(self, hass, allowed_chat_ids):
        """Initialize the bot base class."""
        self.allowed_chat_ids = allowed_chat_ids
        self.hass = hass
    def _get_message_data(self, msg_data):
        """Return boolean msg_data_is_ok and dict msg_data."""
        if not msg_data:
            return False, None
        bad_fields = (
            "text" not in msg_data and "data" not in msg_data and "chat" not in msg_data
        )
        if bad_fields or "from" not in msg_data:
            # Message is not correct.
            _LOGGER.error("Incoming message does not have required data (%s)", msg_data)
            return False, None
        if msg_data["from"].get("id") not in self.allowed_chat_ids or (
            "chat" in msg_data
            and msg_data["chat"].get("id") not in self.allowed_chat_ids
        ):
            # Origin is not allowed.
            # Note: (True, None) -> structurally valid but rejected sender.
            _LOGGER.error("Incoming message is not allowed (%s)", msg_data)
            return True, None
        data = {
            ATTR_USER_ID: msg_data["from"]["id"],
            ATTR_FROM_FIRST: msg_data["from"]["first_name"],
        }
        if "message_id" in msg_data:
            data[ATTR_MSGID] = msg_data["message_id"]
        if "last_name" in msg_data["from"]:
            data[ATTR_FROM_LAST] = msg_data["from"]["last_name"]
        if "chat" in msg_data:
            data[ATTR_CHAT_ID] = msg_data["chat"]["id"]
        elif ATTR_MESSAGE in msg_data and "chat" in msg_data[ATTR_MESSAGE]:
            data[ATTR_CHAT_ID] = msg_data[ATTR_MESSAGE]["chat"]["id"]
        return True, data
    def process_message(self, data):
        """Check for basic message rules and fire an event if message is ok."""
        if ATTR_MSG in data or ATTR_EDITED_MSG in data:
            event = EVENT_TELEGRAM_COMMAND
            if ATTR_MSG in data:
                data = data.get(ATTR_MSG)
            else:
                data = data.get(ATTR_EDITED_MSG)
            message_ok, event_data = self._get_message_data(data)
            if event_data is None:
                return message_ok
            if ATTR_MSGID in data:
                event_data[ATTR_MSGID] = data[ATTR_MSGID]
            if "text" in data:
                if data["text"][0] == "/":
                    # Leading slash: treat as a command with arguments.
                    pieces = data["text"].split(" ")
                    event_data[ATTR_COMMAND] = pieces[0]
                    event_data[ATTR_ARGS] = pieces[1:]
                else:
                    event_data[ATTR_TEXT] = data["text"]
                    event = EVENT_TELEGRAM_TEXT
            else:
                _LOGGER.warning("Message without text data received: %s", data)
                event_data[ATTR_TEXT] = str(data)
                event = EVENT_TELEGRAM_TEXT
            self.hass.bus.async_fire(event, event_data)
            return True
        if ATTR_CALLBACK_QUERY in data:
            event = EVENT_TELEGRAM_CALLBACK
            data = data.get(ATTR_CALLBACK_QUERY)
            message_ok, event_data = self._get_message_data(data)
            if event_data is None:
                return message_ok
            event_data[ATTR_DATA] = data[ATTR_DATA]
            event_data[ATTR_MSG] = data[ATTR_MSG]
            event_data[ATTR_CHAT_INSTANCE] = data[ATTR_CHAT_INSTANCE]
            event_data[ATTR_MSGID] = data[ATTR_MSGID]
            self.hass.bus.async_fire(event, event_data)
            return True
        _LOGGER.warning("Message with unknown data received: %s", data)
        return True
| |
import cv2
import numpy as np
def gray(img):
    """
    Return a single-channel grayscale copy of BGR image `img`.
    """
    grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return grayscale
def upscale(img, factor=5):
    """
    Return `img` enlarged by `factor` in both axes (cubic interpolation).
    """
    return cv2.resize(
        img, None, fx=factor, fy=factor, interpolation=cv2.INTER_CUBIC
    )
def blur(img, kernel_size=4):
    """
    Smooth `img` with a normalized box filter of size
    `kernel_size` x `kernel_size`.
    """
    ksize = (kernel_size, kernel_size)
    return cv2.blur(img, ksize)
def median_blur(img, kernel_size=1):
    """
    Apply a median filter with aperture `kernel_size` to `img`.
    """
    filtered = cv2.medianBlur(img, kernel_size)
    return filtered
def threshold(img, threshold=160):
    """
    Binarize `img`: pixels above `threshold` become 255, the rest 0.
    """
    # cv2.threshold returns (retval, image); only the image is needed here.
    _, binarized = cv2.threshold(img, threshold, 255, cv2.THRESH_BINARY)
    return binarized
def adaptive_threshold(img, threshold=200):
    """
    Perform adaptive thresholding on `img` with `threshold`

    NOTE(review): `threshold` is passed as adaptiveThreshold's maxValue
    (the value written to passing pixels), not as a cutoff; the cutoff is
    computed adaptively (Gaussian, 11x11 block, C=2) -- confirm callers
    expect that.
    """
    return cv2.adaptiveThreshold(img, threshold,
                                 cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                 cv2.THRESH_BINARY, 11, 2)
def invert(img):
    """
    Return the bitwise negation of a binary image.
    """
    inverted = cv2.bitwise_not(img)
    return inverted
def mser(img, delta=3,
         min_area=500, max_area=35000,
         max_variation=0.1, min_diversity=0.5):
    """
    Detect stable regions with MSER (Maximally stable extremal regions)
    and return their convex-hull bounding boxes as (x, y, w, h) tuples.
    """
    detector = cv2.MSER(delta, min_area, max_area, max_variation,
                        min_diversity)
    hulls = [cv2.convexHull(region.reshape(-1, 1, 2))
             for region in detector.detect(img)]
    return [cv2.boundingRect(hull) for hull in hulls]
def kmeans_quantize(img, clusters=2):
    """
    Color quantization into number of clusters

    Flattens the image to an Nx3 float32 sample matrix, runs one k-means
    attempt, then paints every pixel with its cluster center color.
    """
    Z = img.reshape((-1, 3))
    Z = np.float32(Z)
    # Stop after 20 iterations or when centers move less than 1.0.
    criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_MAX_ITER, 20, 1)
    ret, label, center = cv2.kmeans(
        Z, clusters, criteria, 1, flags=cv2.KMEANS_RANDOM_CENTERS)
    center = np.uint8(center)
    res = center[label.flatten()]
    return res.reshape((img.shape))
def erode(im, iters=1, k1_size=2, k2_size=2):
    """
    Erode `im` `iters` times with a k1_size x k2_size rectangular kernel.
    """
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (k1_size, k2_size))
    return cv2.erode(im, kernel, iterations=iters)
def dilate(im, iters=5, k1_size=5, k2_size=2):
    """
    Dilate `im` `iters` times with a k1_size x k2_size rectangular kernel.
    """
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (k1_size, k2_size))
    return cv2.dilate(im, kernel, iterations=iters)
def contours(im, bounding_box_x_adjust=4):
    """
    Find contours and their bounding boxes
    Returns list of found segments

    NOTE(review): despite its name, `bounding_box_x_adjust` shifts each
    box's *y* coordinate upward (clamped to a minimum of 1) -- confirm
    whether x or y was intended.
    """
    cim = im.copy() # findContours alters src image
    contours, hierarchy = cv2.findContours(
        cim, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    segs = []
    for c in contours:
        x, y, w, h = cv2.boundingRect(c)
        y -= bounding_box_x_adjust
        y = max(1, y)
        segs.append((x, y, w, h))
    return segs
def same(a, b):
    """
    Return True when images `a` and `b` are pixel-for-pixel identical.
    """
    if a is None or b is None:
        return False
    if a.shape != b.shape:
        return False
    # Per-channel sums of the absolute difference are all zero iff equal.
    channel_sums = cv2.sumElems(cv2.absdiff(a, b))
    return not any(channel_sums)
def similar(a, b, max_differ=200):
    """
    Return True when `a` and `b` differ in fewer than `max_differ`
    pixels, compared on their grayscale versions.
    """
    if a is None or b is None:
        return False
    if a.shape != b.shape:
        return False
    mismatch_mask = cv2.compare(gray(a), gray(b), cv2.CMP_NE)
    mismatch_count = cv2.countNonZero(mismatch_mask)
    #print("Differs {0}".format(mismatch_count))
    return mismatch_count < max_differ
# COMPOSED
def mser_segments(img, delta=3,
                  min_area=500, max_area=35000,
                  max_variation=0.1, min_diversity=0.5):
    """
    Detect MSER segments on the grayscale version of a color image.
    Returns list of (x, y, w, h) tuples of detected segments.
    """
    grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    detector = cv2.MSER(delta, min_area, max_area, max_variation,
                        min_diversity)
    boxes = []
    for region in detector.detect(grayscale):
        hull = cv2.convexHull(region.reshape(-1, 1, 2))
        boxes.append(cv2.boundingRect(hull))
    return boxes
def template_match(target, template):
    """
    Slide `template` over `target` (normalized cross-correlation) and
    return (best score, x, y) where x/y is the center of the best match.
    """
    h, w, d = template.shape
    scores = cv2.matchTemplate(target, template, cv2.TM_CCOEFF_NORMED)
    _min_val, best_val, _min_loc, best_loc = cv2.minMaxLoc(scores)
    return best_val, best_loc[0] + w / 2, best_loc[1] + h / 2
def ocr_optimize(img, upscale=5, threshold=160, blur_kernel_size=4):
    """
    Optimize image for further OCR processing:
    upscale -> grayscale -> box blur -> binary threshold.
    """
    scaled = cv2.resize(img, None, fx=upscale, fy=upscale,
                        interpolation=cv2.INTER_CUBIC)
    grayscale = cv2.cvtColor(scaled, cv2.COLOR_BGR2GRAY)
    smoothed = cv2.blur(grayscale, (blur_kernel_size, blur_kernel_size))
    _, binarized = cv2.threshold(smoothed, threshold, 255, cv2.THRESH_BINARY)
    return binarized
def ocr_optimize_adaptive(img, upscale=5, blur=1):
    """
    Optimize image for further OCR processing:
    upscale -> grayscale -> invert -> median blur -> adaptive threshold.
    """
    scaled = cv2.resize(img, None, fx=upscale, fy=upscale,
                        interpolation=cv2.INTER_CUBIC)
    grayscale = cv2.cvtColor(scaled, cv2.COLOR_BGR2GRAY)
    inverted = cv2.bitwise_not(grayscale)
    smoothed = cv2.medianBlur(inverted, blur)
    return cv2.adaptiveThreshold(smoothed, 255,
                                 cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                 cv2.THRESH_BINARY, 11, 2)
def sobel_optimize(img, upscale=5, median_blur=1, color_offset=200):
    """
    Optimize image for OCR processing using Sobel operator
    Unused for now

    Pipeline: upscale -> grayscale -> Sobel gradient magnitude (x + y)
    -> median blur -> hard black/white split at `color_offset`.
    """
    img = cv2.resize(img, None, fx=upscale, fy=upscale,
                     interpolation=cv2.INTER_CUBIC)
    grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ddepth = cv2.CV_16S
    delta = 0
    scale = 1
    grad_x = cv2.Sobel(grayscale, ddepth, 1, 0, ksize=3,
                       scale=scale, delta=delta, borderType=cv2.BORDER_DEFAULT)
    grad_y = cv2.Sobel(grayscale, ddepth, 0, 1, ksize=3,
                       scale=scale, delta=delta, borderType=cv2.BORDER_DEFAULT)
    abs_grad_x = cv2.convertScaleAbs(grad_x)
    abs_grad_y = cv2.convertScaleAbs(grad_y)
    im = cv2.addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0)
    # BUG FIX: blur the combined gradient image. The old code blurred the
    # plain grayscale image here, silently discarding the Sobel result.
    im = cv2.medianBlur(im, median_blur)
    im[im >= color_offset] = 255
    im[im < color_offset] = 0  # black
    return im
def contour_segments(im, invert=True, threshold=130,
                     erode_iters=1, erode_k1_size=2, erode_k2_size=2,
                     dilate_iters=5, dilate_k1_size=5, dilate_k2_size=2,
                     bounding_box_x_adjust=4):
    """
    Pre-process an image with erosion/dilation and locate segments that
    resemble text lines via contour search.

    Returns the result of the module-level `contours` helper applied to
    the processed binary image.
    """
    grayscale = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    _, binary = cv2.threshold(grayscale, threshold, 255, cv2.THRESH_BINARY)
    if invert:
        binary = cv2.bitwise_not(binary)
    eroded = erode(binary, erode_iters, erode_k1_size, erode_k2_size)
    dilated = dilate(eroded, dilate_iters, dilate_k1_size, dilate_k2_size)
    return contours(dilated)
| |
"""
Classes implementing logic related to the conditional distributions
in the VAE framework
"""
__authors__ = "Vincent Dumoulin"
__copyright__ = "Copyright 2014, Universite de Montreal"
__credits__ = ["Vincent Dumoulin"]
__license__ = "3-clause BSD"
__maintainer__ = "Vincent Dumoulin"
__email__ = "pylearn-dev@googlegroups"
import numpy
import theano
import theano.tensor as T
from theano.compat.python2x import OrderedDict
from pylearn2.models import Model
from pylearn2.models.mlp import Linear, CompositeLayer
from pylearn2.space import VectorSpace, CompositeSpace
from pylearn2.utils.rng import make_theano_rng
from pylearn2.utils import wraps, sharedX
# Shared Theano constant for pi, used in the Gaussian log-density below.
pi = sharedX(numpy.pi)
class Conditional(Model):
    """
    Abstract class implementing methods related to a conditional distribution
    :math:`f_\\omega(\\mathbf{a} \\mid \\mathbf{b})`. Used in the VAE framework
    for the conditional :math:`p_\\theta(\\mathbf{x} \\mid \\mathbf{z})` and
    the posterior :math:`q_\\phi(\\mathbf{z} \\mid \\mathbf{x})`.

    Parameters
    ----------
    mlp : pylearn2.models.mlp.MLP
        An MLP mapping the variable conditioned on (e.g. x for the posterior
        distribution or z for the conditional distribution in the VAE
        framework) to the distribution parameters. Note that the MLP must be
        **nested**, meaning that its input space must not have already been
        defined, as `Conditional` will do it automatically.
    name : str
        A string identifier for this conditional distribution (e.g.
        "posterior" or "conditional")
    output_layer_required : bool, optional
        If `True`, the MLP's output is the last hidden representation from
        which parameters of the conditional distribution will be computed, and
        `Conditional` will add its own default output layer to the MLP. If
        `False`, the MLP's last layer **is** the output layer. Defaults to
        `True`.
    """
    def __init__(self, mlp, name, output_layer_required=True):
        super(Conditional, self).__init__()
        if not mlp._nested:
            raise ValueError(str(self.__class__) + " expects an MLP whose " +
                             "input space has not been defined yet. You " +
                             "should not specify 'nvis' or 'input_space' " +
                             "when instantiating the MLP.")
        self.mlp = mlp
        self.name = name
        self.output_layer_required = output_layer_required

    def get_weights(self):
        """
        Returns its MLP's weights
        """
        return self.mlp.get_weights()

    def get_lr_scalers(self):
        """
        Returns the encoding model's learning rate scalers
        """
        return self.mlp.get_lr_scalers()

    def _get_default_output_layer(self):
        """
        Returns a default `Layer` mapping the MLP's last hidden representation
        to parameters of the conditional distribution
        """
        raise NotImplementedError(str(self.__class__) + " does not implement "
                                  "_get_default_output_layer")

    def _get_required_mlp_output_space(self):
        """
        Returns the expected output space of the MLP, i.e. a description of
        how the parameters output by the MLP should look like.
        """
        raise NotImplementedError(str(self.__class__) + " does not implement "
                                  "_get_required_mlp_output_space")

    def _validate_mlp(self):
        """
        Makes sure the MLP's output layer is compatible with the parameters
        expected by the conditional distribution
        """
        expected_output_space = self._get_required_mlp_output_space()
        mlp_output_space = self.mlp.get_output_space()
        if not mlp_output_space == expected_output_space:
            raise ValueError("the specified MLP's output space is " +
                             "incompatible with " + str(self.__class__) + ": "
                             "expected " + str(expected_output_space) + " but "
                             "encoding model's output space is " +
                             str(mlp_output_space))

    def monitoring_channels_from_conditional_params(self, conditional_params):
        """
        Get monitoring channels from the parameters of the conditional
        distribution.

        By default, no monitoring channel is computed.

        Parameters
        ----------
        conditional_params : tuple of tensor_like
            Parameters of the conditional distribution
        """
        return OrderedDict()

    def _modify_updates(self, updates):
        """
        Modifies the parameters before a learning update is applied.

        By default, only calls the MLP's `modify_updates` method.
        """
        self.mlp.modify_updates(updates)

    def get_vae(self):
        """
        Returns the VAE that this `Conditional` instance belongs to, or None
        if it has not been assigned to a VAE yet.
        """
        if hasattr(self, 'vae'):
            return self.vae
        else:
            return None

    def set_vae(self, vae):
        """
        Assigns this `Conditional` instance to a VAE.

        Parameters
        ----------
        vae : pylearn2.models.vae.VAE
            VAE to assign to

        Raises
        ------
        RuntimeError
            If this instance already belongs to another VAE.
        """
        if self.get_vae() is not None:
            raise RuntimeError("this " + str(self.__class__) + " instance " +
                               "already belongs to another VAE")
        self.vae = vae
        self.rng = self.vae.rng
        # Seed a Theano RNG from the VAE's numpy RNG for symbolic sampling.
        self.theano_rng = make_theano_rng(int(self.rng.randint(2 ** 30)),
                                          which_method=["normal", "uniform"])
        self.batch_size = vae.batch_size

    def initialize_parameters(self, input_space, ndim):
        """
        Initialize model parameters.

        Parameters
        ----------
        input_space : pylearn2.space.Space
            The input space for the MLP
        ndim : int
            Number of units of a in f(a | b)
        """
        self.ndim = ndim
        self.input_space = input_space
        if self.output_layer_required:
            self.mlp.add_layers([self._get_default_output_layer()])
        self.mlp.set_mlp(self)
        self.mlp.set_input_space(self.input_space)
        self._validate_mlp()
        self._params = self.mlp.get_params()
        # Prefix parameter names so posterior/conditional params are
        # distinguishable inside the parent VAE.
        for param in self._params:
            param.name = self.name + "_" + param.name

    def encode_conditional_params(self, X):
        """
        Maps input `X` to a tuple of parameters of the conditional
        distribution

        Parameters
        ----------
        X : tensor_like
            Input

        Returns
        -------
        conditional_params : tuple of tensor_like
            Tuple of parameters for the conditional distribution
        """
        conditional_params = self.mlp.fprop(X)
        # Normalize to a tuple so downstream code can always unpack.
        # (idiom fix: isinstance instead of a `type(...) == tuple` check)
        if not isinstance(conditional_params, tuple):
            conditional_params = (conditional_params, )
        return conditional_params

    def conditional_expectation(self, conditional_params):
        """
        Given parameters of the conditional distribution, returns the
        expected value of a in p(a | b).

        Parameters
        ----------
        conditional_params : tuple of tensor_like
            Tuple of parameters for the conditional distribution
        """
        raise NotImplementedError(str(self.__class__) + " does not implement "
                                  "conditional_expectation.")

    def sample_from_conditional(self, conditional_params, epsilon=None,
                                num_samples=None):
        """
        Given a tuple of conditional parameters and an epsilon noise sample,
        generates samples from the conditional distribution.

        Parameters
        ----------
        conditional_params : tuple of tensor_like
            Tuple of parameters for the conditional distribution
        epsilon : tensor_like, optional
            Noise sample used to sample with the reparametrization trick. If
            `None`, sampling will be done without the reparametrization
            trick. Defaults to `None`.
        num_samples : int, optional
            Number of requested samples, in case the reparametrization trick
            is not used

        Returns
        -------
        rval : tensor_like
            Samples
        """
        raise NotImplementedError(str(self.__class__) + " does not implement "
                                  "sample_from_conditional.")

    def sample_from_epsilon(self, shape):
        """
        Samples from a canonical noise distribution from which conditional
        samples will be drawn using the reparametrization trick.

        Parameters
        ----------
        shape : tuple of int
            Shape of the requested samples

        Returns
        -------
        epsilon : tensor_like
            Noise samples

        Notes
        -----
        If using the reparametrization trick is not possible for this
        particular conditional distribution, will raise an exception.
        """
        raise NotImplementedError(str(self.__class__) + " does not implement "
                                  "sample_from_epsilon, which probably "
                                  "means it is not able to sample using the "
                                  "reparametrization trick.")

    def log_conditional(self, samples, conditional_params):
        """
        Given the conditional parameters, computes the log-conditional
        probabilities of samples of this distribution.

        Parameters
        ----------
        samples : tensor_like
            Conditional samples
        conditional_params : tuple of tensor_like
            Tuple of parameters for the conditional distribution

        Returns
        -------
        log_conditonal : tensor_like
            Log-conditional probabilities
        """
        raise NotImplementedError(str(self.__class__) + " does not implement "
                                  "log_conditional.")
class BernoulliVector(Conditional):
    """
    Implements a vectorial bernoulli conditional distribution, i.e.

    .. math::
        f_\\omega(\\mathbf{a} \\mid \\mathbf{b})
        = \\prod_i \\mu_i(\\mathbf{b})^{a_i}
                   (1 - \\mu_i(\\mathbf{b}))^{(1 - a_i)}

    The distribution parameters produced by the MLP are the *pre-sigmoid*
    activations of :math:`\\mu`; see `log_conditional`.

    Parameters
    ----------
    See `Conditional`
    """
    @wraps(Conditional._get_default_output_layer)
    def _get_default_output_layer(self):
        return Linear(dim=self.ndim, layer_name='mu', irange=0.01)

    @wraps(Conditional._get_required_mlp_output_space)
    def _get_required_mlp_output_space(self):
        return VectorSpace(dim=self.ndim)

    @wraps(Conditional.sample_from_conditional)
    def sample_from_conditional(self, conditional_params, epsilon=None,
                                num_samples=None):
        # Guard clauses: no reparametrization trick for Bernoulli samples,
        # and an explicit sample count is mandatory.
        if epsilon is not None:
            raise ValueError(str(self.__class__) + " is not able to sample "
                             "using the reparametrization trick.")
        if num_samples is None:
            raise ValueError("number of requested samples needs to be given.")
        # Parameters are pre-sigmoid activations; see `log_conditional`.
        probs = T.nnet.sigmoid(conditional_params[0])
        noise = self.theano_rng.uniform(size=(num_samples, self.ndim),
                                        dtype=theano.config.floatX)
        return noise < probs

    @wraps(Conditional.conditional_expectation)
    def conditional_expectation(self, conditional_params):
        # The expectation of a Bernoulli vector is mu = sigmoid(activations).
        return T.nnet.sigmoid(conditional_params[0])

    @wraps(Conditional.log_conditional)
    def log_conditional(self, samples, conditional_params):
        # Work with pre-sigmoid activations so log(sigmoid(x)) can be
        # rewritten as -softplus(-x), which is numerically stable.
        (activations,) = conditional_params
        # Broadcast over the extra sample axis when several samples per data
        # point are given.
        if samples.ndim == 3 and activations.ndim == 2:
            activations = activations.dimshuffle('x', 0, 1)
        neg_log_probs = (samples * T.nnet.softplus(-activations) +
                         (1 - samples) * T.nnet.softplus(activations))
        return -neg_log_probs.sum(axis=2)
class DiagonalGaussian(Conditional):
    """
    Implements a normal conditional distribution with diagonal covariance
    matrix, i.e.

    .. math::
        f_\\omega(\\mathbf{a} \\mid \\mathbf{b})
        = \\prod_i \\exp(-(a_i - \\mu_i(\\mathbf{b}))^2 /
                         (2\\sigma_i(\\mathbf{b})^2 ) /
                   (\\sqrt{2 \\pi} \\sigma_i(\\mathbf{b}))

    The distribution parameters are the pair ``(mu, log_sigma)``.

    Parameters
    ----------
    See `Conditional`
    """
    @wraps(Conditional._get_default_output_layer)
    def _get_default_output_layer(self):
        # One linear layer per distribution parameter.
        mu_layer = Linear(dim=self.ndim, layer_name='mu', irange=0.01)
        log_sigma_layer = Linear(dim=self.ndim, layer_name='log_sigma',
                                 irange=0.01)
        return CompositeLayer(layer_name='conditional',
                              layers=[mu_layer, log_sigma_layer])

    @wraps(Conditional._get_required_mlp_output_space)
    def _get_required_mlp_output_space(self):
        return CompositeSpace([VectorSpace(dim=self.ndim),
                               VectorSpace(dim=self.ndim)])

    @wraps(Conditional.monitoring_channels_from_conditional_params)
    def monitoring_channels_from_conditional_params(self, conditional_params):
        _, log_sigma = conditional_params
        sigma = T.exp(log_sigma)
        channels = OrderedDict()
        for stat_name, stat in (('min', sigma.min()),
                                ('max', sigma.max()),
                                ('mean', sigma.mean()),
                                ('std', sigma.std())):
            channels[self.name + '_sigma_' + stat_name] = stat
        return channels

    @wraps(Conditional.sample_from_conditional)
    def sample_from_conditional(self, conditional_params, epsilon=None,
                                num_samples=None):
        mu, log_sigma = conditional_params
        if epsilon is None:
            # Direct (non-reparametrized) sampling.
            if num_samples is None:
                raise ValueError("number of requested samples needs to be "
                                 "given.")
            return self.theano_rng.normal(size=mu.shape,
                                          avg=mu,
                                          std=T.exp(log_sigma),
                                          dtype=theano.config.floatX)
        # Reparametrization trick: broadcast mu and log_sigma over the extra
        # sample axis when several samples per data point are requested.
        if epsilon.ndim == 3:
            if mu.ndim == 2:
                mu = mu.dimshuffle('x', 0, 1)
            if log_sigma.ndim == 2:
                log_sigma = log_sigma.dimshuffle('x', 0, 1)
        return mu + T.exp(log_sigma) * epsilon

    @wraps(Conditional.sample_from_epsilon)
    def sample_from_epsilon(self, shape):
        return self.theano_rng.normal(size=shape, dtype=theano.config.floatX)

    @wraps(Conditional.conditional_expectation)
    def conditional_expectation(self, conditional_params):
        # The expectation of a Gaussian is mu, the first parameter.
        return conditional_params[0]

    @wraps(Conditional.log_conditional)
    def log_conditional(self, samples, conditional_params):
        mu, log_sigma = conditional_params
        # Broadcast mu and log_sigma over the extra sample axis when several
        # samples per data point are given.
        if samples.ndim == 3:
            if log_sigma.ndim == 2:
                log_sigma = log_sigma.dimshuffle('x', 0, 1)
            if mu.ndim == 2:
                mu = mu.dimshuffle('x', 0, 1)
        return -0.5 * (
            T.log(2 * pi) + 2 * log_sigma + (samples - mu) ** 2 /
            T.exp(2 * log_sigma)
        ).sum(axis=2)
| |
"""
=======
Cliques
=======
Find and manipulate cliques of graphs.
Note that finding the largest clique of a graph has been
shown to be an NP-complete problem; the algorithms here
could take a long time to run.
http://en.wikipedia.org/wiki/Clique_problem
"""
__author__ = """Dan Schult (dschult@colgate.edu)"""
# Copyright (C) 2004-2008 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
__all__ = ['find_cliques', 'find_cliques_recursive', 'make_max_clique_graph',
'make_clique_bipartite' ,'graph_clique_number',
'graph_number_of_cliques', 'node_clique_number',
'number_of_cliques', 'cliques_containing_node',
'project_down', 'project_up']
import networkx
def find_cliques(G):
    """
    Search for all maximal cliques in a graph.

    This algorithm searches for maximal cliques in a graph.
    maximal cliques are the largest complete subgraph containing
    a given point.  The largest maximal clique is sometimes called
    the maximum clique.

    This implementation is a generator of lists each
    of which contains the members of a maximal clique.
    To obtain a list of cliques, use list(find_cliques(G)).
    The method essentially unrolls the recursion used in
    the references to avoid issues of recursion stack depth.

    See Also
    --------
    find_cliques_recursive :
       A recursive version of the same algorithm

    Notes
    -----
    Based on the algorithm published by Bron & Kerbosch (1973) [1]_
    as adapted by Tomita, Tanaka and Takahashi (2006) [2]_
    and discussed in Cazals and Karande (2008) [3]_.

    References
    ----------
    .. [1] Bron, C. and Kerbosch, J. 1973.
       Algorithm 457: finding all cliques of an undirected graph.
       Commun. ACM 16, 9 (Sep. 1973), 575-577.
       http://portal.acm.org/citation.cfm?doid=362342.362367

    .. [2] Etsuji Tomita, Akira Tanaka, Haruhisa Takahashi,
       The worst-case time complexity for generating all maximal
       cliques and computational experiments,
       Theoretical Computer Science, Volume 363, Issue 1,
       Computing and Combinatorics,
       10th Annual International Conference on
       Computing and Combinatorics (COCOON 2004), 25 October 2006, Pages 28-42
       http://dx.doi.org/10.1016/j.tcs.2006.06.015

    .. [3] F. Cazals, C. Karande,
       A note on the problem of reporting maximal cliques,
       Theoretical Computer Science,
       Volume 407, Issues 1-3, 6 November 2008, Pages 564-568,
       http://dx.doi.org/10.1016/j.tcs.2008.05.010
    """
    # Cache nbrs and find first pivot (highest degree).
    # `nnbrs` maps every node to the set of its neighbors; the pivot's
    # neighbor set is kept in `pivotnbrs`.
    maxconn=-1
    nnbrs={}
    pivotnbrs=set() # handle empty graph
    for n,nbrs in G.adjacency_iter():
        conn = len(nbrs)
        if conn > maxconn:
            nnbrs[n] = pivotnbrs = set(nbrs)
            maxconn = conn
        else:
            nnbrs[n] = set(nbrs)
    # Initial setup:
    # cand  -- nodes that could still extend the current clique
    # done  -- nodes already processed at this level
    # stack -- saved (cand, done, smallcand) states for iterative backtracking
    cand=set(nnbrs)
    smallcand = cand - pivotnbrs
    done=set()
    stack=[]
    clique_so_far=[]
    # Start main loop
    while smallcand or stack:
        try:
            # Any nodes left to check?
            n=smallcand.pop()
        except KeyError:
            # back out clique_so_far (restore the parent search state)
            cand,done,smallcand = stack.pop()
            clique_so_far.pop()
            continue
        # Add next node to clique
        clique_so_far.append(n)
        cand.remove(n)
        done.add(n)
        nn=nnbrs[n]
        new_cand = cand & nn
        new_done = done & nn
        # check if we have more to search
        if not new_cand:
            if not new_done:
                # Found a clique!  Yield a copy since clique_so_far mutates.
                yield clique_so_far[:]
            clique_so_far.pop()
            continue
        # Shortcut--only one node left!
        if not new_done and len(new_cand)==1:
            yield clique_so_far + list(new_cand)
            clique_so_far.pop()
            continue
        # find pivot node (max connected in cand)
        # look in done nodes first
        numb_cand=len(new_cand)
        maxconndone=-1
        for n in new_done:
            cn = new_cand & nnbrs[n]
            conn=len(cn)
            if conn > maxconndone:
                pivotdonenbrs=cn
                maxconndone=conn
                if maxconndone==numb_cand:
                    break
        # Shortcut--this part of tree already searched
        if maxconndone == numb_cand:
            clique_so_far.pop()
            continue
        # still finding pivot node
        # look in cand nodes second
        maxconn=-1
        for n in new_cand:
            cn = new_cand & nnbrs[n]
            conn=len(cn)
            if conn > maxconn:
                pivotnbrs=cn
                maxconn=conn
                if maxconn == numb_cand-1:
                    break
        # pivot node is max connected in cand from done or cand
        if maxconndone > maxconn:
            pivotnbrs = pivotdonenbrs
        # save search status for later backout, then descend one level
        stack.append( (cand, done, smallcand) )
        cand=new_cand
        done=new_done
        smallcand = cand - pivotnbrs
def find_cliques_recursive(G):
    """
    Recursive search for all maximal cliques in a graph.

    Maximal cliques are the largest complete subgraph containing a given
    point; the largest maximal clique is sometimes called the maximum
    clique.  Returns a list of lists, each containing the members of one
    maximal clique.

    See Also
    --------
    find_cliques : An nonrecursive version of the same algorithm

    Notes
    -----
    Based on the algorithm published by Bron & Kerbosch (1973) [1]_
    as adapted by Tomita, Tanaka and Takahashi (2006) [2]_
    and discussed in Cazals and Karande (2008) [3]_.

    References
    ----------
    .. [1] Bron, C. and Kerbosch, J. 1973.
       Algorithm 457: finding all cliques of an undirected graph.
       Commun. ACM 16, 9 (Sep. 1973), 575-577.
       http://portal.acm.org/citation.cfm?doid=362342.362367

    .. [2] Etsuji Tomita, Akira Tanaka, Haruhisa Takahashi,
       The worst-case time complexity for generating all maximal
       cliques and computational experiments,
       Theoretical Computer Science, Volume 363, Issue 1, Pages 28-42
       http://dx.doi.org/10.1016/j.tcs.2006.06.015

    .. [3] F. Cazals, C. Karande,
       A note on the problem of reporting maximal cliques,
       Theoretical Computer Science,
       Volume 407, Issues 1-3, 6 November 2008, Pages 564-568,
       http://dx.doi.org/10.1016/j.tcs.2008.05.010
    """
    # Cache each node's neighbor set once up front.
    neighbors = {}
    for node, nbrs in G.adjacency_iter():
        neighbors[node] = set(nbrs)
    if not neighbors:
        return []  # empty graph
    found = []
    _extend(neighbors, set(neighbors), set(), [], found)
    return found
def _extend(nnbrs,cand,done,so_far,cliques):
# find pivot node (max connections in cand)
maxconn=-1
numb_cand=len(cand)
for n in done:
cn = cand & nnbrs[n]
conn=len(cn)
if conn > maxconn:
pivotnbrs=cn
maxconn=conn
if conn==numb_cand:
# All possible cliques already found
return
for n in cand:
cn = cand & nnbrs[n]
conn=len(cn)
if conn > maxconn:
pivotnbrs=cn
maxconn=conn
# Use pivot to reduce number of nodes to examine
smallercand = cand - pivotnbrs
for n in smallercand:
cand.remove(n)
so_far.append(n)
nn=nnbrs[n]
new_cand=cand & nn
new_done=done & nn
if not new_cand and not new_done:
# Found the clique
cliques.append(so_far[:])
elif not new_done and len(new_cand) is 1:
# shortcut if only one node left
cliques.append(so_far+list(new_cand))
else:
_extend(nnbrs, new_cand, new_done, so_far, cliques)
done.add(so_far.pop())
def make_max_clique_graph(G, create_using=None, name=None):
    """Create the maximal clique graph of a graph.

    Each maximal clique of G becomes a node (numbered from 1); two clique
    nodes are joined by an edge if the cliques share at least one member
    of the original graph.

    Notes
    -----
    This should be the same as make_clique_bipartite followed
    by project_up, but it saves all the intermediate steps.
    """
    cliques = [set(c) for c in find_cliques(G)]
    if create_using:
        B = create_using
        B.clear()
    else:
        B = networkx.Graph()
    if name is not None:
        B.name = name
    for i, clique in enumerate(cliques):
        B.add_node(i + 1)
        for j, earlier in enumerate(cliques[:i]):
            # Connect cliques that have a non-empty intersection.
            if clique & earlier:
                B.add_edge(i + 1, j + 1)
    return B
def make_clique_bipartite(G, fpos=None, create_using=None, name=None):
    """Create a bipartite clique graph from a graph G.

    Nodes of G are retained as the "bottom nodes" of B and
    cliques of G become "top nodes" of B.
    Edges are present if a bottom node belongs to the clique
    represented by the top node.

    Returns a Graph with additional attribute dict B.node_type
    which is keyed by nodes to "Bottom" or "Top" appropriately.

    If fpos is truthy, a second additional attribute dict B.pos
    is created to hold the position tuple of each node for viewing
    the bipartite graph.
    """
    cliq = list(find_cliques(G))
    if create_using:
        B = create_using
        B.clear()
    else:
        B = networkx.Graph()
    if name is not None:
        B.name = name
    B.add_nodes_from(G)
    B.node_type = {}  # New Attribute for B
    for n in B:
        B.node_type[n] = "Bottom"
    if fpos:
        B.pos = {}  # New Attribute for B
        delta_cpos = 1. / len(cliq)
        delta_ppos = 1. / G.order()
        cpos = 0.
        ppos = 0.
    for i, cl in enumerate(cliq):
        name = -i - 1  # Top nodes get negative names
        B.add_node(name)
        B.node_type[name] = "Top"
        if fpos:
            if name not in B.pos:
                B.pos[name] = (0.2, cpos)
                cpos += delta_cpos
        for v in cl:
            B.add_edge(name, v)
            # BUG FIX: this guard was `if fpos is not None`, which raised
            # AttributeError for a falsy non-None fpos (B.pos is only
            # created when fpos is truthy).  Use the same truthiness test
            # as the other two fpos checks.
            if fpos:
                if v not in B.pos:
                    B.pos[v] = (0.8, ppos)
                    ppos += delta_ppos
    return B
def project_down(B, create_using=None, name=None):
    """Project a bipartite graph B down onto its "bottom nodes".

    The nodes retain their names and are connected if they
    share a common top node in the bipartite graph.

    Returns a Graph.
    """
    if create_using:
        G = create_using
        G.clear()
    else:
        G = networkx.Graph()
    if name is not None:
        G.name = name
    for bottom, nbrs in B.adjacency_iter():
        if B.node_type[bottom] != "Bottom":
            continue
        G.add_node(bottom)
        for clique_node in nbrs:
            # Link this bottom node to every other member of the clique.
            G.add_edges_from((bottom, u) for u in B[clique_node]
                             if u != bottom)
    return G
def project_up(B, create_using=None, name=None):
    """Project a bipartite graph B up onto its "top nodes".

    Top nodes (which carry negative names) have their names negated back
    to positive, and two top nodes are connected if they share a common
    bottom node in the bipartite graph.

    Returns a Graph.
    """
    if create_using:
        G = create_using
        G.clear()
    else:
        G = networkx.Graph()
    if name is not None:
        G.name = name
    for v, nbrs in B.adjacency_iter():
        if B.node_type[v] != "Top":
            continue
        vname = -v  # Change sign of name for Top Nodes
        G.add_node(vname)
        for bottom in nbrs:
            # Note: -u changes the name (not Top node anymore)
            G.add_edges_from((vname, -u) for u in B[bottom] if u != v)
    return G
def graph_clique_number(G, cliques=None):
    """Return the clique number (size of the largest clique) for G.

    An optional list of cliques can be input if already computed.
    """
    if cliques is None:
        cliques = find_cliques(G)
    return max(len(clique) for clique in cliques)
def graph_number_of_cliques(G, cliques=None):
    """Returns the number of maximal cliques in G.

    An optional list of cliques can be input if already computed.
    """
    if cliques is not None:
        return len(cliques)
    return len(list(find_cliques(G)))
def node_clique_number(G, nodes=None, cliques=None):
    """Returns the size of the largest maximal clique containing
    each given node.

    Returns a single value if `nodes` is a single node, or a dict keyed
    by node if `nodes` is a list (or None, meaning all nodes of G).
    Optional list of cliques can be input if already computed.
    """
    # (Removed a stale commented-out alternative implementation that
    # followed the return statement.)
    if cliques is None:
        if nodes is not None:
            # Use ego_graph to decrease size of graph
            if isinstance(nodes, list):
                d = {}
                for n in nodes:
                    H = networkx.ego_graph(G, n)
                    d[n] = max(len(c) for c in find_cliques(H))
            else:
                H = networkx.ego_graph(G, nodes)
                d = max(len(c) for c in find_cliques(H))
            return d
        # nodes is None--find all cliques
        cliques = list(find_cliques(G))
    if nodes is None:
        nodes = G.nodes()  # none, get entire graph
    if not isinstance(nodes, list):  # check for a list
        v = nodes
        # assume it is a single value
        d = max(len(c) for c in cliques if v in c)
    else:
        d = {}
        for v in nodes:
            d[v] = max(len(c) for c in cliques if v in c)
    return d
def number_of_cliques(G, nodes=None, cliques=None):
    """Returns the number of maximal cliques for each node.

    Returns a single count if `nodes` is a single node, or a dict keyed
    by node if `nodes` is a list (or None, meaning all nodes of G).
    Optional list of cliques can be input if already computed.
    """
    if cliques is None:
        cliques = list(find_cliques(G))
    if nodes is None:
        nodes = G.nodes()  # none, get entire graph
    if not isinstance(nodes, list):
        # single node: return a bare count
        return sum(1 for c in cliques if nodes in c)
    counts = {}
    for v in nodes:
        counts[v] = sum(1 for c in cliques if v in c)
    return counts
def cliques_containing_node(G, nodes=None, cliques=None):
    """Returns a list of cliques containing the given node.

    Returns a single list if `nodes` is a single node, or a dict of lists
    keyed by node if `nodes` is a list (or None, meaning all nodes of G).
    Optional list of cliques can be input if already computed.
    """
    if cliques is None:
        cliques = list(find_cliques(G))
    if nodes is None:
        nodes = G.nodes()  # none, get entire graph
    if not isinstance(nodes, list):
        # single node: return a bare list of cliques
        return [c for c in cliques if nodes in c]
    membership = {}
    for v in nodes:
        membership[v] = [c for c in cliques if v in c]
    return membership
| |
import pyfits
import chimera
from chimera import config
import numpy as np
from pyraf import iraf
from datetime import datetime, timedelta
# Map of month-name strings in CHIMERA UTCSTART header values to month
# numbers.  NOTE(review): the keys mix 3-letter abbreviations with longer
# forms ("March", "April", "June", "July") -- presumably matching the exact
# strings the instrument writes; confirm against real headers.
MONTHS = {"Jan": 1, "Feb": 2, "March": 3, "April": 4, "May": 5, "June": 6, "July": 7, "Aug": 8, "Sep": 9, "Oct": 10, "Nov": 11, "Dec": 12}
class Aperphot:
    """
    Aperture photometry for CHIMERA science image cubes.

    Reads observation metadata from the FITS header, pulls photometry
    parameters from the chimera configuration file, and performs aperture
    photometry either through IRAF/DAOPHOT (`daophot`, `daocog`) or
    astropy photutils (`phot`, `cog`).
    """
    def __init__(self, sci_file, coords):
        self.sci_file = sci_file
        self.coords = coords
        # load configuration file
        cfg = config.Config()
        self.cfg_data = cfg.load()
        # Set header keyword parameters
        self.setkeywords()
        # Set photometry parameters
        self.setparams()

    def setkeywords(self):
        """
        Set FITS image header keyword parameters.

        Returns
        -------
        None
        """
        header = pyfits.getheader(self.sci_file, ignore_missing_end=True)
        self.nx = header["NAXIS1"]
        self.ny = header["NAXIS2"]
        self.nframes = header["NAXIS3"]
        self.exptime = header["EXPTIME"]
        self.kintime = header["KINCYCTI"]
        # Serial number is stored as "key=value"; keep the value only.
        self.sn = header["SERIALN"].split("=")[1].strip()
        self.amptype = header["AMPTYPE"].split()[0]
        self.emgain = header["EMGAIN"]
        self.hreadout = header["HREADOUT"].strip()
        self.preampg = header["PREAMPG"].strip()
        utcstart = header["UTCSTART"]
        self.utcstart = self.parser(utcstart)
        return

    def setparams(self):
        """
        Set datapars, centerpars, fitskypars and photpars from the
        configuration file.

        Returns
        -------
        None
        """
        # Set parameters for daophot
        phot_cfg = self.cfg_data["Phot"]
        self.fwhmpsf = phot_cfg["fwhmpsf"]
        self.sigma = phot_cfg["sigma"]
        self.exposure = phot_cfg["exposure"]
        self.calgorithm = phot_cfg["calgorithm"]
        self.cbox = phot_cfg["cbox"]
        self.maxshift = phot_cfg["maxshift"]
        self.salgorithm = phot_cfg["salgorithm"]
        self.annulus = phot_cfg["annulus"]
        self.dannulus = phot_cfg["dannulus"]
        self.apertures = phot_cfg["apertures"]
        self.zmag = phot_cfg["zmag"]
        # Detector characteristics are keyed by serial number, amplifier
        # type, readout rate and pre-amp gain.
        det_cfg = self.cfg_data["Detector"][self.sn][self.amptype][
            self.hreadout][self.preampg]
        self.readnoise = float(det_cfg[1])
        self.epadu = float(det_cfg[0])
        if self.amptype == "EMGAIN":
            # EM amplification reduces the effective read noise and gain.
            self.readnoise /= self.emgain
            self.epadu /= self.emgain
        # Set parameters for photutils-based `phot`
        self.method = "exact"
        self.inner_radius = 14
        self.outer_radius = 30
        return

    def setiraf(self):
        """
        Set IRAF global parameters and load DAOPHOT package for aperture
        photometry.

        Returns
        -------
        None
        """
        iraf.prcacheOff()
        iraf.set(writepars=0)
        # Load IRAF packages
        iraf.noao(_doprint=0)
        iraf.noao.digiphot(_doprint=0)
        iraf.noao.digiphot.daophot(_doprint=0)
        return

    def parser(self, utcstart):
        """
        Datetime parser for CHIMERA UTCSTART header keyword.

        Parameters
        ----------
        utcstart : string
            Datetime for start of frame (in UTC), formatted as
            "Month-DD-YYYY-HH:MM:SS.mmm".

        Returns
        -------
        dt : datetime struct
            Datetime structure
        """
        month, date, year, time = utcstart.split("-")
        month = MONTHS[month]
        date = int(date)
        year = int(year)
        hour, minu, sec = time.split(":")
        hour = int(hour)
        minu = int(minu)
        sec, ms = sec.split(".")
        sec = int(sec)
        # Header stores milliseconds; datetime expects microseconds.
        ms = int(ms) * 1000
        return datetime(year, month, date, hour, minu, sec, ms)

    def addtime(self, secs):
        """
        Add time in seconds to the UTC start datetime.

        Parameters
        ----------
        secs : float
            Time to add to UTC in seconds.

        Returns
        -------
        dt : datetime structure
        """
        return self.utcstart + timedelta(0, secs)

    def daocog(self, tolerance=0.01):
        """
        Curve of growth to determine nominal aperture for photometry using
        DAOPHOT.

        Parameters
        ----------
        tolerance : float
            Magnitude difference tolerance between different apertures

        Returns
        -------
        aperture : float
            Nominal aperture radius for photometry
        """
        # load iraf packages
        self.setiraf()
        # Randomly perform curve of growth on 5 frames
        framenum = np.random.randint(1, self.nframes, 5)
        apertures = np.linspace(2, 20, 19)
        # Iterate through the frames and determine nominal aperture
        nom_aper = np.zeros(5, dtype=np.float32)
        cnt = 0
        for val in framenum:
            outfile = self.sci_file.replace(
                ".fits", "." + str(val) + ".cog.phot.1")
            iraf.delete(outfile)
            self.daophot(val, self.coords, outfile,
                         apertures="2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20")
            mags = iraf.pdump(outfile, "mag", "yes", Stdout=1)
            mags_arr = np.array(mags[1].split(), dtype=np.float32)
            mags_diff = np.diff(mags_arr)
            # BUG FIX: the `tolerance` parameter was ignored (0.01 was
            # hard-coded here).
            idx = np.where(np.abs(mags_diff) < tolerance)
            if len(idx[0]) != 0:
                nom_aper[cnt] = apertures[idx[0][0]]
            else:
                # Fall back to a conservative default aperture.
                nom_aper[cnt] = 12.0
            cnt += 1
            iraf.delete(outfile)
        return np.median(nom_aper)

    def cog(self, window_size, method, tolerance=0.01):
        """
        Curve of growth to determine nominal aperture for photometry using
        astropy photutils.

        Parameters
        ----------
        window_size : int
            Recentering window size passed to `chimera.recenter`.
        method : string
            Recentering method passed to `chimera.recenter`.
        tolerance : float
            Magnitude difference tolerance between different apertures

        Returns
        -------
        aperture : float
            Nominal aperture radius for photometry
        """
        # Aperture radii in pixels, 2..20 inclusive.
        # (A redundant duplicate definition of this array was removed.)
        apertures = np.linspace(2, 20, 19)
        naper = apertures.shape[0]
        # Randomly perform curve of growth on 5 frames
        framenum = np.random.randint(1, self.nframes, 5)
        # Read input image and star position
        image = chimera.fitsread(self.sci_file)
        pos = np.loadtxt(self.coords, ndmin=2)
        # Iterate through the frames and determine nominal aperture
        nom_aper = np.zeros(5, dtype=np.float32)
        cnt = 0
        for val in framenum:
            mags_arr = np.zeros(len(apertures))
            objpos = chimera.recenter(image[val, :, :], pos, window_size,
                                      method)
            for i in range(naper):
                flux = self.phot(image[val, :, :], objpos,
                                 aper=apertures[i])
                mags_arr[i] = -2.5 * np.log10(flux['flux'])
            mags_diff = np.diff(mags_arr)
            # BUG FIX: the `tolerance` parameter was ignored (0.01 was
            # hard-coded here).
            idx = np.where(np.abs(mags_diff) < tolerance)
            if len(idx[0]) != 0:
                nom_aper[cnt] = apertures[idx[0][0]]
            else:
                # Fall back to a conservative default aperture.
                nom_aper[cnt] = 12.0
            cnt += 1
        return np.median(nom_aper)

    def daophot(self, framenum, coords, outfile, apertures, verbose="no"):
        """
        Aperture photometry of stars in the coords file using IRAF PHOT
        routine.

        Parameters
        ----------
        framenum : int
            Frame number in the image cube to perform aperture photometry
            on.
        coords : string
            Text file with coordinate of the stars
        outfile : string
            Text file to which photometry results are written
        apertures : string
            Comma-separated list of aperture radii passed to IRAF phot.
        verbose : string
            IRAF-style boolean flag ("yes"/"no").

        Returns
        -------
        None
        """
        # load iraf packages
        self.setiraf()
        iraf.delete(outfile)
        iraf.phot(image=self.sci_file + "[,," + str(framenum) + "]",
                  coords=coords, output=outfile, fwhmpsf=self.fwhmpsf,
                  sigma=self.sigma, readnoise=self.readnoise,
                  epadu=self.epadu, exposure=self.exposure,
                  calgorithm=self.calgorithm, cbox=self.cbox,
                  maxshift=self.maxshift, salgorithm=self.salgorithm,
                  annulus=self.annulus, dannulus=self.dannulus,
                  apertures=apertures, zmag=self.zmag, interactive="no",
                  verify="no", verbose=verbose)
        return

    def phot(self, image, objpos, aper):
        """
        Aperture photometry using Astropy's photutils.

        Parameters
        ----------
        image : numpy array
            2D image array
        objpos : list of tuple
            Object positions as list of tuples
        aper : float
            Aperture radius in pixels

        Returns
        -------
        phot_table : astropy table
            Output table with stellar photometry

        Raises
        ------
        ImportError
            If astropy or photutils is not installed.
        """
        try:
            from astropy.table import hstack
            from photutils import (aperture_photometry, CircularAnnulus,
                                   CircularAperture)
        except ImportError:
            # BUG FIX: previously `pass`, which deferred the failure to a
            # confusing NameError below; fail loudly with the real cause.
            raise ImportError(
                "astropy and photutils are required for Aperphot.phot")
        apertures = CircularAperture(objpos, r=aper)
        annulus_apertures = CircularAnnulus(objpos,
                                            r_in=self.inner_radius,
                                            r_out=self.outer_radius)
        rawflux_table = aperture_photometry(image, apertures=apertures,
                                            method=self.method)
        bkgflux_table = aperture_photometry(image,
                                            apertures=annulus_apertures,
                                            method=self.method)
        phot_table = hstack([rawflux_table, bkgflux_table],
                            table_names=["raw", "bkg"])
        # Mean sky background per pixel, scaled to the source aperture area.
        bkg = phot_table["aperture_sum_bkg"] / annulus_apertures.area()
        phot_table["msky"] = bkg
        phot_table["area"] = apertures.area()
        phot_table["nsky"] = annulus_apertures.area()
        bkg_sum = bkg * apertures.area()
        final_sum = phot_table["aperture_sum_raw"] - bkg_sum
        phot_table["flux"] = final_sum
        return phot_table
| |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects for user."""
from __future__ import annotations
import re
from core import feconf
from core import utils
from core.constants import constants
class UserSettings:
    """Value object representing a user's settings.

    Attributes:
        user_id: str. The unique ID of the user.
        email: str. The user email.
        roles: list(str). Roles of the user.
        username: str or None. Identifiable username to display in the UI.
        last_agreed_to_terms: datetime.datetime or None. When the user last
            agreed to the terms of the site.
        last_started_state_editor_tutorial: datetime.datetime or None. When
            the user last started the state editor tutorial.
        last_started_state_translation_tutorial: datetime.datetime or None. When
            the user last started the state translation tutorial.
        last_logged_in: datetime.datetime or None. When the user last logged in.
        last_created_an_exploration: datetime.datetime or None. When the user
            last created an exploration.
        last_edited_an_exploration: datetime.datetime or None. When the user
            last edited an exploration.
        profile_picture_data_url: str or None. User uploaded profile picture as
            a dataURI string.
        default_dashboard: str or None. The default dashboard of the user.
        user_bio: str. User-specified biography.
        subject_interests: list(str) or None. Subject interests specified by
            the user.
        first_contribution_msec: float or None. The time in milliseconds when
            the user first contributed to Oppia.
        preferred_language_codes: list(str) or None. Exploration language
            preferences specified by the user.
        preferred_site_language_code: str or None. System language preference.
        preferred_audio_language_code: str or None. Audio language preference.
        pin: str or None. The PIN of the user's profile for android.
        display_alias: str or None. Display name of a user who is logged
            into the Android app. None when the request is coming from web
            because we don't use it there.
    """

    def __init__(
            self, user_id, email, roles, banned, username=None,
            last_agreed_to_terms=None, last_started_state_editor_tutorial=None,
            last_started_state_translation_tutorial=None, last_logged_in=None,
            last_created_an_exploration=None, last_edited_an_exploration=None,
            profile_picture_data_url=None, default_dashboard=None,
            creator_dashboard_display_pref=(
                constants.ALLOWED_CREATOR_DASHBOARD_DISPLAY_PREFS['CARD']),
            user_bio='', subject_interests=None, first_contribution_msec=None,
            preferred_language_codes=None, preferred_site_language_code=None,
            preferred_audio_language_code=None, pin=None, display_alias=None,
            deleted=False, created_on=None):
        """Constructs a UserSettings domain object.

        Args:
            user_id: str. The unique ID of the user.
            email: str. The user email.
            roles: list(str). Roles of the user.
            banned: bool. Whether the user is banned.
            username: str or None. Identifiable username to display in the UI.
            last_agreed_to_terms: datetime.datetime or None. When the user
                last agreed to the terms of the site.
            last_started_state_editor_tutorial: datetime.datetime or None. When
                the user last started the state editor tutorial.
            last_started_state_translation_tutorial: datetime.datetime or None.
                When the user last started the state translation tutorial.
            last_logged_in: datetime.datetime or None. When the user last
                logged in.
            last_created_an_exploration: datetime.datetime or None. When the
                user last created an exploration.
            last_edited_an_exploration: datetime.datetime or None. When the
                user last edited an exploration.
            profile_picture_data_url: str or None. User uploaded profile
                picture as a dataURI string.
            default_dashboard: str|None. The default dashboard of the user.
            creator_dashboard_display_pref: str. The creator dashboard of the
                user.
            user_bio: str. User-specified biography.
            subject_interests: list(str) or None. Subject interests specified by
                the user.
            first_contribution_msec: float or None. The time in milliseconds
                when the user first contributed to Oppia.
            preferred_language_codes: list(str) or None. Exploration language
                preferences specified by the user.
            preferred_site_language_code: str or None. System language
                preference.
            preferred_audio_language_code: str or None. Default language used
                for audio translations preference.
            pin: str or None. The PIN of the user's profile for android.
            display_alias: str or None. Display name of a user who is logged
                into the Android app. None when the request is coming from
                web because we don't use it there.
            deleted: bool. Whether the user has requested removal of their
                account.
            created_on: datetime.datetime. When the user was created on.
        """
        self.user_id = user_id
        self.email = email
        self.roles = roles
        self.username = username
        self.last_agreed_to_terms = last_agreed_to_terms
        self.last_started_state_editor_tutorial = (
            last_started_state_editor_tutorial)
        self.last_started_state_translation_tutorial = (
            last_started_state_translation_tutorial)
        self.last_logged_in = last_logged_in
        self.last_edited_an_exploration = last_edited_an_exploration
        self.last_created_an_exploration = last_created_an_exploration
        self.profile_picture_data_url = profile_picture_data_url
        self.default_dashboard = default_dashboard
        self.creator_dashboard_display_pref = creator_dashboard_display_pref
        self.user_bio = user_bio
        # Normalize optional list arguments to empty lists so downstream
        # code never has to special-case None.
        self.subject_interests = (
            subject_interests if subject_interests else [])
        self.first_contribution_msec = first_contribution_msec
        self.preferred_language_codes = (
            preferred_language_codes if preferred_language_codes else [])
        self.preferred_site_language_code = preferred_site_language_code
        self.preferred_audio_language_code = preferred_audio_language_code
        self.pin = pin
        self.display_alias = display_alias
        self.banned = banned
        self.deleted = deleted
        self.created_on = created_on

    def validate(self):
        """Checks that the user_id, email, roles, banned, pin and display_alias
        fields of this UserSettings domain object are valid.

        Raises:
            ValidationError. The user_id is not str.
            ValidationError. The email is not str.
            ValidationError. The email is invalid.
            ValidationError. The roles is not a list.
            ValidationError. Given role does not exist.
            ValidationError. The pin is not str.
            ValidationError. The display alias is not str.
        """
        if not isinstance(self.user_id, str):
            raise utils.ValidationError(
                'Expected user_id to be a string, received %s' % self.user_id)
        if not self.user_id:
            raise utils.ValidationError('No user id specified.')
        if not utils.is_user_id_valid(
                self.user_id,
                allow_system_user_id=True,
                allow_pseudonymous_id=True
        ):
            raise utils.ValidationError('The user ID is in a wrong format.')

        if not isinstance(self.banned, bool):
            raise utils.ValidationError(
                'Expected banned to be a bool, received %s' % self.banned)

        if not isinstance(self.roles, list):
            raise utils.ValidationError(
                'Expected roles to be a list, received %s' % self.roles)

        if self.banned:
            # Banned users must not retain any roles.
            if self.roles:
                raise utils.ValidationError(
                    'Expected roles for banned user to be empty, '
                    'received %s.' % self.roles)
        else:
            default_roles = []
            if len(self.roles) != len(set(self.roles)):
                raise utils.ValidationError(
                    'Roles contains duplicate values: %s' % self.roles)
            for role in self.roles:
                if not isinstance(role, str):
                    raise utils.ValidationError(
                        'Expected roles to be a string, received %s' % role)
                if role not in feconf.ALLOWED_USER_ROLES:
                    raise utils.ValidationError(
                        'Role %s does not exist.' % role)
                if role in feconf.ALLOWED_DEFAULT_USER_ROLES_ON_REGISTRATION:
                    default_roles.append(role)
            # Exactly one registration-default role must be present.
            if len(default_roles) != 1:
                raise utils.ValidationError(
                    'Expected roles to contain one default role.')

        if self.pin is not None:
            if not isinstance(self.pin, str):
                raise utils.ValidationError(
                    'Expected PIN to be a string, received %s' %
                    self.pin
                )
            if (
                    len(self.pin) != feconf.FULL_USER_PIN_LENGTH and
                    len(self.pin) != feconf.PROFILE_USER_PIN_LENGTH
            ):
                raise utils.ValidationError(
                    'User PIN can only be of length %s or %s' %
                    (
                        feconf.FULL_USER_PIN_LENGTH,
                        feconf.PROFILE_USER_PIN_LENGTH
                    )
                )
            for character in self.pin:
                if character < '0' or character > '9':
                    raise utils.ValidationError(
                        'Only numeric characters are allowed in PIN.'
                    )

        if (self.display_alias is not None and
                not isinstance(self.display_alias, str)):
            raise utils.ValidationError(
                'Expected display_alias to be a string, received %s' %
                self.display_alias
            )

        if not isinstance(self.email, str):
            raise utils.ValidationError(
                'Expected email to be a string, received %s' % self.email)
        if not self.email:
            raise utils.ValidationError('No user email specified.')
        # Minimal sanity check only; full address validation is not
        # attempted here.
        if ('@' not in self.email or self.email.startswith('@')
                or self.email.endswith('@')):
            raise utils.ValidationError(
                'Invalid email address: %s' % self.email)

        if not isinstance(self.creator_dashboard_display_pref, str):
            raise utils.ValidationError(
                'Expected dashboard display preference to be a string, '
                'received %s' % self.creator_dashboard_display_pref)
        if (self.creator_dashboard_display_pref not in
                list(constants.ALLOWED_CREATOR_DASHBOARD_DISPLAY_PREFS.values(
                    ))):
            raise utils.ValidationError(
                '%s is not a valid value for the dashboard display '
                'preferences.' % (self.creator_dashboard_display_pref))

    def populate_from_modifiable_user_data(self, modifiable_user_data):
        """Populate the UserSettings domain object using the user data in
        modifiable_user_data.

        Args:
            modifiable_user_data: ModifiableUserData. The modifiable user
                data object with the information to be updated.

        Raises:
            ValidationError. None or empty value is provided for display alias
                attribute.
        """
        if (not modifiable_user_data.display_alias or
                not isinstance(modifiable_user_data.display_alias, str)):
            raise utils.ValidationError(
                'Expected display_alias to be a string, received %s.' %
                modifiable_user_data.display_alias
            )
        self.display_alias = modifiable_user_data.display_alias
        self.preferred_language_codes = (
            modifiable_user_data.preferred_language_codes)
        self.preferred_site_language_code = (
            modifiable_user_data.preferred_site_language_code)
        self.preferred_audio_language_code = (
            modifiable_user_data.preferred_audio_language_code)
        self.pin = modifiable_user_data.pin

    def to_dict(self):
        """Convert the UserSettings domain instance into a dictionary form
        with its keys as the attributes of this class.

        Returns:
            dict. A dictionary containing the UserSettings class information
            in a dictionary form.
        """
        return {
            'email': self.email,
            'roles': self.roles,
            'banned': self.banned,
            'username': self.username,
            'normalized_username': self.normalized_username,
            'last_agreed_to_terms': self.last_agreed_to_terms,
            'last_started_state_editor_tutorial': (
                self.last_started_state_editor_tutorial),
            'last_started_state_translation_tutorial': (
                self.last_started_state_translation_tutorial),
            'last_logged_in': self.last_logged_in,
            'last_edited_an_exploration': (
                self.last_edited_an_exploration),
            'last_created_an_exploration': (
                self.last_created_an_exploration),
            'profile_picture_data_url': self.profile_picture_data_url,
            'default_dashboard': self.default_dashboard,
            'creator_dashboard_display_pref': (
                self.creator_dashboard_display_pref),
            'user_bio': self.user_bio,
            'subject_interests': self.subject_interests,
            'first_contribution_msec': self.first_contribution_msec,
            'preferred_language_codes': self.preferred_language_codes,
            'preferred_site_language_code': (
                self.preferred_site_language_code),
            'preferred_audio_language_code': (
                self.preferred_audio_language_code),
            'pin': self.pin,
            'display_alias': self.display_alias,
            'deleted': self.deleted,
            'created_on': self.created_on
        }

    @property
    def truncated_email(self):
        """Returns the email with the tail of its local part masked by '..'.

        Returns:
            str. The truncated email address of this UserSettings
            domain object.
        """
        first_part = self.email[: self.email.find('@')]
        last_part = self.email[self.email.find('@'):]
        if len(first_part) <= 1:
            first_part = '..'
        elif len(first_part) <= 3:
            first_part = '%s..' % first_part[0]
        else:
            first_part = first_part[:-3] + '..'
        return '%s%s' % (first_part, last_part)

    @property
    def normalized_username(self):
        """Returns username in lowercase or None if it does not exist.

        Returns:
            str or None. If this object has a 'username' property, returns
            the normalized version of the username. Otherwise, returns None.
        """
        return self.normalize_username(self.username)

    @classmethod
    def normalize_username(cls, username):
        """Returns the normalized version of the given username,
        or None if the passed-in 'username' is None.

        Args:
            username: str. Identifiable username to display in the UI.

        Returns:
            str or None. The normalized version of the given username,
            or None if the passed-in username is None.
        """
        return username.lower() if username else None

    @classmethod
    def require_valid_username(cls, username: str):
        """Checks if the given username is valid or not.

        Args:
            username: str. The username to validate.

        Raises:
            ValidationError. An empty username is supplied.
            ValidationError. The given username exceeds the maximum allowed
                number of characters.
            ValidationError. The given username contains non-alphanumeric
                characters.
            ValidationError. The given username contains reserved substrings.
        """
        if not username:
            raise utils.ValidationError('Empty username supplied.')
        if len(username) > constants.MAX_USERNAME_LENGTH:
            raise utils.ValidationError(
                'A username can have at most %s characters.'
                % constants.MAX_USERNAME_LENGTH)
        if not re.match(feconf.ALPHANUMERIC_REGEX, username):
            raise utils.ValidationError(
                'Usernames can only have alphanumeric characters.')

        # Disallow usernames that contain the system usernames or the
        # strings "admin" or "oppia".
        reserved_usernames = (
            set(feconf.SYSTEM_USERS.values()) | {'admin', 'oppia'}
        )
        for reserved_username in reserved_usernames:
            if reserved_username in username.lower().strip():
                raise utils.ValidationError('This username is not available.')

    def mark_banned(self):
        """Marks a user banned."""
        self.banned = True
        self.roles = []

    def unmark_banned(self, default_role):
        """Unmarks ban for a banned user.

        Args:
            default_role: str. The role assigned to the user after marking
                unbanned.
        """
        self.banned = False
        self.roles = [default_role]
class UserActionsInfo:
    """Read-only bundle of a user's id, roles, and permitted actions.

    Attributes:
        user_id: str. The unique ID of the user.
        roles: list(str). The roles of the user.
        actions: list(str). A list of actions accessible to the role.
    """

    def __init__(self, user_id, roles, actions):
        # Stored privately; exposed through the read-only properties below.
        self._user_id = user_id
        self._roles = roles
        self._actions = actions

    @property
    def user_id(self):
        """str. The unique ID of the user."""
        return self._user_id

    @property
    def roles(self):
        """list(str). The roles of the user."""
        return self._roles

    @property
    def actions(self):
        """list(str). The actions accessible to the user."""
        return self._actions
class UserContributions:
    """Value object recording which explorations a user created or edited.

    Attributes:
        user_id: str. The unique ID of the user.
        created_exploration_ids: list(str). IDs of explorations that this
            user has created.
        edited_exploration_ids: list(str). IDs of explorations that this
            user has edited.
    """

    def __init__(
            self, user_id, created_exploration_ids, edited_exploration_ids):
        """Constructs a UserContributions domain object.

        Args:
            user_id: str. The unique ID of the user.
            created_exploration_ids: list(str). IDs of explorations that this
                user has created.
            edited_exploration_ids: list(str). IDs of explorations that this
                user has edited.
        """
        self.user_id = user_id
        self.created_exploration_ids = created_exploration_ids
        self.edited_exploration_ids = edited_exploration_ids

    def validate(self):
        """Checks that user_id, created_exploration_ids and
        edited_exploration_ids fields of this UserContributions
        domain object are valid.

        Raises:
            ValidationError. The user_id is not str.
            ValidationError. The created_exploration_ids is not a list.
            ValidationError. The exploration_id in created_exploration_ids
                is not str.
            ValidationError. The edited_exploration_ids is not a list.
            ValidationError. The exploration_id in edited_exploration_ids
                is not str.
        """
        # User id checks first, mirroring the order callers expect
        # errors to surface in.
        if not isinstance(self.user_id, str):
            raise utils.ValidationError(
                'Expected user_id to be a string, received %s' % self.user_id)
        if not self.user_id:
            raise utils.ValidationError('No user id specified.')

        if not isinstance(self.created_exploration_ids, list):
            raise utils.ValidationError(
                'Expected created_exploration_ids to be a list, received %s'
                % self.created_exploration_ids)
        for exp_id in self.created_exploration_ids:
            if not isinstance(exp_id, str):
                raise utils.ValidationError(
                    'Expected exploration_id in created_exploration_ids '
                    'to be a string, received %s' % (
                        exp_id))

        if not isinstance(self.edited_exploration_ids, list):
            raise utils.ValidationError(
                'Expected edited_exploration_ids to be a list, received %s'
                % self.edited_exploration_ids)
        for exp_id in self.edited_exploration_ids:
            if not isinstance(exp_id, str):
                raise utils.ValidationError(
                    'Expected exploration_id in edited_exploration_ids '
                    'to be a string, received %s' % (
                        exp_id))
class UserGlobalPrefs:
    """Domain object holding a user's site-wide email preferences.

    Attributes:
        can_receive_email_updates: bool. Whether the user can receive
            email updates.
        can_receive_editor_role_email: bool. Whether the user can receive
            emails notifying them of role changes.
        can_receive_feedback_message_email: bool. Whether the user can
            receive emails when users submit feedback to their explorations.
        can_receive_subscription_email: bool. Whether the user can receive
            subscription emails notifying them about new explorations.
    """

    def __init__(
            self, can_receive_email_updates, can_receive_editor_role_email,
            can_receive_feedback_message_email,
            can_receive_subscription_email):
        """Constructs a UserGlobalPrefs domain object.

        Args:
            can_receive_email_updates: bool. Whether site update emails are
                allowed.
            can_receive_editor_role_email: bool. Whether role-change emails
                are allowed.
            can_receive_feedback_message_email: bool. Whether exploration
                feedback emails are allowed.
            can_receive_subscription_email: bool. Whether subscription
                emails are allowed.
        """
        self.can_receive_email_updates = can_receive_email_updates
        self.can_receive_editor_role_email = can_receive_editor_role_email
        self.can_receive_feedback_message_email = ( # pylint: disable=invalid-name
            can_receive_feedback_message_email)
        self.can_receive_subscription_email = can_receive_subscription_email

    @classmethod
    def create_default_prefs(cls):
        """Returns a UserGlobalPrefs built from the feconf defaults."""
        return cls(
            feconf.DEFAULT_EMAIL_UPDATES_PREFERENCE,
            feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE,
            feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE,
            feconf.DEFAULT_SUBSCRIPTION_EMAIL_PREFERENCE)
class UserExplorationPrefs:
    """Domain object for a user's per-exploration email preferences.

    Attributes:
        mute_feedback_notifications: bool. Whether the user has muted
            feedback emails.
        mute_suggestion_notifications: bool. Whether the user has muted
            suggestion emails.
    """

    def __init__(
            self, mute_feedback_notifications, mute_suggestion_notifications):
        """Constructs a UserExplorationPrefs domain object.

        Args:
            mute_feedback_notifications: bool. Whether feedback emails are
                muted.
            mute_suggestion_notifications: bool. Whether suggestion emails
                are muted.
        """
        self.mute_feedback_notifications = mute_feedback_notifications
        self.mute_suggestion_notifications = mute_suggestion_notifications

    @classmethod
    def create_default_prefs(cls):
        """Returns a UserExplorationPrefs populated with feconf defaults."""
        return cls(
            feconf.DEFAULT_FEEDBACK_NOTIFICATIONS_MUTED_PREFERENCE,
            feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE)

    def to_dict(self):
        """Returns the dict representation of these preferences.

        Returns:
            dict. Maps 'mute_feedback_notifications' and
            'mute_suggestion_notifications' to their boolean values.
        """
        return {
            'mute_feedback_notifications': self.mute_feedback_notifications,
            'mute_suggestion_notifications': self.mute_suggestion_notifications
        }
class ExpUserLastPlaythrough:
    """Domain object for an exploration last playthrough model."""

    def __init__(
            self, user_id, exploration_id, last_played_exp_version,
            last_updated, last_played_state_name):
        # The model id joins the user id and exploration id with a dot.
        self.id = '%s.%s' % (user_id, exploration_id)
        self.user_id = user_id
        self.exploration_id = exploration_id
        self.last_played_exp_version = last_played_exp_version
        self.last_updated = last_updated
        self.last_played_state_name = last_played_state_name

    def update_last_played_information(
            self, last_played_exp_version, last_played_state_name):
        """Overwrites the stored last-playthrough position.

        Args:
            last_played_exp_version: int. The version of the exploration that
                was played by the user.
            last_played_state_name: str. The name of the state at which the
                learner left the exploration.
        """
        self.last_played_exp_version = last_played_exp_version
        self.last_played_state_name = last_played_state_name
class IncompleteActivities:
    """Domain object for the incomplete activities model."""

    def __init__(
            self, user_id, exploration_ids, collection_ids, story_ids,
            partially_learnt_topic_ids, partially_mastered_topic_id=None):
        # The model id is simply the owning user's id.
        self.id = user_id
        self.exploration_ids = exploration_ids
        self.collection_ids = collection_ids
        self.story_ids = story_ids
        self.partially_learnt_topic_ids = partially_learnt_topic_ids
        self.partially_mastered_topic_id = partially_mastered_topic_id

    def add_exploration_id(self, exploration_id):
        """Appends exploration_id to the incomplete exploration ids.

        Args:
            exploration_id: str. The exploration id to track as incomplete.
        """
        self.exploration_ids.append(exploration_id)

    def remove_exploration_id(self, exploration_id):
        """Drops exploration_id from the incomplete exploration ids.

        Args:
            exploration_id: str. The exploration id to stop tracking.
        """
        self.exploration_ids.remove(exploration_id)

    def add_collection_id(self, collection_id):
        """Appends collection_id to the incomplete collection ids.

        Args:
            collection_id: str. The collection id to track as incomplete.
        """
        self.collection_ids.append(collection_id)

    def remove_collection_id(self, collection_id):
        """Drops collection_id from the incomplete collection ids.

        Args:
            collection_id: str. The collection id to stop tracking.
        """
        self.collection_ids.remove(collection_id)

    def add_story_id(self, story_id):
        """Appends story_id to the incomplete story ids.

        Args:
            story_id: str. The story id to track as incomplete.
        """
        self.story_ids.append(story_id)

    def remove_story_id(self, story_id):
        """Drops story_id from the incomplete story ids.

        Args:
            story_id: str. The story id to stop tracking.
        """
        self.story_ids.remove(story_id)

    def add_partially_learnt_topic_id(self, partially_learnt_topic_id):
        """Appends the topic id to the partially learnt topic ids.

        Args:
            partially_learnt_topic_id: str. The topic id to track as
                partially learnt.
        """
        self.partially_learnt_topic_ids.append(partially_learnt_topic_id)

    def remove_partially_learnt_topic_id(self, partially_learnt_topic_id):
        """Drops the topic id from the partially learnt topic ids.

        Args:
            partially_learnt_topic_id: str. The topic id to stop tracking.
        """
        self.partially_learnt_topic_ids.remove(partially_learnt_topic_id)
class CompletedActivities:
    """Domain object for the activities completed by learner model."""

    def __init__(
            self, user_id, exploration_ids, collection_ids, story_ids,
            learnt_topic_ids, mastered_topic_ids=None):
        # The model id is simply the owning user's id.
        self.id = user_id
        self.exploration_ids = exploration_ids
        self.collection_ids = collection_ids
        self.story_ids = story_ids
        self.learnt_topic_ids = learnt_topic_ids
        self.mastered_topic_ids = mastered_topic_ids

    def add_exploration_id(self, exploration_id):
        """Appends exploration_id to the completed exploration ids.

        Args:
            exploration_id: str. The exploration id to record as completed.
        """
        self.exploration_ids.append(exploration_id)

    def remove_exploration_id(self, exploration_id):
        """Drops exploration_id from the completed exploration ids.

        Args:
            exploration_id: str. The exploration id to remove.
        """
        self.exploration_ids.remove(exploration_id)

    def add_collection_id(self, collection_id):
        """Appends collection_id to the completed collection ids.

        Args:
            collection_id: str. The collection id to record as completed.
        """
        self.collection_ids.append(collection_id)

    def remove_collection_id(self, collection_id):
        """Drops collection_id from the completed collection ids.

        Args:
            collection_id: str. The collection id to remove.
        """
        self.collection_ids.remove(collection_id)

    def add_story_id(self, story_id):
        """Appends story_id to the completed story ids.

        Args:
            story_id: str. The story id to record as completed.
        """
        self.story_ids.append(story_id)

    def remove_story_id(self, story_id):
        """Drops story_id from the completed story ids.

        Args:
            story_id: str. The story id to remove.
        """
        self.story_ids.remove(story_id)

    def add_learnt_topic_id(self, learnt_topic_id):
        """Appends the topic id to the learnt topic ids.

        Args:
            learnt_topic_id: str. The topic id to record as learnt.
        """
        self.learnt_topic_ids.append(learnt_topic_id)

    def remove_learnt_topic_id(self, learnt_topic_id):
        """Drops the topic id from the learnt topic ids.

        Args:
            learnt_topic_id: str. The topic id to remove.
        """
        self.learnt_topic_ids.remove(learnt_topic_id)
class LearnerGoals:
    """Domain object for the learner goals model."""

    def __init__(self, user_id, topic_ids_to_learn, topic_ids_to_master):
        # The model id is the owning user's id.
        self.id = user_id
        self.topic_ids_to_learn = topic_ids_to_learn
        self.topic_ids_to_master = topic_ids_to_master

    def add_topic_id_to_learn(self, topic_id):
        """Appends topic_id to the 'topic IDs to learn' list.

        Args:
            topic_id: str. The topic id to add to the learn list.
        """
        self.topic_ids_to_learn.append(topic_id)

    def remove_topic_id_from_learn(self, topic_id):
        """Removes topic_id from the 'topic IDs to learn' list.

        Args:
            topic_id: str. The id of the topic to be removed.
        """
        self.topic_ids_to_learn.remove(topic_id)

    def to_dict(self):
        """Returns the dict representation of this LearnerGoals object.

        Returns:
            dict. Maps 'topic_ids_to_learn' and 'topic_ids_to_master' to
            their current list values.
        """
        return {
            'topic_ids_to_learn': self.topic_ids_to_learn,
            'topic_ids_to_master': self.topic_ids_to_master
        }
class LearnerPlaylist:
    """Domain object for the learner playlist model."""

    def __init__(self, user_id, exploration_ids, collection_ids):
        # The model id is the owning user's id.
        self.id = user_id
        self.exploration_ids = exploration_ids
        self.collection_ids = collection_ids

    def insert_exploration_id_at_given_position(
            self, exploration_id, position_to_be_inserted):
        """Places exploration_id at a specific index in the play-later list.

        Args:
            exploration_id: str. The exploration id to be inserted into the
                play later list.
            position_to_be_inserted: int. The index at which to insert it.
        """
        self.exploration_ids.insert(
            position_to_be_inserted, exploration_id)

    def add_exploration_id_to_list(self, exploration_id):
        """Appends exploration_id to the end of the play-later list.

        Args:
            exploration_id: str. The exploration id to append.
        """
        self.exploration_ids.append(exploration_id)

    def insert_collection_id_at_given_position(
            self, collection_id, position_to_be_inserted):
        """Places collection_id at a specific index in the play-later list.

        Args:
            collection_id: str. The collection id to be inserted into the
                play later list.
            position_to_be_inserted: int. The index at which to insert it.
        """
        self.collection_ids.insert(position_to_be_inserted, collection_id)

    def add_collection_id_to_list(self, collection_id):
        """Appends collection_id to the end of the play-later list.

        Args:
            collection_id: str. The collection id to append.
        """
        self.collection_ids.append(collection_id)

    def remove_exploration_id(self, exploration_id):
        """Drops exploration_id from the learner playlist.

        Args:
            exploration_id: str. The id of the exploration to be removed.
        """
        self.exploration_ids.remove(exploration_id)

    def remove_collection_id(self, collection_id):
        """Drops collection_id from the learner playlist.

        Args:
            collection_id: str. The id of the collection to be removed.
        """
        self.collection_ids.remove(collection_id)
class UserContributionProficiency:
    """Domain object for UserContributionProficiencyModel."""

    def __init__(self, user_id, score_category, score, onboarding_email_sent):
        self.user_id = user_id
        self.score_category = score_category
        self.score = score
        self.onboarding_email_sent = onboarding_email_sent

    def increment_score(self, increment_by):
        """Raises the user's score in this category by the given amount.

        Under the initial scoring scheme each successful contribution is
        worth +1 and scores never decrease.

        Args:
            increment_by: float. The amount to add to the user's score.
        """
        self.score += increment_by

    def can_user_review_category(self):
        """Checks whether the user may review suggestions in score_category.

        Returns:
            bool. True when the user's score meets the minimum threshold
            required to review.
        """
        return self.score >= feconf.MINIMUM_SCORE_REQUIRED_TO_REVIEW

    def mark_onboarding_email_as_sent(self):
        """Records that the onboarding email has been sent."""
        self.onboarding_email_sent = True
class UserContributionRights:
"""Domain object for the UserContributionRightsModel."""
    def __init__(
            self, user_id, can_review_translation_for_language_codes,
            can_review_voiceover_for_language_codes, can_review_questions,
            can_submit_questions):
        """Constructs a UserContributionRights domain object.

        Args:
            user_id: str. The unique ID of the user; stored as `self.id`.
            can_review_translation_for_language_codes: list(str). Language
                codes for which the user may review translation suggestions.
            can_review_voiceover_for_language_codes: list(str). Language
                codes for which the user may review voiceover applications.
            can_review_questions: bool. Whether the user may review question
                suggestions.
            can_submit_questions: bool. Whether the user may submit question
                suggestions.
        """
        self.id = user_id
        self.can_review_translation_for_language_codes = (
            can_review_translation_for_language_codes)
        self.can_review_voiceover_for_language_codes = (
            can_review_voiceover_for_language_codes)
        self.can_review_questions = can_review_questions
        self.can_submit_questions = can_submit_questions
def can_review_at_least_one_item(self):
"""Checks whether user has rights to review at least one item.
Returns:
boolean. Whether user has rights to review at east one item.
"""
return (
self.can_review_translation_for_language_codes or
self.can_review_voiceover_for_language_codes or
self.can_review_questions)
def validate(self):
"""Validates different attributes of the class."""
if not isinstance(self.can_review_translation_for_language_codes, list):
raise utils.ValidationError(
'Expected can_review_translation_for_language_codes to be a '
'list, found: %s' % type(
self.can_review_translation_for_language_codes))
for language_code in self.can_review_translation_for_language_codes:
if not utils.is_supported_audio_language_code(language_code):
raise utils.ValidationError('Invalid language_code: %s' % (
language_code))
if len(self.can_review_translation_for_language_codes) != len(set(
self.can_review_translation_for_language_codes)):
raise utils.ValidationError(
'Expected can_review_translation_for_language_codes list not '
'to have duplicate values, found: %s' % (
self.can_review_translation_for_language_codes))
if not isinstance(self.can_review_voiceover_for_language_codes, list):
raise utils.ValidationError(
'Expected can_review_voiceover_for_language_codes to be a '
'list, found: %s' % type(
self.can_review_voiceover_for_language_codes))
for language_code in self.can_review_voiceover_for_language_codes:
if not utils.is_supported_audio_language_code(language_code):
raise utils.ValidationError('Invalid language_code: %s' % (
language_code))
if len(self.can_review_voiceover_for_language_codes) != len(set(
self.can_review_voiceover_for_language_codes)):
raise utils.ValidationError(
'Expected can_review_voiceover_for_language_codes list not to '
'have duplicate values, found: %s' % (
self.can_review_voiceover_for_language_codes))
if not isinstance(self.can_review_questions, bool):
raise utils.ValidationError(
'Expected can_review_questions to be a boolean value, '
'found: %s' % type(self.can_review_questions))
if not isinstance(self.can_submit_questions, bool):
raise utils.ValidationError(
'Expected can_submit_questions to be a boolean value, '
'found: %s' % type(self.can_submit_questions))
class ModifiableUserData:
    """Domain object to represent the new values in a UserSettingsModel change
    submitted by the Android client.
    """

    # Latest schema version that from_raw_dict can process.
    CURRENT_SCHEMA_VERSION = 1

    def __init__(
            self, display_alias, pin, preferred_language_codes,
            preferred_site_language_code, preferred_audio_language_code,
            user_id=None):
        """Constructs a ModifiableUserData domain object.

        Args:
            display_alias: str. Display alias of the user shown on Android.
            pin: str or None. PIN of the user used for PIN based
                authentication on Android. None if it hasn't been set till
                now.
            preferred_language_codes: list(str) or None. Exploration language
                preferences specified by the user.
            preferred_site_language_code: str or None. System language
                preference.
            preferred_audio_language_code: str or None. Audio language
                preference.
            user_id: str or None. User ID of the user whose data is being
                updated. None if request did not have a user_id for the user
                yet and expects the backend to create a new user entry for
                it.
        """
        self.display_alias = display_alias
        self.pin = pin
        self.preferred_language_codes = preferred_language_codes
        self.preferred_site_language_code = preferred_site_language_code
        self.preferred_audio_language_code = preferred_audio_language_code
        # The user_id is not intended to be a modifiable attribute, it is
        # just needed to identify the object.
        self.user_id = user_id

    @classmethod
    def from_dict(cls, modifiable_user_data_dict):
        """Return a ModifiableUserData domain object from a dict.

        Args:
            modifiable_user_data_dict: dict. The dict representation of
                ModifiableUserData object.

        Returns:
            ModifiableUserData. The corresponding ModifiableUserData domain
            object.
        """
        # Use cls(...) rather than a hard-coded class name so subclasses
        # get instances of their own type.
        return cls(
            modifiable_user_data_dict['display_alias'],
            modifiable_user_data_dict['pin'],
            modifiable_user_data_dict['preferred_language_codes'],
            modifiable_user_data_dict['preferred_site_language_code'],
            modifiable_user_data_dict['preferred_audio_language_code'],
            modifiable_user_data_dict['user_id'],
        )

    @classmethod
    def from_raw_dict(cls, raw_user_data_dict):
        """Converts the raw_user_data_dict into a ModifiableUserData domain
        object by converting it according to the latest schema format.

        Args:
            raw_user_data_dict: dict. The input raw form of user_data dict
                coming from the controller layer, which has to be converted.

        Returns:
            ModifiableUserData. The domain object representing the user data
            dict transformed according to the latest schema version.

        Raises:
            Exception. No schema version specified.
            Exception. Schema version is not of type int.
            Exception. Invalid schema version.
        """
        data_schema_version = raw_user_data_dict.get('schema_version')

        if data_schema_version is None:
            raise Exception(
                'Invalid modifiable user data: no schema version specified.')
        # NOTE: bool is a subclass of int, so True/False would slip through
        # this check — same as the original behavior; confirm whether that
        # matters to callers before tightening it.
        if not isinstance(data_schema_version, int):
            raise Exception(
                'Version has invalid type, expected int, '
                'received %s' % type(data_schema_version)
            )
        # The type was already verified above, so only the range needs
        # checking here (the original re-tested isinstance redundantly).
        if (
                data_schema_version < 1 or
                data_schema_version > cls.CURRENT_SCHEMA_VERSION
        ):
            raise Exception(
                'Invalid version %s received. At present we can only process v1'
                ' to v%s modifiable user data.' % (
                    data_schema_version, cls.CURRENT_SCHEMA_VERSION)
            )
        return cls.from_dict(raw_user_data_dict)
| |
import json
import mimetypes
import os
from flask import send_from_directory, Response, request, render_template
from flask_login import current_user
from lmda import app, start_last_modified, db
from lmda.models import Thumbnail, Authority, File
from lmda.views import paste
# Register .webm explicitly: some platforms' default mimetypes tables lack
# an entry for it, and uploads would otherwise be served as a fallback type.
mimetypes.add_type('video/webm', 'webm')
class JsonResponse:
    """Mutable envelope for JSON API responses.

    Starts with an empty ``errors`` list; endpoints attach further
    attributes ad hoc (e.g. ``files``, ``thumbnails``) before serializing
    the object with ResponseEncoder.
    """

    def __init__(self):
        # Non-empty only when the request failed.
        self.errors = []
class PastUpload:
    """Serializable view of a previously uploaded file.

    Attribute names mirror the keys the frontend expects in the JSON
    payload produced by ResponseEncoder.
    """

    def __init__(self, id, name, local_name, extension, has_thumb):
        self.id, self.name = id, name
        self.local_name, self.extension = local_name, extension
        self.has_thumb = has_thumb
class ReturnThumbnail:
    """Serializable view of one thumbnail record for JSON responses."""

    def __init__(self, url, parent, width, height):
        self.url = url
        # Name of the file this thumbnail belongs to.
        self.parent = parent
        self.width = width
        self.height = height
class ResponseEncoder(json.JSONEncoder):
    """JSON encoder that serializes plain objects via their ``__dict__``.

    ``json.JSONEncoder`` only calls ``default()`` for values it cannot
    serialize natively, so list/dict/str/int/... never reach this hook —
    the original isinstance branch for those types was dead code.
    """

    def default(self, obj):
        try:
            return obj.__dict__
        except AttributeError:
            # Objects without a __dict__ (e.g. sets): defer to the base
            # implementation so callers get the conventional TypeError
            # rather than a confusing AttributeError.
            return json.JSONEncoder.default(self, obj)
@app.route('/file/<name>', methods=['DELETE'])
def delete_file(name):
    """Delete an uploaded file owned by the requester.

    Authenticates via the session, or via an ``api_key`` form field when the
    session is anonymous. On failure returns HTTP 400 with a JSON body whose
    ``errors`` list explains why; on success removes the file from disk and
    from the database.
    """
    api_key = request.form.get('api_key')
    # TODO delete pastes too
    response = JsonResponse()

    user = current_user
    if user.is_anonymous and api_key is not None:
        from lmda.models import User
        user = User.by_api_key(api_key)

    if user is None or user.is_anonymous:
        response.errors = ['Not signed in']
        return Response(json.dumps(response, cls=ResponseEncoder), status=400, mimetype='application/json')

    file = File.by_name(name)
    if file is None:
        response.errors = ["File doesn't exist"]
        return Response(json.dumps(response, cls=ResponseEncoder), status=400, mimetype='application/json')

    # BUG FIX: ownership must be compared with ``!=``; the original used
    # ``is not``, which compares object identity and only appears to work
    # for small interned ints.
    if file.owner != user.id:
        response.errors = ['Not your file']
        return Response(json.dumps(response, cls=ResponseEncoder), status=400, mimetype='application/json')

    filename = file.name + '.' + file.extension
    os.remove(os.path.join(os.getcwd(), app.config['UPLOAD_FOLDER'], filename))
    db.session.delete(file)
    db.session.commit()
    return Response(json.dumps(response, cls=ResponseEncoder), mimetype='application/json')
@app.route('/<name>', methods=['GET'])
def view_image(name):
    """Serve an uploaded file by name, falling back to paste rendering.

    Three cases:
      * ``name`` has no extension: probe each allowed extension on disk and
        serve the first match.
      * ``name`` has an extension and the file exists: serve it directly.
      * Otherwise: treat ``name`` as a paste id.
    """
    path = os.path.join(app.config['UPLOAD_FOLDER'], name)
    if '.' not in name:
        # Extensionless URL: try every allowed extension until one exists.
        for extension in app.config['ALLOWED_TYPES']:
            if os.path.isfile(path + '.' + extension):  # file exists
                if 'If-Modified-Since' in request.headers:
                    # TODO check file mod time
                    # NOTE(review): this answers 304 whenever the client
                    # sends If-Modified-Since at all, without comparing
                    # timestamps — a changed file is never re-sent. Confirm
                    # uploads are immutable before relying on this.
                    return Response(status=304)
                # Fall back to a generic binary type for extensions missing
                # from the mimetypes table.
                return send_from_directory(os.getcwd() + '/' + app.config['UPLOAD_FOLDER'], name + '.' + extension,
                                           mimetype=mimetypes.types_map.get('.' + extension, 'application/octet-stream'))
    elif os.path.isfile(os.getcwd() + '/' + path):
        if 'If-Modified-Since' in request.headers:
            # TODO check file mod time
            return Response(status=304)
        filename, file_extension = os.path.splitext(path)
        return send_from_directory(os.getcwd() + '/' + app.config['UPLOAD_FOLDER'], name,
                                   mimetype=mimetypes.types_map.get(file_extension, 'application/octet-stream'))
    # No matching file on disk: assume the name refers to a paste.
    return paste.view_paste(name)
@app.route('/api/file/thumbnails/<name>')
def get_thumbnails(name):
    """Return every thumbnail recorded for the named file as JSON."""
    response = JsonResponse()
    response.thumbnails = [
        ReturnThumbnail(thumb.url, thumb.parent_name, thumb.width, thumb.height)
        for thumb in Thumbnail.by_parent(name)
    ]
    return Response(json.dumps(response, cls=ResponseEncoder), mimetype='application/json')
@app.route('/api/admin/uploads')
def get_admin_uploads():
    """Paginated, filterable listing of all uploads, for admin users.

    Query params: ``n`` (page size, clamped to 1..50), ``page``,
    ``nameContains`` (substring filter on the original filename) and
    ``ownerUsername``. Requires a signed-in user with an Authority record.
    """
    api_key = request.form.get('api_key')
    n = int(request.args.get('n', 10))
    page_num = int(request.args.get('page', 1))
    search_text = request.args.get('nameContains', None)
    owner_username = request.args.get('ownerUsername')

    # Clamp the page size to the 1..50 range.
    n = max(min(n, 50), 1)

    user = current_user
    if user.is_anonymous and api_key is not None:
        from lmda.models import User
        user = User.by_api_key(api_key)

    response = JsonResponse()
    if user is None or user.is_anonymous:
        response.errors = ['Not signed in']
        return Response(json.dumps(response, cls=ResponseEncoder), status=400, mimetype='application/json')

    if Authority.by_user_id(user.id) is None:
        response.errors = ['No authority']
        return Response(json.dumps(response, cls=ResponseEncoder), status=400, mimetype='application/json')

    from lmda.models import File, User

    query = File.query
    if owner_username is not None:
        target_owner = User.by_name(owner_username)
        if target_owner is not None:
            query = query.filter(File.owner == target_owner.id)
    if search_text is not None:
        query = query.filter(File.local_name.ilike('%' + search_text + '%'))

    pagination = query.order_by(File.id.desc()).paginate(page=page_num, per_page=n)
    response.files = [
        PastUpload(f.id, f.name, f.local_name, f.extension, f.has_thumbnail)
        for f in pagination.items
    ]
    response.number_pages = pagination.pages
    return Response(json.dumps(response, cls=ResponseEncoder), mimetype='application/json')
@app.route('/api/user/uploads')
def get_past_uploads():
    """Paginated listing of the authenticated user's own uploads.

    Query params: ``n`` (page size, clamped to 1..50), ``page`` and
    ``nameContains`` (substring filter on the original filename).
    Authenticates via the session or an ``api_key`` form field.
    """
    api_key = request.form.get('api_key')
    n = int(request.args.get('n', 10))
    page_num = int(request.args.get('page', 1))
    searchText = request.args.get('nameContains', None)

    n = max(min(n, 50), 1)  # Clamp n between 1 and 50

    user = current_user
    if user.is_anonymous and api_key is not None:
        from lmda.models import User
        user = User.by_api_key(api_key)

    response = JsonResponse()
    # BUG FIX: authorize against ``user`` (which may come from the api_key
    # lookup), not ``current_user`` — the original checked
    # ``current_user.is_anonymous`` and therefore rejected every request
    # authenticated purely by api_key, since current_user stays anonymous
    # for those.
    if user is None or user.is_anonymous:
        response.errors = ['Not signed in']
        return Response(json.dumps(response, cls=ResponseEncoder), status=400, mimetype='application/json')

    from lmda.models import File
    query = File.query.filter(File.owner == user.id).order_by(File.id.desc())
    if searchText is not None:
        query = query.filter(File.local_name.ilike('%' + searchText + '%'))

    pagination = query.paginate(page=page_num, per_page=n)
    files = []
    for f in pagination.items:
        files.append(PastUpload(f.id, f.name, f.local_name, f.extension, f.has_thumbnail))

    response.files = files
    response.number_pages = pagination.pages
    return Response(json.dumps(response, cls=ResponseEncoder), mimetype='application/json')
@app.route('/user/uploads')
def view_past_uploads():
    """Render the past-uploads page, honouring start-time cache validation."""
    headers = request.headers
    # The page only changes when the server restarts, so the start-time
    # Last-Modified value is a sufficient cache validator.
    if 'If-Modified-Since' in headers and headers['If-Modified-Since'] == start_last_modified:
        return Response(status=304)
    page = Response(render_template('pastUploads.html'))
    page.headers['Last-Modified'] = start_last_modified
    return page
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.