# === ddp.py ===
# Copyright 2021 MosaicML. All Rights Reserved.
from __future__ import annotations
import collections.abc
import logging
import os
import subprocess
import sys
import time
import warnings
from abc import ABC, abstractmethod
from dataclasses import dataclass
from threading import Thread
from typing import Callable, Iterator, List, Optional, Sequence, TypeVar, cast
import torch
import torch.distributed
import torch.utils.data
import yahp as hp
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data.distributed import DistributedSampler
from composer.core.state import State
from composer.core.types import Batch, DataLoader, Model, Tensor
from composer.datasets import DataloaderHparams, DataloaderSpec, WrappedDataLoader
# Module-level logger used by the DDP launcher/monitor for diagnostics.
logger = logging.getLogger(__name__)

# Generic type variable for objects passed through the collective gather helpers.
TObj = TypeVar("TObj")
class DataloaderMultipleIterationWarning(Warning):
    """Warning emitted when a new iteration over a DDP dataloader begins
    before the previous iteration (epoch) has been exhausted."""
class DDPDataLoader(WrappedDataLoader):
    """
    DDPDataLoader wraps a dataloader and a distributed sampler to ensure that
    sampler.set_epoch() is called after each iteration (epoch) through the dataset
    See https://pytorch.org/docs/stable/data.html#torch.utils.data.distributed.DistributedSampler
    """

    def __init__(self, dataloader: DataLoader) -> None:
        super().__init__(dataloader)
        # A DistributedSampler is required so each rank sees its own shard and
        # so set_epoch() can re-seed the shuffle between epochs.
        if not isinstance(self.dataloader.sampler, DistributedSampler):
            raise ValueError("When using the DDP data loader, the sampler must be a DistributedSampler")
        # In-flight iterator for the current epoch; None when no epoch is active.
        self._iterator: Optional[Iterator[Batch]] = None

    def __iter__(self) -> DDPDataLoader:
        if self._iterator is not None:
            # A previous epoch is still in flight: abandon it and advance the
            # sampler's epoch so the next pass shuffles as a fresh epoch.
            warnings.warn(
                "The dataloader detected the start of a new iteration before the previous iteration finished. "
                "The dataloader is skipping ahead to the start of the next epoch. "
                "Multiple simultaneous iterations through the DDP dataloader prohibited, since "
                "it automatically tracks the current epoch.",
                category=DataloaderMultipleIterationWarning)
            assert isinstance(self.sampler, DistributedSampler)
            self.sampler.set_epoch(epoch=self.sampler.epoch + 1)
        self._iterator = iter(self.dataloader)
        return self

    def __next__(self) -> Batch:
        assert self._iterator is not None
        try:
            return next(self._iterator)
        except StopIteration:
            # Epoch completed normally: clear the in-flight iterator and bump
            # the sampler epoch before propagating StopIteration to the caller.
            self._iterator = None
            assert isinstance(self.sampler, DistributedSampler)
            self.sampler.set_epoch(epoch=self.sampler.epoch + 1)
            raise
class DDP:
    """Coordinates distributed data parallel (DDP) training.

    Driven from :meth:`launch`, this class:

    * spawns one worker process per device on this node (``nproc_per_node``),
    * initializes ``torch.distributed`` with the configured backend and store,
    * wraps models (:meth:`prepare_module`) and dataloaders
      (:meth:`create_dataloader`) for distributed execution,
    * exposes collective helpers (:meth:`barrier`, :meth:`all_reduce`,
      :meth:`all_gather`, :meth:`all_gather_object`).
    """

    def __init__(self,
                 *,
                 nproc_per_node: int,
                 store_hparams: StoreHparams,
                 node_rank: int,
                 num_nodes: int,
                 backend: str,
                 fork_rank_0: bool,
                 find_unused_parameters: bool = False):
        """
        Args:
            nproc_per_node: Worker processes (typically one per GPU) per node.
            store_hparams: Hyperparameters for the rendezvous store passed to
                ``torch.distributed.init_process_group``.
            node_rank: Index of this node among ``num_nodes``.
            num_nodes: Total number of nodes participating in the job.
            backend: ``torch.distributed`` backend name (e.g. ``"nccl"``, ``"gloo"``).
            fork_rank_0: If True, local rank 0 also runs in a forked subprocess;
                otherwise the current process becomes local rank 0.
            find_unused_parameters: Forwarded to ``DistributedDataParallel``;
                set True when some parameters may not receive gradients each step.

        Raises:
            ValueError: If the ``nccl`` backend is requested but CUDA, enough
                GPUs, or NCCL support are not available.
        """
        self.nproc_per_node = nproc_per_node
        self.world_size = num_nodes * nproc_per_node
        self.num_nodes = num_nodes
        self.node_rank = node_rank
        self.store_hparams = store_hparams
        self.last_return_code: Optional[int] = None
        self.backend = backend
        self.fork_rank_0 = fork_rank_0
        self.processes: List[subprocess.Popen[str]] = []
        self.find_unused_parameters = find_unused_parameters
        if backend == 'nccl':
            # Fail fast, before any worker processes are spawned.
            if not torch.cuda.is_available():
                raise ValueError('CUDA not available but gpu backend requested.')
            if torch.cuda.device_count() < nproc_per_node:
                raise ValueError(f'Requested {nproc_per_node} GPUs, but '
                                 f'only {torch.cuda.device_count()} available.')
            if not torch.distributed.is_nccl_available():
                raise ValueError('Requested NCCL backend not available in torch.distributed')

    def barrier(self) -> None:
        """Synchronize all ranks; no-op when torch.distributed is unavailable."""
        # NOTE(review): this guards on is_available() (build-time support), not
        # is_initialized(); calling before init_process_group would fail on a
        # distributed-enabled torch build -- verify this is only called after
        # launch() has initialized the process group.
        if torch.distributed.is_available():
            torch.distributed.barrier()
        # If not on DDP, then do nothing

    def all_reduce(
        self,
        tensor: torch.Tensor,
        reduce_operation: str = "SUM",
    ) -> None:
        """All-reduce ``tensor`` in place across all ranks.

        Args:
            tensor: Tensor to reduce; overwritten with the result on all ranks.
            reduce_operation: Name of a ``torch.distributed.ReduceOp`` member
                (case-insensitive), e.g. ``"SUM"``, ``"PRODUCT"``, ``"MAX"``.
        """
        if torch.distributed.is_available():
            reduce_op = getattr(torch.distributed.ReduceOp, reduce_operation.upper())
            torch.distributed.all_reduce(tensor, op=reduce_op)
        else:
            raise NotImplementedError("Non-DDP versions of reduce operations are not yet implemented")

    def all_gather(self, tensor: torch.Tensor) -> Sequence[Tensor]:
        """all_gather collects a tensor from each rank, and returns a sequence of
        tensors indexed by rank on every rank.

        Args:
            tensor (torch.Tensor): tensor from each rank to be gathered

        Returns:
            Sequence[Tensor]: A sequence of tensors indexed by rank
        """
        if torch.distributed.is_available():
            obj_gather_list = [torch.zeros_like(tensor) for _ in range(self.world_size)]
            torch.distributed.all_gather(obj_gather_list, tensor)
            return obj_gather_list
        else:
            # Single-process fallback: this rank is the only participant.
            return [tensor]

    def all_gather_object(self, obj: TObj) -> List[TObj]:
        """all_gather_object collects a pickleable object from each rank, and
        returns a list of these objects indexed by rank on every rank.

        Args:
            obj (TObj): Object to be gathered

        Returns:
            List[TObj]: A list of objects indexed by rank
        """
        if torch.distributed.is_available():
            obj_gather_list = [None for _ in range(self.world_size)]
            # all_gather_object is a collective: it replaces the Nones with each
            # rank's object on every rank (not just rank 0).
            torch.distributed.all_gather_object(obj_gather_list, obj)
            return cast(List[TObj], obj_gather_list)
        else:
            return [obj]

    def launch(self, state: State, loop: Callable[[], None]):
        """Spawn per-device worker processes (when needed), initialize
        ``torch.distributed``, and run ``loop`` on this rank.

        Args:
            state: Training state; its rank fields are validated after
                process-group initialization.
            loop: The training entry point, invoked once the process group is
                ready. :meth:`cleanup` always runs afterwards.
        """
        if os.environ.get("RANK") is None:
            # RANK unset means we are the original launcher process.
            os.environ["WORLD_SIZE"] = str(self.world_size)
            logger.info("Starting DDP on node_rank(%d) with world_size(%d)", self.node_rank, self.world_size)
            if torch.distributed.is_available():
                # Adapted from torch.distributed.launch
                # set PyTorch distributed related environmental variables
                current_env = os.environ.copy()
                # TODO omp num threads -- this parameter needs to be auto-tuned
                for local_rank in range(self.nproc_per_node):
                    # each process's rank
                    global_rank = self.nproc_per_node * self.node_rank + local_rank
                    current_env["RANK"] = str(global_rank)
                    if local_rank == 0 and not self.fork_rank_0:
                        # The current process becomes local rank 0.
                        os.environ["RANK"] = str(global_rank)
                    else:
                        logger.info("Launching process for global_rank(%d) on node_rank(%d)", global_rank,
                                    self.node_rank)
                        # spawn the processes
                        cmd = [
                            sys.executable,
                            "-u",
                            *sys.argv,
                        ]
                        if local_rank == 0:
                            # Attaching rank 0 to the main stdout/stderr so interactive
                            # terminal output will work without issue (e.g. tqdm)
                            process = subprocess.Popen(cmd, env=current_env, text=True)
                        else:
                            # Other processes, except in the case of an error, should not print anything
                            process = subprocess.Popen(
                                cmd,
                                env=current_env,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                text=True,
                            )
                        self.processes.append(process)
                if self.fork_rank_0:
                    # All training happens in subprocesses; just monitor them.
                    self.monitor()
                    return
                else:
                    # This process trains as rank 0; monitor siblings in the background.
                    Thread(target=self.monitor, daemon=True).start()
            else:
                if self.world_size != 1:
                    raise ValueError("Must have world size == 1 when torch.distributed is not available")
                if self.node_rank != 0:
                    raise ValueError("Must have a node_rank == 0 when torch.distributed is not available")
                os.environ["RANK"] = "0"
        # We are now on the correct process
        global_rank = int(os.environ["RANK"])
        # BUGFIX: global_rank = nproc_per_node * node_rank + local_rank, so the
        # node rank is recovered by dividing by nproc_per_node. The previous
        # divisor (world_size) always yielded 0 and failed on any node_rank > 0.
        assert global_rank // self.nproc_per_node == self.node_rank
        assert os.environ["WORLD_SIZE"] == str(
            self.world_size
        ), f"os.environ['WORLD_SIZE']({os.environ['WORLD_SIZE']}) != self.world_size({self.world_size})"
        is_main = global_rank == 0
        if torch.distributed.is_available():
            logger.info("Initializing ddp: GLOBAL_RANK: %s, WORLD_SIZE: %s", global_rank, self.world_size)
            # NOTE(review): uses state.world_size here but self.world_size for
            # init_process_group -- verify these are guaranteed to agree.
            store = self.store_hparams.initialize_object(is_main, state.world_size)
            torch.distributed.init_process_group(self.backend,
                                                 rank=global_rank,
                                                 world_size=self.world_size,
                                                 store=store)
            assert torch.distributed.is_initialized()
            assert state.is_rank_set, "state.is_rank_set should be set after torch.distributed is initialized"
            assert state.local_rank == global_rank % self.nproc_per_node, "state.local_rank is incorrect"
            assert state.nproc_per_node == self.nproc_per_node, "state.nproc_per_node is incorrect"
            assert state.global_rank == torch.distributed.get_rank(
            ), "state.global_rank != torch.distributed.get_rank()"
            logger.info("All DDP processes registered. world_size=%s.", self.world_size)
        logger.info("Starting process with global_rank=%s", global_rank)
        try:
            loop()
        finally:
            self.cleanup()

    def prepare_module(self, module: Model) -> Model:
        """Wrap ``module`` in ``DistributedDataParallel`` when distributed
        training is available and the module has trainable parameters."""
        if torch.distributed.is_available():
            # DDP requires at least one parameter with requires_grad=True.
            if any((p.requires_grad for p in module.parameters())):
                ddp_model = DistributedDataParallel(module, find_unused_parameters=self.find_unused_parameters)
                return cast(Model, ddp_model)
            return module
        else:
            return module

    def create_dataloader(self, batch_size: int, dataloader_hparams: DataloaderHparams,
                          dataloader_spec: DataloaderSpec) -> DataLoader:
        """Build a dataloader, sharded per rank (and wrapped in
        :class:`DDPDataLoader`) when distributed training is available.

        Args:
            batch_size: Per-process batch size.
            dataloader_hparams: Hyperparameters used to construct the dataloader.
            dataloader_spec: Dataset, shuffling, and drop-last configuration.
        """
        if torch.distributed.is_available():
            sampler = torch.utils.data.DistributedSampler[int](dataloader_spec.dataset,
                                                               drop_last=dataloader_spec.drop_last,
                                                               shuffle=dataloader_spec.shuffle)
        else:
            # RandomSampler needs a sized dataset to know its length.
            assert isinstance(dataloader_spec.dataset, collections.abc.Sized)
            sampler = torch.utils.data.RandomSampler(dataloader_spec.dataset, generator=dataloader_spec.generator)
        dataloader = dataloader_hparams.initialize_object(batch_size, sampler, dataloader_spec)
        if torch.distributed.is_available():
            # DDPDataLoader keeps the DistributedSampler's epoch in sync.
            dataloader = DDPDataLoader(dataloader)
        return dataloader

    def monitor(self) -> None:
        # Monitor checks whether any subprocesses have died unexpectedly
        alive_processes = set(self.processes)
        while len(alive_processes) > 0:
            finished_processes: List[subprocess.Popen[str]] = []
            for process in alive_processes:
                if process.poll() is None:
                    # the process is still running
                    continue
                else:
                    if process.returncode != 0:
                        # Collect whatever output the failed process produced so
                        # it can be surfaced in the raised/logged error.
                        if process.stdout is None:
                            output = ""
                        else:
                            output = process.stdout.read()
                        if process.stderr is None:
                            stderr = ""
                        else:
                            stderr = process.stderr.read()
                        exc = subprocess.CalledProcessError(
                            process.returncode,
                            cmd=process.args,
                            output=output,
                            stderr=stderr,
                        )
                        if self.fork_rank_0:
                            # Launcher process: propagate the failure to the caller.
                            raise exc
                        else:
                            # We are rank 0 itself; log and exit with failure.
                            logger.exception("Error in subprocess", exc_info=exc)
                            sys.exit(1)
                    else:
                        # exited cleanly
                        finished_processes.append(process)
            alive_processes = set(alive_processes) - set(finished_processes)
            time.sleep(1)

    def cleanup(self) -> None:
        """Kill any remaining worker subprocesses and tear down the process group."""
        for process in self.processes:
            logger.info("Killing subprocess %s", process.pid)
            try:
                process.kill()
            except Exception:
                # Best-effort: the process may have already exited.
                pass
        if torch.distributed.is_initialized():
            torch.distributed.destroy_process_group()
@dataclass
class StoreHparams(hp.Hparams, ABC):
    """Abstract hyperparameters for the ``torch.distributed`` rendezvous store."""

    @abstractmethod
    def initialize_object(self, is_main: bool, world_size: int) -> torch.distributed.Store:
        """Create the store.

        Args:
            is_main: True on global rank 0 (the store host, where applicable).
            world_size: Total number of participating processes.
        """
        pass
@dataclass
class TCPStoreHparams(StoreHparams):
    """Hyperparameters for a TCP-based rendezvous store hosted by rank 0."""

    host_name: str = hp.optional(doc="Rank 0 address", default="127.0.0.1")
    port: int = hp.optional(doc="Rank 0 port", default=43297)

    def initialize_object(self, is_main: bool, world_size: int) -> torch.distributed.Store:
        # The main process hosts the TCPStore; all other ranks connect to it.
        return torch.distributed.TCPStore(self.host_name, self.port, world_size, is_main)
@dataclass
class FileStoreHparams(StoreHparams):
    """Hyperparameters for a shared-filesystem rendezvous store."""

    file_name: str = hp.required(doc="Path to store file")

    def initialize_object(self, is_main: bool, world_size: int) -> torch.distributed.Store:
        # FileStore has no host/client distinction, so is_main is unused here.
        return torch.distributed.FileStore(self.file_name, world_size)
@dataclass
class DDPHparams(hp.Hparams):
    """Hyperparameters for constructing a :class:`DDP` trainer helper."""

    # Maps the "store" field's YAML keys to concrete StoreHparams subclasses.
    hparams_registry = {
        "store": {
            "tcp": TCPStoreHparams,
            "file": FileStoreHparams,
        }
    }

    store: StoreHparams = hp.optional(doc="Store", default_factory=TCPStoreHparams)
    node_rank: int = hp.optional(doc="Node ID for multi-node training", default=0)
    num_nodes: int = hp.optional(doc="Number of nodes used for training", default=1)
    fork_rank_0: bool = hp.optional(
        doc="Whether to fork the local rank 0 process, or use the existing process for rank 0 training.",
        default=False,
    )

    def initialize_object(self, nproc_per_node: int, backend: str, find_unused_parameters: bool) -> DDP:
        """Construct a :class:`DDP` instance from these hyperparameters."""
        return DDP(
            backend=backend,
            nproc_per_node=nproc_per_node,
            store_hparams=self.store,
            node_rank=self.node_rank,
            num_nodes=self.num_nodes,
            fork_rank_0=self.fork_rank_0,
            find_unused_parameters=find_unused_parameters,
        )
# === test_sys.py ===
import builtins
import codecs
import gc
import locale
import operator
import os
import struct
import subprocess
import sys
import sysconfig
import test.support
from test import support
from test.support import os_helper
from test.support.script_helper import assert_python_ok, assert_python_failure
from test.support import threading_helper
import textwrap
import unittest
import warnings
# count the number of test runs, used to create unique
# strings to intern in test_intern()
# (mutated by SysModuleTest.test_intern on every run)
INTERN_NUMRUNS = 0
class DisplayHookTest(unittest.TestCase):
    """Tests for sys.displayhook / sys.__displayhook__ behavior."""

    def test_original_displayhook(self):
        # The default hook prints the value, stores it in builtins._,
        # and prints nothing (and stores nothing) for None.
        dh = sys.__displayhook__
        with support.captured_stdout() as out:
            dh(42)
        self.assertEqual(out.getvalue(), "42\n")
        self.assertEqual(builtins._, 42)
        del builtins._
        with support.captured_stdout() as out:
            dh(None)
        self.assertEqual(out.getvalue(), "")
        self.assertTrue(not hasattr(builtins, "_"))
        # sys.displayhook() requires arguments
        self.assertRaises(TypeError, dh)
        stdout = sys.stdout
        try:
            # A missing sys.stdout must raise RuntimeError, not crash.
            del sys.stdout
            self.assertRaises(RuntimeError, dh, 42)
        finally:
            sys.stdout = stdout

    def test_lost_displayhook(self):
        # Evaluating an expression statement with no sys.displayhook
        # must raise RuntimeError.
        displayhook = sys.displayhook
        try:
            del sys.displayhook
            code = compile("42", "<string>", "single")
            self.assertRaises(RuntimeError, eval, code)
        finally:
            sys.displayhook = displayhook

    def test_custom_displayhook(self):
        # An exception raised by a custom hook must propagate to the caller.
        def baddisplayhook(obj):
            raise ValueError

        with support.swap_attr(sys, 'displayhook', baddisplayhook):
            code = compile("42", "<string>", "single")
            self.assertRaises(ValueError, eval, code)
class ExceptHookTest(unittest.TestCase):
    """Tests for sys.excepthook / sys.__excepthook__ behavior."""

    def test_original_excepthook(self):
        # The default hook writes the formatted traceback to stderr.
        try:
            raise ValueError(42)
        except ValueError as exc:
            with support.captured_stderr() as err:
                sys.__excepthook__(*sys.exc_info())
        self.assertTrue(err.getvalue().endswith("ValueError: 42\n"))
        self.assertRaises(TypeError, sys.__excepthook__)

    def test_excepthook_bytes_filename(self):
        # bpo-37467: sys.excepthook() must not crash if a filename
        # is a bytes string
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', BytesWarning)
            try:
                raise SyntaxError("msg", (b"bytes_filename", 123, 0, "text"))
            except SyntaxError as exc:
                with support.captured_stderr() as err:
                    sys.__excepthook__(*sys.exc_info())
        err = err.getvalue()
        self.assertIn("""  File "b'bytes_filename'", line 123\n""", err)
        self.assertIn("""    text\n""", err)
        self.assertTrue(err.endswith("SyntaxError: msg\n"))

    def test_excepthook(self):
        # A non-exception "value" argument produces a TypeError report.
        with test.support.captured_output("stderr") as stderr:
            sys.excepthook(1, '1', 1)
        self.assertTrue("TypeError: print_exception(): Exception expected for " \
                        "value, str found" in stderr.getvalue())

    # FIXME: testing the code for a lost or replaced excepthook in
    # Python/pythonrun.c::PyErr_PrintEx() is tricky.
class SysModuleTest(unittest.TestCase):
    def tearDown(self):
        # Reap any child processes spawned by individual tests so they do
        # not outlive the test that created them.
        test.support.reap_children()
    def test_exit(self):
        # Exercise sys.exit() with the various argument shapes, and verify
        # exit-code/message propagation in child interpreters.
        # call with two arguments
        self.assertRaises(TypeError, sys.exit, 42, 42)
        # call without argument
        with self.assertRaises(SystemExit) as cm:
            sys.exit()
        self.assertIsNone(cm.exception.code)
        rc, out, err = assert_python_ok('-c', 'import sys; sys.exit()')
        self.assertEqual(rc, 0)
        self.assertEqual(out, b'')
        self.assertEqual(err, b'')
        # call with integer argument
        with self.assertRaises(SystemExit) as cm:
            sys.exit(42)
        self.assertEqual(cm.exception.code, 42)
        # call with tuple argument with one entry
        # entry will be unpacked
        with self.assertRaises(SystemExit) as cm:
            sys.exit((42,))
        self.assertEqual(cm.exception.code, 42)
        # call with string argument
        with self.assertRaises(SystemExit) as cm:
            sys.exit("exit")
        self.assertEqual(cm.exception.code, "exit")
        # call with tuple argument with two entries
        with self.assertRaises(SystemExit) as cm:
            sys.exit((17, 23))
        self.assertEqual(cm.exception.code, (17, 23))
        # test that the exit machinery handles SystemExits properly
        rc, out, err = assert_python_failure('-c', 'raise SystemExit(47)')
        self.assertEqual(rc, 47)
        self.assertEqual(out, b'')
        self.assertEqual(err, b'')

        def check_exit_message(code, expected, **env_vars):
            # Run `code` in a child and assert stderr starts with `expected`.
            rc, out, err = assert_python_failure('-c', code, **env_vars)
            self.assertEqual(rc, 1)
            self.assertEqual(out, b'')
            self.assertTrue(err.startswith(expected),
                            "%s doesn't start with %s" % (ascii(err), ascii(expected)))

        # test that stderr buffer is flushed before the exit message is written
        # into stderr
        check_exit_message(
            r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
            b"unflushed,message")
        # test that the exit message is written with backslashreplace error
        # handler to stderr
        check_exit_message(
            r'import sys; sys.exit("surrogates:\uDCFF")',
            b"surrogates:\\udcff")
        # test that the unicode message is encoded to the stderr encoding
        # instead of the default encoding (utf8)
        check_exit_message(
            r'import sys; sys.exit("h\xe9")',
            b"h\xe9", PYTHONIOENCODING='latin-1')
    def test_getdefaultencoding(self):
        # sys.getdefaultencoding() takes no arguments and returns a str.
        self.assertRaises(TypeError, sys.getdefaultencoding, 42)
        # can't check more than the type, as the user might have changed it
        self.assertIsInstance(sys.getdefaultencoding(), str)

    # testing sys.settrace() is done in test_sys_settrace.py
    # testing sys.setprofile() is done in test_sys_setprofile.py
    def test_switchinterval(self):
        # setswitchinterval() must reject bad arguments and round-trip values
        # through getswitchinterval().
        self.assertRaises(TypeError, sys.setswitchinterval)
        self.assertRaises(TypeError, sys.setswitchinterval, "a")
        self.assertRaises(ValueError, sys.setswitchinterval, -1.0)
        self.assertRaises(ValueError, sys.setswitchinterval, 0.0)
        orig = sys.getswitchinterval()
        # sanity check
        self.assertTrue(orig < 0.5, orig)
        try:
            for n in 0.00001, 0.05, 3.0, orig:
                sys.setswitchinterval(n)
                self.assertAlmostEqual(sys.getswitchinterval(), n)
        finally:
            # Always restore the interpreter-wide setting.
            sys.setswitchinterval(orig)
    def test_recursionlimit(self):
        # get/setrecursionlimit must validate arguments and round-trip values.
        self.assertRaises(TypeError, sys.getrecursionlimit, 42)
        oldlimit = sys.getrecursionlimit()
        self.assertRaises(TypeError, sys.setrecursionlimit)
        self.assertRaises(ValueError, sys.setrecursionlimit, -42)
        sys.setrecursionlimit(10000)
        self.assertEqual(sys.getrecursionlimit(), 10000)
        sys.setrecursionlimit(oldlimit)
    def test_recursionlimit_recovery(self):
        # After a RecursionError, the interpreter must be able to raise it
        # again (i.e. the overflow state must be fully reset).
        if hasattr(sys, 'gettrace') and sys.gettrace():
            self.skipTest('fatal error if run with a trace function')

        oldlimit = sys.getrecursionlimit()

        def f():
            f()

        try:
            for depth in (10, 25, 50, 75, 100, 250, 1000):
                try:
                    sys.setrecursionlimit(depth)
                except RecursionError:
                    # Issue #25274: The recursion limit is too low at the
                    # current recursion depth
                    continue
                # Issue #5392: test stack overflow after hitting recursion
                # limit twice
                self.assertRaises(RecursionError, f)
                self.assertRaises(RecursionError, f)
        finally:
            sys.setrecursionlimit(oldlimit)
    @test.support.cpython_only
    def test_setrecursionlimit_recursion_depth(self):
        # Issue #25274: Setting a low recursion limit must be blocked if the
        # current recursion depth is already higher than the "lower-water
        # mark". Otherwise, it may not be possible anymore to
        # reset the overflowed flag to 0.
        from _testinternalcapi import get_recursion_depth

        def set_recursion_limit_at_depth(depth, limit):
            # Recurse until `depth` is reached, then assert that setting the
            # (too low) limit raises with the expected message.
            recursion_depth = get_recursion_depth()
            if recursion_depth >= depth:
                with self.assertRaises(RecursionError) as cm:
                    sys.setrecursionlimit(limit)
                self.assertRegex(str(cm.exception),
                                 "cannot set the recursion limit to [0-9]+ "
                                 "at the recursion depth [0-9]+: "
                                 "the limit is too low")
            else:
                set_recursion_limit_at_depth(depth, limit)

        oldlimit = sys.getrecursionlimit()
        try:
            sys.setrecursionlimit(1000)
            for limit in (10, 25, 50, 75, 100, 150, 200):
                # formula extracted from _Py_RecursionLimitLowerWaterMark()
                if limit > 200:
                    depth = limit - 50
                else:
                    depth = limit * 3 // 4
                set_recursion_limit_at_depth(depth, limit)
        finally:
            sys.setrecursionlimit(oldlimit)
    # The error message is specific to CPython
    @test.support.cpython_only
    def test_recursionlimit_fatalerror(self):
        # A fatal error occurs if a second recursion limit is hit when recovering
        # from a first one.
        code = textwrap.dedent("""
            import sys
            def f():
                try:
                    f()
                except RecursionError:
                    f()
            sys.setrecursionlimit(%d)
            f()""")
        with test.support.SuppressCrashReport():
            for i in (50, 1000):
                # Run the crashing code in a child so the fatal error does not
                # take down the test process.
                sub = subprocess.Popen([sys.executable, '-c', code % i],
                                       stderr=subprocess.PIPE)
                err = sub.communicate()[1]
                self.assertTrue(sub.returncode, sub.returncode)
                self.assertIn(
                    b"Fatal Python error: _Py_CheckRecursiveCall: "
                    b"Cannot recover from stack overflow",
                    err)
    def test_getwindowsversion(self):
        # Raise SkipTest if sys doesn't have getwindowsversion attribute
        test.support.get_attribute(sys, "getwindowsversion")
        v = sys.getwindowsversion()
        # The result is a 5-tuple with typed named attributes.
        self.assertEqual(len(v), 5)
        self.assertIsInstance(v[0], int)
        self.assertIsInstance(v[1], int)
        self.assertIsInstance(v[2], int)
        self.assertIsInstance(v[3], int)
        self.assertIsInstance(v[4], str)
        self.assertRaises(IndexError, operator.getitem, v, 5)
        self.assertIsInstance(v.major, int)
        self.assertIsInstance(v.minor, int)
        self.assertIsInstance(v.build, int)
        self.assertIsInstance(v.platform, int)
        self.assertIsInstance(v.service_pack, str)
        self.assertIsInstance(v.service_pack_minor, int)
        self.assertIsInstance(v.service_pack_major, int)
        self.assertIsInstance(v.suite_mask, int)
        self.assertIsInstance(v.product_type, int)
        # Named attributes must mirror the positional elements.
        self.assertEqual(v[0], v.major)
        self.assertEqual(v[1], v.minor)
        self.assertEqual(v[2], v.build)
        self.assertEqual(v[3], v.platform)
        self.assertEqual(v[4], v.service_pack)
        # This is how platform.py calls it. Make sure tuple
        # still has 5 elements
        maj, min, buildno, plat, csd = sys.getwindowsversion()
    def test_call_tracing(self):
        # call_tracing requires the second argument to be a tuple of args.
        self.assertRaises(TypeError, sys.call_tracing, type, 2)
    @unittest.skipUnless(hasattr(sys, "setdlopenflags"),
                         'test needs sys.setdlopenflags()')
    def test_dlopenflags(self):
        # get/setdlopenflags must validate arguments and round-trip values.
        self.assertTrue(hasattr(sys, "getdlopenflags"))
        self.assertRaises(TypeError, sys.getdlopenflags, 42)
        oldflags = sys.getdlopenflags()
        self.assertRaises(TypeError, sys.setdlopenflags)
        sys.setdlopenflags(oldflags+1)
        self.assertEqual(sys.getdlopenflags(), oldflags+1)
        # Restore the original flags.
        sys.setdlopenflags(oldflags)
    @test.support.refcount_test
    def test_refcount(self):
        # n here must be a global in order for this test to pass while
        # tracing with a python function. Tracing calls PyFrame_FastToLocals
        # which will add a copy of any locals to the frame object, causing
        # the reference count to increase by 2 instead of 1.
        global n
        self.assertRaises(TypeError, sys.getrefcount)
        c = sys.getrefcount(None)
        n = None
        self.assertEqual(sys.getrefcount(None), c+1)
        del n
        self.assertEqual(sys.getrefcount(None), c)
        if hasattr(sys, "gettotalrefcount"):
            self.assertIsInstance(sys.gettotalrefcount(), int)
    def test_getframe(self):
        # sys._getframe() with no depth must return this function's own frame.
        self.assertRaises(TypeError, sys._getframe, 42, 42)
        self.assertRaises(ValueError, sys._getframe, 2000000000)
        self.assertTrue(
            SysModuleTest.test_getframe.__code__ \
            is sys._getframe().f_code
        )

    # sys._current_frames() is a CPython-only gimmick.
    @threading_helper.reap_threads
    def test_current_frames(self):
        import threading
        import traceback
        # Spawn a thread that blocks at a known place. Then the main
        # thread does sys._current_frames(), and verifies that the frames
        # returned make sense.
        entered_g = threading.Event()
        leave_g = threading.Event()
        thread_info = []  # the thread's id

        def f123():
            g456()

        def g456():
            thread_info.append(threading.get_ident())
            entered_g.set()
            leave_g.wait()

        t = threading.Thread(target=f123)
        t.start()
        entered_g.wait()
        # At this point, t has finished its entered_g.set(), although it's
        # impossible to guess whether it's still on that line or has moved on
        # to its leave_g.wait().
        self.assertEqual(len(thread_info), 1)
        thread_id = thread_info[0]
        d = sys._current_frames()
        for tid in d:
            self.assertIsInstance(tid, int)
            self.assertGreater(tid, 0)
        main_id = threading.get_ident()
        self.assertIn(main_id, d)
        self.assertIn(thread_id, d)
        # Verify that the captured main-thread frame is _this_ frame.
        frame = d.pop(main_id)
        self.assertTrue(frame is sys._getframe())
        # Verify that the captured thread frame is blocked in g456, called
        # from f123. This is a little tricky, since various bits of
        # threading.py are also in the thread's call stack.
        frame = d.pop(thread_id)
        stack = traceback.extract_stack(frame)
        for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
            if funcname == "f123":
                break
        else:
            self.fail("didn't find f123() on thread's call stack")
        self.assertEqual(sourceline, "g456()")
        # And the next record must be for g456().
        filename, lineno, funcname, sourceline = stack[i+1]
        self.assertEqual(funcname, "g456")
        self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"])
        # Reap the spawned thread.
        leave_g.set()
        t.join()
    @threading_helper.reap_threads
    def test_current_exceptions(self):
        import threading
        import traceback
        # Spawn a thread that blocks at a known place. Then the main
        # thread does sys._current_exceptions(), and verifies that the
        # exceptions returned make sense.
        entered_g = threading.Event()
        leave_g = threading.Event()
        thread_info = []  # the thread's id

        def f123():
            g456()

        def g456():
            thread_info.append(threading.get_ident())
            entered_g.set()
            # Block while an exception is being handled, so the worker
            # thread has a current exception for the main thread to observe.
            while True:
                try:
                    raise ValueError("oops")
                except ValueError:
                    if leave_g.wait(timeout=support.LONG_TIMEOUT):
                        break

        t = threading.Thread(target=f123)
        t.start()
        entered_g.wait()
        # At this point, t has finished its entered_g.set(), although it's
        # impossible to guess whether it's still on that line or has moved on
        # to its leave_g.wait().
        self.assertEqual(len(thread_info), 1)
        thread_id = thread_info[0]
        d = sys._current_exceptions()
        for tid in d:
            self.assertIsInstance(tid, int)
            self.assertGreater(tid, 0)
        main_id = threading.get_ident()
        self.assertIn(main_id, d)
        self.assertIn(thread_id, d)
        # The main thread is not handling an exception.
        self.assertEqual((None, None, None), d.pop(main_id))
        # Verify that the captured thread frame is blocked in g456, called
        # from f123. This is a little tricky, since various bits of
        # threading.py are also in the thread's call stack.
        exc_type, exc_value, exc_tb = d.pop(thread_id)
        stack = traceback.extract_stack(exc_tb.tb_frame)
        for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
            if funcname == "f123":
                break
        else:
            self.fail("didn't find f123() on thread's call stack")
        self.assertEqual(sourceline, "g456()")
        # And the next record must be for g456().
        filename, lineno, funcname, sourceline = stack[i+1]
        self.assertEqual(funcname, "g456")
        self.assertTrue(sourceline.startswith("if leave_g.wait("))
        # Reap the spawned thread.
        leave_g.set()
        t.join()
    def test_attributes(self):
        # Smoke-test the types and invariants of sys's informational attributes.
        self.assertIsInstance(sys.api_version, int)
        self.assertIsInstance(sys.argv, list)
        for arg in sys.argv:
            self.assertIsInstance(arg, str)
        self.assertIsInstance(sys.orig_argv, list)
        for arg in sys.orig_argv:
            self.assertIsInstance(arg, str)
        self.assertIn(sys.byteorder, ("little", "big"))
        self.assertIsInstance(sys.builtin_module_names, tuple)
        self.assertIsInstance(sys.copyright, str)
        self.assertIsInstance(sys.exec_prefix, str)
        self.assertIsInstance(sys.base_exec_prefix, str)
        self.assertIsInstance(sys.executable, str)
        self.assertEqual(len(sys.float_info), 11)
        self.assertEqual(sys.float_info.radix, 2)
        self.assertEqual(len(sys.int_info), 2)
        self.assertTrue(sys.int_info.bits_per_digit % 5 == 0)
        self.assertTrue(sys.int_info.sizeof_digit >= 1)
        self.assertEqual(type(sys.int_info.bits_per_digit), int)
        self.assertEqual(type(sys.int_info.sizeof_digit), int)
        self.assertIsInstance(sys.hexversion, int)
        self.assertEqual(len(sys.hash_info), 9)
        self.assertLess(sys.hash_info.modulus, 2**sys.hash_info.width)
        # sys.hash_info.modulus should be a prime; we do a quick
        # probable primality test (doesn't exclude the possibility of
        # a Carmichael number)
        for x in range(1, 100):
            self.assertEqual(
                pow(x, sys.hash_info.modulus-1, sys.hash_info.modulus),
                1,
                "sys.hash_info.modulus {} is a non-prime".format(
                    sys.hash_info.modulus)
            )
        self.assertIsInstance(sys.hash_info.inf, int)
        self.assertIsInstance(sys.hash_info.nan, int)
        self.assertIsInstance(sys.hash_info.imag, int)
        algo = sysconfig.get_config_var("Py_HASH_ALGORITHM")
        if sys.hash_info.algorithm in {"fnv", "siphash24"}:
            self.assertIn(sys.hash_info.hash_bits, {32, 64})
            self.assertIn(sys.hash_info.seed_bits, {32, 64, 128})
            if algo == 1:
                self.assertEqual(sys.hash_info.algorithm, "siphash24")
            elif algo == 2:
                self.assertEqual(sys.hash_info.algorithm, "fnv")
            else:
                self.assertIn(sys.hash_info.algorithm, {"fnv", "siphash24"})
        else:
            # PY_HASH_EXTERNAL
            self.assertEqual(algo, 0)
        self.assertGreaterEqual(sys.hash_info.cutoff, 0)
        self.assertLess(sys.hash_info.cutoff, 8)
        self.assertIsInstance(sys.maxsize, int)
        self.assertIsInstance(sys.maxunicode, int)
        self.assertEqual(sys.maxunicode, 0x10FFFF)
        self.assertIsInstance(sys.platform, str)
        self.assertIsInstance(sys.prefix, str)
        self.assertIsInstance(sys.base_prefix, str)
        self.assertIsInstance(sys.platlibdir, str)
        self.assertIsInstance(sys.version, str)
        vi = sys.version_info
        self.assertIsInstance(vi[:], tuple)
        self.assertEqual(len(vi), 5)
        self.assertIsInstance(vi[0], int)
        self.assertIsInstance(vi[1], int)
        self.assertIsInstance(vi[2], int)
        self.assertIn(vi[3], ("alpha", "beta", "candidate", "final"))
        self.assertIsInstance(vi[4], int)
        self.assertIsInstance(vi.major, int)
        self.assertIsInstance(vi.minor, int)
        self.assertIsInstance(vi.micro, int)
        self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final"))
        self.assertIsInstance(vi.serial, int)
        # Named attributes must mirror the positional elements.
        self.assertEqual(vi[0], vi.major)
        self.assertEqual(vi[1], vi.minor)
        self.assertEqual(vi[2], vi.micro)
        self.assertEqual(vi[3], vi.releaselevel)
        self.assertEqual(vi[4], vi.serial)
        self.assertTrue(vi > (1,0,0))
        self.assertIsInstance(sys.float_repr_style, str)
        self.assertIn(sys.float_repr_style, ('short', 'legacy'))
        if not sys.platform.startswith('win'):
            self.assertIsInstance(sys.abiflags, str)
    def test_thread_info(self):
        # sys.thread_info is a 3-tuple with known name/lock values.
        info = sys.thread_info
        self.assertEqual(len(info), 3)
        self.assertIn(info.name, ('nt', 'pthread', 'solaris', None))
        self.assertIn(info.lock, ('semaphore', 'mutex+cond', None))
    def test_43581(self):
        # __stdout__ and __stderr__ must share the same encoding.
        # Can't use sys.stdout, as this is a StringIO object when
        # the test runs under regrtest.
        self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding)
    def test_intern(self):
        # Interning an equal string must return the identical object; a fresh
        # string per run (via INTERN_NUMRUNS) avoids cross-run interference.
        global INTERN_NUMRUNS
        INTERN_NUMRUNS += 1
        self.assertRaises(TypeError, sys.intern)
        s = "never interned before" + str(INTERN_NUMRUNS)
        self.assertTrue(sys.intern(s) is s)
        s2 = s.swapcase().swapcase()
        self.assertTrue(sys.intern(s2) is s)
        # Subclasses of string can't be interned, because they
        # provide too much opportunity for insane things to happen.
        # We don't want them in the interned dict and if they aren't
        # actually interned, we don't want to create the appearance
        # that they are by allowing intern() to succeed.
        class S(str):
            def __hash__(self):
                return 123

        self.assertRaises(TypeError, sys.intern, S("abc"))
    def test_sys_flags(self):
        # Every documented flag attribute must exist with the expected type.
        self.assertTrue(sys.flags)
        attrs = ("debug",
                 "inspect", "interactive", "optimize",
                 "dont_write_bytecode", "no_user_site", "no_site",
                 "ignore_environment", "verbose", "bytes_warning", "quiet",
                 "hash_randomization", "isolated", "dev_mode", "utf8_mode")
        for attr in attrs:
            self.assertTrue(hasattr(sys.flags, attr), attr)
            # dev_mode is the only bool-typed flag; the rest are ints.
            attr_type = bool if attr == "dev_mode" else int
            self.assertEqual(type(getattr(sys.flags, attr)), attr_type, attr)
        self.assertTrue(repr(sys.flags))
        self.assertEqual(len(sys.flags), len(attrs))
        self.assertIn(sys.flags.utf8_mode, {0, 1, 2})
    def assert_raise_on_new_sys_type(self, sys_attr):
        # Users are intentionally prevented from creating new instances of
        # sys.flags, sys.version_info, and sys.getwindowsversion.
        attr_type = type(sys_attr)
        with self.assertRaises(TypeError):
            attr_type()
        with self.assertRaises(TypeError):
            attr_type.__new__(attr_type)
    def test_sys_flags_no_instantiation(self):
        # sys.flags' type must not be instantiable by users.
        self.assert_raise_on_new_sys_type(sys.flags)
    def test_sys_version_info_no_instantiation(self):
        # sys.version_info's type must not be instantiable by users.
        self.assert_raise_on_new_sys_type(sys.version_info)
    def test_sys_getwindowsversion_no_instantiation(self):
        # Skip if not being run on Windows.
        test.support.get_attribute(sys, "getwindowsversion")
        self.assert_raise_on_new_sys_type(sys.getwindowsversion())
    @test.support.cpython_only
    def test_clear_type_cache(self):
        # Must not crash; the cache contents are an implementation detail.
        sys._clear_type_cache()
def test_ioencoding(self):
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
expected = ("\xa2" + os.linesep).encode("cp424")
self.assertEqual(out, expected)
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'?')
env["PYTHONIOENCODING"] = "ascii"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = "ascii:"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = ":surrogateescape"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xdcbd))'],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'\xbd')
    @unittest.skipUnless(os_helper.FS_NONASCII,
                         'requires OS support of non-ASCII encodings')
    @unittest.skipUnless(sys.getfilesystemencoding() == locale.getpreferredencoding(False),
                         'requires FS encoding to match locale')
    def test_ioencoding_nonascii(self):
        # Empty PYTHONIOENCODING falls back to the locale encoding, so a
        # filesystem-encodable non-ASCII character round-trips via stdout.
        env = dict(os.environ)
        env["PYTHONIOENCODING"] = ""
        p = subprocess.Popen([sys.executable, "-c",
                              'print(%a)' % os_helper.FS_NONASCII],
                             stdout=subprocess.PIPE, env=env)
        out = p.communicate()[0].strip()
        self.assertEqual(out, os.fsencode(os_helper.FS_NONASCII))
    @unittest.skipIf(sys.base_prefix != sys.prefix,
                     'Test is not venv-compatible')
    def test_executable(self):
        # sys.executable should be absolute
        self.assertEqual(os.path.abspath(sys.executable), sys.executable)

        # Issue #7774: Ensure that sys.executable is an empty string if argv[0]
        # has been set to a non existent program name and Python is unable to
        # retrieve the real program name
        # For a normal installation, it should work without 'cwd'
        # argument. For test runs in the build directory, see #7774.
        python_dir = os.path.dirname(os.path.realpath(sys.executable))
        p = subprocess.Popen(
            ["nonexistent", "-c",
             'import sys; print(sys.executable.encode("ascii", "backslashreplace"))'],
            executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir)
        stdout = p.communicate()[0]
        executable = stdout.strip().decode("ASCII")
        p.wait()
        # Either empty (name unresolvable) or the real interpreter path.
        self.assertIn(executable, ["b''", repr(sys.executable.encode("ascii", "backslashreplace"))])
def check_fsencoding(self, fs_encoding, expected=None):
self.assertIsNotNone(fs_encoding)
codecs.lookup(fs_encoding)
if expected:
self.assertEqual(fs_encoding, expected)
def test_getfilesystemencoding(self):
fs_encoding = sys.getfilesystemencoding()
if sys.platform == 'darwin':
expected = 'utf-8'
else:
expected = None
self.check_fsencoding(fs_encoding, expected)
def c_locale_get_error_handler(self, locale, isolated=False, encoding=None):
# Force the POSIX locale
env = os.environ.copy()
env["LC_ALL"] = locale
env["PYTHONCOERCECLOCALE"] = "0"
code = '\n'.join((
'import sys',
'def dump(name):',
' std = getattr(sys, name)',
' print("%s: %s" % (name, std.errors))',
'dump("stdin")',
'dump("stdout")',
'dump("stderr")',
))
args = [sys.executable, "-X", "utf8=0", "-c", code]
if isolated:
args.append("-I")
if encoding is not None:
env['PYTHONIOENCODING'] = encoding
else:
env.pop('PYTHONIOENCODING', None)
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env,
universal_newlines=True)
stdout, stderr = p.communicate()
return stdout
    def check_locale_surrogateescape(self, locale):
        """Under a POSIX/C locale the std streams default to surrogateescape
        (backslashreplace on stderr); PYTHONIOENCODING overrides as shown."""
        out = self.c_locale_get_error_handler(locale, isolated=True)
        self.assertEqual(out,
                         'stdin: surrogateescape\n'
                         'stdout: surrogateescape\n'
                         'stderr: backslashreplace\n')

        # replace the default error handler
        out = self.c_locale_get_error_handler(locale, encoding=':ignore')
        self.assertEqual(out,
                         'stdin: ignore\n'
                         'stdout: ignore\n'
                         'stderr: backslashreplace\n')

        # force the encoding (no handler given => strict)
        out = self.c_locale_get_error_handler(locale, encoding='iso8859-1')
        self.assertEqual(out,
                         'stdin: strict\n'
                         'stdout: strict\n'
                         'stderr: backslashreplace\n')
        out = self.c_locale_get_error_handler(locale, encoding='iso8859-1:')
        self.assertEqual(out,
                         'stdin: strict\n'
                         'stdout: strict\n'
                         'stderr: backslashreplace\n')

        # empty encoding and handler fields have no effect
        out = self.c_locale_get_error_handler(locale, encoding=':')
        self.assertEqual(out,
                         'stdin: surrogateescape\n'
                         'stdout: surrogateescape\n'
                         'stderr: backslashreplace\n')
        out = self.c_locale_get_error_handler(locale, encoding='')
        self.assertEqual(out,
                         'stdin: surrogateescape\n'
                         'stdout: surrogateescape\n'
                         'stderr: backslashreplace\n')
    def test_c_locale_surrogateescape(self):
        # C locale: default stream error handlers must be surrogateescape.
        self.check_locale_surrogateescape('C')
    def test_posix_locale_surrogateescape(self):
        # POSIX locale: same expectations as the C locale.
        self.check_locale_surrogateescape('POSIX')
def test_implementation(self):
# This test applies to all implementations equally.
levels = {'alpha': 0xA, 'beta': 0xB, 'candidate': 0xC, 'final': 0xF}
self.assertTrue(hasattr(sys.implementation, 'name'))
self.assertTrue(hasattr(sys.implementation, 'version'))
self.assertTrue(hasattr(sys.implementation, 'hexversion'))
self.assertTrue(hasattr(sys.implementation, 'cache_tag'))
version = sys.implementation.version
self.assertEqual(version[:2], (version.major, version.minor))
hexversion = (version.major << 24 | version.minor << 16 |
version.micro << 8 | levels[version.releaselevel] << 4 |
version.serial << 0)
self.assertEqual(sys.implementation.hexversion, hexversion)
# PEP 421 requires that .name be lower case.
self.assertEqual(sys.implementation.name,
sys.implementation.name.lower())
    @test.support.cpython_only
    def test_debugmallocstats(self):
        # Test sys._debugmallocstats(): it writes allocator stats to stderr.
        from test.support.script_helper import assert_python_ok
        args = ['-c', 'import sys; sys._debugmallocstats()']
        ret, out, err = assert_python_ok(*args)
        self.assertIn(b"free PyDictObjects", err)

        # The function has no parameter
        self.assertRaises(TypeError, sys._debugmallocstats, True)
    @unittest.skipUnless(hasattr(sys, "getallocatedblocks"),
                         "sys.getallocatedblocks unavailable on this build")
    def test_getallocatedblocks(self):
        # Determine whether pymalloc is the active allocator; the bounds we
        # can assert on getallocatedblocks() depend on it.
        try:
            import _testcapi
        except ImportError:
            with_pymalloc = support.with_pymalloc()
        else:
            try:
                alloc_name = _testcapi.pymem_getallocatorsname()
            except RuntimeError as exc:
                # "cannot get allocators name" (ex: tracemalloc is used)
                with_pymalloc = True
            else:
                with_pymalloc = (alloc_name in ('pymalloc', 'pymalloc_debug'))

        # Some sanity checks
        a = sys.getallocatedblocks()
        self.assertIs(type(a), int)
        if with_pymalloc:
            self.assertGreater(a, 0)
        else:
            # When WITH_PYMALLOC isn't available, we don't know anything
            # about the underlying implementation: the function might
            # return 0 or something greater.
            self.assertGreaterEqual(a, 0)
        try:
            # While we could imagine a Python session where the number of
            # multiple buffer objects would exceed the sharing of references,
            # it is unlikely to happen in a normal test run.
            self.assertLess(a, sys.gettotalrefcount())
        except AttributeError:
            # gettotalrefcount() not available
            pass
        gc.collect()
        b = sys.getallocatedblocks()
        self.assertLessEqual(b, a)
        gc.collect()
        c = sys.getallocatedblocks()
        # Allow slack: unrelated allocations can occur between the calls.
        self.assertIn(c, range(b - 50, b + 50))
    def test_is_finalizing(self):
        # In a running interpreter is_finalizing() is False; in a late
        # destructor during shutdown it must be True.
        self.assertIs(sys.is_finalizing(), False)
        # Don't use the atexit module because _Py_Finalizing is only set
        # after calling atexit callbacks
        code = """if 1:
            import sys

            class AtExit:
                is_finalizing = sys.is_finalizing
                print = print

                def __del__(self):
                    self.print(self.is_finalizing(), flush=True)

            # Keep a reference in the __main__ module namespace, so the
            # AtExit destructor will be called at Python exit
            ref = AtExit()
        """
        rc, stdout, stderr = assert_python_ok('-c', code)
        self.assertEqual(stdout.rstrip(), b'True')
    def test_issue20602(self):
        # sys.flags and sys.float_info were wiped during shutdown.
        code = """if 1:
            import sys
            class A:
                def __del__(self, sys=sys):
                    print(sys.flags)
                    print(sys.float_info)
            a = A()
            """
        rc, out, err = assert_python_ok('-c', code)
        out = out.splitlines()
        # Both must still print meaningfully from a shutdown-time __del__.
        self.assertIn(b'sys.flags', out[0])
        self.assertIn(b'sys.float_info', out[1])
    def test_sys_ignores_cleaning_up_user_data(self):
        # A failing __del__ on an object hung off the sys module must not
        # produce output or a non-zero exit code at shutdown.
        code = """if 1:
            import struct, sys

            class C:
                def __init__(self):
                    self.pack = struct.pack

                def __del__(self):
                    self.pack('I', -42)

            sys.x = C()
            """
        rc, stdout, stderr = assert_python_ok('-c', code)
        self.assertEqual(rc, 0)
        self.assertEqual(stdout.rstrip(), b"")
        self.assertEqual(stderr.rstrip(), b"")
    @unittest.skipUnless(hasattr(sys, 'getandroidapilevel'),
                         'need sys.getandroidapilevel()')
    def test_getandroidapilevel(self):
        # Android-only: the API level is a positive integer.
        level = sys.getandroidapilevel()
        self.assertIsInstance(level, int)
        self.assertGreater(level, 0)
    def test_sys_tracebacklimit(self):
        # sys.tracebacklimit truncates the traceback a child interpreter
        # prints; 0/negative keeps only the exception line, None restores
        # the full traceback.
        code = """if 1:
            import sys
            def f1():
                1 / 0
            def f2():
                f1()
            sys.tracebacklimit = %r
            f2()
        """
        def check(tracebacklimit, expected):
            # Run the snippet with the given limit and compare stderr lines.
            p = subprocess.Popen([sys.executable, '-c', code % tracebacklimit],
                                 stderr=subprocess.PIPE)
            out = p.communicate()[1]
            self.assertEqual(out.splitlines(), expected)

        traceback = [
            b'Traceback (most recent call last):',
            b'  File "<string>", line 8, in <module>',
            b'  File "<string>", line 6, in f2',
            b'  File "<string>", line 4, in f1',
            b'ZeroDivisionError: division by zero'
        ]
        check(10, traceback)
        check(3, traceback)
        check(2, traceback[:1] + traceback[2:])
        check(1, traceback[:1] + traceback[3:])
        check(0, [traceback[-1]])
        check(-1, [traceback[-1]])
        check(1<<1000, traceback)
        check(-1<<1000, [traceback[-1]])
        check(None, traceback)
def test_no_duplicates_in_meta_path(self):
self.assertEqual(len(sys.meta_path), len(set(sys.meta_path)))
    @unittest.skipUnless(hasattr(sys, "_enablelegacywindowsfsencoding"),
                         'needs sys._enablelegacywindowsfsencoding()')
    def test__enablelegacywindowsfsencoding(self):
        # Windows-only: switching to the legacy FS encoding yields
        # mbcs/replace in a child interpreter.
        code = ('import sys',
                'sys._enablelegacywindowsfsencoding()',
                'print(sys.getfilesystemencoding(), sys.getfilesystemencodeerrors())')
        rc, out, err = assert_python_ok('-c', '; '.join(code))
        out = out.decode('ascii', 'replace').rstrip()
        self.assertEqual(out, 'mbcs replace')
    def test_orig_argv(self):
        # sys.orig_argv preserves the full interpreter command line, while
        # sys.argv reflects only the script's view.
        code = textwrap.dedent('''
            import sys
            print(sys.argv)
            print(sys.orig_argv)
        ''')
        args = [sys.executable, '-I', '-X', 'utf8', '-c', code, 'arg']
        proc = subprocess.run(args, check=True, capture_output=True, text=True)
        expected = [
            repr(['-c', 'arg']),  # sys.argv
            repr(args),  # sys.orig_argv
        ]
        self.assertEqual(proc.stdout.rstrip().splitlines(), expected,
                         proc)
@test.support.cpython_only
class UnraisableHookTest(unittest.TestCase):
    """Tests for sys.unraisablehook and the default sys.__unraisablehook__."""

    def write_unraisable_exc(self, exc, err_msg, obj):
        """Fire an unraisable exception via _testcapi and return the
        argument namespace the installed hook should have received."""
        import _testcapi
        import types
        err_msg2 = f"Exception ignored {err_msg}"
        try:
            _testcapi.write_unraisable_exc(exc, err_msg, obj)
            return types.SimpleNamespace(exc_type=type(exc),
                                         exc_value=exc,
                                         exc_traceback=exc.__traceback__,
                                         err_msg=err_msg2,
                                         object=obj)
        finally:
            # Explicitly break any reference cycle
            exc = None

    def test_original_unraisablehook(self):
        # The default hook prints "Exception ignored ..." plus the
        # traceback to stderr.
        for err_msg in (None, "original hook"):
            with self.subTest(err_msg=err_msg):
                obj = "an object"
                with test.support.captured_output("stderr") as stderr:
                    with test.support.swap_attr(sys, 'unraisablehook',
                                                sys.__unraisablehook__):
                        self.write_unraisable_exc(ValueError(42), err_msg, obj)
                err = stderr.getvalue()
                if err_msg is not None:
                    self.assertIn(f'Exception ignored {err_msg}: {obj!r}\n', err)
                else:
                    self.assertIn(f'Exception ignored in: {obj!r}\n', err)
                self.assertIn('Traceback (most recent call last):\n', err)
                self.assertIn('ValueError: 42\n', err)

    def test_original_unraisablehook_err(self):
        # bpo-22836: PyErr_WriteUnraisable() should give sensible reports
        class BrokenDel:
            def __del__(self):
                exc = ValueError("del is broken")
                # The following line is included in the traceback report:
                raise exc

        class BrokenStrException(Exception):
            def __str__(self):
                raise Exception("str() is broken")

        class BrokenExceptionDel:
            def __del__(self):
                exc = BrokenStrException()
                # The following line is included in the traceback report:
                raise exc

        for test_class in (BrokenDel, BrokenExceptionDel):
            with self.subTest(test_class):
                obj = test_class()
                with test.support.captured_stderr() as stderr, \
                     test.support.swap_attr(sys, 'unraisablehook',
                                            sys.__unraisablehook__):
                    # Trigger obj.__del__()
                    del obj
                report = stderr.getvalue()
                self.assertIn("Exception ignored", report)
                self.assertIn(test_class.__del__.__qualname__, report)
                self.assertIn("test_sys.py", report)
                self.assertIn("raise exc", report)
                if test_class is BrokenExceptionDel:
                    self.assertIn("BrokenStrException", report)
                    # A failing __str__ is reported with a placeholder.
                    self.assertIn("<exception str() failed>", report)
                else:
                    self.assertIn("ValueError", report)
                    self.assertIn("del is broken", report)
                self.assertTrue(report.endswith("\n"))

    def test_original_unraisablehook_wrong_type(self):
        # The default hook requires an UnraisableHookArgs instance, not a
        # bare exception object.
        exc = ValueError(42)
        with test.support.swap_attr(sys, 'unraisablehook',
                                    sys.__unraisablehook__):
            with self.assertRaises(TypeError):
                sys.unraisablehook(exc)

    def test_custom_unraisablehook(self):
        # A user-installed hook receives the full argument namespace.
        hook_args = None

        def hook_func(args):
            nonlocal hook_args
            hook_args = args

        obj = object()
        try:
            with test.support.swap_attr(sys, 'unraisablehook', hook_func):
                expected = self.write_unraisable_exc(ValueError(42),
                                                     "custom hook", obj)
            for attr in "exc_type exc_value exc_traceback err_msg object".split():
                self.assertEqual(getattr(hook_args, attr),
                                 getattr(expected, attr),
                                 (hook_args, expected))
        finally:
            # expected and hook_args contain an exception: break reference cycle
            expected = None
            hook_args = None

    def test_custom_unraisablehook_fail(self):
        # If the custom hook itself raises, the failure is reported via the
        # default machinery.
        def hook_func(*args):
            raise Exception("hook_func failed")

        with test.support.captured_output("stderr") as stderr:
            with test.support.swap_attr(sys, 'unraisablehook', hook_func):
                self.write_unraisable_exc(ValueError(42),
                                          "custom hook fail", None)

        err = stderr.getvalue()
        self.assertIn(f'Exception ignored in sys.unraisablehook: '
                      f'{hook_func!r}\n',
                      err)
        self.assertIn('Traceback (most recent call last):\n', err)
        self.assertIn('Exception: hook_func failed\n', err)
@test.support.cpython_only
class SizeofTest(unittest.TestCase):
def setUp(self):
self.P = struct.calcsize('P')
self.longdigit = sys.int_info.sizeof_digit
import _testinternalcapi
self.gc_headsize = _testinternalcapi.SIZEOF_PYGC_HEAD
check_sizeof = test.support.check_sizeof
def test_gc_head_size(self):
# Check that the gc header size is added to objects tracked by the gc.
vsize = test.support.calcvobjsize
gc_header_size = self.gc_headsize
# bool objects are not gc tracked
self.assertEqual(sys.getsizeof(True), vsize('') + self.longdigit)
# but lists are
self.assertEqual(sys.getsizeof([]), vsize('Pn') + gc_header_size)
def test_errors(self):
class BadSizeof:
def __sizeof__(self):
raise ValueError
self.assertRaises(ValueError, sys.getsizeof, BadSizeof())
class InvalidSizeof:
def __sizeof__(self):
return None
self.assertRaises(TypeError, sys.getsizeof, InvalidSizeof())
sentinel = ["sentinel"]
self.assertIs(sys.getsizeof(InvalidSizeof(), sentinel), sentinel)
class FloatSizeof:
def __sizeof__(self):
return 4.5
self.assertRaises(TypeError, sys.getsizeof, FloatSizeof())
self.assertIs(sys.getsizeof(FloatSizeof(), sentinel), sentinel)
class OverflowSizeof(int):
def __sizeof__(self):
return int(self)
self.assertEqual(sys.getsizeof(OverflowSizeof(sys.maxsize)),
sys.maxsize + self.gc_headsize)
with self.assertRaises(OverflowError):
sys.getsizeof(OverflowSizeof(sys.maxsize + 1))
with self.assertRaises(ValueError):
sys.getsizeof(OverflowSizeof(-1))
with self.assertRaises((ValueError, OverflowError)):
sys.getsizeof(OverflowSizeof(-sys.maxsize - 1))
def test_default(self):
size = test.support.calcvobjsize
self.assertEqual(sys.getsizeof(True), size('') + self.longdigit)
self.assertEqual(sys.getsizeof(True, -1), size('') + self.longdigit)
def test_objecttypes(self):
# check all types defined in Objects/
calcsize = struct.calcsize
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# bool
check(True, vsize('') + self.longdigit)
# buffer
# XXX
# builtin_function_or_method
check(len, size('5P'))
# bytearray
samples = [b'', b'u'*100000]
for sample in samples:
x = bytearray(sample)
check(x, vsize('n2Pi') + x.__alloc__())
# bytearray_iterator
check(iter(bytearray()), size('nP'))
# bytes
check(b'', vsize('n') + 1)
check(b'x' * 10, vsize('n') + 11)
# cell
def get_cell():
x = 42
def inner():
return x
return inner
check(get_cell().__closure__[0], size('P'))
# code
def check_code_size(a, expected_size):
self.assertGreaterEqual(sys.getsizeof(a), expected_size)
check_code_size(get_cell().__code__, size('6i13P'))
check_code_size(get_cell.__code__, size('6i13P'))
def get_cell2(x):
def inner():
return x
return inner
check_code_size(get_cell2.__code__, size('6i13P') + calcsize('n'))
# complex
check(complex(0,1), size('2d'))
# method_descriptor (descriptor object)
check(str.lower, size('3PPP'))
# classmethod_descriptor (descriptor object)
# XXX
# member_descriptor (descriptor object)
import datetime
check(datetime.timedelta.days, size('3PP'))
# getset_descriptor (descriptor object)
import collections
check(collections.defaultdict.default_factory, size('3PP'))
# wrapper_descriptor (descriptor object)
check(int.__add__, size('3P2P'))
# method-wrapper (descriptor object)
check({}.__iter__, size('2P'))
# empty dict
check({}, size('nQ2P'))
# dict
check({"a": 1}, size('nQ2P') + calcsize('2nP2n') + 8 + (8*2//3)*calcsize('n2P'))
longdict = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
check(longdict, size('nQ2P') + calcsize('2nP2n') + 16 + (16*2//3)*calcsize('n2P'))
# dictionary-keyview
check({}.keys(), size('P'))
# dictionary-valueview
check({}.values(), size('P'))
# dictionary-itemview
check({}.items(), size('P'))
# dictionary iterator
check(iter({}), size('P2nPn'))
# dictionary-keyiterator
check(iter({}.keys()), size('P2nPn'))
# dictionary-valueiterator
check(iter({}.values()), size('P2nPn'))
# dictionary-itemiterator
check(iter({}.items()), size('P2nPn'))
# dictproxy
class C(object): pass
check(C.__dict__, size('P'))
# BaseException
check(BaseException(), size('5Pb'))
# UnicodeEncodeError
check(UnicodeEncodeError("", "", 0, 0, ""), size('5Pb 2P2nP'))
# UnicodeDecodeError
check(UnicodeDecodeError("", b"", 0, 0, ""), size('5Pb 2P2nP'))
# UnicodeTranslateError
check(UnicodeTranslateError("", 0, 1, ""), size('5Pb 2P2nP'))
# ellipses
check(Ellipsis, size(''))
# EncodingMap
import codecs, encodings.iso8859_3
x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
check(x, size('32B2iB'))
# enumerate
check(enumerate([]), size('n3P'))
# reverse
check(reversed(''), size('nP'))
# float
check(float(0), size('d'))
# sys.floatinfo
check(sys.float_info, vsize('') + self.P * len(sys.float_info))
# frame
import inspect
CO_MAXBLOCKS = 20
x = inspect.currentframe()
ncells = len(x.f_code.co_cellvars)
nfrees = len(x.f_code.co_freevars)
extras = x.f_code.co_stacksize + x.f_code.co_nlocals +\
ncells + nfrees - 1
check(x, vsize('4Pi2c4P3ic' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
# function
def func(): pass
check(func, size('13P'))
class c():
@staticmethod
def foo():
pass
@classmethod
def bar(cls):
pass
# staticmethod
check(foo, size('PP'))
# classmethod
check(bar, size('PP'))
# generator
def get_gen(): yield 1
check(get_gen(), size('P2PPP4P'))
# iterator
check(iter('abc'), size('lP'))
# callable-iterator
import re
check(re.finditer('',''), size('2P'))
# list
samples = [[], [1,2,3], ['1', '2', '3']]
for sample in samples:
check(list(sample), vsize('Pn') + len(sample)*self.P)
# sortwrapper (list)
# XXX
# cmpwrapper (list)
# XXX
# listiterator (list)
check(iter([]), size('lP'))
# listreverseiterator (list)
check(reversed([]), size('nP'))
# int
check(0, vsize(''))
check(1, vsize('') + self.longdigit)
check(-1, vsize('') + self.longdigit)
PyLong_BASE = 2**sys.int_info.bits_per_digit
check(int(PyLong_BASE), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2-1), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2), vsize('') + 3*self.longdigit)
# module
check(unittest, size('PnPPP'))
# None
check(None, size(''))
# NotImplementedType
check(NotImplemented, size(''))
# object
check(object(), size(''))
# property (descriptor object)
class C(object):
def getx(self): return self.__x
def setx(self, value): self.__x = value
def delx(self): del self.__x
x = property(getx, setx, delx, "")
check(x, size('4Pi'))
# PyCapsule
# XXX
# rangeiterator
check(iter(range(1)), size('4l'))
# reverse
check(reversed(''), size('nP'))
# range
check(range(1), size('4P'))
check(range(66000), size('4P'))
# set
# frozenset
PySet_MINSIZE = 8
samples = [[], range(10), range(50)]
s = size('3nP' + PySet_MINSIZE*'nP' + '2nP')
for sample in samples:
minused = len(sample)
if minused == 0: tmp = 1
# the computation of minused is actually a bit more complicated
# but this suffices for the sizeof test
minused = minused*2
newsize = PySet_MINSIZE
while newsize <= minused:
newsize = newsize << 1
if newsize <= 8:
check(set(sample), s)
check(frozenset(sample), s)
else:
check(set(sample), s + newsize*calcsize('nP'))
check(frozenset(sample), s + newsize*calcsize('nP'))
# setiterator
check(iter(set()), size('P3n'))
# slice
check(slice(0), size('3P'))
# super
check(super(int), size('3P'))
# tuple
check((), vsize(''))
check((1,2,3), vsize('') + 3*self.P)
# type
# static type: PyTypeObject
fmt = 'P2nPI13Pl4Pn9Pn11PIPP'
s = vsize(fmt)
check(int, s)
# class
s = vsize(fmt + # PyTypeObject
'4P' # PyAsyncMethods
'36P' # PyNumberMethods
'3P' # PyMappingMethods
'10P' # PySequenceMethods
'2P' # PyBufferProcs
'5P')
class newstyleclass(object): pass
# Separate block for PyDictKeysObject with 8 keys and 5 entries
check(newstyleclass, s + calcsize("2nP2n0P") + 8 + 5*calcsize("n2P"))
# dict with shared keys
check(newstyleclass().__dict__, size('nQ2P') + 5*self.P)
o = newstyleclass()
o.a = o.b = o.c = o.d = o.e = o.f = o.g = o.h = 1
# Separate block for PyDictKeysObject with 16 keys and 10 entries
check(newstyleclass, s + calcsize("2nP2n0P") + 16 + 10*calcsize("n2P"))
# dict with shared keys
check(newstyleclass().__dict__, size('nQ2P') + 10*self.P)
# unicode
# each tuple contains a string and its expected character size
# don't put any static strings here, as they may contain
# wchar_t or UTF-8 representations
samples = ['1'*100, '\xff'*50,
'\u0100'*40, '\uffff'*100,
'\U00010000'*30, '\U0010ffff'*100]
asciifields = "nnbP"
compactfields = asciifields + "nPn"
unicodefields = compactfields + "P"
for s in samples:
maxchar = ord(max(s))
if maxchar < 128:
L = size(asciifields) + len(s) + 1
elif maxchar < 256:
L = size(compactfields) + len(s) + 1
elif maxchar < 65536:
L = size(compactfields) + 2*(len(s) + 1)
else:
L = size(compactfields) + 4*(len(s) + 1)
check(s, L)
# verify that the UTF-8 size is accounted for
s = chr(0x4000) # 4 bytes canonical representation
check(s, size(compactfields) + 4)
# compile() will trigger the generation of the UTF-8
# representation as a side effect
compile(s, "<stdin>", "eval")
check(s, size(compactfields) + 4 + 4)
# TODO: add check that forces the presence of wchar_t representation
# TODO: add check that forces layout of unicodefields
# weakref
import weakref
check(weakref.ref(int), size('2Pn2P'))
# weakproxy
# XXX
# weakcallableproxy
check(weakref.proxy(int), size('2Pn2P'))
def check_slots(self, obj, base, extra):
expected = sys.getsizeof(base) + struct.calcsize(extra)
if gc.is_tracked(obj) and not gc.is_tracked(base):
expected += self.gc_headsize
self.assertEqual(sys.getsizeof(obj), expected)
def test_slots(self):
# check all subclassable types defined in Objects/ that allow
# non-empty __slots__
check = self.check_slots
class BA(bytearray):
__slots__ = 'a', 'b', 'c'
check(BA(), bytearray(), '3P')
class D(dict):
__slots__ = 'a', 'b', 'c'
check(D(x=[]), {'x': []}, '3P')
class L(list):
__slots__ = 'a', 'b', 'c'
check(L(), [], '3P')
class S(set):
__slots__ = 'a', 'b', 'c'
check(S(), set(), '3P')
class FS(frozenset):
__slots__ = 'a', 'b', 'c'
check(FS(), frozenset(), '3P')
from collections import OrderedDict
class OD(OrderedDict):
__slots__ = 'a', 'b', 'c'
check(OD(x=[]), OrderedDict(x=[]), '3P')
def test_pythontypes(self):
# check all types defined in Python/
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# _ast.AST
import _ast
check(_ast.AST(), size('P'))
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
# traceback
if tb is not None:
check(tb, size('2P2i'))
# symtable entry
# XXX
# sys.flags
check(sys.flags, vsize('') + self.P * len(sys.flags))
def test_asyncgen_hooks(self):
old = sys.get_asyncgen_hooks()
self.assertIsNone(old.firstiter)
self.assertIsNone(old.finalizer)
firstiter = lambda *a: None
sys.set_asyncgen_hooks(firstiter=firstiter)
hooks = sys.get_asyncgen_hooks()
self.assertIs(hooks.firstiter, firstiter)
self.assertIs(hooks[0], firstiter)
self.assertIs(hooks.finalizer, None)
self.assertIs(hooks[1], None)
finalizer = lambda *a: None
sys.set_asyncgen_hooks(finalizer=finalizer)
hooks = sys.get_asyncgen_hooks()
self.assertIs(hooks.firstiter, firstiter)
self.assertIs(hooks[0], firstiter)
self.assertIs(hooks.finalizer, finalizer)
self.assertIs(hooks[1], finalizer)
sys.set_asyncgen_hooks(*old)
cur = sys.get_asyncgen_hooks()
self.assertIsNone(cur.firstiter)
self.assertIsNone(cur.finalizer)
# Script entry point: run this module's tests under unittest's CLI.
if __name__ == "__main__":
    unittest.main()
|
mp.py | import os
import psutil
import sys
from multiprocessing import Process, Lock, Event as ProcessEvent
from multiprocessing.pool import ThreadPool
from threading import Thread, Event as TrEvent
from time import sleep
from typing import List, Dict
from ..py3_interop import AbstractContextManager
try:
from multiprocessing import SimpleQueue
except ImportError: # noqa
from multiprocessing.queues import SimpleQueue
class SingletonThreadPool(object):
    """Process-wide, lazily created single-worker ThreadPool.

    The pool is keyed by the creating process id, so after a fork the
    child transparently gets a fresh pool of its own on first use.
    """
    __lock = None
    __thread_pool = None
    __thread_pool_pid = None

    @classmethod
    def get(cls):
        """Return this process's shared ThreadPool, creating it if needed."""
        current_pid = os.getpid()
        if current_pid != cls.__thread_pool_pid:
            # First call in this process (or we were forked): build a new pool.
            cls.__thread_pool = ThreadPool(1)
            cls.__thread_pool_pid = current_pid
        return cls.__thread_pool
class SafeQueue(object):
    """SimpleQueue wrapper whose put() never blocks the calling thread."""
    __thread_pool = SingletonThreadPool()

    def __init__(self, *args, **kwargs):
        self._q = SimpleQueue(*args, **kwargs)

    def empty(self):
        """True when the underlying queue currently holds no items."""
        return self._q.empty()

    def get(self):
        """Blocking get from the underlying queue."""
        return self._q.get()

    def put(self, obj):
        # Hand the (potentially blocking) put to the background thread pool
        # so the caller returns immediately.
        pool = SafeQueue.__thread_pool.get()
        pool.apply_async(self._q.put, args=(obj, ))
class SafeEvent(object):
    """multiprocessing Event wrapper that guards set() in subprocess mode."""
    __thread_pool = SingletonThreadPool()

    def __init__(self):
        self._event = ProcessEvent()

    def is_set(self):
        """True if the event flag is currently set."""
        return self._event.is_set()

    def set(self):
        # In subprocess mode, avoid touching the event once the monitoring
        # subprocess is dead (nobody is left to consume it).
        if BackgroundMonitor.is_subprocess_enabled() and not BackgroundMonitor.is_subprocess_alive():
            return
        self._event.set()

    def clear(self):
        """Reset the event flag."""
        return self._event.clear()

    def wait(self, timeout=None):
        """Block until the flag is set or *timeout* seconds pass."""
        return self._event.wait(timeout=timeout)
class SingletonLock(AbstractContextManager):
    """Lazily created multiprocessing Lock usable as a context manager.

    Every instance registers itself so instantiate() can create the real
    Lock objects for all of them at a safe point (e.g. before forking).
    """
    _instances = []

    def __init__(self):
        # The real Lock is created lazily by create()/acquire().
        self._lock = None
        SingletonLock._instances.append(self)

    def acquire(self, *args, **kwargs):
        """Create the underlying Lock on first use, then acquire it."""
        self.create()
        return self._lock.acquire(*args, **kwargs)

    def release(self, *args, **kwargs):
        """Release the Lock; a no-op if it was never created."""
        if self._lock is None:
            return None
        return self._lock.release(*args, **kwargs)

    def create(self):
        """Instantiate the underlying multiprocessing Lock exactly once."""
        if self._lock is None:
            self._lock = Lock()

    @classmethod
    def instantiate(cls):
        """Eagerly create the Lock for every registered instance."""
        for i in cls._instances:
            i.create()

    def __enter__(self):
        """Return `self` upon entering the runtime context."""
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """Release the lock; any in-flight exception propagates.

        BUG FIX: this used to do ``raise (exc_type, exc_value, traceback)``,
        which raises TypeError in Python 3 (a tuple is not an exception) and
        masked the real error. Returning a falsy value makes the interpreter
        re-raise the original exception itself.
        """
        self.release()
        return False
class BackgroundMonitor(object):
    """Periodic background worker: runs _daemon_step() every wait_period
    seconds, either on a local daemon thread or inside one monitoring
    subprocess shared by all monitors of a task.
    """
    # If we will need multiple monitoring contexts (i.e. subprocesses)
    # this will become a dict
    _main_process = None            # the single monitoring subprocess, if any
    _parent_pid = None              # pid of the process that spawned it
    _sub_process_started = None     # SafeEvent set once the subprocess is up
    _instances = {}  # type: Dict[int, List[BackgroundMonitor]]

    def __init__(self, task, wait_period):
        # task: project Task object (only is_main_task() and id() used here);
        # wait_period: seconds between _daemon_step() calls.
        self._event = TrEvent()       # signals the daemon loop to stop
        self._done_ev = TrEvent()     # set once the daemon loop has exited
        self._start_ev = TrEvent()    # set once the daemon loop has started
        self._task_pid = os.getpid()
        # _thread is None (not started), True (armed, no Thread yet),
        # or the running Thread object.
        self._thread = None
        self._wait_timeout = wait_period
        # None = main task (subprocess-mode candidate); False = thread mode.
        self._subprocess = None if task.is_main_task() else False
        self._task_obj_id = id(task)

    def start(self):
        """Arm the monitor: start the thread now (thread mode) or register
        for later launch by start_all()/the monitoring subprocess."""
        if not self._thread:
            self._thread = True
        self._event.clear()
        self._done_ev.clear()
        if self._subprocess is False:
            # start the thread; we are in threading mode.
            self._start()
        else:
            # append to instances
            if self not in self._get_instances():
                self._get_instances().append(self)

    def wait(self, timeout=None):
        """Block until the daemon loop finishes (no-op if never started)."""
        if not self._thread:
            return
        self._done_ev.wait(timeout=timeout)

    def _start(self):
        """Create and start the daemon thread exactly once."""
        # if we already started do nothing
        if isinstance(self._thread, Thread):
            return
        self._thread = Thread(target=self._daemon)
        self._thread.daemon = True
        self._thread.start()

    def stop(self):
        """Signal the daemon loop to exit and deregister this instance."""
        if not self._thread:
            return

        if not self.is_subprocess() or self.is_subprocess_alive():
            self._event.set()

        if isinstance(self._thread, Thread):
            try:
                self._get_instances().remove(self)
            except ValueError:
                pass
            self._thread = None

    def daemon(self):
        """Main loop: run _daemon_step() every _wait_timeout seconds until
        the stop event is set."""
        while True:
            if self._event.wait(self._wait_timeout):
                break
            self._daemon_step()

    def _daemon(self):
        # Thread entry point: mark started, run the loop, then mark done.
        self._start_ev.set()
        self.daemon()
        self.post_execution()

    def post_execution(self):
        """Hook called after the daemon loop exits; signals completion."""
        self._done_ev.set()

    def set_subprocess_mode(self):
        """Swap this monitor's events for process-safe SafeEvent objects.

        Called just before launching the daemon in a subprocess.
        """
        if not self._subprocess:
            self._subprocess = True
        if not isinstance(self._done_ev, SafeEvent):
            self._done_ev = SafeEvent()
        if not isinstance(self._start_ev, SafeEvent):
            self._start_ev = SafeEvent()
        if not isinstance(self._event, SafeEvent):
            self._event = SafeEvent()

    def _daemon_step(self):
        """Override point: one unit of periodic work."""
        pass

    @classmethod
    def start_all(cls, task, wait_for_subprocess=False):
        """Start every registered monitor of *task*, either in-process or
        inside one shared monitoring subprocess (per task configuration)."""
        # noinspection PyProtectedMember
        execute_in_subprocess = task._report_subprocess_enabled

        if not execute_in_subprocess:
            for d in BackgroundMonitor._instances.get(id(task), []):
                d._start()
        elif not BackgroundMonitor._main_process:
            cls._parent_pid = os.getpid()
            cls._sub_process_started = SafeEvent()
            cls._sub_process_started.clear()
            # setup: make every monitor's events process-safe before forking
            for d in BackgroundMonitor._instances.get(id(task), []):
                d.set_subprocess_mode()
            BackgroundMonitor._main_process = Process(target=cls._background_process_start, args=(id(task), ))
            BackgroundMonitor._main_process.daemon = True
            BackgroundMonitor._main_process.start()
            # wait until subprocess is up
            if wait_for_subprocess:
                cls._sub_process_started.wait()

    @classmethod
    def _background_process_start(cls, task_obj_id):
        """Entry point of the monitoring subprocess: launch all monitor
        threads for *task_obj_id* and block until they finish."""
        is_debugger_running = bool(getattr(sys, 'gettrace', None) and sys.gettrace())
        # restore original signal, this will prevent any deadlocks
        # Do not change this: we need to catch base exception as well
        # noinspection PyBroadException
        try:
            from ... import Task
            # noinspection PyProtectedMember
            Task.current_task()._remove_at_exit_callbacks()
        except:  # noqa
            pass

        # if a debugger is running, wait for it to attach to the subprocess
        if is_debugger_running:
            sleep(3)

        # launch all the threads
        for d in cls._instances.get(task_obj_id, []):
            d._start()
        if cls._sub_process_started:
            cls._sub_process_started.set()

        # wait until we are signaled
        for i in BackgroundMonitor._instances.get(task_obj_id, []):
            # noinspection PyBroadException
            try:
                if i._thread and i._thread.is_alive():
                    # DO NOT change: we must catch BaseException in case the
                    # process gets killed while joining.
                    try:
                        i._thread.join()
                    except:  # noqa
                        break
                else:
                    pass
            except:  # noqa
                pass
        # we are done, leave process
        return

    def is_alive(self):
        """True while the monitor's daemon loop is running (either mode)."""
        if self.is_subprocess():
            return self.is_subprocess_alive() and self._thread \
                   and self._start_ev.is_set() and not self._done_ev.is_set()
        else:
            return isinstance(self._thread, Thread) and self._thread.is_alive()

    @classmethod
    def is_subprocess_alive(cls):
        """Best-effort liveness check of the shared monitoring subprocess.

        NOTE(review): the ``except psutil.Error`` branch below returns a bare
        None (falsy) rather than False — callers treat them the same, but the
        inconsistency looks unintentional; confirm before relying on the
        exact return value.
        """
        if not cls._main_process:
            return False
        # noinspection PyBroadException
        try:
            return \
                cls._main_process.is_alive() and \
                psutil.Process(cls._main_process.pid).status() != psutil.STATUS_ZOMBIE
        except Exception:
            # psutil lookup failed; fall back to scanning the parent's children.
            current_pid = cls._main_process.pid
            if not current_pid:
                return False
            try:
                parent = psutil.Process(cls._parent_pid)
            except psutil.Error:
                # could not find parent process id
                return
            for child in parent.children(recursive=True):
                # kill ourselves last (if we need to)
                if child.pid == current_pid:
                    return child.status() != psutil.STATUS_ZOMBIE
            return False

    def is_subprocess(self):
        """True when this monitor is (or will be) handled by the subprocess."""
        return self._subprocess is not False and bool(self._main_process)

    def _get_instances(self):
        # Registry of monitors belonging to this instance's task object.
        return self._instances.setdefault(self._task_obj_id, [])

    @classmethod
    def is_subprocess_enabled(cls):
        """True once a monitoring subprocess has been created."""
        return bool(cls._main_process)
|
transaction.py | #!/usr/bin/python3
import functools
import sys
import threading
import time
from enum import IntEnum
from hashlib import sha1
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import black
import requests
from eth_abi import decode_abi
from hexbytes import HexBytes
from web3.exceptions import TimeExhausted, TransactionNotFound
from brownie._config import CONFIG
from brownie.convert import EthAddress, Wei
from brownie.exceptions import RPCRequestError
from brownie.project import build
from brownie.project import main as project_main
from brownie.project.compiler.solidity import SOLIDITY_ERROR_CODES
from brownie.project.sources import highlight_source
from brownie.test import coverage
from brownie.utils import color
from brownie.utils.output import build_tree
from . import state
from .event import EventDict, _decode_logs, _decode_trace
from .web3 import web3
def trace_property(fn: Callable) -> Any:
    """Decorator marking attributes that only exist after the transaction
    trace has been queried.

    Pending/dropped transactions yield None; a previously cached trace error
    is re-raised; RPCRequestError is re-wrapped with a friendlier message when
    the node does not support tracing.
    """

    @property  # type: ignore
    def getter(self: "TransactionReceipt") -> Any:
        if self.status < 0:
            # pending or dropped - nothing to report yet
            return None
        if self._trace_exc is not None:
            # the trace was queried before and failed - fail the same way again
            raise self._trace_exc
        try:
            return fn(self)
        except RPCRequestError:
            if web3.supports_traces:
                # if the node client supports traces, raise the actual error
                raise
            raise RPCRequestError(
                f"Accessing `TransactionReceipt.{fn.__name__}` on a {self.status.name.lower()} "
                "transaction requires the `debug_traceTransaction` RPC endpoint, but the node "
                "client does not support it or has not made it available."
            ) from None

    return getter
def trace_inspection(fn: Callable) -> Any:
    """Decorator for methods that inspect the transaction trace.

    Deployment transactions raise NotImplementedError; plain ether transfers
    (empty input, exactly 21000 gas) have no trace and return None.
    """

    @functools.wraps(fn)
    def inner(self: "TransactionReceipt", *args: Any, **kwargs: Any) -> Any:
        if self.contract_address:
            raise NotImplementedError(
                "Trace inspection methods are not available for deployment transactions."
            )
        if self.input == "0x" and self.gas_used == 21000:
            # plain ether transfer - there is no trace to inspect
            return None
        return fn(self, *args, **kwargs)

    return inner
class Status(IntEnum):
    """Lifecycle states of a broadcast transaction.

    Negative values mean no receipt is available; the non-negative values
    mirror the receipt ``status`` field.
    """

    Dropped = -2  # replaced/removed without confirming (e.g. same-nonce tx won)
    Pending = -1  # broadcast, still waiting for a receipt
    Reverted = 0  # mined, but execution reverted
    Confirmed = 1  # mined and executed successfully
class TransactionReceipt:
"""Attributes and methods relating to a broadcasted transaction.
* All ether values are given as integers denominated in wei.
* Before the tx has confirmed, most attributes are set to None
* Accessing methods / attributes that query debug_traceTransaction
may be very slow if the transaction involved many steps
Attributes:
contract_name: Name of the contract called in the transaction
fn_name: Name of the method called in the transaction
txid: Transaction ID
sender: Address of the sender
receiver: Address of the receiver
value: Amount transferred
gas_price: Gas price
gas_limit: Gas limit
gas_used: Gas used
input: Hexstring input data
confirmations: The number of blocks since the transaction was confirmed
nonce: Transaction nonce
block_number: Block number this transaction was included in
timestamp: Timestamp of the block this transaction was included in
txindex: Index of the transaction within the mined block
contract_address: Address of contract deployed by the transaction
logs: Raw transaction logs
status: Transaction status: -1 pending, 0 reverted, 1 successful
Additional attributes:
(only available if debug_traceTransaction is enabled in the RPC)
events: Decoded transaction log events
trace: Expanded stack trace from debug_traceTransaction
return_value: Return value(s) from contract call
revert_msg: Error string from reverted contract all
modified_state: Boolean, did this contract write to storage?"""
# these are defined as class attributes to expose them in console completion hints
block_number = None
contract_address: Optional[str] = None
contract_name = None
fn_name = None
gas_used = None
logs = None
nonce = None
sender = None
txid: str
txindex = None
    def __init__(
        self,
        txid: Union[str, bytes],
        sender: Any = None,
        silent: bool = True,
        required_confs: int = 1,
        is_blocking: bool = True,
        name: str = "",
        revert_data: Optional[Tuple] = None,
    ) -> None:
        """Instantiates a new TransactionReceipt object.

        Args:
            txid: hexstring transaction ID
            sender: sender as a hex string or Account object
            required_confs: the number of required confirmations before processing the receipt
            is_blocking: if True, creating the object is a blocking action until the required
                confirmations are received
            silent: toggles console verbosity (default True)
            name: contract function being called, optionally "Contract.function"
            revert_data: (revert string, program counter, revert type)
        """
        self._silent = silent
        if isinstance(txid, bytes):
            txid = HexBytes(txid).hex()
        if not self._silent:
            print(f"\rTransaction sent: {color('bright blue')}{txid}{color}")

        # this event is set once the transaction is confirmed or dropped
        # it is used for waiting during blocking transaction actions
        self._confirmed = threading.Event()

        # internal attributes - lazily populated from the trace
        self._call_cost = 0
        self._trace_exc: Optional[Exception] = None
        self._trace_origin: Optional[str] = None
        self._raw_trace: Optional[List] = None
        self._trace: Optional[List] = None
        self._events: Optional[EventDict] = None
        self._return_value: Any = None
        self._revert_msg: Optional[str] = None
        self._dev_revert_msg: Optional[str] = None
        self._modified_state: Optional[bool] = None
        self._new_contracts: Optional[List] = None
        self._internal_transfers: Optional[List[Dict]] = None
        self._subcalls: Optional[List[Dict]] = None

        # attributes that can be set immediately
        self.sender = sender
        self.status = Status(-1)
        self.txid = str(txid)
        self.contract_name = None
        self.fn_name = name

        if name and "." in name:
            # "Contract.function" - split into the two display attributes
            self.contract_name, self.fn_name = name.split(".", maxsplit=1)

        # avoid querying the trace to get the revert string if possible
        self._revert_msg, self._revert_pc, revert_type = revert_data or (None, None, None)
        if self._revert_msg is None and revert_type not in ("revert", "invalid_opcode"):
            self._revert_msg = revert_type
        if self._revert_pc is not None:
            self._dev_revert_msg = build._get_dev_revert(self._revert_pc) or None

        # blocks until confirmed when is_blocking and required_confs > 0
        self._await_transaction(required_confs, is_blocking)
def __repr__(self) -> str:
color_str = {-2: "dark white", -1: "bright yellow", 0: "bright red", 1: ""}[self.status]
return f"<Transaction '{color(color_str)}{self.txid}{color}'>"
def __hash__(self) -> int:
return hash(self.txid)
    @trace_property
    def events(self) -> Optional[EventDict]:
        # Decoded event logs. For a reverted transaction the receipt carries no
        # logs, so the events must be recovered from the trace instead.
        if not self.status and self._events is None:
            self._get_trace()
            # get events from the trace - handled lazily so that other
            # trace operations are not blocked in case of a decoding error
            initial_address = str(self.receiver or self.contract_address)
            self._events = _decode_trace(self._raw_trace, initial_address)  # type: ignore
        return self._events
@trace_property
def internal_transfers(self) -> Optional[List]:
if not self.status:
return []
if self._internal_transfers is None:
self._expand_trace()
return self._internal_transfers
@trace_property
def modified_state(self) -> Optional[bool]:
if not self.status:
self._modified_state = False
elif self._modified_state is None:
self._get_trace()
return self._modified_state
@trace_property
def new_contracts(self) -> Optional[List]:
if not self.status:
return []
if self._new_contracts is None:
self._expand_trace()
return self._new_contracts
@trace_property
def return_value(self) -> Optional[str]:
if not self.status:
return None
if self._return_value is None:
self._get_trace()
return self._return_value
    @trace_property
    def revert_msg(self) -> Optional[str]:
        # Error string from a reverted transaction; None for successful ones.
        if self.status:
            return None
        if self._revert_msg is None:
            # no cached revert string - recover it from the trace
            self._get_trace()
        elif self.contract_address and self._revert_msg == "out of gas":
            # a deployment reported as out-of-gas may have failed for another
            # reason (see _reverted_trace) - re-check via the trace
            self._get_trace()
        return self._revert_msg
@trace_property
def dev_revert_msg(self) -> Optional[str]:
if self.status:
return None
if self._dev_revert_msg is None:
self._get_trace()
return self._dev_revert_msg or None
@trace_property
def subcalls(self) -> Optional[List]:
if self._subcalls is None:
self._expand_trace()
return self._subcalls
@trace_property
def trace(self) -> Optional[List]:
if self._trace is None:
self._expand_trace()
return self._trace
@property
def timestamp(self) -> Optional[int]:
if self.status < 0:
return None
return web3.eth.getBlock(self.block_number)["timestamp"]
@property
def confirmations(self) -> int:
if not self.block_number:
return 0
return web3.eth.blockNumber - self.block_number + 1
    def replace(
        self,
        increment: Optional[float] = None,
        gas_price: Optional[Wei] = None,
        silent: Optional[bool] = None,
    ) -> "TransactionReceipt":
        """
        Rebroadcast this transaction with a higher gas price.

        Exactly one of `increment` and `gas_price` must be given.

        Arguments
        ---------
        increment : float, optional
            Multiplier applied to the gas price of this transaction in order
            to determine the new gas price
        gas_price : Wei, optional
            Absolute gas price to use in the replacement transaction
        silent : bool, optional
            Toggle console verbosity (default is same setting as this transaction)

        Returns
        -------
        TransactionReceipt
            New transaction object
        """
        if increment is None and gas_price is None:
            raise ValueError("Must give one of `increment` or `gas_price`")
        if gas_price is not None and increment is not None:
            raise ValueError("Cannot set `increment` and `gas_price` together")
        if self.status > -1:
            raise ValueError("Transaction has already confirmed")

        if increment is not None:
            gas_price = Wei(self.gas_price * increment)
        if silent is None:
            silent = self._silent

        sender = self.sender
        if isinstance(sender, EthAddress):
            # if the transaction wasn't broadcast during this brownie session,
            # check if the sender is unlocked - we might be able to replace anyway
            from brownie import accounts

            if sender in accounts:
                sender = accounts.at(sender)
            else:
                raise ValueError("Sender address not in `accounts`")

        # reuses the same nonce, so at most one of the two tx's can confirm
        return sender.transfer(  # type: ignore
            self.receiver,
            self.value,
            gas_limit=self.gas_limit,
            gas_price=Wei(gas_price),
            data=self.input,
            nonce=self.nonce,
            required_confs=0,
            silent=silent,
        )
    def wait(self, required_confs: int) -> None:
        """Block until this transaction has the required number of confirmations.

        Args:
            required_confs: minimum confirmations to wait for; values below 1
                return immediately.
        """
        if required_confs < 1:
            return
        if self.confirmations > required_confs:
            print(f"This transaction already has {self.confirmations} confirmations.")
            return

        # wait until the tx shows up on the node
        while True:
            try:
                tx: Dict = web3.eth.getTransaction(self.txid)
                break
            except TransactionNotFound:
                if self.nonce is not None:
                    sender_nonce = web3.eth.getTransactionCount(str(self.sender))
                    if sender_nonce > self.nonce:
                        # another tx with this nonce confirmed - ours was dropped
                        self.status = Status(-2)
                        self._confirmed.set()
                        return
                time.sleep(1)
        self._await_confirmation(tx["blockNumber"], required_confs)
    def _raise_if_reverted(self, exc: Any) -> None:
        """Raise (with source/revert info attached) if this tx reverted.

        No-op for successful transactions and in console mode. `exc` is the
        original exception; it is re-raised with extra attributes when traces
        are available.
        """
        if self.status or CONFIG.mode == "console":
            return
        if not web3.supports_traces:
            # if traces are not available, do not attempt to determine the revert reason
            raise exc or ValueError("Execution reverted")

        if self._dev_revert_msg is None:
            # no revert message and unable to check dev string - have to get trace
            self._expand_trace()
        if self.contract_address:
            # deployment transactions have no meaningful source to display
            source = ""
        elif CONFIG.argv["revert"]:
            source = self._traceback_string()
        else:
            source = self._error_string(1)
        raise exc._with_attr(
            source=source, revert_msg=self._revert_msg, dev_revert_msg=self._dev_revert_msg
        )
    def _await_transaction(self, required_confs: int, is_blocking: bool) -> None:
        """Wait for the tx to appear in the mempool, then await confirmation.

        Confirmation is awaited in a daemon thread; the thread is joined
        (blocking) when `is_blocking` and either confirmations are required or
        the tx is already mined.
        """
        # await tx showing in mempool
        while True:
            try:
                tx: Dict = web3.eth.getTransaction(HexBytes(self.txid))
                break
            except (TransactionNotFound, ValueError):
                if self.sender is None:
                    # if sender was not explicitly set, this transaction was
                    # not broadcasted locally and so likely doesn't exist
                    raise
                if self.nonce is not None:
                    sender_nonce = web3.eth.getTransactionCount(str(self.sender))
                    if sender_nonce > self.nonce:
                        # the nonce was consumed by another tx - ours was dropped
                        self.status = Status(-2)
                        return
                time.sleep(1)

        self._set_from_tx(tx)

        if not self._silent:
            print(
                f"  Gas price: {color('bright blue')}{self.gas_price / 10 ** 9}{color} gwei"
                f"   Gas limit: {color('bright blue')}{self.gas_limit}{color}"
                f"   Nonce: {color('bright blue')}{self.nonce}{color}"
            )

        # await confirmation of tx in a separate thread which is blocking if
        # required_confs > 0 or tx has already confirmed (`blockNumber` != None)
        confirm_thread = threading.Thread(
            target=self._await_confirmation, args=(tx["blockNumber"], required_confs), daemon=True
        )
        confirm_thread.start()
        if is_blocking and (required_confs > 0 or tx["blockNumber"]):
            confirm_thread.join()
    def _await_confirmation(self, block_number: int = None, required_confs: int = 1) -> None:
        """Poll until the tx has `required_confs` confirmations, then finalize.

        Handles dropped transactions (nonce consumed elsewhere) and uncle
        blocks (receipt disappears after first confirmation, triggering a
        recursive re-wait). Sets `self._confirmed` and marks same-nonce
        sibling transactions as dropped on completion.
        """
        block_number = block_number or self.block_number
        if not block_number and not self._silent and required_confs > 0:
            if required_confs == 1:
                sys.stdout.write("\rWaiting for confirmation... ")
            else:
                sys.stdout.write(
                    f"\rRequired confirmations: {color('bright yellow')}0/{required_confs}{color}"
                )
            sys.stdout.flush()

        # await first confirmation
        while True:
            # if sender nonce is greater than tx nonce, the tx should be confirmed
            sender_nonce = web3.eth.getTransactionCount(str(self.sender))
            expect_confirmed = bool(sender_nonce > self.nonce)  # type: ignore
            try:
                receipt = web3.eth.waitForTransactionReceipt(
                    HexBytes(self.txid), timeout=15, poll_latency=1
                )
                break
            except TimeExhausted:
                if expect_confirmed:
                    # if we expected confirmation based on the nonce, tx likely dropped
                    self.status = Status(-2)
                    self._confirmed.set()
                    return

        self.block_number = receipt["blockNumber"]
        # wait for more confirmations if required and handle uncle blocks
        remaining_confs = required_confs
        while remaining_confs > 0 and required_confs > 1:
            try:
                receipt = web3.eth.getTransactionReceipt(self.txid)
                self.block_number = receipt["blockNumber"]
            except TransactionNotFound:
                # receipt vanished - the including block was likely uncled
                if not self._silent:
                    sys.stdout.write(f"\r{color('red')}Transaction was lost...{color}{' ' * 8}")
                    sys.stdout.flush()
                # check if tx is still in mempool, this will raise otherwise
                tx = web3.eth.getTransaction(self.txid)
                self.block_number = None
                return self._await_confirmation(tx["blockNumber"], required_confs)
            if required_confs - self.confirmations != remaining_confs:
                remaining_confs = required_confs - self.confirmations
                if not self._silent:
                    sys.stdout.write(
                        f"\rRequired confirmations: {color('bright yellow')}{self.confirmations}/"
                        f"{required_confs}{color}  "
                    )
                    if remaining_confs == 0:
                        sys.stdout.write("\n")
                    sys.stdout.flush()
            if remaining_confs > 0:
                time.sleep(1)

        self._set_from_receipt(receipt)
        # if coverage evaluation is active, evaluate the trace
        if (
            CONFIG.argv["coverage"]
            and not coverage._check_cached(self.coverage_hash)
            and self.trace
        ):
            self._expand_trace()
        if not self._silent and required_confs > 0:
            print(self._confirm_output())

        # set the confirmation event and mark other tx's with the same nonce as dropped
        self._confirmed.set()
        for dropped_tx in state.TxHistory().filter(
            sender=self.sender, nonce=self.nonce, key=lambda k: k != self
        ):
            dropped_tx.status = Status(-2)
            dropped_tx._confirmed.set()
def _set_from_tx(self, tx: Dict) -> None:
if not self.sender:
self.sender = EthAddress(tx["from"])
self.receiver = EthAddress(tx["to"]) if tx["to"] else None
self.value = Wei(tx["value"])
self.gas_price = tx["gasPrice"]
self.gas_limit = tx["gas"]
self.input = tx["input"]
self.nonce = tx["nonce"]
# if receiver is a known contract, set function name
if not self.fn_name and state._find_contract(tx["to"]) is not None:
contract = state._find_contract(tx["to"])
self.contract_name = contract._name
self.fn_name = contract.get_method(tx["input"])
    def _set_from_receipt(self, receipt: Dict) -> None:
        """Sets object attributes based on the transaction receipt."""
        self.block_number = receipt["blockNumber"]
        self.txindex = receipt["transactionIndex"]
        self.gas_used = receipt["gasUsed"]
        self.logs = receipt["logs"]
        self.status = Status(receipt["status"])

        self.contract_address = receipt["contractAddress"]
        if self.contract_address and not self.contract_name:
            self.contract_name = "UnknownContract"

        # deterministic identifier used for caching coverage evaluation
        base = (
            f"{self.nonce}{self.block_number}{self.sender}{self.receiver}"
            f"{self.value}{self.input}{int(self.status)}{self.gas_used}{self.txindex}"
        )
        self.coverage_hash = sha1(base.encode()).hexdigest()

        if self.status:
            # successful tx - events come straight from the receipt logs
            self._events = _decode_logs(receipt["logs"])
        if self.fn_name:
            state.TxHistory()._gas(self._full_name(), receipt["gasUsed"])
    def _confirm_output(self) -> str:
        """Build the console summary string printed once the tx confirms."""
        status = ""
        if not self.status:
            # only query the revert reason when the node can actually provide it
            revert_msg = self.revert_msg if web3.supports_traces else None
            status = f"({color('bright red')}{revert_msg or 'reverted'}{color}) "
        result = (
            f"\r {self._full_name()} confirmed {status}- "
            f"Block: {color('bright blue')}{self.block_number}{color} "
            f"Gas used: {color('bright blue')}{self.gas_used}{color} "
            f"({color('bright blue')}{self.gas_used / self.gas_limit:.2%}{color})"
        )
        if self.status and self.contract_address:
            result += (
                f"\n {self.contract_name} deployed at: "
                f"{color('bright blue')}{self.contract_address}{color}"
            )
        return result + "\n"
    def _get_trace(self) -> None:
        """Retrieves the stack trace via debug_traceTransaction and finds the
        return value, revert message and event logs in the trace.

        Raises:
            RPCRequestError: if the node does not support tracing, the RPC
                client appears to have crashed, or the trace request errors.
        """
        # check if trace has already been retrieved, or the tx warrants it
        if self._raw_trace is not None:
            return
        self._raw_trace = []
        if self.input == "0x" and self.gas_used == 21000:
            # plain ether transfer - there is nothing to trace
            self._modified_state = False
            self._trace = []
            return

        if not web3.supports_traces:
            raise RPCRequestError("Node client does not support `debug_traceTransaction`")
        try:
            trace = web3.provider.make_request(  # type: ignore
                "debug_traceTransaction", (self.txid, {"disableStorage": CONFIG.mode != "console"})
            )
        except (requests.exceptions.Timeout, requests.exceptions.ConnectionError) as e:
            msg = f"Encountered a {type(e).__name__} while requesting "
            msg += "`debug_traceTransaction`. The local RPC client has likely crashed."
            if CONFIG.argv["coverage"]:
                msg += " If the error persists, add the `skip_coverage` marker to this test."
            raise RPCRequestError(msg) from None

        if "error" in trace:
            # cache the failure so later accesses re-raise instead of re-querying
            self._modified_state = None
            self._trace_exc = RPCRequestError(trace["error"]["message"])
            raise self._trace_exc

        self._raw_trace = trace = trace["result"]["structLogs"]
        if not trace:
            self._modified_state = False
            return

        if isinstance(trace[0]["gas"], str):
            # handle traces where numeric values are returned as hex (Nethermind)
            for step in trace:
                step["gas"] = int(step["gas"], 16)
                # gasCost is a signed value, e.g. gas refunds can be negative
                step["gasCost"] = int.from_bytes(HexBytes(step["gasCost"]), "big", signed=True)
                step["pc"] = int(step["pc"], 16)

        if self.status:
            self._confirmed_trace(trace)
        else:
            self._reverted_trace(trace)
    def _confirmed_trace(self, trace: Sequence) -> None:
        """Extract modified-state flag and return value from a successful trace."""
        # any SSTORE opcode means the transaction wrote to storage
        self._modified_state = next((True for i in trace if i["op"] == "SSTORE"), False)

        if trace[-1]["op"] != "RETURN" or self.contract_address:
            return
        contract = state._find_contract(self.receiver)
        if contract:
            # decode the returned memory via the called function's ABI
            data = _get_memory(trace[-1], -1)
            fn = contract.get_method_object(self.input)
            self._return_value = fn.decode_output(data)
    def _reverted_trace(self, trace: Sequence,) -> None:
        """Recover the revert string and dev revert comment from a reverted trace.

        Sets `_revert_msg` and `_dev_revert_msg`; `_dev_revert_msg` is set to
        "" (rather than None) once the search has run and found nothing.
        """
        self._modified_state = False
        if self.contract_address:
            # failed deployment - check for the EIP-170 bytecode size limit
            step = next((i for i in trace if i["op"] == "CODECOPY"), None)
            if step is not None and int(step["stack"][-3], 16) > 24577:
                self._revert_msg = "exceeds EIP-170 size limit"
                self._dev_revert_msg = ""

        if self._dev_revert_msg is not None:
            return

        # iterate over revert instructions in reverse to find revert message
        for step in (i for i in trace[::-1] if i["op"] in ("REVERT", "INVALID")):
            if step["op"] == "REVERT" and int(step["stack"][-2], 16):
                # get returned error string from stack
                data = _get_memory(step, -1)

                if data[:4].hex() == "0x4e487b71":  # keccak of Panic(uint256)
                    error_code = int(data[4:].hex(), 16)
                    if error_code in SOLIDITY_ERROR_CODES:
                        self._revert_msg = SOLIDITY_ERROR_CODES[error_code]
                    else:
                        self._revert_msg = f"Panic (error code: {error_code})"
                else:
                    # standard Error(string) - skip the 4-byte selector
                    self._revert_msg = decode_abi(["string"], data[4:])[0]

            elif self.contract_address:
                self._revert_msg = "invalid opcode" if step["op"] == "INVALID" else ""
                self._dev_revert_msg = ""
                return

            # check for dev revert string using program counter
            dev_revert = build._get_dev_revert(step["pc"]) or None
            if dev_revert is not None:
                self._dev_revert_msg = dev_revert
                if self._revert_msg is None:
                    self._revert_msg = dev_revert
            else:
                # if none is found, expand the trace and get it from the pcMap
                self._expand_trace()
                try:
                    contract = state._find_contract(step["address"])
                    pc_map = contract._build["pcMap"]
                    # if this is the function selector revert, check for a jump
                    if "first_revert" in pc_map[step["pc"]]:
                        idx = trace.index(step) - 4
                        if trace[idx]["pc"] != step["pc"] - 4:
                            step = trace[idx]
                    # if this is the optimizer revert, find the actual source
                    if "optimizer_revert" in pc_map[step["pc"]]:
                        idx = trace.index(step)
                        while trace[idx]["op"] != "JUMPDEST":
                            # look for the most recent jump
                            idx -= 1
                        idx -= 1
                        while not trace[idx]["source"]:
                            # now we're in a yul optimization, keep stepping back
                            # until we find a source offset
                            idx -= 1
                        # at last we have the real location of the revert
                        step["source"] = trace[idx]["source"]
                        step = trace[idx]
                    if "dev" in pc_map[step["pc"]]:
                        self._dev_revert_msg = pc_map[step["pc"]]["dev"]
                    else:
                        # extract the dev revert string from the source code
                        # TODO this technique appears superior to `_get_dev_revert`, and
                        # changes in solc 0.8.0 have necessitated it. the old approach
                        # of building a dev revert map should be refactored out in favor
                        # of this one.
                        source = contract._sources.get(step["source"]["filename"])
                        offset = step["source"]["offset"][1]
                        line = source[offset:].split("\n")[0]
                        marker = "//" if contract._build["language"] == "Solidity" else "#"
                        revert_str = line[line.index(marker) + len(marker) :].strip()
                        if revert_str.startswith("dev:"):
                            self._dev_revert_msg = revert_str
                            if self._revert_msg is None:
                                self._revert_msg = self._dev_revert_msg
                    return
                except (KeyError, AttributeError, TypeError, ValueError):
                    # best-effort lookup - fall through to the generic handling
                    pass

            if self._revert_msg is not None:
                if self._dev_revert_msg is None:
                    self._dev_revert_msg = ""
                return

        # nothing usable found - fall back to a generic message
        step = next(i for i in trace[::-1] if i["op"] in ("REVERT", "INVALID"))
        self._revert_msg = "invalid opcode" if step["op"] == "INVALID" else ""
    def _expand_trace(self) -> None:
        """Adds the following attributes to each step of the stack trace:

        address: The address executing this contract.
        contractName: The name of the contract.
        fn: The name of the function.
        jumpDepth: Number of jumps made since entering this contract. The
                   initial value is 0.
        source: {
            filename: path to the source file for this step
            offset: Start and end offset associated source code
        }

        Also populates `_new_contracts`, `_internal_transfers` and `_subcalls`,
        and submits coverage data keyed by `coverage_hash`.
        """
        if self._raw_trace is None:
            self._get_trace()
        if self._trace is not None:
            # in case `_get_trace` also expanded the trace, do not repeat
            return

        self._trace = trace = self._raw_trace
        self._new_contracts = []
        self._internal_transfers = []
        self._subcalls = []
        if self.contract_address or not trace:
            coverage._add_transaction(self.coverage_hash, {})
            return

        if trace[0]["depth"] == 1:
            # geth reports depth starting at 1 - normalize to 0-based
            self._trace_origin = "geth"
            self._call_cost = self.gas_used - trace[0]["gas"] + trace[-1]["gas"]
            for t in trace:
                t["depth"] = t["depth"] - 1
        else:
            self._trace_origin = "ganache"
            if trace[0]["gasCost"] >= 21000:
                # in ganache <6.10.0, gas costs are shifted by one step - we can
                # identify this when the first step has a gas cost >= 21000
                self._call_cost = trace[0]["gasCost"]
                for i in range(len(trace) - 1):
                    trace[i]["gasCost"] = trace[i + 1]["gasCost"]
                trace[-1]["gasCost"] = 0
            else:
                self._call_cost = self.gas_used - trace[0]["gas"] + trace[-1]["gas"]

        # last_map gives a quick reference of previous values at each depth
        last_map = {0: _get_last_map(self.receiver, self.input[:10])}  # type: ignore
        coverage_eval: Dict = {last_map[0]["name"]: {}}

        for i in range(len(trace)):
            # if depth has increased, tx has called into a different contract
            if trace[i]["depth"] > trace[i - 1]["depth"]:
                step = trace[i - 1]
                if step["op"] in ("CREATE", "CREATE2"):
                    # creating a new contract
                    out = next(x for x in trace[i:] if x["depth"] == step["depth"])
                    address = out["stack"][-1][-40:]
                    sig = f"<{step['op']}>"
                    calldata = None
                    self._new_contracts.append(EthAddress(address))
                    if int(step["stack"][-1], 16):
                        # value was sent along with the creation
                        self._add_internal_xfer(step["address"], address, step["stack"][-1])
                else:
                    # calling an existing contract
                    stack_idx = -4 if step["op"] in ("CALL", "CALLCODE") else -3
                    offset = int(step["stack"][stack_idx], 16)
                    length = int(step["stack"][stack_idx - 1], 16)
                    calldata = HexBytes("".join(step["memory"]))[offset : offset + length]
                    sig = calldata[:4].hex()
                    address = step["stack"][-2][-40:]

                last_map[trace[i]["depth"]] = _get_last_map(address, sig)
                coverage_eval.setdefault(last_map[trace[i]["depth"]]["name"], {})

                self._subcalls.append(
                    {"from": step["address"], "to": EthAddress(address), "op": step["op"]}
                )
                if step["op"] in ("CALL", "CALLCODE"):
                    self._subcalls[-1]["value"] = int(step["stack"][-3], 16)
                if calldata and last_map[trace[i]["depth"]].get("function"):
                    # decode the call inputs via the target function's ABI
                    fn = last_map[trace[i]["depth"]]["function"]
                    self._subcalls[-1]["function"] = fn._input_sig
                    try:
                        zip_ = zip(fn.abi["inputs"], fn.decode_input(calldata))
                        inputs = {i[0]["name"]: i[1] for i in zip_}  # type: ignore
                        self._subcalls[-1]["inputs"] = inputs
                    except Exception:
                        # decoding failed - fall back to raw calldata
                        self._subcalls[-1]["calldata"] = calldata.hex()
                elif calldata:
                    self._subcalls[-1]["calldata"] = calldata.hex()

            # update trace from last_map
            last = last_map[trace[i]["depth"]]
            trace[i].update(
                address=last["address"],
                contractName=last["name"],
                fn=last["internal_calls"][-1],
                jumpDepth=last["jumpDepth"],
                source=False,
            )

            opcode = trace[i]["op"]
            if opcode == "CALL" and int(trace[i]["stack"][-3], 16):
                # CALL with nonzero value - record the internal ether transfer
                self._add_internal_xfer(
                    last["address"], trace[i]["stack"][-2][-40:], trace[i]["stack"][-3]
                )

            try:
                pc = last["pc_map"][trace[i]["pc"]]
            except (KeyError, TypeError):
                # we don't have enough information about this contract
                continue

            if trace[i]["depth"] and opcode in ("RETURN", "REVERT", "INVALID", "SELFDESTRUCT"):
                # a subcall is terminating - attach its outcome to the record
                subcall: dict = next(
                    i for i in self._subcalls[::-1] if i["to"] == last["address"]  # type: ignore
                )

                if opcode == "RETURN":
                    returndata = _get_memory(trace[i], -1)
                    if returndata:
                        fn = last["function"]
                        try:
                            return_values = fn.decode_output(returndata)
                            if len(fn.abi["outputs"]) == 1:
                                return_values = (return_values,)
                            subcall["return_value"] = return_values
                        except Exception:
                            subcall["returndata"] = returndata.hex()
                    else:
                        subcall["return_value"] = None
                elif opcode == "SELFDESTRUCT":
                    subcall["selfdestruct"] = True
                else:
                    if opcode == "REVERT":
                        data = _get_memory(trace[i], -1)
                        # 4 bytes is the selector alone - nothing to decode
                        if len(data) > 4:
                            try:
                                subcall["revert_msg"] = decode_abi(["string"], data[4:])[0]
                            except Exception:
                                subcall["revert_msg"] = data.hex()
                    if "revert_msg" not in subcall and "dev" in pc:
                        subcall["revert_msg"] = pc["dev"]

            if "path" not in pc:
                continue
            trace[i]["source"] = {"filename": last["path_map"][pc["path"]], "offset": pc["offset"]}

            if "fn" not in pc:
                continue

            # calculate coverage
            if last["coverage"]:
                if pc["path"] not in coverage_eval[last["name"]]:
                    # [statements, false-branches, true-branches]
                    coverage_eval[last["name"]][pc["path"]] = [set(), set(), set()]
                if "statement" in pc:
                    coverage_eval[last["name"]][pc["path"]][0].add(pc["statement"])
                if "branch" in pc:
                    if pc["op"] != "JUMPI":
                        last["active_branches"].add(pc["branch"])
                    elif "active_branches" not in last or pc["branch"] in last["active_branches"]:
                        # false, true
                        key = 1 if trace[i + 1]["pc"] == trace[i]["pc"] + 1 else 2
                        coverage_eval[last["name"]][pc["path"]][key].add(pc["branch"])
                        if "active_branches" in last:
                            last["active_branches"].remove(pc["branch"])

            # ignore jumps with no function - they are compiler optimizations
            if "jump" in pc:
                # jump 'i' is calling into an internal function
                if pc["jump"] == "i":
                    try:
                        fn = last["pc_map"][trace[i + 1]["pc"]]["fn"]
                    except (KeyError, IndexError):
                        continue
                    if fn != last["internal_calls"][-1]:
                        last["internal_calls"].append(fn)
                        last["jumpDepth"] += 1
                # jump 'o' is returning from an internal function
                elif last["jumpDepth"] > 0:
                    del last["internal_calls"][-1]
                    last["jumpDepth"] -= 1
        coverage._add_transaction(
            self.coverage_hash, dict((k, v) for k, v in coverage_eval.items() if v)
        )
def _add_internal_xfer(self, from_: str, to: str, value: str) -> None:
self._internal_transfers.append( # type: ignore
{"from": EthAddress(from_), "to": EthAddress(to), "value": Wei(f"0x{value}")}
)
def _full_name(self) -> str:
if self.contract_name and self.fn_name:
return f"{self.contract_name}.{self.fn_name}"
return self.fn_name or "Transaction"
    def info(self) -> None:
        """Displays verbose information about the transaction, including decoded event logs."""
        result = f"Tx Hash: {self.txid}\nFrom: {self.sender}\n"

        if self.contract_address and self.status:
            result += f"New {self.contract_name} address: {self.contract_address}\n"
        else:
            result += f"To: {self.receiver}\n" f"Value: {self.value}\n"
            if self.input != "0x" and int(self.input, 16):
                result += f"Function: {self._full_name()}\n"

        result += (
            f"Block: {self.block_number}\nGas Used: "
            f"{self.gas_used} / {self.gas_limit} "
            f"({self.gas_used / self.gas_limit:.1%})\n"
        )

        if self.events:
            events = list(self.events)
            call_tree: List = ["--------------------------"]
            while events:
                # group consecutive events emitted by the same address
                idx = next(
                    (events.index(i) for i in events if i.address != events[0].address), len(events)
                )
                contract = state._find_contract(events[0].address)
                if contract:
                    try:
                        name = contract.name()
                    except Exception:
                        name = contract._name
                    sub_tree: List = [f"{name} ({events[0].address})"]
                else:
                    sub_tree = [f"{events[0].address}"]
                for event in events[:idx]:
                    sub_tree.append([event.name, *(f"{k}: {v}" for k, v in event.items())])
                call_tree.append(sub_tree)
                events = events[idx:]
            event_tree = build_tree([call_tree], multiline_pad=0, pad_depth=[0, 1])
            result = f"{result}\nEvents In This Transaction\n{event_tree}"

        result = color.highlight(result)
        status = ""
        if not self.status:
            status = f"({color('bright red')}{self.revert_msg or 'reverted'}{color})"
        print(f"Transaction was Mined {status}\n---------------------\n{result}")
    def _get_trace_gas(self, start: int, stop: int) -> Tuple[int, int]:
        """Sum gas used over trace steps [start, stop).

        Returns:
            (internal_gas, total_gas) - `internal_gas` counts only steps spent
            in the frame that begins at `start` (subcall gas excluded),
            `total_gas` counts everything including subcalls.
        """
        total_gas = 0
        internal_gas = 0
        is_internal = True
        trace = self.trace

        for i in range(start, stop):
            # Check if we are in a subfunction or not
            if is_internal and not _step_compare(trace[i], trace[start]):
                is_internal = False
                # For the internal gas tracking we ignore the gas passed to an external call
                if trace[i]["depth"] > trace[start]["depth"]:
                    internal_gas -= trace[i - 1]["gasCost"]
            elif not is_internal and _step_compare(trace[i], trace[start]):
                is_internal = True
            total_gas += trace[i]["gasCost"]
            if is_internal:
                internal_gas += trace[i]["gasCost"]

            # manually add gas refunds where they occur
            if trace[i]["op"] == "SSTORE" and int(trace[i]["stack"][-2], 16) == 0:
                # 15000 gas is refunded if a word is set to 0x0
                # Note: There is currently no way to check if the value was 0x0 before.
                # This will give an incorrect refund if 0x0 is assigned to 0x0.
                total_gas -= 15000
                if is_internal:
                    internal_gas -= 15000
            if trace[i]["op"] == "SELFDESTRUCT":
                # 24000 gas is refunded on selfdestruct
                total_gas -= 24000
                if is_internal:
                    internal_gas -= 24000

        # For external calls, add the remaining gas returned back
        if start > 0 and trace[start]["depth"] > trace[start - 1]["depth"]:
            total_gas += trace[start - 1]["gasCost"]
            internal_gas += trace[start - 1]["gasCost"]
        return internal_gas, total_gas
@trace_inspection
def call_trace(self, expand: bool = False) -> None:
    """
    Display the complete sequence of contracts and methods called during
    the transaction.

    The format:

        Contract.functionName [instruction] start:stop [gas used]

    * start:stop are index values for the `trace` member of this object,
      showing the points where the call begins and ends
    * for calls that include subcalls, gas use is displayed as
      [gas used in this frame / gas used in this frame + subcalls]
    * Calls displayed in red ended with a `REVERT` or `INVALID` instruction.

    Arguments
    ---------
    expand : bool
        If `True`, show an expanded call trace including inputs and return values
    """
    trace = self.trace
    # label for the outermost frame (the initial call)
    key = _step_internal(
        trace[0], trace[-1], 0, len(trace), self._get_trace_gas(0, len(self.trace))
    )
    call_tree: List = [[key]]
    # active_tree tracks the chain of nested nodes currently being appended to
    active_tree: List = [call_tree[0]]
    # (index, depth, jumpDepth) for relevant steps in the trace
    trace_index = [(0, 0, 0)] + [
        (i, trace[i]["depth"], trace[i]["jumpDepth"])
        for i in range(1, len(trace))
        if not _step_compare(trace[i], trace[i - 1])
    ]
    # reversed so subcall metadata can be consumed with pop()
    subcalls = self.subcalls[::-1]
    for i, (idx, depth, jump_depth) in enumerate(trace_index[1:], start=1):
        last = trace_index[i - 1]
        if depth == last[1] and jump_depth < last[2]:
            # returning from an internal function, reduce tree by one
            active_tree.pop()
            continue
        elif depth < last[1]:
            # returning from an external call, return tree by jumpDepth of the previous depth
            active_tree = active_tree[: -(last[2] + 1)]
            continue
        if depth > last[1]:
            # called to a new contract
            end = next((x[0] for x in trace_index[i + 1 :] if x[1] < depth), len(trace))
            total_gas, internal_gas = self._get_trace_gas(idx, end)
            key = _step_external(
                trace[idx],
                trace[end - 1],
                idx,
                end,
                (total_gas, internal_gas),
                subcalls.pop(),
                expand,
            )
        elif depth == last[1] and jump_depth > last[2]:
            # jumped into an internal function
            end = next(
                (
                    x[0]
                    for x in trace_index[i + 1 :]
                    if x[1] < depth or (x[1] == depth and x[2] < jump_depth)
                ),
                len(trace),
            )
            total_gas, internal_gas = self._get_trace_gas(idx, end)
            key = _step_internal(
                trace[idx], trace[end - 1], idx, end, (total_gas, internal_gas)
            )
        active_tree[-1].append([key])
        active_tree.append(active_tree[-1][-1])
    print(
        f"Call trace for '{color('bright blue')}{self.txid}{color}':\n"
        f"Initial call cost [{color('bright yellow')}{self._call_cost} gas{color}]"
    )
    print(build_tree(call_tree).rstrip())
def traceback(self) -> None:
    """Print an error traceback for the transaction (blank line if none)."""
    print(self._traceback_string() or "")
@trace_inspection
def _traceback_string(self) -> str:
    """Returns an error traceback for the transaction.

    Returns an empty string when the transaction succeeded or when no
    REVERT/INVALID step exists in the trace.
    """
    if self.status == 1:
        return ""
    trace = self.trace
    try:
        # locate the step that triggered the revert
        idx = next(i for i in range(len(trace)) if trace[i]["op"] in ("REVERT", "INVALID"))
        trace_range = range(idx, -1, -1)
    except StopIteration:
        return ""
    # first entry: nearest step at/before the revert that has source info
    result = [next(i for i in trace_range if trace[i]["source"])]
    depth, jump_depth = trace[idx]["depth"], trace[idx]["jumpDepth"]
    # walk backward, collecting the entry point of each enclosing frame
    while True:
        try:
            idx = next(
                i
                for i in trace_range
                if trace[i]["depth"] < depth
                or (trace[i]["depth"] == depth and trace[i]["jumpDepth"] < jump_depth)
            )
            result.append(idx)
            depth, jump_depth = trace[idx]["depth"], trace[idx]["jumpDepth"]
        except StopIteration:
            break
    # reversed so the outermost frame is printed first
    return f"{color}Traceback for '{color('bright blue')}{self.txid}{color}':\n" + "\n".join(
        self._source_string(i, 0) for i in result[::-1]
    )
def error(self, pad: int = 3) -> None:
    """Print the source code that caused the transaction to revert.

    Args:
        pad: Number of unrelated lines of code to include before and after
    """
    print(self._error_string(pad) or "")
@trace_inspection
def _error_string(self, pad: int = 3) -> str:
    """Returns the source code that caused the transaction to revert.

    Args:
        pad: Number of unrelated lines of code to include before and after

    Returns: source code string (empty on success or when no source found)
    """
    if self.status == 1:
        return ""
    # if RPC returned a program counter, try to find source without querying trace
    if self._revert_pc:
        highlight, linenos, path, fn_name = build._get_error_source_from_pc(self._revert_pc)
        if highlight:
            return _format_source(highlight, linenos, path, self._revert_pc, -1, fn_name)
        # pc lookup failed - clear it and fall back to the trace-based search
        self._revert_pc = None
    # iterate backward through the trace until a step has a source offset
    trace = self.trace
    trace_range = range(len(trace) - 1, -1, -1)
    try:
        # each `next` builds a fresh generator over the range: the first call
        # only confirms a REVERT/INVALID exists, the second finds the last
        # step that carries source information
        idx = next(i for i in trace_range if trace[i]["op"] in {"REVERT", "INVALID"})
        idx = next(i for i in trace_range if trace[i]["source"])
        return self._source_string(idx, pad)
    except StopIteration:
        return ""
def source(self, idx: int, pad: int = 3) -> None:
    """Print the source code associated with a trace step.

    Args:
        idx: Stack trace step index
        pad: Number of unrelated lines of code to include before and after
    """
    print(self._source_string(idx, pad) or "")
@trace_inspection
def _source_string(self, idx: int, pad: int) -> str:
    """Displays the associated source code for a given stack trace step.

    Args:
        idx: Stack trace step index
        pad: Number of unrelated lines of code to include before and after

    Returns: source code string (empty when the step has no source mapping)
    """
    trace = self.trace[idx]
    if not trace.get("source", None):
        return ""
    contract = state._find_contract(self.trace[idx]["address"])
    source, linenos = highlight_source(
        contract._sources.get(trace["source"]["filename"]), trace["source"]["offset"], pad
    )
    if not source:
        return ""
    return _format_source(
        source,
        linenos,
        trace["source"]["filename"],
        trace["pc"],
        self.trace.index(trace),
        trace["fn"],
    )
def _format_source(source: str, linenos: Tuple, path: Path, pc: int, idx: int, fn_name: str) -> str:
    """Build the colorized header + snippet shown for a single trace step."""
    dark = color("dark white")
    blue = color("bright blue")
    # "line N" for a single line, "lines N-M" when the mapping spans a range
    if linenos[1] > linenos[0]:
        ln = f"s {blue}{linenos[0]}{dark}-{blue}{linenos[1]}"
    else:
        ln = f" {blue}{linenos[0]}"
    return (
        f"{dark}Trace step {blue}{idx}{dark}, "
        f"program counter {blue}{pc}{dark}:\n {dark}"
        f"File {color('bright magenta')}\"{path}\"{dark}, line{ln}"
        f"{dark}, in {color('bright cyan')}{fn_name}{dark}:{source}"
    )
def _step_compare(a: Dict, b: Dict) -> bool:
return a["depth"] == b["depth"] and a["jumpDepth"] == b["jumpDepth"]
def _step_internal(
    step: Dict,
    last_step: Dict,
    start: Union[str, int],
    stop: Union[str, int],
    gas: Tuple[int, int],
    subcall: Dict = None,
) -> str:
    """Format the one-line call-tree label for a frame.

    Args:
        step: first trace step of the frame
        last_step: final trace step of the frame
        start: trace index where the frame begins (shown in the label)
        stop: trace index where the frame ends
        gas: (gas excluding subcalls, gas including subcalls)
        subcall: optional subcall metadata; its `op` is added to the label

    Returns: colorized label string
    """
    # frames that themselves ended in REVERT/INVALID are highlighted red
    if last_step["op"] in {"REVERT", "INVALID"} and _step_compare(step, last_step):
        contract_color = color("bright red")
    else:
        contract_color = color("bright cyan") if not step["jumpDepth"] else color()
    key = f"{color('dark white')}{contract_color}{step['fn']} {color('dark white')}"
    left_bracket = f"{color('dark white')}["
    right_bracket = f"{color('dark white')}]"
    if subcall:
        key = f"{key}[{color}{subcall['op']}{right_bracket} "
    key = f"{key}{start}:{stop}{color}"
    if gas:
        # equal values mean the frame spent no gas in subcalls
        if gas[0] == gas[1]:
            gas_str = f"{color('bright yellow')}{gas[0]} gas"
        else:
            gas_str = f"{color('bright yellow')}{gas[0]} / {gas[1]} gas"
        key = f"{key} {left_bracket}{gas_str}{right_bracket}{color}"
    if last_step["op"] == "SELFDESTRUCT":
        key = f"{key} {left_bracket}{color('bright red')}SELFDESTRUCT{right_bracket}{color}"
    return key
def _convert_0x_to_empty_bytes(value: Any) -> Any:
# black cannot parse `0x` without any trailing zeros, so we temporarily
# replace it with an empty bytestring
final = []
for item in value:
if isinstance(item, (list, tuple)):
final.append(_convert_0x_to_empty_bytes(item))
elif str(item) == "0x":
final.append(b"")
else:
final.append(item)
return type(value)(final)
def _format(value: Any) -> str:
if isinstance(value, (list, tuple)):
value = _convert_0x_to_empty_bytes(value)
mode = black.FileMode(line_length=60)
value = black.format_str(str(value), mode=mode).replace('b""', "0x")
return str(value)
def _step_external(
    step: Dict,
    last_step: Dict,
    start: Union[str, int],
    stop: Union[str, int],
    gas: Tuple[int, int],
    subcall: Dict,
    expand: bool,
) -> str:
    """Format the call-tree label for an external call frame.

    Args:
        step: first trace step of the frame
        last_step: final trace step of the frame
        start: trace index where the frame begins
        stop: trace index where the frame ends
        gas: (gas excluding subcalls, gas including subcalls)
        subcall: subcall metadata (op, inputs, return values, revert reason, ...)
        expand: when True, return an expanded sub-tree with inputs/returns

    Returns: colorized label (or rendered sub-tree when `expand` is True)
    """
    key = _step_internal(step, last_step, start, stop, gas, subcall)
    if not expand:
        return key
    result: List = [key, f"address: {step['address']}"]
    if "value" in subcall:
        result.append(f"value: {subcall['value']}")
    if "inputs" not in subcall:
        # no decoded inputs available - show the raw calldata instead
        result.append(f"calldata: {subcall.get('calldata')}")
    elif subcall["inputs"]:
        result.append(
            ["input arguments:", *(f"{k}: {_format(v)}" for k, v in subcall["inputs"].items())]
        )
    else:
        result.append("input arguments: None")
    if "return_value" in subcall:
        value = subcall["return_value"]
        if isinstance(value, tuple) and len(value) > 1:
            result.append(["return values:", *(_format(i) for i in value)])
        else:
            # single-item tuples are unwrapped for display
            if isinstance(value, tuple):
                value = value[0]
            result.append(f"return value: {_format(value)}")
    elif "returndata" in subcall:
        result.append(f"returndata: {subcall['returndata']}")
    if "revert_msg" in subcall:
        result.append(f"revert reason: {color('bright red')}{subcall['revert_msg']}{color}")
    return build_tree([result], multiline_pad=0).rstrip()
def _get_memory(step: Dict, idx: int) -> HexBytes:
    """Return the chunk of memory referenced by an (offset, length) stack pair.

    Args:
        step: trace step dict containing `stack` and `memory`
        idx: stack index holding the memory offset; `idx - 1` holds the length
    """
    offset = int(step["stack"][idx], 16)
    length = int(step["stack"][idx - 1], 16)
    data = HexBytes("".join(step["memory"]))[offset : offset + length]
    # append zero-bytes if allocated memory ends before `length` bytes
    data = HexBytes(data + b"\x00" * (length - len(data)))
    return data
def _get_last_map(address: EthAddress, sig: str) -> Dict:
    """Build the initial per-frame state dict for a call into `address`.

    Args:
        address: address of the contract being called
        sig: function signature string (presumably the selector - confirm)

    Returns:
        Dict with contract/function metadata used during trace expansion;
        `coverage` is only enabled for contracts belonging to a `Project`.
    """
    contract = state._find_contract(address)
    last_map = {"address": EthAddress(address), "jumpDepth": 0, "name": None, "coverage": False}
    if contract:
        if contract.get_method(sig):
            full_fn_name = f"{contract._name}.{contract.get_method(sig)}"
        else:
            # signature not known to this contract - fall back to the bare name
            full_fn_name = contract._name
        last_map.update(
            contract=contract,
            function=contract.get_method_object(sig),
            name=contract._name,
            internal_calls=[full_fn_name],
            path_map=contract._build.get("allSourcePaths"),
            pc_map=contract._build.get("pcMap"),
        )
        if isinstance(contract._project, project_main.Project):
            # only evaluate coverage for contracts that are part of a `Project`
            last_map["coverage"] = True
            if contract._build["language"] == "Solidity":
                last_map["active_branches"] = set()
    else:
        last_map.update(contract=None, internal_calls=[f"<UnknownContract>.{sig}"], pc_map=None)
    return last_map
|
utils.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<i@binux.me>
# http://binux.me
# Created on 2012-11-06 11:50:13
import logging
import hashlib
import datetime
import socket
import base64
import six
from six import iteritems
md5string = lambda x: hashlib.md5(utf8(x)).hexdigest()
class ReadOnlyDict(dict):
    """A Read Only Dict.

    NOTE(review): only item assignment is blocked; `update`, `setdefault`,
    `__delitem__` etc. still mutate - confirm whether that is intentional.
    """

    def __setitem__(self, key, value):
        # generic Exception kept for backward compatibility with existing callers
        raise Exception("dict is read-only")
def getitem(obj, key=0, default=None):
    """Return ``obj[key]``, or `default` when the lookup fails.

    Args:
        obj: any subscriptable object (list, dict, ...)
        key: index or mapping key; defaults to the first element
        default: value returned when the lookup raises

    Returns:
        The looked-up value, or `default`.
    """
    try:
        return obj[key]
    except (IndexError, KeyError, TypeError):
        # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit and
        # unrelated programming errors are no longer silently swallowed
        return default
def hide_me(tb, g=globals()):
    """Hide stack traceback of given stack.

    Args:
        tb: traceback object to trim
        g: globals dict identifying "this" module's frames; evaluated once at
           definition time, which is intentional here

    Returns:
        The first frame outside `g`, or the original `tb` when trimming
        fails or would leave no frames at all.
    """
    base_tb = tb
    try:
        # skip frames from other modules, then skip this module's own frames
        while tb and tb.tb_frame.f_globals is not g:
            tb = tb.tb_next
        while tb and tb.tb_frame.f_globals is g:
            tb = tb.tb_next
    except Exception as e:
        logging.exception(e)
        tb = base_tb
    if not tb:
        tb = base_tb
    return tb
def run_in_thread(func, *args, **kwargs):
    """Run `func` in a daemon thread and return the started Thread object."""
    from threading import Thread

    worker = Thread(target=func, args=args, kwargs=kwargs)
    # daemonize so a pending worker never blocks interpreter shutdown
    worker.daemon = True
    worker.start()
    return worker
def run_in_subprocess(func, *args, **kwargs):
    """Run `func` in a daemon subprocess and return the started Process object."""
    from multiprocessing import Process

    worker = Process(target=func, args=args, kwargs=kwargs)
    # daemonize so a pending worker never blocks interpreter shutdown
    worker.daemon = True
    worker.start()
    return worker
def format_date(date, gmt_offset=0, relative=True, shorter=False, full_format=False):
    """Formats the given date (which should be GMT).

    By default, we return a relative time (e.g., "2 minutes ago"). You
    can return an absolute date string with ``relative=False``.

    You can force a full format date ("July 10, 1980") with
    ``full_format=True``.

    This method is primarily intended for dates in the past.
    For dates in the future, we fall back to full format.

    From tornado

    Args:
        date: datetime, or a unix timestamp (int/float); falsy returns "-"
        gmt_offset: minutes subtracted from GMT for local display
        relative: use "N minutes ago"-style wording for recent dates
        shorter: drop the time-of-day portion where possible
        full_format: always use the full "Month D, YYYY at H:MM" form
    """
    if not date:
        return '-'
    if isinstance(date, float) or isinstance(date, int):
        date = datetime.datetime.utcfromtimestamp(date)
    now = datetime.datetime.utcnow()
    if date > now:
        if relative and (date - now).seconds < 60:
            # Due to clock skew, some things are slightly
            # in the future. Round timestamps in the immediate
            # future down to now in relative mode.
            date = now
        else:
            # Otherwise, future dates always use the full format.
            full_format = True
    local_date = date - datetime.timedelta(minutes=gmt_offset)
    local_now = now - datetime.timedelta(minutes=gmt_offset)
    local_yesterday = local_now - datetime.timedelta(hours=24)
    difference = now - date
    seconds = difference.seconds
    days = difference.days

    format = None
    if not full_format:
        # relative wording only applies within the last 24 hours (days == 0)
        if relative and days == 0:
            if seconds < 50:
                return ("1 second ago" if seconds <= 1 else
                        "%(seconds)d seconds ago") % {"seconds": seconds}

            if seconds < 50 * 60:
                minutes = round(seconds / 60.0)
                return ("1 minute ago" if minutes <= 1 else
                        "%(minutes)d minutes ago") % {"minutes": minutes}

            hours = round(seconds / (60.0 * 60))
            return ("1 hour ago" if hours <= 1 else
                    "%(hours)d hours ago") % {"hours": hours}

        if days == 0:
            format = "%(time)s"
        elif days == 1 and local_date.day == local_yesterday.day and \
                relative:
            format = "yesterday" if shorter else "yesterday at %(time)s"
        elif days < 5:
            format = "%(weekday)s" if shorter else "%(weekday)s at %(time)s"
        elif days < 334:  # 11mo, since confusing for same month last year
            format = "%(month)s-%(day)s" if shorter else \
                "%(month)s-%(day)s at %(time)s"

    if format is None:
        format = "%(month_name)s %(day)s, %(year)s" if shorter else \
            "%(month_name)s %(day)s, %(year)s at %(time)s"

    str_time = "%d:%02d" % (local_date.hour, local_date.minute)

    return format % {
        "month_name": local_date.strftime('%b'),
        "weekday": local_date.strftime('%A'),
        "day": str(local_date.day),
        "year": str(local_date.year),
        "month": local_date.month,
        "time": str_time
    }
class TimeoutError(Exception):
    """Raised by the `timeout` context manager when the limit expires.

    NOTE(review): shadows the Python 3 builtin TimeoutError; kept so existing
    `except TimeoutError` callers keep catching this exact class.
    """
    pass
try:
    import signal

    # SIGALRM is POSIX-only; raise ImportError to select the no-op fallback
    # implementation below on platforms (e.g. Windows) where it is missing.
    if not hasattr(signal, 'SIGALRM'):
        raise ImportError('signal')

    class timeout:
        """
        Time limit of command

        with timeout(3):
            time.sleep(10)
        """

        def __init__(self, seconds=1, error_message='Timeout'):
            self.seconds = seconds
            self.error_message = error_message

        def handle_timeout(self, signum, frame):
            raise TimeoutError(self.error_message)

        def __enter__(self):
            # seconds == 0 disables the limit entirely
            if self.seconds:
                signal.signal(signal.SIGALRM, self.handle_timeout)
                signal.alarm(self.seconds)

        def __exit__(self, type, value, traceback):
            if self.seconds:
                # cancel the pending alarm on exit
                signal.alarm(0)
except ImportError:
    class timeout:
        """
        Time limit of command (for windows)

        No-op fallback used when SIGALRM is unavailable: the context manager
        does nothing and never raises TimeoutError.
        """

        def __init__(self, seconds=1, error_message='Timeout'):
            pass

        def __enter__(self):
            pass

        def __exit__(self, type, value, traceback):
            pass
def utf8(string):
    """Coerce `string` to utf8-encoded bytes.

    Bytes pass through untouched; any non-string object is converted through
    its text representation first.
    """
    if isinstance(string, six.binary_type):
        return string
    if not isinstance(string, six.text_type):
        string = six.text_type(string)
    return string.encode('utf8')
def text(string, encoding='utf8'):
    """Coerce `string` to unicode text, decoding bytes with `encoding`.

    Non-string objects are converted via their text representation.
    """
    if isinstance(string, six.binary_type):
        return string.decode(encoding)
    if isinstance(string, six.text_type):
        return string
    return six.text_type(string)
def pretty_unicode(string):
    """Coerce to unicode; undecodable bytes become a unicode-escaped string."""
    if isinstance(string, six.text_type):
        return string
    try:
        return string.decode("utf8")
    except UnicodeDecodeError:
        # Latin-1 maps every byte, so escaping can never fail
        escaped = string.decode('Latin-1').encode('unicode_escape')
        return escaped.decode("utf8")
def unicode_string(string):
    """
    Make sure string is unicode, try to decode with utf8, or base64 if failed.

    Can be decoded by `decode_unicode_string`.
    """
    if isinstance(string, six.text_type):
        return string
    try:
        return string.decode("utf8")
    except UnicodeDecodeError:
        # b64encode returns bytes on Python 3; decode to ascii text before
        # concatenating, otherwise this line raises TypeError (str + bytes).
        # On Python 2 the extra decode produces unicode, which concatenates fine.
        encoded = base64.b64encode(string).decode('ascii')
        return '[BASE64-DATA]' + encoded + '[/BASE64-DATA]'
def unicode_dict(_dict):
    """Return a copy of `_dict` with every key and value run through `unicode_obj`."""
    return {unicode_obj(k): unicode_obj(v) for k, v in iteritems(_dict)}
def unicode_list(_list):
    """Return a list with every element run through `unicode_obj` (bytes -> base64)."""
    return list(map(unicode_obj, _list))
def unicode_obj(obj):
    """
    Make sure keys and values of dict/list/tuple is unicode. bytes will encode in base64.

    Can be decoded by `decode_unicode_obj`. Tuples come back as lists.
    """
    if isinstance(obj, dict):
        return unicode_dict(obj)
    elif isinstance(obj, (list, tuple)):
        return unicode_list(obj)
    elif isinstance(obj, six.string_types):
        return unicode_string(obj)
    elif isinstance(obj, (int, float)):
        return obj
    elif obj is None:
        return obj
    else:
        try:
            return text(obj)
        except Exception:
            # narrowed from a bare `except:`; fall back to the repr for
            # objects whose __str__ raises
            return text(repr(obj))
def decode_unicode_string(string):
    """Reverse `unicode_string`: unwrap and b64-decode a [BASE64-DATA] marker."""
    prefix = '[BASE64-DATA]'
    suffix = '[/BASE64-DATA]'
    if string.startswith(prefix) and string.endswith(suffix):
        return base64.b64decode(string[len(prefix):-len(suffix)])
    # no marker - the value was stored as plain text
    return string
def decode_unicode_obj(obj):
    """Reverse `unicode_obj` across nested dict/list/tuple structures."""
    if isinstance(obj, dict):
        return {
            decode_unicode_string(k): decode_unicode_obj(v)
            for k, v in iteritems(obj)
        }
    if isinstance(obj, six.string_types):
        return decode_unicode_string(obj)
    if isinstance(obj, (list, tuple)):
        return [decode_unicode_obj(x) for x in obj]
    return obj
class Get(object):
    """Descriptor that computes its value lazily through the supplied getter."""

    def __init__(self, getter):
        # zero-argument callable invoked on every attribute access
        self.getter = getter

    def __get__(self, instance, owner):
        return self.getter()
class ObjectDict(dict):
    """
    Dict whose items can also be read as attributes (d.key).

    If dict[key] is a `Get` descriptor, its value is computed on access.

    NOTE(review): a missing key raises KeyError (not AttributeError) from
    `__getattr__`, which can confuse `hasattr`/`getattr` with defaults -
    kept as-is for backward compatibility.
    """

    def __getattr__(self, name):
        ret = self.__getitem__(name)
        # descriptor-like values (e.g. `Get`) are evaluated before returning
        if hasattr(ret, '__get__'):
            return ret.__get__(self, ObjectDict)
        return ret
def load_object(name):
    """Import and return the object named by a dotted "module.object" path."""
    if "." not in name:
        raise Exception('load object need module.object')
    module_name, object_name = name.rsplit('.', 1)
    # Python 2's __import__ requires byte-string fromlist entries and a
    # legacy default `level` of -1
    if six.PY2:
        fromlist = [utf8(object_name)]
        module = __import__(module_name, globals(), locals(), fromlist, -1)
    else:
        module = __import__(module_name, globals(), locals(), [object_name])
    return getattr(module, object_name)
def get_python_console(namespace=None):
    """
    Return a interactive python console instance with caller's stack

    Prefers an IPython TerminalInteractiveShell; falls back to the stdlib
    `code.InteractiveConsole` (with readline tab-completion when available).
    """
    if namespace is None:
        import inspect
        frame = inspect.currentframe()
        caller = frame.f_back
        if not caller:
            logging.error("can't find caller who start this console.")
            caller = frame
        # expose the caller's globals and locals inside the console
        namespace = dict(caller.f_globals)
        namespace.update(caller.f_locals)
    try:
        from IPython.terminal.interactiveshell import TerminalInteractiveShell
        shell = TerminalInteractiveShell(user_ns=namespace)
    except ImportError:
        try:
            import readline
            import rlcompleter
            readline.set_completer(rlcompleter.Completer(namespace).complete)
            readline.parse_and_bind("tab: complete")
        except ImportError:
            pass
        import code
        shell = code.InteractiveConsole(namespace)
        shell._quit = False

        def exit():
            shell._quit = True

        def readfunc(prompt=""):
            # raising EOFError is how InteractiveConsole.interact() terminates
            if shell._quit:
                raise EOFError
            return six.moves.input(prompt)

        # inject exit method
        shell.ask_exit = exit
        shell.raw_input = readfunc

    return shell
def python_console(namespace=None):
    """Start a interactive python console with caller's stack.

    Blocks until the console session ends.
    """
    if namespace is None:
        import inspect
        frame = inspect.currentframe()
        caller = frame.f_back
        if not caller:
            logging.error("can't find caller who start this console.")
            caller = frame
        # capture the *direct* caller's scope here, not this helper's
        namespace = dict(caller.f_globals)
        namespace.update(caller.f_locals)
    return get_python_console(namespace=namespace).interact()
def check_port_open(port, addr='127.0.0.1'):
    """Return True if a TCP connection to (addr, port) succeeds.

    Args:
        port: TCP port number to probe
        addr: host to probe, defaults to localhost

    Returns:
        bool: True when something accepts the connection, False otherwise.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # bounded timeout so a filtered/blackholed port cannot hang the caller
        sock.settimeout(3)
        return sock.connect_ex((addr, port)) == 0
    finally:
        # the original leaked the socket on every call; always release it
        sock.close()
def cookie_str(cookie):
    """Serialize a cookie dict to a header-style string ("k=v; k2=v2;")."""

    def _sanitize(value):
        # strip CR/LF so values cannot inject extra header lines
        return value.replace('\n', '%0a').replace('\r', '%0d')

    pairs = ['%s=%s' % (_sanitize(key), _sanitize(value))
             for key, value in cookie.items()]
    if not pairs:
        return ''
    # original built "k=v; k2=v2; " and chopped the final space, keeping ";"
    return '; '.join(pairs) + ';'
build_mscoco_data.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts MSCOCO data to TFRecord file format with SequenceExample protos.
The MSCOCO images are expected to reside in JPEG files located in the following
directory structure:
train_image_dir/COCO_train2014_000000000151.jpg
train_image_dir/COCO_train2014_000000000260.jpg
...
and
val_image_dir/COCO_val2014_000000000042.jpg
val_image_dir/COCO_val2014_000000000073.jpg
...
The MSCOCO annotations JSON files are expected to reside in train_captions_file
and val_captions_file respectively.
This script converts the combined MSCOCO data into sharded data files consisting
of 256, 4 and 8 TFRecord files, respectively:
output_dir/train-00000-of-00256
output_dir/train-00001-of-00256
...
output_dir/train-00255-of-00256
and
output_dir/val-00000-of-00004
...
output_dir/val-00003-of-00004
and
output_dir/test-00000-of-00008
...
output_dir/test-00007-of-00008
Each TFRecord file contains ~2300 records. Each record within the TFRecord file
is a serialized SequenceExample proto consisting of precisely one image-caption
pair. Note that each image has multiple captions (usually 5) and therefore each
image is replicated multiple times in the TFRecord files.
The SequenceExample proto contains the following fields:
context:
image/image_id: integer MSCOCO image identifier
image/data: string containing JPEG encoded image in RGB colorspace
feature_lists:
image/caption: list of strings containing the (tokenized) caption words
image/caption_ids: list of integer ids corresponding to the caption words
The captions are tokenized using the NLTK (http://www.nltk.org/) word tokenizer.
The vocabulary of word identifiers is constructed from the sorted list (by
descending frequency) of word tokens in the training set. Only tokens appearing
at least 4 times are considered; all other words get the "unknown" word id.
NOTE: This script will consume around 100GB of disk space because each image
in the MSCOCO dataset is replicated ~5 times (once per caption) in the output.
This is done for two reasons:
1. In order to better shuffle the training data.
2. It makes it easier to perform asynchronous preprocessing of each image in
TensorFlow.
Running this script using 16 threads may take around 1 hour on a HP Z420.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import Counter
from collections import namedtuple
from datetime import datetime
import json
import os.path
import random
import sys
import threading
import nltk.tokenize
import numpy as np
import tensorflow as tf
# Input data locations.
tf.flags.DEFINE_string("train_image_dir", "/tmp/train2014/",
                       "Training image directory.")
tf.flags.DEFINE_string("val_image_dir", "/tmp/val2014",
                       "Validation image directory.")
tf.flags.DEFINE_string("train_captions_file", "/tmp/captions_train2014.json",
                       "Training captions JSON file.")
tf.flags.DEFINE_string("val_captions_file", "/tmp/captions_val2014.json",
                       "Validation captions JSON file.")

# Output location and sharding configuration.
tf.flags.DEFINE_string("output_dir", "/tmp/", "Output data directory.")

tf.flags.DEFINE_integer("train_shards", 256,
                        "Number of shards in training TFRecord files.")
tf.flags.DEFINE_integer("val_shards", 4,
                        "Number of shards in validation TFRecord files.")
tf.flags.DEFINE_integer("test_shards", 8,
                        "Number of shards in testing TFRecord files.")

# Vocabulary construction parameters.
tf.flags.DEFINE_string("start_word", "<S>",
                       "Special word added to the beginning of each sentence.")
tf.flags.DEFINE_string("end_word", "</S>",
                       "Special word added to the end of each sentence.")
tf.flags.DEFINE_string("unknown_word", "<UNK>",
                       "Special word meaning 'unknown'.")
tf.flags.DEFINE_integer("min_word_count", 4,
                        "The minimum number of occurrences of each word in the "
                        "training set for inclusion in the vocabulary.")
tf.flags.DEFINE_string("word_counts_output_file", "/tmp/word_counts.txt",
                       "Output vocabulary file of word counts.")

tf.flags.DEFINE_integer("num_threads", 8,
                        "Number of threads to preprocess the images.")

FLAGS = tf.flags.FLAGS

# Bundles one image with its filename and (possibly multiple) captions.
ImageMetadata = namedtuple("ImageMetadata",
                           ["image_id", "filename", "captions"])
class Vocabulary(object):
    """Simple word -> id lookup with an out-of-vocabulary fallback id."""

    def __init__(self, vocab, unk_id):
        """Initializes the vocabulary.

        Args:
          vocab: A dictionary of word to word_id.
          unk_id: Id of the special 'unknown' word.
        """
        self._vocab = vocab
        self._unk_id = unk_id

    def word_to_id(self, word):
        """Returns the integer id of a word string."""
        # unknown words map to the shared 'unknown' id
        return self._vocab.get(word, self._unk_id)
class ImageDecoder(object):
    """Helper class for decoding images in TensorFlow.

    Builds the decode graph once and reuses a single persistent Session
    (TF1-style API) for every call.
    """

    def __init__(self):
        # Create a single TensorFlow Session for all image decoding calls.
        self._sess = tf.Session()

        # TensorFlow ops for JPEG decoding.
        self._encoded_jpeg = tf.placeholder(dtype=tf.string)
        self._decode_jpeg = tf.image.decode_jpeg(self._encoded_jpeg, channels=3)

    def decode_jpeg(self, encoded_jpeg):
        """Decode raw JPEG bytes to a 3-channel image array; asserts the shape."""
        image = self._sess.run(self._decode_jpeg,
                               feed_dict={self._encoded_jpeg: encoded_jpeg})
        assert len(image.shape) == 3
        assert image.shape[2] == 3
        return image
def _int64_feature(value):
    """Wrapper for inserting a single int64 Feature into a SequenceExample proto."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
    """Wrapper for inserting a bytes Feature into a SequenceExample proto."""
    # NOTE(review): `str(value)` is a Python 2 assumption; under Python 3 it
    # would mangle raw image bytes into "b'...'" - confirm before porting.
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[str(value)]))
def _int64_feature_list(values):
    """Wrapper for inserting an int64 FeatureList into a SequenceExample proto."""
    return tf.train.FeatureList(feature=[_int64_feature(v) for v in values])
def _bytes_feature_list(values):
    """Wrapper for inserting a bytes FeatureList into a SequenceExample proto."""
    return tf.train.FeatureList(feature=[_bytes_feature(v) for v in values])
def _to_sequence_example(image, decoder, vocab):
    """Builds a SequenceExample proto for an image-caption pair.

    Args:
        image: An ImageMetadata object.
        decoder: An ImageDecoder object.
        vocab: A Vocabulary object.

    Returns:
        A SequenceExample proto, or None when the file is not valid JPEG data.
    """
    with tf.gfile.FastGFile(image.filename, "r") as f:
        encoded_image = f.read()

    try:
        # decode purely as a sanity check; only the raw bytes are stored
        decoder.decode_jpeg(encoded_image)
    except (tf.errors.InvalidArgumentError, AssertionError):
        print("Skipping file with invalid JPEG data: %s" % image.filename)
        return

    context = tf.train.Features(feature={
        "image/image_id": _int64_feature(image.image_id),
        "image/data": _bytes_feature(encoded_image),
    })

    # _process_dataset splits metadata so each entry holds exactly one caption
    assert len(image.captions) == 1
    caption = image.captions[0]
    caption_ids = [vocab.word_to_id(word) for word in caption]
    feature_lists = tf.train.FeatureLists(feature_list={
        "image/caption": _bytes_feature_list(caption),
        "image/caption_ids": _int64_feature_list(caption_ids)
    })
    sequence_example = tf.train.SequenceExample(
        context=context, feature_lists=feature_lists)

    return sequence_example
def _process_image_files(thread_index, ranges, name, images, decoder, vocab,
                         num_shards):
    """Processes and saves a subset of images as TFRecord files in one thread.

    Args:
        thread_index: Integer thread identifier within [0, len(ranges)].
        ranges: A list of pairs of integers specifying the ranges of the dataset
            to process in parallel.
        name: Unique identifier specifying the dataset.
        images: List of ImageMetadata.
        decoder: An ImageDecoder object.
        vocab: A Vocabulary object.
        num_shards: Integer number of shards for the output files.
    """
    # Each thread produces N shards where N = num_shards / num_threads. For
    # instance, if num_shards = 128, and num_threads = 2, then the first thread
    # would produce shards [0, 64).
    num_threads = len(ranges)
    assert not num_shards % num_threads
    num_shards_per_batch = int(num_shards / num_threads)

    # evenly split this thread's image range across its shards
    shard_ranges = np.linspace(ranges[thread_index][0], ranges[thread_index][1],
                               num_shards_per_batch + 1).astype(int)
    num_images_in_thread = ranges[thread_index][1] - ranges[thread_index][0]

    counter = 0
    for s in xrange(num_shards_per_batch):
        # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
        shard = thread_index * num_shards_per_batch + s
        output_filename = "%s-%.5d-of-%.5d" % (name, shard, num_shards)
        output_file = os.path.join(FLAGS.output_dir, output_filename)
        writer = tf.python_io.TFRecordWriter(output_file)

        shard_counter = 0
        images_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
        for i in images_in_shard:
            image = images[i]

            # returns None for files whose JPEG data failed to decode
            sequence_example = _to_sequence_example(image, decoder, vocab)
            if sequence_example is not None:
                writer.write(sequence_example.SerializeToString())
                shard_counter += 1
                counter += 1

            if not counter % 1000:
                print("%s [thread %d]: Processed %d of %d items in thread batch." %
                      (datetime.now(), thread_index, counter, num_images_in_thread))
                sys.stdout.flush()

        writer.close()
        print("%s [thread %d]: Wrote %d image-caption pairs to %s" %
              (datetime.now(), thread_index, shard_counter, output_file))
        sys.stdout.flush()
        shard_counter = 0
    print("%s [thread %d]: Wrote %d image-caption pairs to %d shards." %
          (datetime.now(), thread_index, counter, num_shards_per_batch))
    sys.stdout.flush()
def _process_dataset(name, images, vocab, num_shards):
    """Processes a complete data set and saves it as a TFRecord.

    Args:
        name: Unique identifier specifying the dataset.
        images: List of ImageMetadata.
        vocab: A Vocabulary object.
        num_shards: Integer number of shards for the output files.
    """
    # Break up each image into a separate entity for each caption.
    images = [ImageMetadata(image.image_id, image.filename, [caption])
              for image in images for caption in image.captions]

    # Shuffle the ordering of images. Make the randomization repeatable.
    random.seed(12345)
    random.shuffle(images)

    # Break the images into num_threads batches. Batch i is defined as
    # images[ranges[i][0]:ranges[i][1]].
    num_threads = min(num_shards, FLAGS.num_threads)
    # NOTE(review): np.int is removed in modern NumPy; plain `int` is the
    # equivalent - confirm the pinned NumPy version before upgrading.
    spacing = np.linspace(0, len(images), num_threads + 1).astype(np.int)
    ranges = []
    threads = []
    for i in xrange(len(spacing) - 1):
        ranges.append([spacing[i], spacing[i + 1]])

    # Create a mechanism for monitoring when all threads are finished.
    coord = tf.train.Coordinator()

    # Create a utility for decoding JPEG images to run sanity checks.
    decoder = ImageDecoder()

    # Launch a thread for each batch.
    print("Launching %d threads for spacings: %s" % (num_threads, ranges))
    for thread_index in xrange(len(ranges)):
        args = (thread_index, ranges, name, images, decoder, vocab, num_shards)
        t = threading.Thread(target=_process_image_files, args=args)
        t.start()
        threads.append(t)

    # Wait for all the threads to terminate.
    coord.join(threads)
    print("%s: Finished processing all %d image-caption pairs in data set '%s'." %
          (datetime.now(), len(images), name))
def _create_vocab(captions):
    """Creates the vocabulary of word to word_id.

    The vocabulary is saved to disk in a text file of word counts. The id of
    each word in the file is its corresponding 0-based line number.

    Args:
        captions: A list of lists of strings.

    Returns:
        A Vocabulary object.
    """
    print("Creating vocabulary.")
    counter = Counter()
    for c in captions:
        counter.update(c)
    print("Total words:", len(counter))

    # Filter uncommon words and sort by descending count.
    word_counts = [x for x in counter.items() if x[1] >= FLAGS.min_word_count]
    word_counts.sort(key=lambda x: x[1], reverse=True)
    print("Words in vocabulary:", len(word_counts))

    # Write out the word counts file.
    with tf.gfile.FastGFile(FLAGS.word_counts_output_file, "w") as f:
        f.write("\n".join(["%s %d" % (w, c) for w, c in word_counts]))
    print("Wrote vocabulary file:", FLAGS.word_counts_output_file)

    # Create the vocabulary dictionary.
    reverse_vocab = [x[0] for x in word_counts]
    # all words below the count threshold share this single "unknown" id
    unk_id = len(reverse_vocab)
    vocab_dict = dict([(x, y) for (y, x) in enumerate(reverse_vocab)])
    vocab = Vocabulary(vocab_dict, unk_id)

    return vocab
def _process_caption(caption):
    """Processes a caption string into a list of tokenized words.

    Args:
        caption: A string caption.

    Returns:
        A list of strings; the tokenized caption, wrapped in the special
        start/end words.
    """
    tokenized_caption = [FLAGS.start_word]
    # lowercase before tokenizing so the vocabulary is case-insensitive
    tokenized_caption.extend(nltk.tokenize.word_tokenize(caption.lower()))
    tokenized_caption.append(FLAGS.end_word)
    return tokenized_caption
def _load_and_process_metadata(captions_file, image_dir):
  """Loads image metadata from a JSON file and processes the captions.

  Args:
    captions_file: JSON file containing caption annotations.
    image_dir: Directory containing the image files.

  Returns:
    A list of ImageMetadata.
  """
  with tf.gfile.FastGFile(captions_file, "r") as f:
    caption_data = json.load(f)

  # Extract the filenames.
  id_to_filename = [(entry["id"], entry["file_name"])
                    for entry in caption_data["images"]]

  # Group the captions by image_id; each image has several captions.
  id_to_captions = {}
  for annotation in caption_data["annotations"]:
    id_to_captions.setdefault(annotation["image_id"], []).append(
        annotation["caption"])

  # Every image must have captions and every caption must have an image.
  assert len(id_to_filename) == len(id_to_captions)
  assert set([x[0] for x in id_to_filename]) == set(id_to_captions.keys())
  print("Loaded caption metadata for %d images from %s" %
        (len(id_to_filename), captions_file))

  # Tokenize the captions and bundle everything into ImageMetadata records.
  print("Processing captions.")
  image_metadata = []
  num_captions = 0
  for image_id, base_filename in id_to_filename:
    filename = os.path.join(image_dir, base_filename)
    captions = [_process_caption(c) for c in id_to_captions[image_id]]
    image_metadata.append(ImageMetadata(image_id, filename, captions))
    num_captions += len(captions)
  print("Finished processing %d captions for %d images in %s" %
        (num_captions, len(id_to_filename), captions_file))

  return image_metadata
def main(unused_argv):
  """Loads MSCOCO metadata, splits it, builds the vocab and writes shards."""
  def _is_valid_num_shards(num_shards):
    """Returns True if num_shards is compatible with FLAGS.num_threads."""
    return num_shards < FLAGS.num_threads or not num_shards % FLAGS.num_threads

  # Shards must be evenly distributable among worker threads.
  assert _is_valid_num_shards(FLAGS.train_shards), (
      "Please make the FLAGS.num_threads commensurate with FLAGS.train_shards")
  assert _is_valid_num_shards(FLAGS.val_shards), (
      "Please make the FLAGS.num_threads commensurate with FLAGS.val_shards")
  assert _is_valid_num_shards(FLAGS.test_shards), (
      "Please make the FLAGS.num_threads commensurate with FLAGS.test_shards")

  if not tf.gfile.IsDirectory(FLAGS.output_dir):
    tf.gfile.MakeDirs(FLAGS.output_dir)

  # Load image metadata from caption files.
  mscoco_train_dataset = _load_and_process_metadata(FLAGS.train_captions_file,
                                                    FLAGS.train_image_dir)
  mscoco_val_dataset = _load_and_process_metadata(FLAGS.val_captions_file,
                                                  FLAGS.val_image_dir)

  # Redistribute the MSCOCO data as follows:
  #   train_dataset = 100% of mscoco_train_dataset + 85% of mscoco_val_dataset.
  #   val_dataset = 5% of mscoco_val_dataset (for validation during training).
  #   test_dataset = 10% of mscoco_val_dataset (for final evaluation).
  train_cutoff = int(0.85 * len(mscoco_val_dataset))
  val_cutoff = int(0.90 * len(mscoco_val_dataset))
  train_dataset = mscoco_train_dataset + mscoco_val_dataset[0:train_cutoff]
  val_dataset = mscoco_val_dataset[train_cutoff:val_cutoff]
  test_dataset = mscoco_val_dataset[val_cutoff:]

  # Create vocabulary from the training captions only (not val/test).
  train_captions = [c for image in train_dataset for c in image.captions]
  vocab = _create_vocab(train_captions)

  _process_dataset("train", train_dataset, vocab, FLAGS.train_shards)
  _process_dataset("val", val_dataset, vocab, FLAGS.val_shards)
  _process_dataset("test", test_dataset, vocab, FLAGS.test_shards)
# Parse flags and dispatch to main() via the TensorFlow app runner.
if __name__ == "__main__":
  tf.app.run()
|
flterm.py | #!/usr/bin/env python3.5
# This file is Copyright (c) 2015-2020 M-Labs Limited.
# License: BSD
import sys
import os
import time
import asyncio
import asyncserial
import serial
import argparse
if sys.platform == "win32":
    import msvcrt
    import threading

    # Windows: the event loop cannot watch console input directly, so poll
    # the console from a helper thread and hand keystrokes to the loop.
    def init_getkey(callback):
        loop = asyncio.get_event_loop()
        def getkey_thread():
            while True:
                c = msvcrt.getch()
                # HACK: This may still attempt to use the loop
                # after it is closed - see comment below.
                loop.call_soon_threadsafe(callback, c)
        threading.Thread(target=getkey_thread, daemon=True).start()

    def deinit_getkey():
        # The daemon polling thread cannot be interrupted cleanly; it simply
        # dies with the process. ("Python threads suck.")
        pass
else:
    import termios

    # POSIX: switch the terminal to unbuffered, no-echo mode and let the
    # event loop watch stdin directly.
    def init_getkey(callback):
        global old_termios
        fd = sys.stdin.fileno()
        old_termios = termios.tcgetattr(fd)
        new = old_termios.copy()
        # Clear canonical mode and echo (local-mode flags are at index 3).
        new[3] = new[3] & ~termios.ICANON & ~termios.ECHO
        termios.tcsetattr(fd, termios.TCSANOW, new)
        loop = asyncio.get_event_loop()
        def callback_wrapper():
            callback(os.read(sys.stdin.fileno(), 1))
        loop.add_reader(sys.stdin.fileno(), callback_wrapper)

    def deinit_getkey():
        # Restore the terminal attributes saved by init_getkey().
        termios.tcsetattr(sys.stdin.fileno(), termios.TCSANOW, old_termios)
# Magic string the device sends to request a serial firmware download, and
# the string the host answers with to acknowledge it.
sfl_magic_req = b"sL5DdSMmkekro\n"
sfl_magic_ack = b"z6IHG7cYDID6o\n"
# General commands
sfl_cmd_abort = b"\x00"
sfl_cmd_load = b"\x01"
sfl_cmd_jump = b"\x02"
# Replies
sfl_ack_success = b"K"
sfl_ack_crcerror = b"C"
sfl_ack_unknown = b"U"
sfl_ack_error = b"E"
# 256-entry lookup table for CRC-16 with the CCITT polynomial (0x1021),
# indexed by the CRC high byte XORed with one message byte.
crc16_table = [
    0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50A5, 0x60C6, 0x70E7,
    0x8108, 0x9129, 0xA14A, 0xB16B, 0xC18C, 0xD1AD, 0xE1CE, 0xF1EF,
    0x1231, 0x0210, 0x3273, 0x2252, 0x52B5, 0x4294, 0x72F7, 0x62D6,
    0x9339, 0x8318, 0xB37B, 0xA35A, 0xD3BD, 0xC39C, 0xF3FF, 0xE3DE,
    0x2462, 0x3443, 0x0420, 0x1401, 0x64E6, 0x74C7, 0x44A4, 0x5485,
    0xA56A, 0xB54B, 0x8528, 0x9509, 0xE5EE, 0xF5CF, 0xC5AC, 0xD58D,
    0x3653, 0x2672, 0x1611, 0x0630, 0x76D7, 0x66F6, 0x5695, 0x46B4,
    0xB75B, 0xA77A, 0x9719, 0x8738, 0xF7DF, 0xE7FE, 0xD79D, 0xC7BC,
    0x48C4, 0x58E5, 0x6886, 0x78A7, 0x0840, 0x1861, 0x2802, 0x3823,
    0xC9CC, 0xD9ED, 0xE98E, 0xF9AF, 0x8948, 0x9969, 0xA90A, 0xB92B,
    0x5AF5, 0x4AD4, 0x7AB7, 0x6A96, 0x1A71, 0x0A50, 0x3A33, 0x2A12,
    0xDBFD, 0xCBDC, 0xFBBF, 0xEB9E, 0x9B79, 0x8B58, 0xBB3B, 0xAB1A,
    0x6CA6, 0x7C87, 0x4CE4, 0x5CC5, 0x2C22, 0x3C03, 0x0C60, 0x1C41,
    0xEDAE, 0xFD8F, 0xCDEC, 0xDDCD, 0xAD2A, 0xBD0B, 0x8D68, 0x9D49,
    0x7E97, 0x6EB6, 0x5ED5, 0x4EF4, 0x3E13, 0x2E32, 0x1E51, 0x0E70,
    0xFF9F, 0xEFBE, 0xDFDD, 0xCFFC, 0xBF1B, 0xAF3A, 0x9F59, 0x8F78,
    0x9188, 0x81A9, 0xB1CA, 0xA1EB, 0xD10C, 0xC12D, 0xF14E, 0xE16F,
    0x1080, 0x00A1, 0x30C2, 0x20E3, 0x5004, 0x4025, 0x7046, 0x6067,
    0x83B9, 0x9398, 0xA3FB, 0xB3DA, 0xC33D, 0xD31C, 0xE37F, 0xF35E,
    0x02B1, 0x1290, 0x22F3, 0x32D2, 0x4235, 0x5214, 0x6277, 0x7256,
    0xB5EA, 0xA5CB, 0x95A8, 0x8589, 0xF56E, 0xE54F, 0xD52C, 0xC50D,
    0x34E2, 0x24C3, 0x14A0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
    0xA7DB, 0xB7FA, 0x8799, 0x97B8, 0xE75F, 0xF77E, 0xC71D, 0xD73C,
    0x26D3, 0x36F2, 0x0691, 0x16B0, 0x6657, 0x7676, 0x4615, 0x5634,
    0xD94C, 0xC96D, 0xF90E, 0xE92F, 0x99C8, 0x89E9, 0xB98A, 0xA9AB,
    0x5844, 0x4865, 0x7806, 0x6827, 0x18C0, 0x08E1, 0x3882, 0x28A3,
    0xCB7D, 0xDB5C, 0xEB3F, 0xFB1E, 0x8BF9, 0x9BD8, 0xABBB, 0xBB9A,
    0x4A75, 0x5A54, 0x6A37, 0x7A16, 0x0AF1, 0x1AD0, 0x2AB3, 0x3A92,
    0xFD2E, 0xED0F, 0xDD6C, 0xCD4D, 0xBDAA, 0xAD8B, 0x9DE8, 0x8DC9,
    0x7C26, 0x6C07, 0x5C64, 0x4C45, 0x3CA2, 0x2C83, 0x1CE0, 0x0CC1,
    0xEF1F, 0xFF3E, 0xCF5D, 0xDF7C, 0xAF9B, 0xBFBA, 0x8FD9, 0x9FF8,
    0x6E17, 0x7E36, 0x4E55, 0x5E74, 0x2E93, 0x3EB2, 0x0ED1, 0x1EF0
]
def crc16(l):
    """Return the table-driven CRC-16 (CCITT polynomial) of byte sequence *l*."""
    checksum = 0
    for byte in l:
        table_index = ((checksum >> 8) ^ byte) & 0xff
        checksum = crc16_table[table_index] ^ (checksum << 8)
    return checksum & 0xffff
class SFLFrame:
    """One frame of the serial flash-loader (SFL) protocol."""

    def __init__(self):
        # Command byte and payload, both raw byte strings.
        self.cmd = bytes()
        self.payload = bytes()

    def compute_crc(self):
        """Return the CRC-16 over the command byte plus payload."""
        return crc16(self.cmd + self.payload)

    def encode(self):
        """Serialize the frame: length byte, big-endian CRC, command, payload."""
        header = bytes([len(self.payload)])
        header += self.compute_crc().to_bytes(2, "big")
        return header + self.cmd + self.payload
class Flterm:
    """Serial terminal implementing the serial flash-loader (SFL) protocol.

    Forwards keyboard input to the device and device output to stdout, and
    answers the device's firmware-download magic request by uploading
    kernel_image to kernel_address and jumping to it.
    """

    def __init__(self, port, speed, kernel_image, kernel_address,
                 upload_only, output_only):
        self.kernel_image = kernel_image
        self.kernel_address = kernel_address
        self.upload_only = upload_only
        self.output_only = output_only
        self.port = asyncserial.AsyncSerial(port, baudrate=speed)
        # pyserial 2.x and 3.x expose RTS control differently.
        if serial.__version__[0] == "2":
            self.port.ser.setRTS(False)
        else:
            self.port.ser.rts = False

    def init(self):
        """Install the keyboard hook (if interactive) and start the main task."""
        if not (self.upload_only or self.output_only):
            self.keyqueue = asyncio.Queue(100)
            def getkey_callback(c):
                self.keyqueue.put_nowait(c)
            init_getkey(getkey_callback)
        if self.upload_only:
            self.main_task = asyncio.ensure_future(self.upload_only_coro())
        else:
            self.main_task = asyncio.ensure_future(self.main_coro())

    async def send_frame(self, frame):
        """Send an SFL frame, retrying indefinitely on CRC errors.

        Raises ValueError on any unexpected reply from the device.
        """
        while True:
            await self.port.write_exactly(frame.encode())
            reply = await self.port.read(1)
            if reply == sfl_ack_success:
                return
            elif reply == sfl_ack_crcerror:
                pass  # retry
            else:
                print("[FLTERM] Got unknown reply '{}' from the device, aborting.".format(reply))
                raise ValueError

    async def upload(self, filename, address):
        """Upload the contents of *filename* to the device at *address*.

        Returns the number of bytes uploaded, or None if the device aborted
        the transfer with an unexpected reply.
        """
        with open(filename, "rb") as f:
            data = f.read()
        print("[FLTERM] Uploading {} ({} bytes)...".format(filename, len(data)))
        current_address = address
        position = 0
        length = len(data)
        start = time.time()
        while data:
            sys.stdout.write("|{}>{}| {}%\r".format('=' * (20*position//length),
                                                    ' ' * (20-20*position//length),
                                                    100*position//length))
            sys.stdout.flush()
            frame = SFLFrame()
            # SFL payloads hold 4 address bytes plus at most 251 data bytes.
            frame_data = data[:251]
            frame.cmd = sfl_cmd_load
            frame.payload = current_address.to_bytes(4, "big")
            frame.payload += frame_data
            try:
                await self.send_frame(frame)
            except ValueError:
                return
            current_address += len(frame_data)
            position += len(frame_data)
            # Slicing past the end simply yields b"", which ends the loop.
            # (The previous bare try/except around this line could never
            # trigger: slicing bytes never raises.)
            data = data[251:]
        end = time.time()
        elapsed = end - start
        print("[FLTERM] Upload complete ({0:.1f}KB/s).".format(length/(elapsed*1024)))
        return length

    async def boot(self):
        """Tell the device to jump to the configured kernel address."""
        print("[FLTERM] Booting the device.")
        frame = SFLFrame()
        frame.cmd = sfl_cmd_jump
        frame.payload = self.kernel_address.to_bytes(4, "big")
        await self.send_frame(frame)

    async def answer_magic(self):
        """Handle the device's firmware download request: upload, then boot."""
        print("[FLTERM] Received firmware download request from the device.")
        await self.port.write_exactly(sfl_magic_ack)
        try:
            await self.upload(self.kernel_image, self.kernel_address)
        except FileNotFoundError:
            print("[FLTERM] File not found")
        else:
            await self.boot()
        print("[FLTERM] Done.")

    async def main_coro(self):
        """Interactive loop: shuttle bytes between the port and the console,
        watching the device output for the SFL magic request."""
        magic_detect_buffer = b"\x00"*len(sfl_magic_req)
        port_reader = None
        key_getter = None
        while True:
            if port_reader is None:
                port_reader = asyncio.ensure_future(self.port.read(1024))
            fs = [port_reader]
            if not self.output_only:
                if key_getter is None:
                    key_getter = asyncio.ensure_future(self.keyqueue.get())
                fs += [key_getter]
            try:
                done, pending = await asyncio.wait(
                    fs, return_when=asyncio.FIRST_COMPLETED)
            except asyncio.CancelledError:
                # Tear down our child tasks before propagating cancellation.
                for f in fs:
                    f.cancel()
                    try:
                        await f
                    except asyncio.CancelledError:
                        pass
                raise
            if port_reader in done:
                data = port_reader.result()
                port_reader = None
                sys.stdout.buffer.write(data)
                sys.stdout.flush()
                if self.kernel_image is not None:
                    # Scan the byte stream for the SFL magic request.
                    for c in data:
                        magic_detect_buffer = magic_detect_buffer[1:] + bytes([c])
                        if magic_detect_buffer == sfl_magic_req:
                            await self.answer_magic()
                            break
            if key_getter in done:
                await self.port.write(key_getter.result())
                key_getter = None

    async def upload_only_coro(self):
        """Wait for the magic request, perform one upload+boot, then return."""
        magic_detect_buffer = b"\x00"*len(sfl_magic_req)
        while True:
            data = await self.port.read(1024)
            sys.stdout.buffer.write(data)
            sys.stdout.flush()
            for c in data:
                magic_detect_buffer = magic_detect_buffer[1:] + bytes([c])
                if magic_detect_buffer == sfl_magic_req:
                    await self.answer_magic()
                    return

    async def close(self):
        """Cancel the main task, restore the terminal and close the port."""
        if not (self.upload_only or self.output_only):
            deinit_getkey()
        self.main_task.cancel()
        try:
            await self.main_task
        except asyncio.CancelledError:
            pass
        finally:
            self.port.close()
def _get_args():
parser = argparse.ArgumentParser()
parser.add_argument("port", help="serial port")
parser.add_argument("--speed", default=115200, help="serial baudrate")
parser.add_argument("--kernel", default=None, help="kernel image")
parser.add_argument("--kernel-addr", type=lambda a: int(a, 0),
default=0x40000000, help="kernel address")
parser.add_argument("--upload-only", default=False, action="store_true",
help="only upload kernel")
parser.add_argument("--output-only", default=False, action="store_true",
help="do not receive keyboard input or require a pty")
return parser.parse_args()
def main():
    """Entry point: parse arguments, run the terminal, then clean up the loop."""
    if os.name == "nt":
        # The Proactor event loop is required for pipe/serial I/O on Windows.
        loop = asyncio.ProactorEventLoop()
        asyncio.set_event_loop(loop)
    else:
        loop = asyncio.get_event_loop()
    args = _get_args()
    flterm = Flterm(args.port, args.speed, args.kernel, args.kernel_addr,
                    args.upload_only, args.output_only)
    try:
        flterm.init()
        loop.run_until_complete(flterm.main_task)
    except KeyboardInterrupt:
        pass
    finally:
        # Always cancel the task, restore the tty and close the serial port
        # before closing the loop.
        loop.run_until_complete(flterm.close())
        loop.close()

if __name__ == "__main__":
    main()
|
utils.py | #
# Copyright (c) 2016 Christoph Heiss <me@christoph-heiss.me>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import threading
import hashlib
from functools import partial
def create_thread(target, args=(), name=None):
    """Start *target* in a daemon thread and return the Thread object.

    Args:
        target: callable to run in the new thread.
        args: positional arguments passed to the callable.
        name: optional thread name (default name kept when falsy).
    """
    worker = threading.Thread(target=target, args=args)
    if name:
        worker.name = name
    worker.daemon = True
    worker.start()
    return worker
def read_file_seq(path, block_size):
    """Yield the contents of *path* in chunks of at most *block_size* bytes."""
    with open(path, 'rb') as f:
        while True:
            chunk = f.read(block_size)
            if chunk == b'':
                break
            yield chunk
def hash_file(path):
    """Return an MD5 hex digest over the file's path string plus its contents.

    Note that the path itself is folded into the digest, so identical files
    at different paths produce different hashes.
    """
    digest = hashlib.md5()
    digest.update(path.encode('utf-8'))
    for chunk in read_file_seq(path, 4096):
        digest.update(chunk)
    return digest.hexdigest()
|
runner.py | #!/usr/bin/env python3
# Copyright 2010 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
"""This is the Emscripten test runner. To run some tests, specify which tests
you want, for example
tests/runner asm1.test_hello_world
There are many options for which tests to run and how to run them. For details,
see
http://kripken.github.io/emscripten-site/docs/getting_started/test-suite.html
"""
# XXX Use EMTEST_ALL_ENGINES=1 in the env to test all engines!
from enum import Enum
from functools import wraps
from subprocess import PIPE, STDOUT
import argparse
import atexit
import contextlib
import difflib
import fnmatch
import glob
import hashlib
import json
import logging
import math
import multiprocessing
import operator
import os
import random
import shlex
import shutil
import string
import subprocess
import stat
import sys
import tempfile
import time
import unittest
import webbrowser
from http.server import HTTPServer, SimpleHTTPRequestHandler
from urllib.parse import unquote, unquote_plus
# Setup
__rootpath__ = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(__rootpath__)
import clang_native
import jsrun
import parallel_testsuite
from jsrun import NON_ZERO
from tools.shared import TEMP_DIR, EMCC, EMXX, DEBUG, EMCONFIGURE, EMCMAKE
from tools.shared import EMSCRIPTEN_TEMP_DIR
from tools.shared import EM_BUILD_VERBOSE
from tools.shared import get_canonical_temp_dir, try_delete
from tools.utils import MACOS, WINDOWS
from tools import shared, line_endings, building, config
def path_from_root(*pathelems):
  """Return *pathelems* joined under the emscripten root directory."""
  parts = (__rootpath__,) + pathelems
  return os.path.join(*parts)
# websockify is used by the browser websocket tests.
sys.path.append(path_from_root('third_party/websockify'))

logger = logging.getLogger("runner")

# User can specify an environment variable EMTEST_BROWSER to force the browser
# test suite to run using another browser command line than the default system
# browser. Setting '0' as the browser disables running a browser (but we still
# see tests compile)
EMTEST_BROWSER = os.getenv('EMTEST_BROWSER')
# When set, snapshot the temp dir around each test and fail on leftovers.
EMTEST_DETECT_TEMPFILE_LEAKS = int(os.getenv('EMTEST_DETECT_TEMPFILE_LEAKS', '0'))
# TODO(sbc): Remove this check for the legacy name once its been around for a while.
assert 'EM_SAVE_DIR' not in os.environ, "Please use EMTEST_SAVE_DIR instead of EM_SAVE_DIR"
# When set, keep each test's output directory instead of a fresh temp dir.
EMTEST_SAVE_DIR = int(os.getenv('EMTEST_SAVE_DIR', '0'))
# generally js engines are equivalent, testing 1 is enough. set this
# to force testing on all js engines, good to find js engine bugs
EMTEST_ALL_ENGINES = os.getenv('EMTEST_ALL_ENGINES')
EMTEST_SKIP_SLOW = os.getenv('EMTEST_SKIP_SLOW')
EMTEST_LACKS_NATIVE_CLANG = os.getenv('EMTEST_LACKS_NATIVE_CLANG')
# Verbose runner output; also implied by shared.DEBUG.
EMTEST_VERBOSE = int(os.getenv('EMTEST_VERBOSE', '0')) or shared.DEBUG
TEST_ROOT = path_from_root('tests')
WEBIDL_BINDER = shared.bat_suffix(path_from_root('tools', 'webidl_binder'))
if EMTEST_VERBOSE:
  logging.root.setLevel(logging.DEBUG)
def delete_contents(pathname):
  """Remove every entry inside directory *pathname* (the directory remains)."""
  entries = os.listdir(pathname)
  for entry in entries:
    full_path = os.path.join(pathname, entry)
    try_delete(full_path)
def test_file(*path_components):
  """Construct a path relative to the emscripten "tests" directory."""
  full = (TEST_ROOT,) + path_components
  return os.path.join(*full)
def has_browser():
  """Return whether browser testing is enabled (EMTEST_BROWSER set to '0' disables it)."""
  disabled_marker = '0'
  return EMTEST_BROWSER != disabled_marker
# Generic decorator that calls a function named 'condition' on the test class and
# skips the test if that function returns true
def skip_if(func, condition, explanation='', negate=False):
  """Skip the test when self.<condition>() is truthy (or falsy, if negate).

  Args:
    func: the test method being decorated.
    condition: name of a predicate method looked up on the test instance.
    explanation: optional text appended to the skip reason.
    negate: invert the predicate's result.
  """
  assert callable(func)
  explanation_str = ' : %s' % explanation if explanation else ''

  @wraps(func)
  def decorated(self, *args, **kwargs):
    # getattr() is the idiomatic (and equivalent) form of self.__getattribute__().
    choice = getattr(self, condition)()
    if negate:
      choice = not choice
    if choice:
      self.skipTest(condition + explanation_str)
    func(self, *args, **kwargs)

  return decorated
def needs_dylink(func):
  """Decorator: verify dynamic-linking support on the instance, then run the test."""
  assert callable(func)

  @wraps(func)
  def wrapper(self):
    self.check_dylink()
    return func(self)

  return wrapper
def is_slow_test(func):
  """Decorator: skip the test entirely when EMTEST_SKIP_SLOW is set."""
  assert callable(func)

  @wraps(func)
  def wrapper(self, *args, **kwargs):
    if EMTEST_SKIP_SLOW:
      return self.skipTest('skipping slow tests')
    return func(self, *args, **kwargs)

  return wrapper
def disabled(note=''):
  """Unconditionally skip the decorated test, with an optional explanatory note."""
  assert not callable(note)
  return unittest.skip(note)
def no_mac(note=''):
  """Skip the decorated test on macOS; leave it untouched elsewhere."""
  assert not callable(note)
  if not MACOS:
    return lambda f: f
  return unittest.skip(note)
def no_windows(note=''):
  """Skip the decorated test on Windows; leave it untouched elsewhere."""
  assert not callable(note)
  if not WINDOWS:
    return lambda f: f
  return unittest.skip(note)
def requires_native_clang(func):
assert callable(func)
def decorated(self, *args, **kwargs):
if EMTEST_LACKS_NATIVE_CLANG:
return self.skipTest('native clang tests are disabled')
return func(self, *args, **kwargs)
return decorated
def node_pthreads(f):
def decorated(self):
self.set_setting('USE_PTHREADS')
self.emcc_args += ['-Wno-pthreads-mem-growth']
if self.get_setting('MINIMAL_RUNTIME'):
self.skipTest('node pthreads not yet supported with MINIMAL_RUNTIME')
self.js_engines = [config.NODE_JS]
self.node_args += ['--experimental-wasm-threads', '--experimental-wasm-bulk-memory']
f(self)
return decorated
@contextlib.contextmanager
def env_modify(updates):
  """A context manager that updates os.environ, restoring it on exit.

  A value of None in *updates* means the variable is removed entirely.
  """
  # mock.patch.dict() could do this too, but it's not worth the dependency.
  saved_environ = os.environ.copy()
  print("env_modify: " + str(updates))
  to_clear = [key for key, value in updates.items() if value is None]
  to_set = {key: value for key, value in updates.items() if value is not None}
  os.environ.update(to_set)
  for key in to_clear:
    os.environ.pop(key, None)
  try:
    yield
  finally:
    os.environ.clear()
    os.environ.update(saved_environ)
# Decorator version of env_modify
def with_env_modify(updates):
def decorated(f):
def modified(self):
with env_modify(updates):
return f(self)
return modified
return decorated
def ensure_dir(dirname):
  """Create *dirname* (and any missing parents) if it does not already exist.

  exist_ok avoids the check-then-create race of the old isdir() test.
  """
  os.makedirs(dirname, exist_ok=True)
def limit_size(string, maxbytes=800000 * 20, maxlines=100000, max_line=5000):
  """Truncate over-long lines, excess line counts and total size of *string*.

  The result always ends with a newline; elided content is marked '[..]'.
  """
  clipped = [line if len(line) <= max_line else line[:max_line] + '[..]'
             for line in string.splitlines()]
  if len(clipped) > maxlines:
    clipped = clipped[0:maxlines // 2] + ['[..]'] + clipped[-maxlines // 2:]
  result = '\n'.join(clipped) + '\n'
  if len(result) > maxbytes:
    result = result[0:maxbytes // 2] + '\n[..]\n' + result[-maxbytes // 2:]
  return result
def create_file(name, contents, binary=False):
  """Write *contents* to the relative path *name* (text mode unless binary)."""
  assert not os.path.isabs(name)
  if binary:
    with open(name, 'wb') as f:
      f.write(contents)
  else:
    with open(name, 'w') as f:
      f.write(contents)
def make_executable(name):
  """chmod *name* to owner read/write/execute (0o700)."""
  owner_rwx = stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC
  os.chmod(name, owner_rwx)
# The core test modes
# (each name selects a preset combination of wasm/wasm2js output and
# optimization level used when running the core test suite)
core_test_modes = [
  'wasm0',
  'wasm1',
  'wasm2',
  'wasm3',
  'wasms',
  'wasmz',
  'strict',
  'wasm2js0',
  'wasm2js1',
  'wasm2js2',
  'wasm2js3',
  'wasm2jss',
  'wasm2jsz',
]

# The default core test mode, used when none is specified
default_core_test_mode = 'wasm0'

# The non-core test modes
non_core_test_modes = [
  'other',
  'browser',
  'sanity',
  'sockets',
  'interactive',
  'benchmark',
  'asan',
  'lsan',
  'wasm2ss',
  'posixtest',
  'posixtest_browser',
]
def parameterized(parameters):
  """
  Mark a test as parameterized.

  Usage:
    @parameterized({
      'subtest1': (1, 2, 3),
      'subtest2': (4, 5, 6),
    })
    def test_something(self, a, b, c):
      ... # actual test body

  This is equivalent to defining two tests:

    def test_something_subtest1(self):
      # runs test_something(1, 2, 3)

    def test_something_subtest2(self):
      # runs test_something(4, 5, 6)
  """
  def tag(func):
    # RunnerMeta looks for this attribute and expands the test.
    setattr(func, '_parameterize', parameters)
    return func
  return tag
class RunnerMeta(type):
@classmethod
def make_test(mcs, name, func, suffix, args):
"""
This is a helper function to create new test functions for each parameterized form.
:param name: the original name of the function
:param func: the original function that we are parameterizing
:param suffix: the suffix to append to the name of the function for this parameterization
:param args: the positional arguments to pass to the original function for this parameterization
:returns: a tuple of (new_function_name, new_function_object)
"""
# Create the new test function. It calls the original function with the specified args.
# We use @functools.wraps to copy over all the function attributes.
@wraps(func)
def resulting_test(self):
return func(self, *args)
# Add suffix to the function name so that it displays correctly.
if suffix:
resulting_test.__name__ = f'{name}_{suffix}'
else:
resulting_test.__name__ = name
# On python 3, functions have __qualname__ as well. This is a full dot-separated path to the
# function. We add the suffix to it as well.
resulting_test.__qualname__ = f'{func.__qualname__}_{suffix}'
return resulting_test.__name__, resulting_test
def __new__(mcs, name, bases, attrs):
# This metaclass expands parameterized methods from `attrs` into separate ones in `new_attrs`.
new_attrs = {}
for attr_name, value in attrs.items():
# Check if a member of the new class has _parameterize, the tag inserted by @parameterized.
if hasattr(value, '_parameterize'):
# If it does, we extract the parameterization information, build new test functions.
for suffix, args in value._parameterize.items():
new_name, func = mcs.make_test(attr_name, value, suffix, args)
assert new_name not in new_attrs, 'Duplicate attribute name generated when parameterizing %s' % attr_name
new_attrs[new_name] = func
else:
# If not, we just copy it over to new_attrs verbatim.
assert attr_name not in new_attrs, '%s collided with an attribute from parameterization' % attr_name
new_attrs[attr_name] = value
# We invoke type, the default metaclass, to actually create the new class, with new_attrs.
return type.__new__(mcs, name, bases, new_attrs)
class RunnerCore(unittest.TestCase, metaclass=RunnerMeta):
# default temporary directory settings. set_temp_dir may be called later to
# override these
temp_dir = TEMP_DIR
canonical_temp_dir = get_canonical_temp_dir(TEMP_DIR)
# This avoids cluttering the test runner output, which is stderr too, with compiler warnings etc.
# Change this to None to get stderr reporting, for debugging purposes
stderr_redirect = STDOUT
  def is_wasm(self):
    """Return True unless the WASM setting was explicitly set to 0 (wasm2js)."""
    return self.get_setting('WASM') != 0

  def check_dylink(self):
    """Skip the current test on configurations without dynamic-linking support."""
    if self.get_setting('ALLOW_MEMORY_GROWTH') == 1 and not self.is_wasm():
      self.skipTest('no dynamic linking with memory growth (without wasm)')
    if not self.is_wasm():
      self.skipTest('no dynamic linking support in wasm2js yet')
    if '-fsanitize=address' in self.emcc_args:
      self.skipTest('no dynamic linking support in ASan yet')
    if '-fsanitize=leak' in self.emcc_args:
      self.skipTest('no dynamic linking support in LSan yet')

  def uses_memory_init_file(self):
    """Return whether the current build produces a separate memory init file.

    Side modules handle memory differently, and for wasm output binaryen
    puts the memory into the wasm module itself, so neither case uses one.
    """
    if self.get_setting('SIDE_MODULE') or (self.is_wasm() and not self.get_setting('WASM2JS')):
      return False
    elif '--memory-init-file' in self.emcc_args:
      # Explicit flag: honor the 0/1 value that follows it.
      return int(self.emcc_args[self.emcc_args.index('--memory-init-file') + 1])
    else:
      # Otherwise optimized builds emit a memory init file by default.
      opt_supports = any(opt in self.emcc_args for opt in ('-O2', '-O3', '-Os', '-Oz'))
      return opt_supports
  def set_temp_dir(self, temp_dir):
    """Use a dedicated temp dir (and EMCC_TEMP_DIR) for this test instance."""
    self.temp_dir = temp_dir
    self.canonical_temp_dir = get_canonical_temp_dir(self.temp_dir)
    # Explicitly set dedicated temporary directory for parallel tests
    os.environ['EMCC_TEMP_DIR'] = self.temp_dir

  @classmethod
  def setUpClass(cls):
    """Verify the emscripten installation once before any test in the class."""
    super().setUpClass()
    print('(checking sanity from test runner)') # do this after we set env stuff
    shared.check_sanity(force=True)
  def setUp(self):
    """Reset per-test state and create (or clean) the working directory."""
    super().setUp()
    # Per-test emcc settings (-s flags) and extra command-line arguments.
    self.settings_mods = {}
    self.emcc_args = ['-Werror']
    self.node_args = []
    self.v8_args = []
    self.env = {}
    self.temp_files_before_run = []
    self.uses_es6 = False
    self.js_engines = config.JS_ENGINES.copy()
    self.wasm_engines = config.WASM_ENGINES.copy()
    self.banned_js_engines = []
    self.use_all_engines = EMTEST_ALL_ENGINES
    if EMTEST_DETECT_TEMPFILE_LEAKS:
      # Snapshot the temp dir so tearDown can detect leaked files.
      for root, dirnames, filenames in os.walk(self.temp_dir):
        for dirname in dirnames:
          self.temp_files_before_run.append(os.path.normpath(os.path.join(root, dirname)))
        for filename in filenames:
          self.temp_files_before_run.append(os.path.normpath(os.path.join(root, filename)))
    if EMTEST_SAVE_DIR:
      self.working_dir = os.path.join(self.temp_dir, 'emscripten_test')
      if os.path.exists(self.working_dir):
        if EMTEST_SAVE_DIR == 2:
          print('Not clearing existing test directory')
        else:
          print('Clearing existing test directory')
          # Even when EMTEST_SAVE_DIR we still try to start with an empty directoy as many tests
          # expect this. EMTEST_SAVE_DIR=2 can be used to keep the old contents for the new test
          # run. This can be useful when iterating on a given test with extra files you want to keep
          # around in the output directory.
          delete_contents(self.working_dir)
      else:
        print('Creating new test output directory')
        ensure_dir(self.working_dir)
    else:
      # Default: a fresh, uniquely-named temp dir per test.
      self.working_dir = tempfile.mkdtemp(prefix='emscripten_test_' + self.__class__.__name__ + '_', dir=self.temp_dir)
    os.chdir(self.working_dir)
    if not EMTEST_SAVE_DIR:
      # Record whether any .ll files already exist in the shared temp dir.
      self.has_prev_ll = False
      for temp_file in os.listdir(TEMP_DIR):
        if temp_file.endswith('.ll'):
          self.has_prev_ll = True
  def tearDown(self):
    """Clean up the working directory and optionally check for temp-file leaks."""
    if not EMTEST_SAVE_DIR:
      # rmtree() fails on Windows if the current working directory is inside the tree.
      os.chdir(os.path.dirname(self.get_dir()))
      try_delete(self.get_dir())
    if EMTEST_DETECT_TEMPFILE_LEAKS and not DEBUG:
      # Re-scan the temp dir and compare against the setUp() snapshot.
      temp_files_after_run = []
      for root, dirnames, filenames in os.walk(self.temp_dir):
        for dirname in dirnames:
          temp_files_after_run.append(os.path.normpath(os.path.join(root, dirname)))
        for filename in filenames:
          temp_files_after_run.append(os.path.normpath(os.path.join(root, filename)))
      # Our leak detection will pick up *any* new temp files in the temp dir.
      # They may not be due to us, but e.g. the browser when running browser
      # tests. Until we figure out a proper solution, ignore some temp file
      # names that we see on our CI infrastructure.
      ignorable_file_prefixes = [
        '/tmp/tmpaddon',
        '/tmp/circleci-no-output-timeout',
        '/tmp/wasmer'
      ]
      left_over_files = set(temp_files_after_run) - set(self.temp_files_before_run)
      left_over_files = [f for f in left_over_files if not any([f.startswith(prefix) for prefix in ignorable_file_prefixes])]
      if len(left_over_files):
        print('ERROR: After running test, there are ' + str(len(left_over_files)) + ' new temporary files/directories left behind:', file=sys.stderr)
        for f in left_over_files:
          print('leaked file: ' + f, file=sys.stderr)
        self.fail('Test leaked ' + str(len(left_over_files)) + ' temporary files!')
  def get_setting(self, key, default=None):
    """Return the value of an emcc setting modified by this test, or *default*."""
    return self.settings_mods.get(key, default)
def set_setting(self, key, value=1):
if value is None:
self.clear_setting(key)
self.settings_mods[key] = value
  def has_changed_setting(self, key):
    """Return whether this test has explicitly modified setting *key*."""
    return key in self.settings_mods

  def clear_setting(self, key):
    """Remove setting *key* if present; harmless when the key is unset."""
    self.settings_mods.pop(key, None)
def serialize_settings(self):
ret = []
for key, value in self.settings_mods.items():
if value == 1:
ret += ['-s', key]
elif type(value) == str:
ret += ['-s', f'{key}={value}']
else:
ret += ['-s', f'{key}={json.dumps(value)}']
return ret
  def get_dir(self):
    """Return this test's working directory."""
    return self.working_dir

  def in_dir(self, *pathelems):
    """Return a path inside this test's working directory."""
    return os.path.join(self.get_dir(), *pathelems)

  def add_pre_run(self, code):
    """Inject JS *code* to run via Module.preRun."""
    create_file('prerun.js', 'Module.preRun = function() { %s }' % code)
    self.emcc_args += ['--pre-js', 'prerun.js']

  def add_post_run(self, code):
    """Inject JS *code* to run via Module.postRun.

    Note the snippet only *assigns* Module.postRun, so it is correctly
    installed with --pre-js (i.e. before the runtime starts).
    """
    create_file('postrun.js', 'Module.postRun = function() { %s }' % code)
    self.emcc_args += ['--pre-js', 'postrun.js']

  def add_on_exit(self, code):
    """Inject JS *code* to run via Module.onExit."""
    create_file('onexit.js', 'Module.onExit = function() { %s }' % code)
    self.emcc_args += ['--pre-js', 'onexit.js']
  # returns the full list of arguments to pass to emcc
  # param @main_file whether this is the main file of the test. some arguments
  # (like --pre-js) do not need to be passed when building
  # libraries, for example
  def get_emcc_args(self, main_file=False):
    """Return serialized settings plus emcc_args, filtered for non-main files."""
    args = self.serialize_settings() + self.emcc_args
    if not main_file:
      for i, arg in enumerate(args):
        if arg in ('--pre-js', '--post-js'):
          # Mark the flag and its filename argument for removal.
          args[i] = None
          args[i + 1] = None
      args = [arg for arg in args if arg is not None]
    return args
  def verify_es5(self, filename):
    """Fail the test unless *filename* is valid ES5 (checked with es-check)."""
    es_check = shared.get_npm_cmd('es-check')
    # use --quiet once its available
    # See: https://github.com/dollarshaveclub/es-check/pull/126/
    es_check_env = os.environ.copy()
    # Make sure the configured node binary is the one es-check finds first.
    es_check_env['PATH'] = os.path.dirname(config.NODE_JS[0]) + os.pathsep + es_check_env['PATH']
    try:
      shared.run_process(es_check + ['es5', os.path.abspath(filename)], stderr=PIPE, env=es_check_env)
    except subprocess.CalledProcessError as e:
      print(e.stderr)
      self.fail('es-check failed to verify ES5 output compliance')
# Build JavaScript code from source code
def build(self, filename, libraries=[], includes=[], force_c=False,
post_build=None, js_outfile=True):
suffix = '.js' if js_outfile else '.wasm'
if shared.suffix(filename) in ('.cc', '.cxx', '.cpp') and not force_c:
compiler = [EMXX]
else:
# TODO(https://github.com/emscripten-core/emscripten/issues/11121)
# We link with C++ stdlibs, even when linking with emcc for historical reasons. We can remove
# this if this issues is fixed.
compiler = [EMCC, '-nostdlib++']
if force_c:
compiler.append('-xc')
dirname, basename = os.path.split(filename)
output = shared.unsuffixed(basename) + suffix
cmd = compiler + [filename, '-o', output] + self.get_emcc_args(main_file=True) + libraries
if shared.suffix(filename) not in ('.i', '.ii'):
# Add the location of the test file to include path.
cmd += ['-I.']
cmd += ['-I' + include for include in includes]
self.run_process(cmd, stderr=self.stderr_redirect if not DEBUG else None)
self.assertExists(output)
if js_outfile and not self.uses_es6:
self.verify_es5(output)
if post_build:
post_build(output)
if js_outfile and self.uses_memory_init_file():
src = open(output).read()
# side memory init file, or an empty one in the js
assert ('/* memory initializer */' not in src) or ('/* memory initializer */ allocate([]' in src)
return output
def get_func(self, src, name):
start = src.index('function ' + name + '(')
t = start
n = 0
while True:
if src[t] == '{':
n += 1
elif src[t] == '}':
n -= 1
if n == 0:
return src[start:t + 1]
t += 1
assert t < len(src)
def count_funcs(self, javascript_file):
    """Count 'function ' occurrences between the EMSCRIPTEN function markers."""
    start_tok = "// EMSCRIPTEN_START_FUNCS"
    end_tok = "// EMSCRIPTEN_END_FUNCS"
    with open(javascript_file, 'rt') as f:
        contents = f.read()
    # Slice out the region between the two markers and count declarations.
    begin = contents.find(start_tok) + len(start_tok)
    end = contents.find(end_tok)
    return contents[begin:end].count('function ')
def count_wasm_contents(self, wasm_binary, what):
    """Return the count reported by binaryen's `wasm-opt --metrics` for `what`."""
    wasm_opt = os.path.join(building.get_binaryen_bin(), 'wasm-opt')
    metrics = self.run_process([wasm_opt, wasm_binary, '--metrics'], stdout=PIPE).stdout
    # output is something like
    #   [?] : 125
    needle = '[' + what + ']'
    for line in metrics.splitlines():
        if needle in line:
            return int(line.split(':')[1].strip())
    self.fail('Failed to find [%s] in wasm-opt output' % what)
def get_wasm_text(self, wasm_binary):
    """Disassemble `wasm_binary` to text via binaryen's wasm-dis."""
    wasm_dis = os.path.join(building.get_binaryen_bin(), 'wasm-dis')
    return self.run_process([wasm_dis, wasm_binary], stdout=PIPE).stdout
def is_exported_in_wasm(self, name, wasm):
    """Check whether `name` appears as an export in the given wasm file."""
    needle = '(export "%s"' % name
    return needle in self.get_wasm_text(wasm)
def run_js(self, filename, engine=None, args=None, output_nicerizer=None, assert_returncode=0):
    """Run `filename` under a JS engine and return its combined output.

    Args:
      filename: the .js (or .wasm) file to execute.
      engine: JS engine command line; defaults to the first configured engine.
      args: extra command-line arguments for the program.
      output_nicerizer: optional callable (stdout, stderr) -> str used to
        post-process the output before returning it.
      assert_returncode: expected process return code (or NON_ZERO).
    """
    # Avoid a mutable default argument.
    if args is None:
        args = []
    # use files, as PIPE can get too full and hang us
    stdout = self.in_dir('stdout')
    stderr = self.in_dir('stderr')
    error = None
    if not engine:
        engine = config.JS_ENGINES[0]
    if engine == config.NODE_JS:
        engine = engine + self.node_args
    if engine == config.V8_ENGINE:
        engine = engine + self.v8_args
    if EMTEST_VERBOSE:
        print(f"Running '{filename}' under '{shared.shlex_join(engine)}'")
    try:
        # Use context managers so the handles are closed even on error
        # (they were previously leaked).
        with open(stdout, 'w') as stdout_file, open(stderr, 'w') as stderr_file:
            jsrun.run_js(filename, engine, args,
                         stdout=stdout_file,
                         stderr=stderr_file,
                         assert_returncode=assert_returncode)
    except subprocess.CalledProcessError as e:
        error = e
    # Make sure that we produced proper line endings to the .js file we are about to run.
    if not filename.endswith('.wasm'):
        self.assertEqual(line_endings.check_line_endings(filename), 0)
    with open(stdout, 'r') as f:
        out = f.read()
    with open(stderr, 'r') as f:
        err = f.read()
    if output_nicerizer:
        ret = output_nicerizer(out, err)
    else:
        ret = out + err
    if error or EMTEST_VERBOSE:
        ret = limit_size(ret)
        print('-- begin program output --')
        print(ret, end='')
        print('-- end program output --')
    if error:
        if assert_returncode == NON_ZERO:
            self.fail('JS subprocess unexpectedly succeeded (%s): Output:\n%s' % (error.cmd, ret))
        else:
            self.fail('JS subprocess failed (%s): %s. Output:\n%s' % (error.cmd, error.returncode, ret))
    # We should pass all strict mode checks
    self.assertNotContained('strict warning:', ret)
    return ret
def assertExists(self, filename, msg=None):
    """Assert that `filename` exists on disk."""
    message = msg or ('Expected file not found: ' + filename)
    self.assertTrue(os.path.exists(filename), message)
def assertNotExists(self, filename, msg=None):
    """Assert that `filename` does NOT exist on disk."""
    message = msg or ('Unexpected file exists: ' + filename)
    self.assertFalse(os.path.exists(filename), message)
# Tests that the given two paths are identical, modulo path delimiters. E.g. "C:/foo" is equal to "C:\foo".
def assertPathsIdentical(self, path1, path2):
    """Compare two paths, treating '\\' and '/' as equivalent separators."""
    normalized1 = path1.replace('\\', '/')
    normalized2 = path2.replace('\\', '/')
    return self.assertIdentical(normalized1, normalized2)
# Tests that the given two multiline text content are identical, modulo line
# ending differences (\r\n on Windows, \n on Unix).
def assertTextDataIdentical(self, text1, text2, msg=None,
                            fromfile='expected', tofile='actual'):
    """Compare two text blobs after normalizing Windows line endings."""
    unix1 = text1.replace('\r\n', '\n')
    unix2 = text2.replace('\r\n', '\n')
    return self.assertIdentical(unix1, unix2, msg, fromfile, tofile)
def assertIdentical(self, values, y, msg=None,
                    fromfile='expected', tofile='actual'):
    """Assert that `y` equals at least one of `values`.

    `values` may be a single value or a list/tuple of acceptable values.
    On mismatch, fails with a unified diff against the last candidate.
    """
    if type(values) not in (list, tuple):
        values = [values]
    for candidate in values:
        if candidate == y:
            return  # success
    # No candidate matched; diff against the last one tried.
    diff_lines = difflib.unified_diff(candidate.splitlines(), y.splitlines(),
                                      fromfile=fromfile, tofile=tofile)
    diff = ''.join(line.rstrip() + '\n' for line in diff_lines)
    if EMTEST_VERBOSE:
        print("Expected to have '%s' == '%s'" % (limit_size(values[0]), limit_size(y)))
    fail_message = 'Unexpected difference:\n' + limit_size(diff)
    if not EMTEST_VERBOSE:
        fail_message += '\nFor full output run with EMTEST_VERBOSE=1.'
    if msg:
        fail_message += '\n' + msg
    self.fail(fail_message)
def assertTextDataContained(self, text1, text2):
    """Like assertContained, but normalizes Windows line endings first."""
    needle = text1.replace('\r\n', '\n')
    haystack = text2.replace('\r\n', '\n')
    return self.assertContained(needle, haystack)
def assertContained(self, values, string, additional_info=''):
    """Assert that at least one of `values` is a substring of `string`.

    `values` may be a single value or a list/tuple; `string` may be a
    callable returning the text to search (evaluated lazily).
    """
    if type(values) not in [list, tuple]:
        values = [values]
    if callable(string):
        string = string()
    if any(v in string for v in values):
        return
    # Nothing matched: show a unified diff against the first expectation.
    diff_lines = difflib.unified_diff(values[0].split('\n'), string.split('\n'),
                                      fromfile='expected', tofile='actual')
    diff = ''.join(line.rstrip() + '\n' for line in diff_lines)
    self.fail("Expected to find '%s' in '%s', diff:\n\n%s\n%s" % (
        limit_size(values[0]), limit_size(string), limit_size(diff),
        additional_info
    ))
def assertNotContained(self, value, string):
    """Assert that `value` is NOT a substring of `string`.

    Both arguments may be callables returning the actual text (lazy loading).
    """
    if callable(value):
        value = value()  # lazy loading
    if callable(string):
        string = string()
    if value not in string:
        return
    # The forbidden text was present: fail with a unified diff.
    diff_lines = difflib.unified_diff(value.split('\n'), string.split('\n'),
                                      fromfile='expected', tofile='actual')
    diff = ''.join(line.rstrip() + '\n' for line in diff_lines)
    self.fail("Expected to NOT find '%s' in '%s', diff:\n\n%s" % (
        limit_size(value), limit_size(string), limit_size(diff)
    ))
def assertContainedIf(self, value, string, condition):
    """Assert containment when `condition` is truthy, non-containment otherwise."""
    check = self.assertContained if condition else self.assertNotContained
    check(value, string)
def assertBinaryEqual(self, file1, file2):
    """Assert that two files have identical sizes and byte contents."""
    # Compare sizes first for a cheap early failure with a clearer message.
    self.assertEqual(os.path.getsize(file1),
                     os.path.getsize(file2))
    # Use context managers so the file handles are not leaked.
    with open(file1, 'rb') as f1, open(file2, 'rb') as f2:
        self.assertEqual(f1.read(), f2.read())
library_cache = {}
def get_build_dir(self):
    """Return (creating it if needed) the per-test 'building' directory."""
    build_dir = os.path.join(self.get_dir(), 'building')
    ensure_dir(build_dir)
    return build_dir
def get_library(self, name, generated_libs, configure=['sh', './configure'],
                configure_args=None, make=['make'], make_args=None,
                env_init=None, cache_name_extra='', native=False):
    """Build a third-party test library, caching the result in-process.

    Args:
      name: library directory name under the test root.
      generated_libs: expected output archive/object paths.
      configure: configure command, or None to skip configuring.
      configure_args: extra arguments appended to `configure`.
      make: build command.
      make_args: arguments for `make`; defaults to a parallel build.
      env_init: extra environment variables for the build.
      cache_name_extra: extra string mixed into the cache key.
      native: build natively rather than with emscripten.
    """
    # Avoid mutable default arguments (shared and potentially mutated
    # across calls).
    if configure_args is None:
        configure_args = []
    if env_init is None:
        env_init = {}
    if make_args is None:
        make_args = ['-j', str(shared.get_num_cores())]
    build_dir = self.get_build_dir()
    output_dir = self.get_dir()
    emcc_args = self.get_emcc_args()
    # The cache key encodes compiler flags and the build environment so any
    # configuration change triggers a rebuild.
    hash_input = (str(emcc_args) + ' $ ' + str(env_init)).encode('utf-8')
    cache_name = name + ','.join([opt for opt in emcc_args if len(opt) < 7]) + '_' + hashlib.md5(hash_input).hexdigest() + cache_name_extra
    valid_chars = "_%s%s" % (string.ascii_letters, string.digits)
    cache_name = ''.join([(c if c in valid_chars else '_') for c in cache_name])
    if self.library_cache.get(cache_name):
        print('<load %s from cache> ' % cache_name, file=sys.stderr)
        generated_libs = []
        for basename, contents in self.library_cache[cache_name]:
            bc_file = os.path.join(build_dir, cache_name + '_' + basename)
            with open(bc_file, 'wb') as f:
                f.write(contents)
            generated_libs.append(bc_file)
        return generated_libs
    print(f'<building and saving {cache_name} into cache>', file=sys.stderr)
    if configure is not None:
        # Avoid += so we don't mutate the default arg
        configure = configure + configure_args
    return build_library(name, build_dir, output_dir, generated_libs, configure,
                         make, make_args, self.library_cache,
                         cache_name, env_init=env_init, native=native, cflags=self.get_emcc_args())
def clear(self):
    """Delete everything in the test directory (and in the shared
    emscripten temp dir, when one is configured)."""
    delete_contents(self.get_dir())
    if EMSCRIPTEN_TEMP_DIR:
        delete_contents(EMSCRIPTEN_TEMP_DIR)
def run_process(self, cmd, check=True, **args):
    # Wrapper around shared.run_process. This is desirable so that the tests
    # can fail (in the unittest sense) rather than error'ing.
    # In the long run it would nice to completely remove the dependency on
    # core emscripten code (shared.py) here.
    try:
        return shared.run_process(cmd, check=check, **args)
    except subprocess.CalledProcessError as e:
        # Only reachable when check=True (run_process does not raise
        # otherwise); turn the error into a proper test failure.
        if check and e.returncode != 0:
            self.fail('subprocess exited with non-zero return code(%d): `%s`' %
                      (e.returncode, shared.shlex_join(cmd)))
def emcc(self, filename, args=None, output_filename=None, **kwargs):
    """Compile `filename` with emcc into `output_filename` (defaults to
    `filename + '.o'`), deleting any stale output first."""
    # Avoid a mutable default argument.
    if args is None:
        args = []
    if output_filename is None:
        output_filename = filename + '.o'
    try_delete(output_filename)
    self.run_process([EMCC, filename] + args + ['-o', output_filename], **kwargs)
# Shared test code between main suite and others
def expect_fail(self, cmd, **args):
    """Run a subprocess and assert that it returns non-zero.

    Return the stderr of the subprocess.
    """
    proc = self.run_process(cmd, check=False, stderr=PIPE, **args)
    self.assertNotEqual(proc.returncode, 0, 'subprocess unexpectedly succeeded. stderr:\n' + proc.stderr)
    # When we check for failure we expect a user-visible error, not a traceback.
    # However, on windows a python traceback can happen randomly sometimes,
    # due to "Access is denied" https://github.com/emscripten-core/emscripten/issues/718
    if not WINDOWS or 'Access is denied' not in proc.stderr:
        self.assertNotContained('Traceback', proc.stderr)
    return proc.stderr
# Exercise the dynamic linker.
#
# test that linking to shared library B, which is linked to A, loads A as well.
# main is also linked to C, which is also linked to A. A is loaded/initialized only once.
#
#          B
#        /   \
# main <      > A
#        \   /
#          C
#
# this test is used by both test_core and test_browser.
# when run under browser it exercises how the dynamic linker handles concurrency
# - because B and C are loaded in parallel.
def _test_dylink_dso_needed(self, do_run):
    # liba: the shared dependency. Prints each call and remembers the previous
    # caller, and its static initializer prints "a: loaded" so the test can
    # verify A is initialized exactly once.
    create_file('liba.cpp', r'''
      #include <stdio.h>
      #include <emscripten.h>

      static const char *afunc_prev;

      extern "C" {
      EMSCRIPTEN_KEEPALIVE void afunc(const char *s);
      }

      void afunc(const char *s) {
        printf("a: %s (prev: %s)\n", s, afunc_prev);
        afunc_prev = s;
      }

      struct ainit {
        ainit() {
          puts("a: loaded");
        }
      };

      static ainit _;
    ''')
    # libb and libc both call into liba.
    create_file('libb.cpp', r'''
      #include <emscripten.h>

      extern "C" {
      void afunc(const char *s);
      EMSCRIPTEN_KEEPALIVE void bfunc();
      }

      void bfunc() {
        afunc("b");
      }
    ''')
    create_file('libc.cpp', r'''
      #include <emscripten.h>

      extern "C" {
      void afunc(const char *s);
      EMSCRIPTEN_KEEPALIVE void cfunc();
      }

      void cfunc() {
        afunc("c");
      }
    ''')

    # _test_dylink_dso_needed can be potentially called several times by a test.
    # reset dylink-related options first.
    self.clear_setting('MAIN_MODULE')
    self.clear_setting('SIDE_MODULE')

    # XXX in wasm each lib load currently takes 5MB; default INITIAL_MEMORY=16MB is thus not enough
    self.set_setting('INITIAL_MEMORY', '32mb')

    so = '.wasm' if self.is_wasm() else '.js'

    # Helper: build `src` as a SIDE_MODULE, linking against `linkto`.
    def ccshared(src, linkto=[]):
        cmdv = [EMCC, src, '-o', shared.unsuffixed(src) + so, '-s', 'SIDE_MODULE'] + self.get_emcc_args()
        cmdv += linkto
        self.run_process(cmdv)

    ccshared('liba.cpp')
    ccshared('libb.cpp', ['liba' + so])
    ccshared('libc.cpp', ['liba' + so])

    self.set_setting('MAIN_MODULE')
    original_args = self.emcc_args.copy()
    # First case: link the main module directly against B and C; loading
    # them must pull in (and initialize) A exactly once.
    extra_args = ['libb' + so, 'libc' + so]
    self.emcc_args += extra_args
    do_run(r'''
      #ifdef __cplusplus
      extern "C" {
      #endif
      void bfunc();
      void cfunc();
      #ifdef __cplusplus
      }
      #endif

      int test_main() {
        bfunc();
        cfunc();
        return 0;
      }
      ''',
           'a: loaded\na: b (prev: (null))\na: c (prev: b)\n')
    self.emcc_args = original_args

    # Second case: same libraries, but loaded at runtime via dlopen() from
    # embedded files.
    for libname in ['liba', 'libb', 'libc']:
        self.emcc_args += ['--embed-file', libname + so]
    do_run(r'''
      #include <assert.h>
      #include <dlfcn.h>
      #include <stddef.h>

      int test_main() {
        void *bdso, *cdso;
        void (*bfunc)(), (*cfunc)();

        // FIXME for RTLD_LOCAL binding symbols to loaded lib is not currently working
        bdso = dlopen("libb%(so)s", RTLD_NOW|RTLD_GLOBAL);
        assert(bdso != NULL);
        cdso = dlopen("libc%(so)s", RTLD_NOW|RTLD_GLOBAL);
        assert(cdso != NULL);

        bfunc = (void (*)())dlsym(bdso, "bfunc");
        assert(bfunc != NULL);
        cfunc = (void (*)())dlsym(cdso, "cfunc");
        assert(cfunc != NULL);

        bfunc();
        cfunc();
        return 0;
      }
    ''' % locals(),
           'a: loaded\na: b (prev: (null))\na: c (prev: b)\n')
def filtered_js_engines(self, js_engines=None):
    """Return the JS engines to use: the requested (or default) engines
    with any banned engines filtered out."""
    candidates = self.js_engines if js_engines is None else js_engines
    for candidate in candidates:
        assert candidate in config.JS_ENGINES, "js engine does not exist in config.JS_ENGINES"
        assert type(candidate) == list
    for banned_engine in self.banned_js_engines:
        assert type(banned_engine) in (list, type(None))
    # Compare engines by their binary (first element of the command line).
    banned_binaries = [b[0] for b in self.banned_js_engines if b]
    return [e for e in candidates if e and e[0] not in banned_binaries]
def do_run(self, src, expected_output, force_c=False, **kwargs):
    """Write `src` to a source file, then build, run, and verify it."""
    if 'no_build' in kwargs:
        # `src` is already a filename in this mode.
        filename = src
    else:
        filename = 'src.c' if force_c else 'src.cpp'
        with open(filename, 'w') as f:
            f.write(src)
    self._build_and_run(filename, expected_output, **kwargs)
def do_runf(self, filename, expected_output=None, **kwargs):
    """Like `do_run`, but takes the path of an existing source file
    instead of source text."""
    self._build_and_run(filename, expected_output, **kwargs)
## Just like `do_run` but with filename of expected output
def do_run_from_file(self, filename, expected_output_filename, **kwargs):
    """Build and run `filename`, comparing against the contents of
    `expected_output_filename`."""
    # Close the expected-output file promptly instead of leaking the handle.
    with open(expected_output_filename) as f:
        expected = f.read()
    self._build_and_run(filename, expected, **kwargs)
def do_run_in_out_file_test(self, *path, **kwargs):
    """Build and run the test file at `path`, comparing its output against
    the sibling '.out' file."""
    srcfile = test_file(*path)
    outfile = shared.unsuffixed(srcfile) + '.out'
    # Close the expected-output file promptly instead of leaking the handle.
    with open(outfile) as f:
        expected = f.read()
    self._build_and_run(srcfile, expected, **kwargs)
## Does a complete test - builds, runs, checks output, etc.
def _build_and_run(self, filename, expected_output, args=None, output_nicerizer=None,
                   no_build=False,
                   js_engines=None, post_build=None, libraries=None,
                   includes=None,
                   assert_returncode=0, assert_identical=False, assert_all=False,
                   check_for_error=True, force_c=False):
    """Build `filename` (unless `no_build`), run it in every selected
    engine, and verify the output against `expected_output`.

    Skips the test when no JS engine is available; fails when the output
    does not match.
    """
    logger.debug(f'_build_and_run: {filename}')
    # Avoid mutable default arguments.
    if args is None:
        args = []
    if libraries is None:
        libraries = []
    if includes is None:
        includes = []
    if no_build:
        js_file = filename
    else:
        self.build(filename, libraries=libraries, includes=includes, post_build=post_build,
                   force_c=force_c)
        js_file = shared.unsuffixed(os.path.basename(filename)) + '.js'
    self.assertExists(js_file)
    engines = self.filtered_js_engines(js_engines)
    if len(engines) > 1 and not self.use_all_engines:
        engines = engines[:1]
    # In standalone mode, also add wasm vms as we should be able to run there too.
    if self.get_setting('STANDALONE_WASM'):
        # TODO once standalone wasm support is more stable, apply use_all_engines
        # like with js engines, but for now as we bring it up, test in all of them
        if not self.wasm_engines:
            logger.warning('no wasm engine was found to run the standalone part of this test')
        engines += self.wasm_engines
    if self.get_setting('WASM2C') and not EMTEST_LACKS_NATIVE_CLANG:
        # compile the c file to a native executable.
        c = shared.unsuffixed(js_file) + '.wasm.c'
        executable = shared.unsuffixed(js_file) + '.exe'
        cmd = [shared.CLANG_CC, c, '-o', executable] + clang_native.get_clang_native_args()
        self.run_process(cmd, env=clang_native.get_clang_native_env())
        # we can now run the executable directly, without an engine, which
        # we indicate with None as the engine
        engines += [[None]]
    if len(engines) == 0:
        self.skipTest('No JS engine present to run this test with. Check %s and the paths therein.' % config.EM_CONFIG)
    for engine in engines:
        js_output = self.run_js(js_file, engine, args, output_nicerizer=output_nicerizer, assert_returncode=assert_returncode)
        js_output = js_output.replace('\r\n', '\n')
        if expected_output:
            try:
                if assert_identical:
                    self.assertIdentical(expected_output, js_output)
                elif assert_all:
                    for o in expected_output:
                        self.assertContained(o, js_output)
                else:
                    self.assertContained(expected_output, js_output)
                if check_for_error:
                    self.assertNotContained('ERROR', js_output)
            except Exception:
                print('(test did not pass in JS engine: %s)' % engine)
                raise
def get_freetype_library(self):
    """Build (or load from the in-process cache) the freetype test library."""
    # -Werror is removed first — presumably because the freetype build emits
    # warnings that would otherwise break it; confirm against the build logs.
    if '-Werror' in self.emcc_args:
        self.emcc_args.remove('-Werror')
    return self.get_library(os.path.join('third_party', 'freetype'), os.path.join('objs', '.libs', 'libfreetype.a'), configure_args=['--disable-shared', '--without-zlib'])
def get_poppler_library(self, env_init=None):
    """Build (or load from cache) poppler plus its freetype dependency and
    return the combined list of link inputs."""
    # The fontconfig symbols are all missing from the poppler build
    # e.g. FcConfigSubstitute
    self.set_setting('ERROR_ON_UNDEFINED_SYMBOLS', 0)
    self.emcc_args += [
        '-I' + test_file('third_party', 'freetype', 'include'),
        '-I' + test_file('third_party', 'poppler', 'include')
    ]
    freetype = self.get_freetype_library()
    # Poppler has some pretty glaring warning. Suppress them to keep the
    # test output readable.
    if '-Werror' in self.emcc_args:
        self.emcc_args.remove('-Werror')
    self.emcc_args += [
        '-Wno-sentinel',
        '-Wno-logical-not-parentheses',
        '-Wno-unused-private-field',
        '-Wno-tautological-compare',
        '-Wno-unknown-pragmas',
    ]
    # Work on a copy so the caller's dict is never mutated.
    build_env = env_init.copy() if env_init else {}
    build_env['FONTCONFIG_CFLAGS'] = ' '
    build_env['FONTCONFIG_LIBS'] = ' '
    poppler = self.get_library(
        os.path.join('third_party', 'poppler'),
        [os.path.join('utils', 'pdftoppm.o'), os.path.join('utils', 'parseargs.o'), os.path.join('poppler', '.libs', 'libpoppler.a')],
        env_init=build_env,
        configure_args=['--disable-libjpeg', '--disable-libpng', '--disable-poppler-qt', '--disable-poppler-qt4', '--disable-cms', '--disable-cairo-output', '--disable-abiword-output', '--disable-shared'])
    return poppler + freetype
def get_zlib_library(self):
    """Build (or load from cache) zlib; uses cmake on Windows, make elsewhere."""
    zlib_dir = os.path.join('third_party', 'zlib')
    zlib_lib = os.path.join('libz.a')
    if WINDOWS:
        return self.get_library(zlib_dir, zlib_lib,
                                configure=['cmake', '.'],
                                make=['cmake', '--build', '.'],
                                make_args=[])
    return self.get_library(zlib_dir, zlib_lib, make_args=['libz.a'])
# Run a server and a web page. When a test runs, we tell the server about it,
# which tells the web page, which then opens a window with the test. Doing
# it this way then allows the page to close() itself when done.
def harness_server_func(in_queue, out_queue, port):
    """HTTP server loop for browser tests.

    Serves test files, feeds queued test URLs to the harness page (which
    polls /check), and forwards results the page reports back into
    `out_queue`. Runs until the process is killed by the test runner.
    """
    class TestServerHandler(SimpleHTTPRequestHandler):
        # Request header handler for default do_GET() path in
        # SimpleHTTPRequestHandler.do_GET(self) below.
        def send_head(self):
            if self.path.endswith('.js'):
                path = self.translate_path(self.path)
                try:
                    f = open(path, 'rb')
                except IOError:
                    self.send_error(404, "File not found: " + path)
                    return None
                self.send_response(200)
                self.send_header('Content-type', 'application/javascript')
                self.send_header('Connection', 'close')
                self.end_headers()
                return f
            else:
                return SimpleHTTPRequestHandler.send_head(self)

        # Add COOP, COEP, CORP, and no-caching headers
        def end_headers(self):
            self.send_header('Access-Control-Allow-Origin', '*')
            self.send_header('Cross-Origin-Opener-Policy', 'same-origin')
            self.send_header('Cross-Origin-Embedder-Policy', 'require-corp')
            self.send_header('Cross-Origin-Resource-Policy', 'cross-origin')
            self.send_header('Cache-Control', 'no-cache, no-store, must-revalidate')
            return SimpleHTTPRequestHandler.end_headers(self)

        def do_GET(self):
            if self.path == '/run_harness':
                # Serve the harness page that drives all browser tests.
                if DEBUG:
                    print('[server startup]')
                self.send_response(200)
                self.send_header('Content-type', 'text/html')
                self.end_headers()
                self.wfile.write(open(test_file('browser_harness.html'), 'rb').read())
            elif 'report_' in self.path:
                # the test is reporting its result. first change dir away from the
                # test dir, as it will be deleted now that the test is finishing, and
                # if we got a ping at that time, we'd return an error
                os.chdir(path_from_root())
                # for debugging, tests may encode the result and their own url (window.location) as result|url
                if '|' in self.path:
                    path, url = self.path.split('|', 1)
                else:
                    path = self.path
                    url = '?'
                if DEBUG:
                    print('[server response:', path, url, ']')
                if out_queue.empty():
                    out_queue.put(path)
                else:
                    # a badly-behaving test may send multiple xhrs with reported results; we just care
                    # about the first (if we queued the others, they might be read as responses for
                    # later tests, or maybe the test sends more than one in a racy manner).
                    # we place 'None' in the queue here so that the outside knows something went wrong
                    # (none is not a valid value otherwise; and we need the outside to know because if we
                    # raise an error in here, it is just swallowed in python's webserver code - we want
                    # the test to actually fail, which a webserver response can't do).
                    out_queue.put(None)
                    raise Exception('browser harness error, excessive response to server - test must be fixed! "%s"' % self.path)
                self.send_response(200)
                self.send_header('Content-type', 'text/plain')
                self.send_header('Cache-Control', 'no-cache, must-revalidate')
                self.send_header('Connection', 'close')
                self.send_header('Expires', '-1')
                self.end_headers()
                self.wfile.write(b'OK')
            elif 'stdout=' in self.path or 'stderr=' in self.path or 'exception=' in self.path:
                '''
                To get logging to the console from browser tests, add this to
                print/printErr/the exception handler in src/shell.html:
                  var xhr = new XMLHttpRequest();
                  xhr.open('GET', encodeURI('http://localhost:8888?stdout=' + text));
                  xhr.send();
                '''
                print('[client logging:', unquote_plus(self.path), ']')
                self.send_response(200)
                self.send_header('Content-type', 'text/html')
                self.end_headers()
            elif self.path == '/check':
                # The harness page polls here for the next test to run.
                self.send_response(200)
                self.send_header('Content-type', 'text/html')
                self.end_headers()
                if not in_queue.empty():
                    # there is a new test ready to be served
                    url, dir = in_queue.get()
                    if DEBUG:
                        print('[queue command:', url, dir, ']')
                    assert in_queue.empty(), 'should not be any blockage - one test runs at a time'
                    assert out_queue.empty(), 'the single response from the last test was read'
                    # tell the browser to load the test
                    self.wfile.write(b'COMMAND:' + url.encode('utf-8'))
                    # move us to the right place to serve the files for the new test
                    os.chdir(dir)
                else:
                    # the browser must keep polling
                    self.wfile.write(b'(wait)')
            else:
                # Use SimpleHTTPServer default file serving operation for GET.
                if DEBUG:
                    print('[simple HTTP serving:', unquote_plus(self.path), ']')
                SimpleHTTPRequestHandler.do_GET(self)

        # NOTE(review): declared without `self`; when called as a bound method
        # the handler instance binds to `code`. Harmless since the body only
        # suppresses logging, but confirm before relying on the parameters.
        def log_request(code=0, size=0):
            # don't log; too noisy
            pass

    # allows streaming compilation to work
    SimpleHTTPRequestHandler.extensions_map['.wasm'] = 'application/wasm'
    httpd = HTTPServer(('localhost', port), TestServerHandler)
    httpd.serve_forever()  # test runner will kill us
class Reporting(Enum):
    """When running browser tests we normally automatically include support
    code for reporting results back to the browser. This enum allows tests
    to decide what type of support code they need/want.
    """
    # No reporting support code is injected at all.
    NONE = 0
    # Include the JS helpers for reporting results
    JS_ONLY = 1
    # Include C/C++ reporting code (REPORT_RESULT macros) as well as JS helpers
    FULL = 2
class BrowserCore(RunnerCore):
# note how many tests hang / do not send an output. if many of these
# happen, likely something is broken and it is best to abort the test
# suite early, as otherwise we will wait for the timeout on every
# single test (hundreds of minutes)
MAX_UNRESPONSIVE_TESTS = 10
unresponsive_tests = 0
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@staticmethod
def browser_open(url):
if not EMTEST_BROWSER:
logger.info('Using default system browser')
webbrowser.open_new(url)
return
browser_args = shlex.split(EMTEST_BROWSER)
# If the given browser is a scalar, treat it like one of the possible types
# from https://docs.python.org/2/library/webbrowser.html
if len(browser_args) == 1:
try:
# This throws if the type of browser isn't available
webbrowser.get(browser_args[0]).open_new(url)
logger.info('Using Emscripten browser: %s', browser_args[0])
return
except webbrowser.Error:
# Ignore the exception and fallback to the custom command logic
pass
# Else assume the given browser is a specific program with additional
# parameters and delegate to that
logger.info('Using Emscripten browser: %s', str(browser_args))
subprocess.Popen(browser_args + [url])
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.also_asmjs = int(os.getenv('EMTEST_BROWSER_ALSO_ASMJS', '0')) == 1
cls.port = int(os.getenv('EMTEST_BROWSER_PORT', '8888'))
if not has_browser():
return
cls.browser_timeout = 60
cls.harness_in_queue = multiprocessing.Queue()
cls.harness_out_queue = multiprocessing.Queue()
cls.harness_server = multiprocessing.Process(target=harness_server_func, args=(cls.harness_in_queue, cls.harness_out_queue, cls.port))
cls.harness_server.start()
print('[Browser harness server on process %d]' % cls.harness_server.pid)
cls.browser_open('http://localhost:%s/run_harness' % cls.port)
@classmethod
def tearDownClass(cls):
super().tearDownClass()
if not has_browser():
return
cls.harness_server.terminate()
print('[Browser harness server terminated]')
if WINDOWS:
# On Windows, shutil.rmtree() in tearDown() raises this exception if we do not wait a bit:
# WindowsError: [Error 32] The process cannot access the file because it is being used by another process.
time.sleep(0.1)
def assert_out_queue_empty(self, who):
if not self.harness_out_queue.empty():
while not self.harness_out_queue.empty():
self.harness_out_queue.get()
raise Exception('excessive responses from %s' % who)
# @param extra_tries: how many more times to try this test, if it fails. browser tests have
# many more causes of flakiness (in particular, they do not run
# synchronously, so we have a timeout, which can be hit if the VM
# we run on stalls temporarily), so we let each test try more than
# once by default
def run_browser(self, html_file, message, expectedResult=None, timeout=None, extra_tries=1):
if not has_browser():
return
if BrowserCore.unresponsive_tests >= BrowserCore.MAX_UNRESPONSIVE_TESTS:
self.skipTest('too many unresponsive tests, skipping browser launch - check your setup!')
self.assert_out_queue_empty('previous test')
if DEBUG:
print('[browser launch:', html_file, ']')
if expectedResult is not None:
try:
self.harness_in_queue.put((
'http://localhost:%s/%s' % (self.port, html_file),
self.get_dir()
))
received_output = False
output = '[no http server activity]'
start = time.time()
if timeout is None:
timeout = self.browser_timeout
while time.time() - start < timeout:
if not self.harness_out_queue.empty():
output = self.harness_out_queue.get()
received_output = True
break
time.sleep(0.1)
if not received_output:
BrowserCore.unresponsive_tests += 1
print('[unresponsive tests: %d]' % BrowserCore.unresponsive_tests)
if output is None:
# the browser harness reported an error already, and sent a None to tell
# us to also fail the test
raise Exception('failing test due to browser harness error')
if output.startswith('/report_result?skipped:'):
self.skipTest(unquote(output[len('/report_result?skipped:'):]).strip())
else:
# verify the result, and try again if we should do so
output = unquote(output)
try:
self.assertContained(expectedResult, output)
except Exception as e:
if extra_tries > 0:
print('[test error (see below), automatically retrying]')
print(e)
return self.run_browser(html_file, message, expectedResult, timeout, extra_tries - 1)
else:
raise e
finally:
time.sleep(0.1) # see comment about Windows above
self.assert_out_queue_empty('this test')
else:
webbrowser.open_new(os.path.abspath(html_file))
print('A web browser window should have opened a page containing the results of a part of this test.')
print('You need to manually look at the page to see that it works ok: ' + message)
print('(sleeping for a bit to keep the directory alive for the web browser..)')
time.sleep(5)
print('(moving on..)')
# @manually_trigger If set, we do not assume we should run the reftest when main() is done.
# Instead, call doReftest() in JS yourself at the right time.
def reftest(self, expected, manually_trigger=False):
# make sure the pngs used here have no color correction, using e.g.
# pngcrush -rem gAMA -rem cHRM -rem iCCP -rem sRGB infile outfile
basename = os.path.basename(expected)
shutil.copyfile(expected, os.path.join(self.get_dir(), basename))
with open('reftest.js', 'w') as out:
with open(test_file('browser_reporting.js')) as reporting:
out.write('''
function doReftest() {
if (doReftest.done) return;
doReftest.done = true;
var img = new Image();
img.onload = function() {
assert(img.width == Module.canvas.width, 'Invalid width: ' + Module.canvas.width + ', should be ' + img.width);
assert(img.height == Module.canvas.height, 'Invalid height: ' + Module.canvas.height + ', should be ' + img.height);
var canvas = document.createElement('canvas');
canvas.width = img.width;
canvas.height = img.height;
var ctx = canvas.getContext('2d');
ctx.drawImage(img, 0, 0);
var expected = ctx.getImageData(0, 0, img.width, img.height).data;
var actualUrl = Module.canvas.toDataURL();
var actualImage = new Image();
actualImage.onload = function() {
/*
document.body.appendChild(img); // for comparisons
var div = document.createElement('div');
div.innerHTML = '^=expected, v=actual';
document.body.appendChild(div);
document.body.appendChild(actualImage); // to grab it for creating the test reference
*/
var actualCanvas = document.createElement('canvas');
actualCanvas.width = actualImage.width;
actualCanvas.height = actualImage.height;
var actualCtx = actualCanvas.getContext('2d');
actualCtx.drawImage(actualImage, 0, 0);
var actual = actualCtx.getImageData(0, 0, actualImage.width, actualImage.height).data;
var total = 0;
var width = img.width;
var height = img.height;
for (var x = 0; x < width; x++) {
for (var y = 0; y < height; y++) {
total += Math.abs(expected[y*width*4 + x*4 + 0] - actual[y*width*4 + x*4 + 0]);
total += Math.abs(expected[y*width*4 + x*4 + 1] - actual[y*width*4 + x*4 + 1]);
total += Math.abs(expected[y*width*4 + x*4 + 2] - actual[y*width*4 + x*4 + 2]);
}
}
var wrong = Math.floor(total / (img.width*img.height*3)); // floor, to allow some margin of error for antialiasing
// If the main JS file is in a worker, or modularize, then we need to supply our own reporting logic.
if (typeof reportResultToServer === 'undefined') {
(function() {
%s
reportResultToServer(wrong);
})();
} else {
reportResultToServer(wrong);
}
};
actualImage.src = actualUrl;
}
img.src = '%s';
};
// Automatically trigger the reftest?
if (!%s) {
// Yes, automatically
Module['postRun'] = doReftest;
if (typeof WebGLClient !== 'undefined') {
// trigger reftest from RAF as well, needed for workers where there is no pre|postRun on the main thread
var realRAF = window.requestAnimationFrame;
window.requestAnimationFrame = /** @suppress{checkTypes} */ (function(func) {
realRAF(function() {
func();
realRAF(doReftest);
});
});
// trigger reftest from canvas render too, for workers not doing GL
var realWOM = worker.onmessage;
worker.onmessage = function(event) {
realWOM(event);
if (event.data.target === 'canvas' && event.data.op === 'render') {
realRAF(doReftest);
}
};
}
} else {
// Manually trigger the reftest.
// The user will call it.
// Add an event loop iteration to ensure rendering, so users don't need to bother.
var realDoReftest = doReftest;
doReftest = function() {
setTimeout(realDoReftest, 1);
};
}
''' % (reporting.read(), basename, int(manually_trigger)))
def compile_btest(self, args, reporting=Reporting.FULL):
# Inject support code for reporting results. This adds an include a header so testcases can
# use REPORT_RESULT, and also adds a cpp file to be compiled alongside the testcase, which
# contains the implementation of REPORT_RESULT (we can't just include that implementation in
# the header as there may be multiple files being compiled here).
args += ['-s', 'IN_TEST_HARNESS']
if reporting != Reporting.NONE:
# For basic reporting we inject JS helper funtions to report result back to server.
args += ['-DEMTEST_PORT_NUMBER=%d' % self.port,
'--pre-js', test_file('browser_reporting.js')]
if reporting == Reporting.FULL:
# If C reporting (i.e. REPORT_RESULT macro) is required
# also compile in report_result.cpp and forice-include report_result.h
args += ['-I' + TEST_ROOT,
'-include', test_file('report_result.h'),
test_file('report_result.cpp')]
self.run_process([EMCC] + self.get_emcc_args() + args)
def btest_exit(self, filename, assert_returncode=0, *args, **kwargs):
"""Special case of btest that reports its result solely via exiting
with a give result code.
In this case we set EXIT_RUNTIME and we don't need to provide the
REPORT_RESULT macro to the C code.
"""
self.set_setting('EXIT_RUNTIME')
kwargs['reporting'] = Reporting.JS_ONLY
kwargs['expected'] = 'exit:%d' % assert_returncode
return self.btest(filename, *args, **kwargs)
def btest(self, filename, expected=None, reference=None,
          reference_slack=0, manual_reference=False, post_build=None,
          args=None, message='.', also_proxied=False,
          url_suffix='', timeout=None, also_asmjs=False,
          manually_trigger_reftest=False, extra_tries=1,
          reporting=Reporting.FULL):
    """Build a browser testcase and run it in the harness browser.

    Either `expected` (one or more result payloads reported via
    '/report_result?...') or `reference` (an image to render and compare
    against) must be given.  May recursively re-run itself with WASM=0
    (`also_asmjs`) and/or proxied to a worker (`also_proxied`).
    """
    assert expected or reference, 'a btest must either expect an output, or have a reference image'
    if args is None:
        args = []
    # Keep a copy: the recursive re-runs below must not see the extra flags
    # appended to `args` in this invocation.
    original_args = args.copy()
    if not os.path.exists(filename):
        filename = test_file(filename)
    if reference:
        self.reference = reference
        # For a reftest, the "expected" results are pixel-difference scores
        # from 0 up to reference_slack.
        expected = [str(i) for i in range(0, reference_slack + 1)]
        self.reftest(test_file(reference), manually_trigger=manually_trigger_reftest)
        if not manual_reference:
            args += ['--pre-js', 'reftest.js', '-s', 'GL_TESTING']
    outfile = 'test.html'
    args += [filename, '-o', outfile]
    # print('all args:', args)
    try_delete(outfile)
    self.compile_btest(args, reporting=reporting)
    self.assertExists(outfile)
    if post_build:
        post_build()
    if not isinstance(expected, list):
        expected = [expected]
    self.run_browser(outfile + url_suffix, message, ['/report_result?' + e for e in expected], timeout=timeout, extra_tries=extra_tries)

    # Tests can opt into being run under asmjs as well
    if 'WASM=0' not in original_args and (also_asmjs or self.also_asmjs):
        print('WASM=0')
        # also_proxied=False prevents the recursive call from proxying again.
        self.btest(filename, expected, reference, reference_slack, manual_reference, post_build,
                   original_args + ['-s', 'WASM=0'], message, also_proxied=False, timeout=timeout)

    if also_proxied:
        print('proxied...')
        if reference:
            # When proxied, the reference must be triggered manually through
            # post_manual_reftest rather than automatically.
            assert not manual_reference
            manual_reference = True
            assert not post_build
            post_build = self.post_manual_reftest
        # run proxied
        self.btest(filename, expected, reference, reference_slack, manual_reference, post_build,
                   original_args + ['--proxy-to-worker', '-s', 'GL_TESTING'], message, timeout=timeout)
###################################################################################################
def build_library(name,
                  build_dir,
                  output_dir,
                  generated_libs,
                  configure=['sh', './configure'],
                  make=['make'],
                  make_args=None,
                  cache=None,
                  cache_name=None,
                  env_init=None,
                  native=False,
                  cflags=None):
    """Build a library and cache the result. We build the library file
    once and cache it for all our tests. (We cache in memory since the test
    directory is destroyed and recreated for each test. Note that we cache
    separately for different compilers). This cache is just during the test
    runner. There is a different concept of caching as well, see |Cache|.

    `configure` may be passed as a falsy value to skip the configure step
    entirely, which is why it keeps a (rebound, never mutated) list default.
    """
    # Fix: the original used shared mutable defaults (make_args=[], env_init={},
    # cflags=[]).  `make_args += ['VERBOSE=1']` below mutated the default list,
    # leaking 'VERBOSE=1' into every subsequent call (and into any caller's
    # list).  Copy the inputs instead.
    make_args = list(make_args) if make_args else []
    env_init = dict(env_init) if env_init else {}
    cflags = list(cflags) if cflags else []

    if type(generated_libs) is not list:
        generated_libs = [generated_libs]
    source_dir = test_file(name.replace('_native', ''))

    project_dir = os.path.join(build_dir, name)
    if os.path.exists(project_dir):
        shutil.rmtree(project_dir)
    # Useful in debugging sometimes to comment this out, and two lines above
    shutil.copytree(source_dir, project_dir)

    generated_libs = [os.path.join(project_dir, lib) for lib in generated_libs]

    if native:
        env = clang_native.get_clang_native_env()
    else:
        env = building.get_building_env(cflags=cflags)
    for k, v in env_init.items():
        env[k] = v

    if configure:
        # Wrap the configure command so it runs under the emscripten toolchain.
        if configure[0] == 'cmake':
            configure = [EMCMAKE] + configure
        else:
            configure = [EMCONFIGURE] + configure
        try:
            with open(os.path.join(project_dir, 'configure_out'), 'w') as out:
                with open(os.path.join(project_dir, 'configure_err'), 'w') as err:
                    # EM_BUILD_VERBOSE controls whether output goes to the log
                    # files or straight to the console.
                    stdout = out if EM_BUILD_VERBOSE < 2 else None
                    stderr = err if EM_BUILD_VERBOSE < 1 else None
                    shared.run_process(configure, env=env, stdout=stdout, stderr=stderr,
                                       cwd=project_dir)
        except subprocess.CalledProcessError:
            # On failure, dump the captured logs before propagating.
            with open(os.path.join(project_dir, 'configure_out')) as f:
                print('-- configure stdout --')
                print(f.read())
                print('-- end configure stdout --')
            with open(os.path.join(project_dir, 'configure_err')) as f:
                print('-- configure stderr --')
                print(f.read())
                print('-- end configure stderr --')
            raise

    def open_make_out(mode='r'):
        return open(os.path.join(project_dir, 'make.out'), mode)

    def open_make_err(mode='r'):
        return open(os.path.join(project_dir, 'make.err'), mode)

    if EM_BUILD_VERBOSE >= 3:
        make_args += ['VERBOSE=1']

    try:
        with open_make_out('w') as make_out:
            with open_make_err('w') as make_err:
                stdout = make_out if EM_BUILD_VERBOSE < 2 else None
                stderr = make_err if EM_BUILD_VERBOSE < 1 else None
                shared.run_process(make + make_args, stdout=stdout, stderr=stderr, env=env,
                                   cwd=project_dir)
    except subprocess.CalledProcessError:
        with open_make_out() as f:
            print('-- make stdout --')
            print(f.read())
            print('-- end make stdout --')
        with open_make_err() as f:
            print('-- make stderr --')
            print(f.read())
            # Fix: was '-- end stderr --', inconsistent with the other three
            # delimiters printed by this function.
            print('-- end make stderr --')
        raise

    if cache is not None:
        cache[cache_name] = []
        for f in generated_libs:
            basename = os.path.basename(f)
            # Fix: read via a context manager; the original leaked one open
            # file handle per generated library.
            with open(f, 'rb') as lib:
                cache[cache_name].append((basename, lib.read()))
    return generated_libs
def check_js_engines():
    """Verify every engine in config.JS_ENGINES is runnable; exit if not."""
    working_engines = [e for e in config.JS_ENGINES if jsrun.check_engine(e)]
    if len(working_engines) < len(config.JS_ENGINES):
        print('Not all the JS engines in JS_ENGINES appears to work.')
        # Fix: use sys.exit rather than the site-provided exit() builtin,
        # which is intended for interactive use and is absent under
        # `python -S`.
        sys.exit(1)

    if EMTEST_ALL_ENGINES:
        print('(using ALL js engines)')
    else:
        logger.warning('use EMTEST_ALL_ENGINES=1 in the env to run against all JS '
                       'engines, which is slower but provides more coverage')
def get_and_import_modules():
    """Import every test*.py module that lives next to this file.

    Returns the imported module objects.  Relies on this file's directory
    already being importable, since the import is done by bare module name.
    """
    modules = []
    for filename in glob.glob(os.path.join(os.path.dirname(__file__), 'test*.py')):
        # Fix: derive just the module name; the original unpacked the
        # directory and extension into locals it never used.
        module_name = os.path.splitext(os.path.basename(filename))[0]
        __import__(module_name)
        modules.append(sys.modules[module_name])
    return modules
def get_all_tests(modules):
    # Create a list of all known tests so that we can choose from them based
    # on a wildcard search.  Names take the form '<suite>.<test_...>'.
    return [suite + '.' + test
            for module in modules
            for suite in core_test_modes + non_core_test_modes
            if hasattr(module, suite)
            for test in dir(getattr(module, suite))
            if test.startswith('test_')]
def tests_with_expanded_wildcards(args, all_tests):
# Process wildcards, e.g. "browser.test_pthread_*" should expand to list all pthread tests
new_args = []
for i, arg in enumerate(args):
if '*' in arg:
if arg.startswith('skip:'):
arg = arg[5:]
matching_tests = fnmatch.filter(all_tests, arg)
new_args += ['skip:' + t for t in matching_tests]
else:
new_args += fnmatch.filter(all_tests, arg)
else:
new_args += [arg]
if not new_args and args:
print('No tests found to run in set: ' + str(args))
sys.exit(1)
return new_args
def skip_requested_tests(args, modules):
for i, arg in enumerate(args):
if arg.startswith('skip:'):
which = [arg.split('skip:')[1]]
print(','.join(which), file=sys.stderr)
skipped = False
for test in which:
print('will skip "%s"' % test, file=sys.stderr)
suite_name, test_name = test.split('.')
for m in modules:
suite = getattr(m, suite_name, None)
if suite:
setattr(suite, test_name, lambda s: s.skipTest("requested to be skipped"))
skipped = True
break
assert skipped, "Not able to skip test " + test
args[i] = None
return [a for a in args if a is not None]
def args_for_random_tests(args, modules):
    """If the first argument is a 'random<suffix>' directive, replace the
    whole argument list with randomly chosen test names; otherwise pass the
    arguments through unchanged."""
    if not args or not args[0].startswith('random'):
        return args
    random_arg = args[0][len('random'):]
    num_tests, base_module, relevant_modes = get_random_test_parameters(random_arg)
    for module in modules:
        if hasattr(module, base_module):
            chosen = choose_random_tests(getattr(module, base_module), num_tests, relevant_modes)
            print_random_test_statistics(num_tests)
            return chosen
    # No module provides the requested base suite; leave args untouched.
    return args
def get_random_test_parameters(arg):
    """Parse the suffix of a 'random' directive: '[other|browser]<N>'.

    Returns (num_tests, base_module, relevant_modes); with no suffix the
    default core mode and a single test are used.
    """
    num_tests = 1
    base_module = default_core_test_mode
    relevant_modes = core_test_modes
    if arg:  # fix: `if len(arg):` — test truthiness directly
        num_str = arg
        if arg.startswith('other'):
            base_module = 'other'
            relevant_modes = ['other']
            # Fix: str.replace would remove *every* occurrence of the word,
            # not just the leading one; strip only the prefix.
            num_str = arg[len('other'):]
        elif arg.startswith('browser'):
            base_module = 'browser'
            relevant_modes = ['browser']
            num_str = arg[len('browser'):]
        num_tests = int(num_str)
    return num_tests, base_module, relevant_modes
def choose_random_tests(base, num_tests, relevant_modes):
tests = [t for t in dir(base) if t.startswith('test_')]
print()
chosen = set()
while len(chosen) < num_tests:
test = random.choice(tests)
mode = random.choice(relevant_modes)
new_test = mode + '.' + test
before = len(chosen)
chosen.add(new_test)
if len(chosen) > before:
print('* ' + new_test)
else:
# we may have hit the limit
if len(chosen) == len(tests) * len(relevant_modes):
print('(all possible tests chosen! %d = %d*%d)' % (len(chosen), len(tests), len(relevant_modes)))
break
return list(chosen)
def print_random_test_statistics(num_tests):
    """Print the statistical-confidence blurb for a random test run and
    register a matching reminder to be shown at interpreter exit."""
    stddev = 0.5 / math.sqrt(num_tests)
    expected = 100.0 * (1.0 - stddev)

    def remind():
        # Shown at exit, after all test output has scrolled past.
        print('if all tests passed then there is a greater than 95%% chance that at least '
              '%.2f%% of the test suite will pass'
              % (expected))

    print()
    print('running those %d randomly-selected tests. if they all pass, then there is a '
          'greater than 95%% chance that at least %.2f%% of the test suite will pass'
          % (num_tests, expected))
    print()
    atexit.register(remind)
def load_test_suites(args, modules):
    """Resolve each requested test name against the given modules and build a
    suite per module.  Returns (suites, names that matched no module)."""
    loader = unittest.TestLoader()
    unmatched_test_names = set(args)
    suites = []
    for module in modules:
        matched_names = []
        # Iterate over a snapshot since we remove matches as we find them.
        for name in list(unmatched_test_names):
            try:
                # attrgetter resolves dotted names like 'suite.test_case'.
                operator.attrgetter(name)(module)
            except AttributeError:
                continue
            matched_names.append(name)
            unmatched_test_names.remove(name)
        if matched_names:
            loaded_tests = loader.loadTestsFromNames(sorted(matched_names), module)
            tests = flattened_tests(loaded_tests)
            suite = suite_for_module(module, tests)
            for test in tests:
                suite.addTest(test)
            suites.append((module.__name__, suite))
    return suites, unmatched_test_names
def flattened_tests(loaded_tests):
    """Flatten unittest's one-level-nested suite structure into a flat list."""
    return [test for subsuite in loaded_tests for test in subsuite]
def suite_for_module(module, tests):
    """Choose a test suite implementation for `module`.

    Returns a ParallelTestSuite when the module supports parallel runs, there
    is more than one test, and more than one core is available; otherwise a
    plain sequential unittest.TestSuite.  EMTEST_SAVE_DIR and DEBUG force
    sequential execution.
    """
    # Only these modules are known to be safe to run in parallel.
    suite_supported = module.__name__ in ('test_core', 'test_other', 'test_posixtest')
    if not EMTEST_SAVE_DIR and not DEBUG:
        has_multiple_tests = len(tests) > 1
        has_multiple_cores = parallel_testsuite.num_cores() > 1
        if suite_supported and has_multiple_tests and has_multiple_cores:
            return parallel_testsuite.ParallelTestSuite(len(tests))
    return unittest.TestSuite()
def run_tests(options, suites):
    """Run every (module name, suite) pair, print a summary when more than
    one module ran, and return the failure count capped for use as a process
    exit code."""
    result_messages = []
    num_failures = 0

    print('Test suites:')
    print([s[0] for s in suites])
    # Run the discovered tests
    runner = unittest.TextTestRunner(verbosity=2)
    for mod_name, suite in suites:
        print('Running %s: (%s tests)' % (mod_name, suite.countTestCases()))
        res = runner.run(suite)
        num_failures += len(res.errors) + len(res.failures)
        result_messages.append('%s: %s run, %s errors, %s failures, %s skipped' %
                               (mod_name, res.testsRun, len(res.errors), len(res.failures), len(res.skipped)))

    if len(result_messages) > 1:
        print('====================')
        print()
        print('TEST SUMMARY')
        for msg in result_messages:
            print(' ' + msg)

    # Return the number of failures as the process exit code for automating success/failure reporting.
    return min(num_failures, 255)
def parse_args(args):
    """Parse a full argv (program name at index 0); only bare test names are
    accepted as positionals."""
    parser = argparse.ArgumentParser(prog='runner.py', description=__doc__)
    parser.add_argument('tests', nargs='*')
    # Fix: the original ignored its `args` parameter and let argparse read
    # sys.argv implicitly.  Parsing the caller-supplied argv (minus the
    # program name) keeps behavior identical for main(sys.argv) while making
    # the function honest about its input and testable.
    return parser.parse_args(args[1:])
def main(args):
    """Top-level entry point: resolve the requested test names against the
    test modules and run them.  Returns a process exit code."""
    options = parse_args(args)
    check_js_engines()

    def prepend_default(arg):
        # A bare 'test_foo' implicitly belongs to the default core mode.
        if arg.startswith('test_'):
            return default_core_test_mode + '.' + arg
        return arg

    tests = [prepend_default(t) for t in options.tests]
    modules = get_and_import_modules()
    all_tests = get_all_tests(modules)
    # Each stage transforms the requested name list in turn: wildcard
    # expansion, skip directives, then the 'random<N>' selection directive.
    tests = tests_with_expanded_wildcards(tests, all_tests)
    tests = skip_requested_tests(tests, modules)
    tests = args_for_random_tests(tests, modules)
    suites, unmatched_tests = load_test_suites(tests, modules)
    if unmatched_tests:
        print('ERROR: could not find the following tests: ' + ' '.join(unmatched_tests))
        return 1
    return run_tests(options, suites)
if __name__ == '__main__':
    try:
        # main returns the failure count (capped at 255) as the exit code.
        sys.exit(main(sys.argv))
    except KeyboardInterrupt:
        # Ctrl-C: exit with a failure code instead of dumping a traceback.
        logger.warning('KeyboardInterrupt')
        sys.exit(1)
|
test_qgsnetworkcontentfetcherregistry.py | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsNetworkContentFetcherRegistry
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
from builtins import chr
from builtins import str
__author__ = 'Denis Rouzaud'
__date__ = '27/04/2018'
__copyright__ = 'Copyright 2018, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '176c06ceefb5f555205e72b20c962740cc0ec183'
import qgis # NOQA
import os
from qgis.testing import unittest, start_app
from qgis.core import QgsNetworkContentFetcherRegistry, QgsFetchedContent, QgsApplication
from utilities import unitTestDataPath
from qgis.PyQt.QtNetwork import QNetworkReply, QNetworkRequest
import socketserver
import threading
import http.server
app = start_app()
class TestQgsNetworkContentFetcherTask(unittest.TestCase):
    # Exercises QgsNetworkContentFetcherRegistry against a local HTTP server
    # that serves files out of the unit-test data directory.

    @classmethod
    def setUpClass(cls):
        # Bring up a simple HTTP server
        os.chdir(unitTestDataPath() + '')
        handler = http.server.SimpleHTTPRequestHandler
        # Port 0 asks the OS for any free port; read the chosen port back.
        cls.httpd = socketserver.TCPServer(('localhost', 0), handler)
        cls.port = cls.httpd.server_address[1]
        cls.httpd_thread = threading.Thread(target=cls.httpd.serve_forever)
        # Daemon thread so the server does not keep the interpreter alive.
        # NOTE(review): setDaemon() is a deprecated alias for `.daemon = True`.
        cls.httpd_thread.setDaemon(True)
        cls.httpd_thread.start()

    def __init__(self, methodName):
        """Run once on class initialization."""
        unittest.TestCase.__init__(self, methodName)
        # Flags shared with the fetched-signal callbacks in the tests below.
        self.loaded = False
        self.file_content = ''

    def testFetchBadUrl(self):
        # Fetching an unresolvable URL must end in the Failed state, with a
        # network error set and no local file path.
        registry = QgsApplication.networkContentFetcherRegistry()
        content = registry.fetch('http://x')
        self.loaded = False

        def check_reply():
            self.assertEqual(content.status(), QgsFetchedContent.Failed)
            self.assertNotEqual(content.error(), QNetworkReply.NoError)
            self.assertEqual(content.filePath(), '')
            self.loaded = True

        content.fetched.connect(check_reply)
        content.download()
        # Pump the Qt event loop until the fetched signal has been handled.
        while not self.loaded:
            app.processEvents()

    def testFetchGoodUrl(self):
        url = 'http://localhost:' + str(self.port) + '/qgis_local_server/index.html'
        registry = QgsApplication.networkContentFetcherRegistry()
        content = registry.fetch(url)
        self.loaded = False

        def check_reply():
            self.loaded = True
            self.assertEqual(content.status(), QgsFetchedContent.Finished)
            self.assertEqual(content.error(), QNetworkReply.NoError)
            self.assertNotEqual(content.filePath(), '')

        content.fetched.connect(check_reply)
        content.download()
        while not self.loaded:
            app.processEvents()

        # The registry resolves a fetched URL to its downloaded local path.
        self.assertEqual(registry.localPath(url), content.filePath())

        # create new content with same URL: a second fetch of an already
        # downloaded URL is immediately Finished.
        contentV2 = registry.fetch(url)
        self.assertEqual(contentV2.status(), QgsFetchedContent.Finished)

    def testFetchReloadUrl(self):
        def writeSimpleFile(content):
            # (Re)write the file the HTTP server serves, and remember what it
            # should now contain for the callback's comparison below.
            with open('qgis_local_server/simple_content.txt', 'w') as f:
                f.write(content)
            self.file_content = content

        registry = QgsApplication.networkContentFetcherRegistry()
        content = registry.fetch('http://localhost:' + str(self.port) + '/qgis_local_server/simple_content.txt')
        self.loaded = False
        writeSimpleFile('my initial content')

        def check_reply():
            self.loaded = True
            self.assertEqual(content.status(), QgsFetchedContent.Finished)
            self.assertEqual(content.error(), QNetworkReply.NoError)
            self.assertNotEqual(content.filePath(), '')
            with open(content.filePath(), encoding="utf-8") as file:
                self.assertEqual(file.readline().rstrip(), self.file_content)

        content.fetched.connect(check_reply)
        content.download()
        while not self.loaded:
            app.processEvents()

        # Change the served file; without a forced reload the local copy
        # must remain stale.
        writeSimpleFile('my second content')
        content.download()
        with open(content.filePath(), encoding="utf-8") as file:
            self.assertNotEqual(file.readline().rstrip(), self.file_content)

        # download(True) forces a re-fetch of the URL.
        # NOTE(review): self.loaded is still True here, so the wait loop below
        # exits immediately — presumably it was meant to be reset to False
        # before the forced reload; confirm intent.
        content.download(True)
        while not self.loaded:
            app.processEvents()
        os.remove('qgis_local_server/simple_content.txt')

    def testLocalPath(self):
        registry = QgsApplication.networkContentFetcherRegistry()
        filePath = 'qgis_local_server/index.html'
        # A plain (non-fetched) path is returned unchanged.
        self.assertEqual(registry.localPath(filePath), filePath)

        # a non existent download shall return untouched the path
        self.assertEqual(registry.localPath('xxxx'), 'xxxx')

        # an existent but unfinished download should return an empty path
        content = registry.fetch('xxxx')
        self.assertEqual(registry.localPath('xxxx'), '')
if __name__ == "__main__":
    # Run the above test case via the standard unittest CLI.
    unittest.main()
|
tube.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
import logging
import re
import six
import string
import subprocess
import sys
import threading
import time
from six.moves import range
from pwnlib import atexit
from pwnlib import term
from pwnlib.context import context
from pwnlib.log import Logger
from pwnlib.timeout import Timeout
from pwnlib.tubes.buffer import Buffer
from pwnlib.util import fiddling
from pwnlib.util import misc
from pwnlib.util import packing
class tube(Timeout, Logger):
"""
Container of all the tube functions common to sockets, TTYs and SSH connetions.
"""
default = Timeout.default
forever = Timeout.forever
#: Delimiter to use for :meth:`sendline`, :meth:`recvline`,
#: and related functions.
newline = b'\n'
    def __init__(self, timeout = default, level = None, *a, **kw):
        # Initialize the Timeout side via the MRO, then explicitly initialize
        # Logger with no pre-made logger object.
        super(tube, self).__init__(timeout)
        Logger.__init__(self, None)
        if level is not None:
            self.setLevel(level)
        # Internal receive buffer; extra positional/keyword arguments are
        # forwarded to Buffer.
        self.buffer = Buffer(*a, **kw)
        # Ensure the tube is closed at interpreter exit.
        atexit.register(self.close)
# Functions based on functions from subclasses
    def recv(self, numb = None, timeout = default):
        r"""recv(numb = 4096, timeout = default) -> bytes

        Receives up to `numb` bytes of data from the tube, and returns
        as soon as any quantity of data is available.

        If the request is not satisfied before ``timeout`` seconds pass,
        all data is buffered and an empty string (``''``) is returned.

        Raises:
            exceptions.EOFError: The connection is closed

        Returns:
            A bytes object containing bytes received from the socket,
            or ``''`` if a timeout occurred while waiting.

        Examples:

            >>> t = tube()
            >>> # Fake a data source
            >>> t.recv_raw = lambda n: b'Hello, world'
            >>> t.recv() == b'Hello, world'
            True
            >>> t.unrecv(b'Woohoo')
            >>> t.recv() == b'Woohoo'
            True
            >>> with context.local(log_level='debug'):
            ...    _ = t.recv() # doctest: +ELLIPSIS
            [...] Received 0xc bytes:
                b'Hello, world'
        """
        # Normalize the request through the buffer's fill-size policy.
        numb = self.buffer.get_fill_size(numb)
        # _recv may return a falsy value on timeout; always hand back bytes.
        return self._recv(numb, timeout) or b''
    def unrecv(self, data):
        """unrecv(data)

        Puts the specified data back at the beginning of the receive
        buffer.

        Examples:

            >>> t = tube()
            >>> t.recv_raw = lambda n: b'hello'
            >>> t.recv()
            b'hello'
            >>> t.recv()
            b'hello'
            >>> t.unrecv(b'world')
            >>> t.recv()
            b'world'
            >>> t.recv()
            b'hello'
        """
        # Encode str -> bytes per the current context before restoring it.
        data = context._encode(data)
        self.buffer.unget(data)
    def _fillbuffer(self, timeout = default):
        """_fillbuffer(timeout = default)

        Fills the internal buffer from the pipe, by calling
        :meth:`recv_raw` exactly once.

        Returns:
            The bytes of data received, or ``''`` if no data was received.

        Examples:

            >>> t = tube()
            >>> t.recv_raw = lambda *a: b'abc'
            >>> len(t.buffer)
            0
            >>> t._fillbuffer()
            b'abc'
            >>> len(t.buffer)
            3
        """
        data = b''

        # Apply `timeout` only for the duration of this single raw read.
        with self.local(timeout):
            data = self.recv_raw(self.buffer.get_fill_size())

        if data and self.isEnabledFor(logging.DEBUG):
            self.debug('Received %#x bytes:' % len(data))

            # Choose the most readable dump format for the debug log:
            if len(set(data)) == 1 and len(data) > 1:
                # one repeated byte -> show it once with a repeat count
                self.indented('%r * %#x' % (data[0], len(data)), level = logging.DEBUG)
            elif all(c in string.printable.encode() for c in data):
                # fully printable -> show line by line
                for line in data.splitlines(True):
                    self.indented(repr(line), level = logging.DEBUG)
            else:
                # arbitrary binary -> hexdump
                self.indented(fiddling.hexdump(data), level = logging.DEBUG)

        if data:
            self.buffer.add(data)

        return data
    def _recv(self, numb = None, timeout = default):
        """_recv(numb = 4096, timeout = default) -> bytes

        Receives one chunk from the internal buffer, or from the OS if the
        buffer is empty.
        """
        numb = self.buffer.get_fill_size(numb)

        # No buffered data, could not put anything in the buffer
        # before timeout.
        if not self.buffer and not self._fillbuffer(timeout):
            return b''

        return self.buffer.get(numb)
    def recvpred(self, pred, timeout = default):
        """recvpred(pred, timeout = default) -> bytes

        Receives one byte at a time from the tube, until ``pred(all_bytes)``
        evaluates to True.

        If the request is not satisfied before ``timeout`` seconds pass,
        all data is buffered and an empty string (``''``) is returned.

        Arguments:
            pred(callable): Function to call, with the currently-accumulated data.
            timeout(int): Timeout for the operation

        Raises:
            exceptions.EOFError: The connection is closed

        Returns:
            A bytes object containing bytes received from the socket,
            or ``''`` if a timeout occurred while waiting.
        """
        data = b''

        with self.countdown(timeout):
            while not pred(data):
                try:
                    # One byte per iteration, so the predicate sees every
                    # prefix of the incoming stream.
                    res = self.recv(1)
                except Exception:
                    # On error, restore everything consumed and report timeout.
                    self.unrecv(data)
                    return b''

                if res:
                    data += res
                else:
                    # Timed out: put the partial data back for later receives.
                    self.unrecv(data)
                    return b''

        return data
    def recvn(self, numb, timeout = default):
        """recvn(numb, timeout = default) -> bytes

        Receives exactly `numb` bytes.

        If the request is not satisfied before ``timeout`` seconds pass,
        all data is buffered and an empty string (``''``) is returned.

        Raises:
            exceptions.EOFError: The connection closed before the request could be satisfied

        Returns:
            A bytes object containing bytes received from the socket,
            or ``''`` if a timeout occurred while waiting.

        Examples:

            >>> t = tube()
            >>> data = b'hello world'
            >>> t.recv_raw = lambda *a: data
            >>> t.recvn(len(data)) == data
            True
            >>> t.recvn(len(data)+1) == data + data[:1]
            True
            >>> t.recv_raw = lambda *a: None
            >>> # The remaining data is buffered
            >>> t.recv() == data[1:]
            True
            >>> t.recv_raw = lambda *a: time.sleep(0.01) or b'a'
            >>> t.recvn(10, timeout=0.05)
            b''
            >>> t.recvn(10, timeout=0.06) # doctest: +ELLIPSIS
            b'aaaaaa...'
        """
        # Keep track of how much data has been received
        # It will be pasted together at the end if a
        # timeout does not occur, or put into the tube buffer.
        with self.countdown(timeout):
            # Within countdown(), self.timeout reflects the remaining time,
            # so each fill attempt gets a progressively smaller budget.
            while self.countdown_active() and len(self.buffer) < numb and self._fillbuffer(self.timeout):
                pass

        if len(self.buffer) < numb:
            # Timed out before `numb` bytes arrived; leave them all buffered.
            return b''

        return self.buffer.get(numb)
    def recvuntil(self, delims, drop=False, timeout=default):
        """recvuntil(delims, drop=False, timeout=default) -> bytes

        Receive data until one of `delims` is encountered.

        If the request is not satisfied before ``timeout`` seconds pass,
        all data is buffered and an empty string (``''``) is returned.

        Arguments:
            delims(bytes,tuple): Byte-string of delimiters characters, or list of delimiter byte-strings.
            drop(bool): Drop the ending.  If :const:`True` it is removed from the end of the return value.

        Raises:
            exceptions.EOFError: The connection closed before the request could be satisfied

        Returns:
            A bytes object containing bytes received from the socket,
            or ``''`` if a timeout occurred while waiting.

        Examples:

            >>> t = tube()
            >>> t.recv_raw = lambda n: b"Hello World!"
            >>> t.recvuntil(b' ')
            b'Hello '
            >>> _=t.clean(0)
            >>> # Matches on 'o' in 'Hello'
            >>> t.recvuntil((b' ',b'W',b'o',b'r'))
            b'Hello'
            >>> _=t.clean(0)
            >>> # Matches expressly full string
            >>> t.recvuntil(b' Wor')
            b'Hello Wor'
            >>> _=t.clean(0)
            >>> # Matches on full string, drops match
            >>> t.recvuntil(b' Wor', drop=True)
            b'Hello'

            >>> # Try with regex special characters
            >>> t = tube()
            >>> t.recv_raw = lambda n: b"Hello|World"
            >>> t.recvuntil(b'|', drop=True)
            b'Hello'
        """
        # Convert string into singleton tuple
        if isinstance(delims, (bytes, six.text_type)):
            delims = (delims,)

        # Encode each delimiter per the current context (str -> bytes).
        delims = tuple(map(context._encode, delims))

        # Longest delimiter for tracking purposes: we must keep at least this
        # much of the tail around in case a delimiter straddles two reads.
        longest = max(map(len, delims))

        # Cumulative data to search: `data` collects chunks already known not
        # to contain a full delimiter; `top` is the active search window.
        data = []
        top = b''

        with self.countdown(timeout):
            while self.countdown_active():
                try:
                    res = self.recv(timeout=self.timeout)
                except Exception:
                    # Restore everything consumed so far, then re-raise.
                    self.unrecv(b''.join(data) + top)
                    raise

                if not res:
                    # Timed out: restore the consumed data and report failure.
                    self.unrecv(b''.join(data) + top)
                    return b''

                top += res

                # Find the earliest match position among all delimiters.
                start = len(top)
                for d in delims:
                    j = top.find(d)
                    if start > j > -1:
                        start = j
                        end = j + len(d)

                if start < len(top):
                    # Matched: anything past the delimiter goes back into the
                    # receive buffer; the match itself is kept or dropped.
                    self.unrecv(top[end:])
                    if drop:
                        top = top[:start]
                    else:
                        top = top[:end]
                    return b''.join(data) + top

                if len(top) > longest:
                    # No match yet: archive all but the last longest+1 bytes,
                    # which could still begin a delimiter in the next chunk.
                    i = -longest - 1
                    data.append(top[:i])
                    top = top[i:]

        return b''
    def recvlines(self, numlines=2**20, keepends=False, timeout=default):
        r"""recvlines(numlines, keepends=False, timeout=default) -> list of bytes objects

        Receive up to ``numlines`` lines.

        A "line" is any sequence of bytes terminated by the byte sequence
        set by :attr:`newline`, which defaults to ``'\n'``.

        If the request is not satisfied before ``timeout`` seconds pass,
        all data is buffered and an empty string (``''``) is returned.

        Arguments:
            numlines(int): Maximum number of lines to receive
            keepends(bool): Keep newlines at the end of each line (:const:`False`).
            timeout(int): Maximum timeout

        Raises:
            exceptions.EOFError: The connection closed before the request could be satisfied

        Returns:
            A string containing bytes received from the socket,
            or ``''`` if a timeout occurred while waiting.

        Examples:

            >>> t = tube()
            >>> t.recv_raw = lambda n: b'\n'
            >>> t.recvlines(3)
            [b'', b'', b'']
            >>> t.recv_raw = lambda n: b'Foo\nBar\nBaz\n'
            >>> t.recvlines(3)
            [b'Foo', b'Bar', b'Baz']
            >>> t.recvlines(3, True)
            [b'Foo\n', b'Bar\n', b'Baz\n']
        """
        lines = []
        with self.countdown(timeout):
            for _ in range(numlines):
                try:
                    # We must set 'keepends' to True here so that we can
                    # restore the original, unmodified data to the buffer
                    # in the event of a timeout.
                    res = self.recvline(keepends=True, timeout=timeout)
                except Exception:
                    self.unrecv(b''.join(lines))
                    raise

                if res:
                    lines.append(res)
                else:
                    # Timed out: return what we have so far.
                    break

        if not keepends:
            # NOTE: rstrip treats self.newline as a *set* of byte values, so a
            # multi-byte newline (e.g. b'\r\n') strips any trailing run of
            # those bytes rather than one exact terminator.
            lines = [line.rstrip(self.newline) for line in lines]

        return lines
def recvlinesS(self, numlines=2**20, keepends=False, timeout=default):
r"""recvlinesS(numlines, keepends=False, timeout=default) -> str list
This function is identical to :meth:`recvlines`, but decodes
the received bytes into string using :func:`context.encoding`.
You should use :meth:`recvlines` whenever possible for better performance.
Examples:
>>> t = tube()
>>> t.recv_raw = lambda n: b'\n'
>>> t.recvlinesS(3)
['', '', '']
>>> t.recv_raw = lambda n: b'Foo\nBar\nBaz\n'
>>> t.recvlinesS(3)
['Foo', 'Bar', 'Baz']
"""
return [context._decode(x) for x in self.recvlines(numlines, keepends, timeout)]
def recvlinesb(self, numlines=2**20, keepends=False, timeout=default):
r"""recvlinesb(numlines, keepends=False, timeout=default) -> bytearray list
This function is identical to :meth:`recvlines`, but returns a bytearray.
Examples:
>>> t = tube()
>>> t.recv_raw = lambda n: b'\n'
>>> t.recvlinesb(3)
[bytearray(b''), bytearray(b''), bytearray(b'')]
>>> t.recv_raw = lambda n: b'Foo\nBar\nBaz\n'
>>> t.recvlinesb(3)
[bytearray(b'Foo'), bytearray(b'Bar'), bytearray(b'Baz')]
"""
return [bytearray(x) for x in self.recvlines(numlines, keepends, timeout)]
    def recvline(self, keepends=True, timeout=default):
        r"""recvline(keepends=True, timeout=default) -> bytes

        Receive a single line from the tube.

        A "line" is any sequence of bytes terminated by the byte sequence
        set in :attr:`newline`, which defaults to ``'\n'``.

        If the request is not satisfied before ``timeout`` seconds pass,
        all data is buffered and an empty string (``''``) is returned.

        Arguments:
            keepends(bool): Keep the line ending (:const:`True`).
            timeout(int): Timeout

        Return:
            All bytes received over the tube until the first
            newline ``'\n'`` is received.  Optionally retains
            the ending.

        Examples:

            >>> t = tube()
            >>> t.recv_raw = lambda n: b'Foo\nBar\r\nBaz\n'
            >>> t.recvline()
            b'Foo\n'
            >>> t.recvline()
            b'Bar\r\n'
            >>> t.recvline(keepends = False)
            b'Baz'
            >>> t.newline = b'\r\n'
            >>> t.recvline(keepends = False)
            b'Foo\nBar'
        """
        # A line is simply "everything up to (and including) self.newline";
        # `drop` is the inverse of keepends.
        return self.recvuntil(self.newline, drop = not keepends, timeout = timeout)
    def recvline_pred(self, pred, keepends=False, timeout=default):
        r"""recvline_pred(pred, keepends=False) -> bytes

        Receive data until ``pred(line)`` returns a truthy value.
        Drop all other data.

        If the request is not satisfied before ``timeout`` seconds pass,
        all data is buffered and an empty string (``''``) is returned.

        Arguments:
            pred(callable): Function to call.  Returns the line for which
                this function returns :const:`True`.

        Examples:

            >>> t = tube()
            >>> t.recv_raw = lambda n: b"Foo\nBar\nBaz\n"
            >>> t.recvline_pred(lambda line: line == b"Bar\n")
            b'Bar'
            >>> t.recvline_pred(lambda line: line == b"Bar\n", keepends=True)
            b'Bar\n'
            >>> t.recvline_pred(lambda line: line == b'Nope!', timeout=0.1)
            b''
        """
        tmpbuf = Buffer()
        line = b''
        with self.countdown(timeout):
            while self.countdown_active():
                try:
                    line = self.recvline(keepends=True)
                except Exception:
                    # Restore every non-matching line we consumed, then re-raise.
                    self.buffer.unget(tmpbuf)
                    raise

                if not line:
                    # Timed out: restore the skipped lines and report failure.
                    self.buffer.unget(tmpbuf)
                    return b''

                if pred(line):
                    if not keepends:
                        # The predicate always sees the line *with* its
                        # newline; only the returned value is trimmed.
                        line = line[:-len(self.newline)]
                    return line
                else:
                    # Non-matching lines are dropped from the result, but kept
                    # in tmpbuf so they can be restored on timeout/error.
                    tmpbuf.add(line)

        return b''
def recvline_contains(self, items, keepends = False, timeout = default):
r"""
Receive lines until one line is found which contains at least
one of `items`.
Arguments:
items(str,tuple): List of strings to search for, or a single string.
keepends(bool): Return lines with newlines if :const:`True`
timeout(int): Timeout, in seconds
Examples:
>>> t = tube()
>>> t.recv_raw = lambda n: b"Hello\nWorld\nXylophone\n"
>>> t.recvline_contains(b'r')
b'World'
>>> f = lambda n: b"cat dog bird\napple pear orange\nbicycle car train\n"
>>> t = tube()
>>> t.recv_raw = f
>>> t.recvline_contains(b'pear')
b'apple pear orange'
>>> t = tube()
>>> t.recv_raw = f
>>> t.recvline_contains((b'car', b'train'))
b'bicycle car train'
"""
if isinstance(items, (bytes, six.text_type)):
items = (items,)
items = tuple(map(context._encode, items))
def pred(line):
return any(d in line for d in items)
return self.recvline_pred(pred, keepends, timeout)
def recvline_startswith(self, delims, keepends=False, timeout=default):
r"""recvline_startswith(delims, keepends=False, timeout=default) -> bytes
Keep receiving lines until one is found that starts with one of
`delims`. Returns the last line received.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
Arguments:
delims(str,tuple): List of strings to search for, or string of single characters
keepends(bool): Return lines with newlines if :const:`True`
timeout(int): Timeout, in seconds
Returns:
The first line received which starts with a delimiter in ``delims``.
Examples:
>>> t = tube()
>>> t.recv_raw = lambda n: b"Hello\nWorld\nXylophone\n"
>>> t.recvline_startswith((b'W',b'X',b'Y',b'Z'))
b'World'
>>> t.recvline_startswith((b'W',b'X',b'Y',b'Z'), True)
b'Xylophone\n'
>>> t.recvline_startswith(b'Wo')
b'World'
"""
# Convert string into singleton tupple
if isinstance(delims, (bytes, six.text_type)):
delims = (delims,)
delims = tuple(map(context._encode, delims))
return self.recvline_pred(lambda line: any(map(line.startswith, delims)),
keepends=keepends,
timeout=timeout)
def recvline_endswith(self, delims, keepends=False, timeout=default):
r"""recvline_endswith(delims, keepends=False, timeout=default) -> bytes
Keep receiving lines until one is found that starts with one of
`delims`. Returns the last line received.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
See :meth:`recvline_startswith` for more details.
Examples:
>>> t = tube()
>>> t.recv_raw = lambda n: b'Foo\nBar\nBaz\nKaboodle\n'
>>> t.recvline_endswith(b'r')
b'Bar'
>>> t.recvline_endswith((b'a',b'b',b'c',b'd',b'e'), True)
b'Kaboodle\n'
>>> t.recvline_endswith(b'oodle')
b'Kaboodle'
"""
# Convert string into singleton tupple
if isinstance(delims, (bytes, six.text_type)):
delims = (delims,)
delims = tuple(context._encode(delim) + self.newline for delim in delims)
return self.recvline_pred(lambda line: any(map(line.endswith, delims)),
keepends=keepends,
timeout=timeout)
def recvregex(self, regex, exact=False, timeout=default):
"""recvregex(regex, exact=False, timeout=default) -> bytes
Wrapper around :func:`recvpred`, which will return when a regex
matches the string in the buffer.
By default :func:`re.RegexObject.search` is used, but if `exact` is
set to True, then :func:`re.RegexObject.match` will be used instead.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
"""
if isinstance(regex, (bytes, six.text_type)):
regex = context._encode(regex)
regex = re.compile(regex)
if exact:
pred = regex.match
else:
pred = regex.search
return self.recvpred(pred, timeout = timeout)
def recvline_regex(self, regex, exact=False, keepends=False, timeout=default):
    """recvline_regex(regex, exact=False, keepends=False, timeout=default) -> bytes

    Wrapper around :func:`recvline_pred`, which will return when a regex
    matches a line.

    By default :func:`re.RegexObject.search` is used, but if `exact` is
    set to True, then :func:`re.RegexObject.match` will be used instead.

    If the request is not satisfied before ``timeout`` seconds pass,
    all data is buffered and an empty string (``''``) is returned.
    """
    # A plain string/bytes pattern is encoded and compiled; an
    # already-compiled pattern object is used as-is.
    if isinstance(regex, (bytes, six.text_type)):
        regex = re.compile(context._encode(regex))

    predicate = regex.match if exact else regex.search
    return self.recvline_pred(predicate, keepends=keepends, timeout=timeout)
def recvrepeat(self, timeout=default):
    """recvrepeat(timeout=default) -> bytes

    Receives data until a timeout or EOF is reached.

    Examples:

        >>> data = [
        ... b'd',
        ... b'', # simulate timeout
        ... b'c',
        ... b'b',
        ... b'a',
        ... ]
        >>> def delayrecv(n, data=data):
        ...     return data.pop()
        >>> t = tube()
        >>> t.recv_raw = delayrecv
        >>> t.recvrepeat(0.2)
        b'abc'
        >>> t.recv()
        b'd'
    """
    # Keep filling the buffer until a timeout (empty fill) or EOF stops us.
    while True:
        try:
            if not self._fillbuffer(timeout=timeout):
                break
        except EOFError:
            break
    return self.buffer.get()
def recvall(self, timeout=Timeout.forever):
    """recvall() -> bytes

    Receives data until EOF is reached.

    Arguments:
        timeout: Maximum time to wait; defaults to waiting forever.

    Returns:
        All data received until EOF, plus anything already buffered.
    """
    with self.waitfor('Receiving all data') as h:
        l = len(self.buffer)
        # Apply `timeout` only for the duration of the receive loop.
        with self.local(timeout):
            try:
                while True:
                    # Show a human-readable running byte count.
                    l = misc.size(len(self.buffer))
                    h.status(l)
                    if not self._fillbuffer():
                        break
            except EOFError:
                pass
        h.success("Done (%s)" % l)
    # EOF reached (or timed out): release the underlying connection.
    self.close()
    return self.buffer.get()
def send(self, data):
    """send(data)

    Sends data.

    If log level ``DEBUG`` is enabled, also prints out the data
    received.

    If it is not possible to send anymore because of a closed
    connection, it raises ``exceptions.EOFError``

    Examples:

        >>> def p(x): print(repr(x))
        >>> t = tube()
        >>> t.send_raw = p
        >>> t.send(b'hello')
        b'hello'
    """
    data = context._encode(data)
    if self.isEnabledFor(logging.DEBUG):
        self.debug('Sent %#x bytes:' % len(data))
        if len(set(data)) == 1:
            # Every byte identical: summarize as "byte * count".
            self.indented('%r * %#x' % (data[0], len(data)))
        elif all(c in string.printable.encode() for c in data):
            # Fully printable payload: log it line by line.
            for line in data.splitlines(True):
                self.indented(repr(line), level = logging.DEBUG)
        else:
            # Binary payload: log a hexdump instead.
            self.indented(fiddling.hexdump(data), level = logging.DEBUG)
    self.send_raw(data)
def sendline(self, line=b''):
    r"""sendline(data)

    Shorthand for ``t.send(data + t.newline)``.

    Examples:

        >>> def p(x): print(repr(x))
        >>> t = tube()
        >>> t.send_raw = p
        >>> t.sendline(b'hello')
        b'hello\n'
        >>> t.newline = b'\r\n'
        >>> t.sendline(b'hello')
        b'hello\r\n'
    """
    # Encode first, then append the tube's newline convention.
    self.send(context._encode(line) + self.newline)
def sendlines(self, lines=()):
    """sendlines(lines=())

    Sends each element of `lines` as its own line (see :meth:`sendline`).

    Arguments:
        lines: Iterable of lines to send.  The default was changed from a
            mutable ``[]`` to an immutable ``()`` to avoid the shared
            mutable-default-argument pitfall; behavior is unchanged since
            the argument is only iterated.
    """
    for line in lines:
        self.sendline(line)
def sendafter(self, delim, data, timeout = default):
    """sendafter(delim, data, timeout = default) -> bytes

    A combination of ``recvuntil(delim, timeout=timeout)`` and ``send(data)``.
    """
    received = self.recvuntil(delim, timeout=timeout)
    self.send(data)
    return received
def sendlineafter(self, delim, data, timeout = default):
    """sendlineafter(delim, data, timeout = default) -> bytes

    A combination of ``recvuntil(delim, timeout=timeout)`` and ``sendline(data)``."""
    received = self.recvuntil(delim, timeout=timeout)
    self.sendline(data)
    return received
def sendthen(self, delim, data, timeout = default):
    """sendthen(delim, data, timeout = default) -> bytes

    A combination of ``send(data)`` and ``recvuntil(delim, timeout=timeout)``."""
    # Send first, then wait for the delimiter in the response.
    self.send(data)
    received = self.recvuntil(delim, timeout=timeout)
    return received
def sendlinethen(self, delim, data, timeout = default):
    """sendlinethen(delim, data, timeout = default) -> bytes

    A combination of ``sendline(data)`` and ``recvuntil(delim, timeout=timeout)``."""
    # Send the line first, then wait for the delimiter in the response.
    self.sendline(data)
    received = self.recvuntil(delim, timeout=timeout)
    return received
def interactive(self, prompt = term.text.bold_red('$') + ' '):
    """interactive(prompt = pwnlib.term.text.bold_red('$') + ' ')

    Does simultaneous reading and writing to the tube. In principle this just
    connects the tube to standard in and standard out, but in practice this
    is much more usable, since we are using :mod:`pwnlib.term` to print a
    floating prompt.

    Thus it only works in while in :data:`pwnlib.term.term_mode`.
    """
    self.info('Switching to interactive mode')

    go = threading.Event()
    def recv_thread():
        # Pump tube -> stdout until EOF or until the main loop signals stop.
        # NOTE: Event.isSet() was replaced by is_set() — isSet is the
        # deprecated camelCase alias (this block already uses t.is_alive()).
        while not go.is_set():
            try:
                cur = self.recv(timeout = 0.05)
                cur = cur.replace(self.newline, b'\n')
                if cur:
                    stdout = sys.stdout
                    if not term.term_mode:
                        # Write raw bytes when not in term mode.
                        stdout = getattr(stdout, 'buffer', stdout)
                    stdout.write(cur)
                    stdout.flush()
            except EOFError:
                self.info('Got EOF while reading in interactive')
                break

    t = context.Thread(target = recv_thread)
    t.daemon = True
    t.start()

    try:
        # Pump stdin -> tube until EOF on either side.
        while not go.is_set():
            if term.term_mode:
                data = term.readline.readline(prompt = prompt, float = True)
            else:
                stdin = getattr(sys.stdin, 'buffer', sys.stdin)
                data = stdin.read(1)

            if data:
                try:
                    self.send(data)
                except EOFError:
                    go.set()
                    self.info('Got EOF while sending in interactive')
            else:
                # EOF on stdin ends the interactive session.
                go.set()
    except KeyboardInterrupt:
        self.info('Interrupted')
        go.set()

    # Join in short steps so Ctrl-C stays responsive.
    while t.is_alive():
        t.join(timeout = 0.1)
def stream(self, line_mode=True):
    """stream()

    Receive data until the tube exits, and print it to stdout.

    Similar to :func:`interactive`, except that no input is sent.

    Similar to ``print(tube.recvall())`` except that data is printed
    as it is received, rather than after all data is received.

    Arguments:
        line_mode(bool): Whether to receive line-by-line or raw data.

    Returns:
        All data printed.
    """
    buf = Buffer()
    # Receive one line at a time, or one raw chunk at a time.
    function = self.recvline if line_mode else self.recv
    try:
        while True:
            buf.add(function())
            stdout = sys.stdout
            if not term.term_mode:
                # Write raw bytes when not in term mode.
                stdout = getattr(stdout, 'buffer', stdout)
            # Echo only the most recently received chunk.
            stdout.write(buf.data[-1])
    except KeyboardInterrupt:
        pass
    except EOFError:
        pass

    return buf.get()
def clean(self, timeout = 0.05):
    """clean(timeout = 0.05)

    Removes all the buffered data from a tube by calling
    :meth:`pwnlib.tubes.tube.tube.recv` with a low timeout until it fails.

    If ``timeout`` is zero, only cached data will be cleared.

    Note: If timeout is set to zero, the underlying network is
    not actually polled; only the internal buffer is cleared.

    Returns:
        All data received

    Examples:

        >>> t = tube()
        >>> t.unrecv(b'clean me up')
        >>> t.clean(0)
        b'clean me up'
        >>> len(t.buffer)
        0
    """
    # Non-zero timeout: actually poll the connection until it goes quiet.
    if timeout != 0:
        return self.recvrepeat(timeout)
    # Zero timeout: drain only what is already cached.
    return self.buffer.get()
def clean_and_log(self, timeout = 0.05):
    r"""clean_and_log(timeout = 0.05)

    Works exactly as :meth:`pwnlib.tubes.tube.tube.clean`, but also logs
    the received data (the log level is temporarily lowered to ``debug``
    so the receive itself becomes visible).

    Returns:
        All data received

    Examples:

        >>> def recv(n, data=[b'', b'hooray_data']):
        ...     while data: return data.pop()
        >>> t = tube()
        >>> t.recv_raw = recv
        >>> t.connected_raw = lambda d: True
        >>> t.fileno = lambda: 1234
        >>> with context.local(log_level='info'):
        ...     data = t.clean_and_log() #doctest: +ELLIPSIS
        [DEBUG] Received 0xb bytes:
        b'hooray_data'
        >>> data
        b'hooray_data'
        >>> context.clear()
    """
    with context.local(log_level='debug'):
        return self.clean(timeout)
def connect_input(self, other):
    """connect_input(other)

    Connects the input of this tube to the output of another tube object.

    A background daemon thread pumps data from ``other`` into ``self``
    until either side disconnects or hits EOF.

    Examples:

        >>> def p(x): print(x.decode())
        >>> def recvone(n, data=[b'data']):
        ...     while data: return data.pop()
        ...     raise EOFError
        >>> a = tube()
        >>> b = tube()
        >>> a.recv_raw = recvone
        >>> b.send_raw = p
        >>> a.connected_raw = lambda d: True
        >>> b.connected_raw = lambda d: True
        >>> a.shutdown = lambda d: True
        >>> b.shutdown = lambda d: True
        >>> import time
        >>> _=(b.connect_input(a), time.sleep(0.1))
        data
    """
    def pump():
        # Local module reference: during interpreter shutdown, module
        # globals may be torn down; the `if not _sys` checks below detect
        # that and stop pumping silently instead of crashing.
        import sys as _sys
        while self.countdown_active():
            if not (self.connected('send') and other.connected('recv')):
                break

            try:
                data = other.recv(timeout = 0.05)
            except EOFError:
                break

            # Bail out if the interpreter is shutting down.
            if not _sys:
                return

            if not data:
                continue

            try:
                self.send(data)
            except EOFError:
                break

            # Bail out if the interpreter is shutting down.
            if not _sys:
                return

        # Pump finished: close our sending side and the peer's receiving side.
        self.shutdown('send')
        other.shutdown('recv')

    t = context.Thread(target = pump)
    t.daemon = True
    t.start()
def connect_output(self, other):
    """connect_output(other)

    Connects the output of this tube to the input of another tube object.

    Implemented by delegating to :meth:`connect_input` on `other`.

    Examples:

        >>> def p(x): print(repr(x))
        >>> def recvone(n, data=[b'data']):
        ...     while data: return data.pop()
        ...     raise EOFError
        >>> a = tube()
        >>> b = tube()
        >>> a.recv_raw = recvone
        >>> b.send_raw = p
        >>> a.connected_raw = lambda d: True
        >>> b.connected_raw = lambda d: True
        >>> a.shutdown = lambda d: True
        >>> b.shutdown = lambda d: True
        >>> _=(a.connect_output(b), time.sleep(0.1))
        b'data'
    """
    other.connect_input(self)
def connect_both(self, other):
    """connect_both(other)

    Connects the both ends of this tube object with another tube object."""
    # Bidirectional: pump other -> self and self -> other.
    self.connect_input(other)
    self.connect_output(other)
def spawn_process(self, *args, **kwargs):
    """Spawns a new process having this tube as stdin, stdout and stderr.

    Takes the same arguments as :class:`subprocess.Popen`.

    Returns:
        The :class:`subprocess.Popen` object for the spawned process.
    """
    # The child's three standard streams all share this tube's fd.
    return subprocess.Popen(
        *args,
        stdin = self.fileno(),
        stdout = self.fileno(),
        stderr = self.fileno(),
        **kwargs
    )
def __lshift__(self, other):
    """
    Shorthand for connecting multiple tubes.

    See :meth:`connect_input` for more information.

    Examples:

        The following are equivalent ::

            tube_a << tube_b
            tube_a.connect_input(tube_b)

        This is useful when chaining multiple tubes ::

            tube_a >> tube_b >> tube_a
            tube_a.connect_input(tube_b)
            tube_b.connect_input(tube_a)
    """
    self.connect_input(other)
    # Return `other` so the operator can be chained.
    return other
def __rshift__(self, other):
    """
    Inverse of the ``<<`` operator.  See :meth:`__lshift__`.

    See :meth:`connect_input` for more information.
    """
    self.connect_output(other)
    # Return `other` so the operator can be chained.
    return other
def __ne__(self, other):
    """
    Shorthand for connecting tubes to each other.

    The following are equivalent ::
        a >> b >> a
        a <> b

    See :meth:`connect_input` for more information.
    """
    # NOTE(review): this deliberately repurposes the (Python 2) ``<>``
    # operator as a "connect both ways" shorthand.  It returns None, so
    # ``!=`` on tubes does NOT behave like a normal inequality test.
    self << other << self
def wait_for_close(self):
    """Waits until the tube is closed."""
    # Poll the connection state rather than blocking on the raw layer.
    while self.connected():
        time.sleep(0.05)

# `wait` is an alias for `wait_for_close`.
wait = wait_for_close
def can_recv(self, timeout = 0):
    """can_recv(timeout = 0) -> bool

    Returns True, if there is data available within `timeout` seconds.

    Examples:

        >>> import time
        >>> t = tube()
        >>> t.can_recv_raw = lambda *a: False
        >>> t.can_recv()
        False
        >>> _=t.unrecv(b'data')
        >>> t.can_recv()
        True
        >>> _=t.recv()
        >>> t.can_recv()
        False
    """
    # Cached data counts as available; only consult the raw layer when
    # the internal buffer is empty.
    if self.buffer:
        return True
    return bool(self.can_recv_raw(timeout))
def settimeout(self, timeout):
    """settimeout(timeout)

    Set the timeout for receiving operations. If the string "default"
    is given, then :data:`context.timeout` will be used. If None is given,
    then there will be no timeout.

    Examples:

        >>> t = tube()
        >>> t.settimeout_raw = lambda t: None
        >>> t.settimeout(3)
        >>> t.timeout == 3
        True
    """
    # Assigning the property propagates to the raw layer via timeout_change().
    self.timeout = timeout
# Maps user-facing direction aliases onto the canonical values
# understood by shutdown_raw()/connected_raw().
shutdown_directions = {
    'in': 'recv',
    'read': 'recv',
    'recv': 'recv',
    'out': 'send',
    'write': 'send',
    'send': 'send',
}

# connected() additionally accepts 'any'.
connected_directions = shutdown_directions.copy()
connected_directions['any'] = 'any'
def shutdown(self, direction = "send"):
    """shutdown(direction = "send")

    Closes the tube for further reading or writing depending on `direction`.

    Arguments:
        direction(str): Which direction to close; "in", "read" or "recv"
            closes the tube in the ingoing direction, "out", "write" or "send"
            closes it in the outgoing direction.

    Raises:
        KeyError: If `direction` is not a recognized alias.

    Returns:
        :const:`None`

    Examples:

        >>> def p(x): print(x)
        >>> t = tube()
        >>> t.shutdown_raw = p
        >>> _=list(map(t.shutdown, ('in', 'read', 'recv', 'out', 'write', 'send')))
        recv
        recv
        recv
        send
        send
        send
        >>> t.shutdown('bad_value') #doctest: +ELLIPSIS
        Traceback (most recent call last):
        ...
        KeyError: "direction must be in ['in', 'out', 'read', 'recv', 'send', 'write']"
    """
    try:
        direction = self.shutdown_directions[direction]
    except KeyError:
        raise KeyError('direction must be in %r' % sorted(self.shutdown_directions))
    else:
        # Fix: use the already-canonicalized direction instead of looking it
        # up a second time.  The old double lookup only worked because the
        # alias map happens to be idempotent ('recv' -> 'recv').
        self.shutdown_raw(direction)
def connected(self, direction = 'any'):
    """connected(direction = 'any') -> bool

    Returns True if the tube is connected in the specified direction.

    Arguments:
        direction(str): Can be the string 'any', 'in', 'read', 'recv',
            'out', 'write', 'send'.

    Doctest:

        >>> def p(x): print(x)
        >>> t = tube()
        >>> t.connected_raw = p
        >>> _=list(map(t.connected, ('any', 'in', 'read', 'recv', 'out', 'write', 'send')))
        any
        recv
        recv
        recv
        send
        send
        send
        >>> t.connected('bad_value') #doctest: +ELLIPSIS
        Traceback (most recent call last):
        ...
        KeyError: "direction must be in ['any', 'in', 'out', 'read', 'recv', 'send', 'write']"
    """
    # Canonicalize the alias; unknown aliases surface as a KeyError with a
    # helpful sorted listing of the valid choices.
    try:
        canonical = self.connected_directions[direction]
    except KeyError:
        raise KeyError('direction must be in %r' % sorted(self.connected_directions))
    else:
        return self.connected_raw(canonical)
def __enter__(self):
    """Permit use of 'with' to control scoping and closing sessions.

    Examples:

        >>> t = tube()
        >>> def p(x): print(x)
        >>> t.close = lambda: p("Closed!")
        >>> with t: pass
        Closed!
    """
    return self
def __exit__(self, type, value, traceback):
    """Handles closing for 'with' statement

    See :meth:`__enter__`
    """
    # Exceptions are not suppressed (implicit None return).
    self.close()
# The minimal interface to be implemented by a child
def recv_raw(self, numb):
    """recv_raw(numb) -> str

    Should not be called directly. Receives data without using the buffer
    on the object.

    Unless there is a timeout or closed connection, this should always
    return data. In case of a timeout, it should return None, in case
    of a closed connection it should raise an ``exceptions.EOFError``.
    """
    # Default stub: behaves like a closed connection until overridden.
    raise EOFError('Not implemented')
def send_raw(self, data):
    """send_raw(data)

    Should not be called directly. Sends data to the tube.

    Should raise ``exceptions.EOFError`` if it is unable to send any
    more, because of a closed tube.
    """
    # Default stub: behaves like a closed connection until overridden.
    raise EOFError('Not implemented')
def settimeout_raw(self, timeout):
    """settimeout_raw(timeout)

    Should not be called directly. Sets the timeout for
    the tube.
    """
    raise NotImplementedError()
def timeout_change(self):
    """
    Informs the raw layer of the tube that the timeout has changed.

    Should not be called directly.

    Inherited from :class:`Timeout`.
    """
    try:
        self.settimeout_raw(self.timeout)
    except NotImplementedError:
        # The raw layer does not support timeouts; silently ignore.
        pass
def can_recv_raw(self, timeout):
    """can_recv_raw(timeout) -> bool

    Should not be called directly. Returns True, if
    there is data available within the timeout, but
    ignores the buffer on the object.
    """
    raise NotImplementedError()
def connected_raw(self, direction):
    """connected(direction = 'any') -> bool

    Should not be called directly. Returns True iff the
    tube is connected in the given direction.
    """
    raise NotImplementedError()
def close(self):
    """close()

    Closes the tube.
    """
    # Intentionally a no-op default; concrete tube types override this.
    pass
    # Ideally we could:
    # raise NotImplementedError()
    # But this causes issues with the unit tests.
def fileno(self):
    """fileno() -> int

    Returns the file number used for reading.
    """
    raise NotImplementedError()
def shutdown_raw(self, direction):
    """shutdown_raw(direction)

    Should not be called directly. Closes the tube for further reading or
    writing.
    """
    raise NotImplementedError()
# Convenience wrappers combining packing/unpacking with send/recv:
# pNN/pack/flat/fit pack their arguments and send the result;
# uNN/unpack receive exactly the required number of bytes and unpack them.
def p64(self, *a, **kw): return self.send(packing.p64(*a, **kw))
def p32(self, *a, **kw): return self.send(packing.p32(*a, **kw))
def p16(self, *a, **kw): return self.send(packing.p16(*a, **kw))
def p8(self, *a, **kw): return self.send(packing.p8(*a, **kw))
def pack(self, *a, **kw): return self.send(packing.pack(*a, **kw))

def u64(self, *a, **kw): return packing.u64(self.recvn(8), *a, **kw)
def u32(self, *a, **kw): return packing.u32(self.recvn(4), *a, **kw)
def u16(self, *a, **kw): return packing.u16(self.recvn(2), *a, **kw)
def u8(self, *a, **kw): return packing.u8(self.recvn(1), *a, **kw)
def unpack(self, *a, **kw): return packing.unpack(self.recvn(context.bytes), *a, **kw)

def flat(self, *a, **kw): return self.send(packing.flat(*a,**kw))
def fit(self, *a, **kw): return self.send(packing.fit(*a, **kw))
# Dynamic functions
#
# The following class-body metaprogramming generates extra method variants
# by injecting wrappers into the class namespace via locals().

def make_wrapper(func):
    # Build two variants of `func`: `<name>b` returning a bytearray and
    # `<name>S` returning a str decoded with context.encoding.
    def wrapperb(self, *a, **kw):
        return bytearray(func(self, *a, **kw))

    def wrapperS(self, *a, **kw):
        return context._decode(func(self, *a, **kw))

    wrapperb.__doc__ = 'Same as :meth:`{func.__name__}`, but returns a bytearray'.format(func=func)
    wrapperb.__name__ = func.__name__ + 'b'
    wrapperS.__doc__ = 'Same as :meth:`{func.__name__}`, but returns a str, ' \
                       'decoding the result using `context.encoding`. ' \
                       '(note that the binary versions are way faster)'.format(func=func)
    wrapperS.__name__ = func.__name__ + 'S'
    return wrapperb, wrapperS

# Generate the *b / *S variants for every receive primitive.
for func in [recv,
             recvn,
             recvall,
             recvrepeat,
             recvuntil,
             recvpred,
             recvregex,
             recvline,
             recvline_contains,
             recvline_startswith,
             recvline_endswith,
             recvline_regex]:
    for wrapper in make_wrapper(func):
        locals()[wrapper.__name__] = wrapper

def make_wrapper(func, alias):
    # Plain aliasing wrapper (used to create read*/write* aliases below).
    def wrapper(self, *a, **kw):
        return func(self, *a, **kw)

    wrapper.__doc__ = 'Alias for :meth:`{func.__name__}`'.format(func=func)
    wrapper.__name__ = alias
    return wrapper

# Alias every recv* method as read* and every send* method as write*.
for _name in list(locals()):
    if 'recv' in _name:
        _name2 = _name.replace('recv', 'read')
    elif 'send' in _name:
        _name2 = _name.replace('send', 'write')
    else:
        continue
    locals()[_name2] = make_wrapper(locals()[_name], _name2)

# Clean up the scope
del wrapper, func, make_wrapper, _name, _name2
|
robot_test.py | #!/usr/bin/env python3
import copy
import logging
import os
import pathlib
import re
import socket
import threading
from typing import Union
import queue
from functools import partial
import yaml
import pytest
from unittest import mock
import mecademicpy.robot as mdr
import mecademicpy.mx_robot_def as mx_def
import mecademicpy.robot_trajectory_files as robot_files
TEST_IP = '127.0.0.1'
MECA500_CONNECTED_RESPONSE = 'Connected to Meca500 R3 v9.0.0'
DEFAULT_TIMEOUT = 10 # Set 10s as default timeout.
#####################################################################################
# Test readme
#####################################################################################
# Use the 'robot' test fixture to automatically instantiate a robot object.
# Using the 'robot' fixture also enables automatically calling robot.Disconnect() at
# test teardown.
# Use 'connect_robot_helper(robot, args..)' to take care of robot connection.
# Refer to the 'test_start_offline_program()' test case for an example usage that
# also includes using `simple_response_handler()` to test a message exchange.
#####################################################################################
# Test fixtures and helper functions
#####################################################################################
# Fixture for creating robot object and also disconnecting on test teardown.
@pytest.fixture
def robot():
    """Yield a fresh mdr.Robot instance, disconnecting it at test teardown."""
    instance = mdr.Robot()
    assert instance is not None

    # Hand the robot to the test body.
    yield instance

    # Teardown: always disconnect, even if the test failed.
    instance.Disconnect()
# Automates sending the welcome message and responding to the robot serial query. Do not use for monitor_mode=True.
def connect_robot_helper(robot: mdr.Robot,
                         yaml_filename='meca500_r3_v9.yml',
                         monitor_mode=False,
                         offline_mode=True,
                         disconnect_on_exception=False,
                         enable_synchronous_mode=False):
    """Connect `robot` against a scripted fake robot described by a YAML file.

    Loads the expected connection message and command/response transactions
    from ``tests/robot_config/<yaml_filename>``, primes the robot's receive
    queue, runs a fake-robot responder thread, then calls robot.Connect()
    and waits until connected.  Do not use with monitor_mode=True responses
    that expect command traffic (monitor mode sends no commands).
    """
    file_path = pathlib.Path.cwd().joinpath('tests', 'robot_config')
    yaml_file_full_path = pathlib.Path.joinpath(file_path, yaml_filename)
    with open(yaml_file_full_path, 'r') as file_stream:
        robot_config = yaml.safe_load(file_stream)

    # Set connection message
    rx_queue = robot._monitor_rx_queue if monitor_mode else robot._command_rx_queue
    rx_queue.put(mdr._Message(mx_def.MX_ST_CONNECTED, robot_config['expected_connection_message']))

    expected_commands = []
    robot_responses = []
    if not monitor_mode and robot_config['expected_connect_commands']:
        # Set robot command responses
        for transaction in robot_config['expected_connect_commands']:
            expected_commands.append(transaction['name'])
            cmd_responses = []
            cmd_responses.append(mdr._Message(transaction['response_code'], transaction['response']))
            # A transaction may additionally push an unsolicited event.
            if 'extra_event' in transaction:
                cmd_responses.append(mdr._Message(transaction['extra_event'], transaction['extra_event_data']))
            robot_responses.append(cmd_responses)

    # Start the fake robot thread (that will simulate response to expected requests)
    fake_robot = threading.Thread(target=simple_response_handler,
                                  args=(robot._command_tx_queue, robot._command_rx_queue, expected_commands,
                                        robot_responses))
    fake_robot.start()

    robot.Connect(TEST_IP,
                  offline_mode=offline_mode,
                  disconnect_on_exception=disconnect_on_exception,
                  enable_synchronous_mode=enable_synchronous_mode,
                  monitor_mode=monitor_mode)

    # The responder must have consumed every expected command by now.
    fake_robot.join()

    robot.WaitConnected(timeout=0)
# Function for exchanging one message with queue.
def simple_response_handler(queue_in: queue.Queue, queue_out: queue.Queue, expected_in: list[str],
                            desired_out: Union[list[list[mdr._Message]], list[mdr._Message]]):
    """Assert each expected message arrives on `queue_in`, replying on `queue_out`.

    `expected_in` may be a single value or a list; each entry in
    `desired_out` may be a single response or a list of responses.
    """
    if not isinstance(expected_in, list):
        # Single-exchange form: one expected message, one response.
        received = queue_in.get(block=True, timeout=1)
        assert received == expected_in
        queue_out.put(desired_out)
        return

    for index, expected in enumerate(expected_in):
        received = queue_in.get(block=True, timeout=1)
        assert received == expected
        responses = desired_out[index]
        if isinstance(responses, list):
            for response in responses:
                queue_out.put(response)
        else:
            queue_out.put(responses)
# Server to listen for a connection. Send initial data in data_list on connect, send rest in response to any msg.
def fake_server(address, port, data_list, server_up):
    """Single-connection fake TCP server used by the connection tests.

    Binds to (address, port), signals `server_up`, accepts one client,
    immediately sends the first entry of `data_list` (if any), then sends
    one further entry in response to each message received, until the
    client disconnects.

    Fix: the client and listening sockets are now closed on exit
    (previously both were leaked).
    """
    server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        server_sock.settimeout(10)  # Allow up to 10 seconds to create the connection.
        server_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        server_sock.bind((address, port))
        server_sock.listen()
        server_up.set()

        client, addr = server_sock.accept()
        try:
            if data_list:
                client.sendall(data_list.pop(0).encode('ascii'))

            while True:
                received_data = client.recv(1024)
                if not received_data:
                    # Client closed the connection.
                    break
                if data_list:
                    client.sendall(data_list.pop(0).encode('ascii'))
        finally:
            client.close()
    finally:
        server_sock.close()
# Run the fake_server in a separate thread.
def run_fake_server(address, port, data_list):
    """Run fake_server in a daemon-less background thread and return the thread.

    Blocks until the server socket is listening (or DEFAULT_TIMEOUT expires).
    """
    server_up_event = threading.Event()  # Synchronization event for fake server.
    server_thread = threading.Thread(target=fake_server, args=(address, port, data_list, server_up_event))
    server_thread.start()
    # Don't return until the server is actually accepting connections.
    assert server_up_event.wait(timeout=DEFAULT_TIMEOUT)
    return server_thread
# Simulated socket, initialized with list of responses, one response at a time is returned with recv().
class FakeSocket():
    """Simulated socket: yields one pre-loaded response per recv() call."""

    def __init__(self, input):
        # Queue up the canned responses in order.
        self.queue = queue.Queue()
        for item in input:
            self.queue.put(item)

    def setblocking(self, _):
        # Accepted for API compatibility; the fake ignores blocking mode.
        pass

    def recv(self, _):
        # The requested byte count is ignored; one queued chunk is returned.
        return self.queue.get()
#####################################################################################
# Test cases
#####################################################################################
# Test that connecting with invalid parameters raises exception.
def test_setup_invalid_input(robot: mdr.Robot):
    """Connect() must reject a non-string address (TypeError) and a
    malformed IP string (ValueError)."""
    with pytest.raises(TypeError):
        robot.Connect(2)
    with pytest.raises(ValueError):
        robot.Connect('1.1.1.1.1')
# Test that connecting without robot will raise exception. On failure, first check that virtual robot is not running!
def test_connection_no_robot(robot: mdr.Robot):
    """Connecting when no robot is listening must raise CommunicationError."""
    # Zero timeout so the test fails fast instead of waiting on TCP.
    robot.default_timeout = 0

    with pytest.raises(mdr.CommunicationError):
        robot.Connect(TEST_IP)
# Test connection/disconnection cycle with real socket simulating a legacy robot.
# **** On failure, first check that virtual robot is not running!
def test_successful_connection_full_socket_legacy(robot: mdr.Robot):
    """Full connect/disconnect cycle over real sockets against a fake
    legacy (v8.3) robot; validates the parsed robot info."""
    # Scripted control-port replies: connect banner, serial, version, status.
    command_server_thread = run_fake_server(TEST_IP, mx_def.MX_ROBOT_TCP_PORT_CONTROL, [
        '[3000][Connected to Meca500 R3-virtual v8.3.10]\0', '[2083][m500-99999]\0',
        '[2082][v8.3.10.9876-unit-test-fake]\0', '[2007][0,0,0,0,1,1,1]\0'
    ])
    monitor_server_thread = run_fake_server(TEST_IP, mx_def.MX_ROBOT_TCP_PORT_FEED, [])

    # Not connected yet: WaitConnected must time out immediately.
    with pytest.raises(mdr.TimeoutException):
        robot.WaitConnected(timeout=0)

    robot.Connect(TEST_IP)
    robot.WaitConnected()

    assert robot.GetRobotInfo().model == 'Meca500'
    assert robot.GetRobotInfo().revision == 3
    assert robot.GetRobotInfo().is_virtual is True
    assert robot.GetRobotInfo().version.major == 8
    assert robot.GetRobotInfo().version.minor == 3
    assert robot.GetRobotInfo().version.patch == 10
    assert robot.GetRobotInfo().version.build == 9876
    assert robot.GetRobotInfo().serial == 'm500-99999'

    robot.Disconnect()
    # Sockets must be torn down after Disconnect().
    assert robot._command_socket is None
    assert robot._monitor_socket is None

    command_server_thread.join()
    monitor_server_thread.join()
# Test that the socket handler properly concatenates messages split across multiple recv() calls.
def test_successful_connection_split_response():
    """The socket handler must reassemble one message split across
    several recv() calls (terminated by the trailing empty chunk)."""
    fake_socket = FakeSocket([b'[3', b'00', b'0][Connected to Meca500 R3 v9.0.0]\0', b''])
    rx_queue = queue.Queue()

    # Test the socket handler directly to ensure messages are received across several recv() calls.
    mdr.Robot._handle_socket_rx(fake_socket, rx_queue, logging.getLogger(__name__))

    assert rx_queue.qsize() == 1
    message = rx_queue.get()
    assert message.id == mx_def.MX_ST_CONNECTED
    assert message.data == MECA500_CONNECTED_RESPONSE
# Test that we can connect to a Scara robot.
def test_scara_connection(robot: mdr.Robot):
    """Connect to a simulated Scara robot and validate the reported info."""
    # Removed dead code: `cur_dir = os.getcwd()` was assigned but never used.
    connect_robot_helper(robot, yaml_filename='scara_r1_v9.yml')

    assert not robot.GetStatusRobot().activation_state
    assert robot.GetRobotInfo().model == 'Scara'
    assert robot.GetRobotInfo().num_joints == 4
    assert robot.GetRobotInfo().version.major == 9
    assert robot.GetRobotInfo().rt_message_capable
    assert robot.GetRobotInfo().serial == 'scara-87654321'
# Test that we can connect to a M500 robot running older version 7.0.6
def test_7_0_connection(robot: mdr.Robot):
    """Connect to a simulated Meca500 running legacy firmware 7.0.6."""
    # Removed dead code: `cur_dir = os.getcwd()` was assigned but never used.
    connect_robot_helper(robot, yaml_filename='meca500_r3_v7_0.yml')

    assert not robot.GetStatusRobot().activation_state
    assert robot.GetRobotInfo().model == 'Meca500'
    assert robot.GetRobotInfo().num_joints == 6
    assert robot.GetRobotInfo().version.major == 7
    assert robot.GetRobotInfo().version.minor == 0
    assert robot.GetRobotInfo().version.patch == 6
    # 7.x firmware predates real-time messages and reports no serial.
    assert not robot.GetRobotInfo().rt_message_capable
    assert not robot.GetRobotInfo().rt_on_ctrl_port_capable
    assert robot.GetRobotInfo().serial is None
# Test that we can connect to a M500 robot running older version 8.3
def test_8_3_connection(robot: mdr.Robot):
    """Connect to a simulated Meca500 running firmware 8.3."""
    # Removed dead code: `cur_dir = os.getcwd()` was assigned but never used.
    connect_robot_helper(robot, yaml_filename='meca500_r3_v8_3.yml')

    assert not robot.GetStatusRobot().activation_state
    assert robot.GetRobotInfo().model == 'Meca500'
    assert robot.GetRobotInfo().num_joints == 6
    assert robot.GetRobotInfo().version.major == 8
    assert robot.GetRobotInfo().version.minor == 3
    # 8.3 firmware is not real-time-message capable.
    assert not robot.GetRobotInfo().rt_message_capable
    assert not robot.GetRobotInfo().rt_on_ctrl_port_capable
    assert robot.GetRobotInfo().serial == 'm500-83'
# Test that we can connect to a M500 robot running older version 8.4
def test_8_4_connection(robot: mdr.Robot):
    """Connect to a simulated Meca500 running firmware 8.4."""
    # Removed dead code: `cur_dir = os.getcwd()` was assigned but never used.
    connect_robot_helper(robot, yaml_filename='meca500_r3_v8_4.yml')

    assert not robot.GetStatusRobot().activation_state
    assert robot.GetRobotInfo().model == 'Meca500'
    assert robot.GetRobotInfo().num_joints == 6
    assert robot.GetRobotInfo().version.major == 8
    assert robot.GetRobotInfo().version.minor == 4
    # 8.4 adds real-time messages, but not yet on the control port.
    assert robot.GetRobotInfo().rt_message_capable
    assert not robot.GetRobotInfo().rt_on_ctrl_port_capable
    assert robot.GetRobotInfo().serial == 'm500-84'
# Test that we can connect to a M500 robot running version 9.x
def test_9_0_connection(robot: mdr.Robot):
    """Connect to a simulated Meca500 running firmware 9.x."""
    # Removed dead code: `cur_dir = os.getcwd()` was assigned but never used.
    connect_robot_helper(robot, yaml_filename='meca500_r3_v9.yml')

    assert not robot.GetStatusRobot().activation_state
    assert robot.GetRobotInfo().model == 'Meca500'
    assert robot.GetRobotInfo().num_joints == 6
    assert robot.GetRobotInfo().version.major == 9
    assert robot.GetRobotInfo().version.minor == 147
    assert robot.GetRobotInfo().version.patch == 0
    assert robot.GetRobotInfo().version.build == 1213
    # 9.x supports real-time messages, including on the control port.
    assert robot.GetRobotInfo().rt_message_capable
    assert robot.GetRobotInfo().rt_on_ctrl_port_capable
    assert robot.GetRobotInfo().serial == 'm500-99999999'
# Test that calling Connect() on an already-connected robot does nothing
def test_already_connected(robot: mdr.Robot):
    """Calling Connect() on an already-connected robot must be a no-op."""
    # Removed dead code: `cur_dir = os.getcwd()` was assigned but never used.
    connect_robot_helper(robot, yaml_filename='meca500_r3_v9.yml')

    # Try connecting again, should do nothing
    robot.Connect()
    assert robot.IsConnected()
# Ensure user can reconnect to robot after disconnection or failure to connect.
def test_sequential_connections(robot: mdr.Robot):
    """User can reconnect after a disconnection or a failed connect attempt."""
    # First attempt fails: robot reports another user is already connected.
    robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_USER_ALREADY, ''))
    with pytest.raises(Exception):
        robot.Connect(TEST_IP, offline_mode=True, disconnect_on_exception=False)

    # Second attempt fails: unexpected (unknown-code) first message.
    robot._command_rx_queue.put(mdr._Message(99999, ''))
    with pytest.raises(Exception):
        robot.Connect(TEST_IP, offline_mode=True, disconnect_on_exception=False)

    # After failures, a normal connect/disconnect cycle must still work — twice.
    connect_robot_helper(robot)
    robot.Disconnect()

    connect_robot_helper(robot)
    robot.Disconnect()
def test_connection_with_pending_monitor_messages(robot: mdr.Robot):
    """Robot must connect quickly even when unrelated messages precede the
    connection message on the monitoring port.

    Renamed from ``test_monitoring_connection``: a later test in this file
    reused that exact name, so this function was shadowed at import time
    and pytest never collected or ran it.
    """
    # Queue several unknown-code messages ahead of the connection handshake.
    robot._monitor_rx_queue.put(mdr._Message(99999, ''))
    robot._monitor_rx_queue.put(mdr._Message(99999, ''))
    robot._monitor_rx_queue.put(mdr._Message(99999, ''))
    robot._monitor_rx_queue.put(mdr._Message(99999, ''))

    # Make sure robot connects quickly even if many messages preceding connection message are on monitoring port
    connect_robot_helper(robot)
    robot.WaitConnected(timeout=0)
# Ensure user can wrap robot object within "with" block.
def test_with_block(robot: mdr.Robot):
    """A Robot used as a context manager fires on_connected on connect and
    on_disconnected when the 'with' block exits."""
    called_callbacks = []

    def on_connected_test():
        called_callbacks.append('on_connected_test')

    def on_disconnected_test():
        called_callbacks.append('on_disconnected_test')

    with mdr.Robot() as robot2:
        callbacks = mdr.RobotCallbacks()
        callbacks.on_connected = on_connected_test
        callbacks.on_disconnected = on_disconnected_test
        robot2.RegisterCallbacks(callbacks, run_callbacks_in_separate_thread=True)

        # Simulate a connection
        connect_robot_helper(robot2)

    # Test that connection occurred, and disconnection too (at end of "with" block)
    assert called_callbacks == ['on_connected_test', 'on_disconnected_test']
# Ensure user can wrap robot object within "with" block on an already existing robot
def test_with_block_twice(robot: mdr.Robot):
    """An existing Robot can enter a 'with' block twice; callbacks stay
    registered across blocks and fire for both connect/disconnect cycles."""
    called_callbacks = []

    def on_connected_test():
        called_callbacks.append('on_connected_test')

    def on_disconnected_test():
        called_callbacks.append('on_disconnected_test')

    # Create robot and attach callbacks
    robot2 = mdr.Robot()
    callbacks = mdr.RobotCallbacks()
    callbacks.on_connected = on_connected_test
    callbacks.on_disconnected = on_disconnected_test
    robot2.RegisterCallbacks(callbacks, run_callbacks_in_separate_thread=True)

    # Connect within 'with' block -> Should disconnect but keep callbacks attached
    with robot2:
        connect_robot_helper(robot2)

    # Connect again 'with' block -> Should disconnect but keep callbacks attached
    with robot2:
        connect_robot_helper(robot2)

    # Test that connection occurred, and disconnection too (at end of "with" block)
    assert called_callbacks == [
        'on_connected_test', 'on_disconnected_test', 'on_connected_test', 'on_disconnected_test'
    ]
# Ensure robot must not yet be connected when entering "with" block.
def test_with_pre_connected(robot: mdr.Robot):
    """Entering a 'with' block on an already-connected robot must raise
    InvalidStateError (the block body never runs)."""
    robot2 = mdr.Robot()
    connect_robot_helper(robot2)
    with pytest.raises(mdr.InvalidStateError):
        with robot2:
            # Never reached: __enter__ raises before the body executes.
            robot2.Disconnect()
# Test parsing of monitoring port messages, and that robot state is correctly updated.
def test_monitoring_connection(robot: mdr.Robot):
    """Feed one message of every real-time type through the monitor port and
    check that the matching field of the robot state is updated."""
    connect_robot_helper(robot, monitor_mode=True)

    # Helper functions for generating test data. To ensure data is unique in each
    # field, we add the response code to the 'seed' array, which is generated with range().
    def make_test_array(code, data):
        return [x + code for x in data]

    # Convert the test array into a TimestampedData object.
    def make_test_data(code, data):
        test_array = make_test_array(code, data)
        return mdr.TimestampedData(test_array[0], test_array[1:])

    # Convert the test array into a Message object.
    def make_test_message(code, data):
        test_array = make_test_array(code, data)
        return mdr._Message(code, ','.join([str(x) for x in test_array]))

    # (message code, payload length, robot-state attribute) for each real-time field.
    cases = [
        (mx_def.MX_ST_RT_TARGET_JOINT_POS, 7, 'rt_target_joint_pos'),
        (mx_def.MX_ST_RT_TARGET_CART_POS, 7, 'rt_target_cart_pos'),
        (mx_def.MX_ST_RT_TARGET_JOINT_VEL, 7, 'rt_target_joint_vel'),
        (mx_def.MX_ST_RT_TARGET_CART_VEL, 7, 'rt_target_cart_vel'),
        (mx_def.MX_ST_RT_TARGET_CONF, 4, 'rt_target_conf'),
        (mx_def.MX_ST_RT_TARGET_CONF_TURN, 2, 'rt_target_conf_turn'),
        (mx_def.MX_ST_RT_JOINT_POS, 7, 'rt_joint_pos'),
        (mx_def.MX_ST_RT_CART_POS, 7, 'rt_cart_pos'),
        (mx_def.MX_ST_RT_JOINT_VEL, 7, 'rt_joint_vel'),
        (mx_def.MX_ST_RT_JOINT_TORQ, 7, 'rt_joint_torq'),
        (mx_def.MX_ST_RT_CART_VEL, 7, 'rt_cart_vel'),
        (mx_def.MX_ST_RT_CONF, 4, 'rt_conf'),
        (mx_def.MX_ST_RT_CONF_TURN, 2, 'rt_conf_turn'),
    ]

    # Send one monitor message per real-time field, plus the accelerometer message.
    for code, length, _ in cases:
        robot._monitor_rx_queue.put(make_test_message(code, range(length)))
    robot._monitor_rx_queue.put(make_test_message(mx_def.MX_ST_RT_ACCELEROMETER, range(5)))
    robot.Disconnect()

    # Temporarily test using direct members, switch to using proper getters once implemented.
    for code, length, attr in cases:
        assert getattr(robot._robot_rt_data, attr) == make_test_data(code, range(length))

    # The data is sent as [timestamp, accelerometer_id, {measurements...}].
    # We convert it to a dictionary which maps the accelerometer_id to a TimestampedData object.
    accel_array = make_test_array(mx_def.MX_ST_RT_ACCELEROMETER, range(5))
    assert robot._robot_rt_data.rt_accelerometer == {
        accel_array[1]: mdr.TimestampedData(accel_array[0], accel_array[2:])
    }
# Test that checkpoints created by user are properly sent to robot, waited on, and unblocked.
def test_user_set_checkpoints(robot: mdr.Robot):
    """A user checkpoint must be transmitted, block until acknowledged, then unblock."""
    connect_robot_helper(robot)

    checkpoint_1 = robot.SetCheckpoint(1)
    # The matching text command must have been queued for transmission.
    assert robot._command_tx_queue.get() == 'SetCheckpoint(1)'
    # The checkpoint object must carry the requested id.
    assert checkpoint_1.id == 1

    # Before the robot acknowledges, waiting must time out.
    with pytest.raises(mdr.TimeoutException):
        checkpoint_1.wait(timeout=0)

    # Simulate the robot's acknowledgement; waiting must now succeed.
    robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_CHECKPOINT_REACHED, '1'))
    checkpoint_1.wait(timeout=DEFAULT_TIMEOUT)
# Test that the user can wait on checkpoints which were set by an external source, like an offline program.
def test_external_checkpoints(robot: mdr.Robot):
    """An external checkpoint must not be transmitted, but must still block and unblock."""
    connect_robot_helper(robot)

    checkpoint_1 = robot.ExpectExternalCheckpoint(1)
    # Unlike SetCheckpoint, nothing may be sent to the robot.
    assert robot._command_tx_queue.qsize() == 0
    # The checkpoint object must carry the requested id.
    assert checkpoint_1.id == 1

    # Before the robot reports the checkpoint, waiting must time out.
    with pytest.raises(mdr.TimeoutException):
        checkpoint_1.wait(timeout=0)

    # Simulate the externally-triggered checkpoint; waiting must now succeed.
    robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_CHECKPOINT_REACHED, '1'))
    checkpoint_1.wait(timeout=DEFAULT_TIMEOUT)
# Test that user-set and external checkpoints work concurrently.
def test_multiple_checkpoints(robot: mdr.Robot):
    """Internal and external checkpoints must unblock independently, in order."""
    connect_robot_helper(robot)

    checkpoint_1 = robot.SetCheckpoint(1)
    checkpoint_2 = robot.SetCheckpoint(2)
    checkpoint_3 = robot.ExpectExternalCheckpoint(3)

    def assert_still_blocked(*checkpoints):
        # Each still-pending checkpoint must time out immediately.
        for checkpoint in checkpoints:
            with pytest.raises(mdr.TimeoutException):
                checkpoint.wait(timeout=0)

    # Nothing acknowledged yet: all three block.
    assert_still_blocked(checkpoint_1, checkpoint_2, checkpoint_3)

    # Acknowledge checkpoint 1: the other two keep blocking.
    robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_CHECKPOINT_REACHED, '1'))
    checkpoint_1.wait(timeout=DEFAULT_TIMEOUT)
    assert_still_blocked(checkpoint_2, checkpoint_3)

    # Acknowledge checkpoint 2: only the external checkpoint still blocks.
    robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_CHECKPOINT_REACHED, '2'))
    checkpoint_1.wait(timeout=DEFAULT_TIMEOUT)
    checkpoint_2.wait(timeout=DEFAULT_TIMEOUT)
    assert_still_blocked(checkpoint_3)

    # Acknowledge checkpoint 3: everything is unblocked.
    robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_CHECKPOINT_REACHED, '3'))
    checkpoint_3.wait(timeout=DEFAULT_TIMEOUT)
    checkpoint_2.wait(timeout=DEFAULT_TIMEOUT)
    checkpoint_1.wait(timeout=DEFAULT_TIMEOUT)
# Test that repeated checkpoints are unblocked in the order they are set.
# Repeated checkpoints are supported but discouraged.
def test_repeated_checkpoints(robot: mdr.Robot):
    """Two checkpoints sharing the same id must unblock in FIFO order."""
    connect_robot_helper(robot)

    checkpoint_1_a = robot.SetCheckpoint(1)
    checkpoint_1_b = robot.SetCheckpoint(1)

    # Neither instance may unblock before any acknowledgement.
    for pending in (checkpoint_1_a, checkpoint_1_b):
        with pytest.raises(mdr.TimeoutException):
            pending.wait(timeout=0)

    # A single acknowledgement releases only the first-set checkpoint.
    robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_CHECKPOINT_REACHED, '1'))
    with pytest.raises(mdr.TimeoutException):
        checkpoint_1_b.wait(timeout=0)
    checkpoint_1_a.wait(timeout=DEFAULT_TIMEOUT)

    # The second acknowledgement releases the remaining checkpoint.
    robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_CHECKPOINT_REACHED, '1'))
    checkpoint_1_b.wait(timeout=DEFAULT_TIMEOUT)
    checkpoint_1_a.wait(timeout=DEFAULT_TIMEOUT)
# Test WaitForAnyCheckpoint().
def test_special_checkpoints(robot: mdr.Robot):
    """WaitForAnyCheckpoint() must unblock as soon as any checkpoint is reached."""
    connect_robot_helper(robot)

    # Two pending checkpoints; their ids are irrelevant to the "any" wait.
    robot.SetCheckpoint(1)
    robot.SetCheckpoint(2)

    # No checkpoint reached yet: the "any" wait must time out.
    with pytest.raises(mdr.TimeoutException):
        robot.WaitForAnyCheckpoint(timeout=0)

    # Reaching the first checkpoint satisfies the "any" wait.
    robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_CHECKPOINT_REACHED, '1'))
    robot.WaitForAnyCheckpoint()
# Test that receiving a checkpoint without an associated wait does not raise exception.
def test_unaccounted_checkpoints(robot: mdr.Robot):
    """An unexpected checkpoint notification must not corrupt internal state."""
    connect_robot_helper(robot)
    # Simulate a checkpoint the user never asked for.
    robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_CHECKPOINT_REACHED, '1'))
    # Raises if the robot's internal bookkeeping was left in an invalid state.
    robot._check_internal_states()
# Test that checkpoints which will never be unblocked raise an exception.
def test_stranded_checkpoints(robot: mdr.Robot):
    """Waiting on a checkpoint after disconnect must raise instead of blocking."""
    connect_robot_helper(robot)
    stranded_checkpoint = robot.SetCheckpoint(1)
    robot.Disconnect()
    # The robot is gone, so the checkpoint can never be reached; the wait
    # must be interrupted rather than block until the timeout.
    with pytest.raises(mdr.InterruptException):
        stranded_checkpoint.wait(timeout=DEFAULT_TIMEOUT)
# Test that events can be correctly waited for and set.
def test_events(robot: mdr.Robot):
    """Drive the robot through its full lifecycle (connect, activate, home,
    pause, resume, clear motion, error/reset, deactivate, brakes, disconnect)
    and verify that each state-change event blocks before the transition and
    unblocks after it.

    Robot responses are simulated by pushing status messages onto the command
    receive queue; the order of waits is significant throughout.
    NOTE(review): the status payload appears to be
    'activated,homed,sim,error,paused,eob,eom' — inferred from the waits
    below; confirm against the mx_def protocol definitions.
    """
    # Fresh robot fixture: neither connected nor activated yet.
    with pytest.raises(mdr.TimeoutException):
        robot.WaitActivated(timeout=0)
    robot.WaitDeactivated()
    with pytest.raises(mdr.TimeoutException):
        robot.WaitConnected(timeout=0)
    robot.WaitDisconnected()
    # Connect: connected event set, disconnected event cleared; still deactivated.
    connect_robot_helper(robot)
    robot.WaitConnected()
    with pytest.raises(mdr.TimeoutException):
        robot.WaitDisconnected(timeout=0)
    with pytest.raises(mdr.TimeoutException):
        robot.WaitActivated(timeout=0)
    robot.WaitDeactivated()
    # Activate: simulated status reports the robot as activated.
    robot.ActivateRobot()
    robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_GET_STATUS_ROBOT, '1,0,0,0,0,0,0'))
    robot.WaitActivated(timeout=1)
    with pytest.raises(mdr.TimeoutException):
        robot.WaitDeactivated(timeout=0)
    # Home: simulated status reports the robot as homed.
    with pytest.raises(mdr.TimeoutException):
        robot.WaitHomed(timeout=0)
    robot.Home()
    robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_GET_STATUS_ROBOT, '1,1,0,0,0,0,0'))
    robot.WaitHomed(timeout=1)
    # Pause then resume motion.
    robot.PauseMotion()
    robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_GET_STATUS_ROBOT, '1,1,0,0,1,0,0'))
    # Wait until pause is successfully set.
    robot.WaitMotionPaused(timeout=DEFAULT_TIMEOUT)
    with pytest.raises(mdr.TimeoutException):
        robot.WaitMotionResumed(timeout=0)
    robot.ResumeMotion()
    robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_GET_STATUS_ROBOT, '1,1,0,0,0,0,0'))
    robot.WaitMotionResumed(timeout=1)
    # Clear motion, acknowledged by the robot.
    robot.ClearMotion()
    robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_CLEAR_MOTION, ''))
    robot.WaitMotionCleared(timeout=1)
    # End-of-block flag reported by the robot.
    robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_GET_STATUS_ROBOT, '1,1,0,0,0,1,0'))
    robot._robot_events.on_end_of_block.wait(timeout=1)
    # Robot enters error state.
    robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_GET_STATUS_ROBOT, '1,1,0,1,0,0,0'))
    robot._robot_events.on_error.wait(timeout=1)
    robot.ResetError()
    robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_GET_STATUS_ROBOT, '1,1,0,0,0,0,0'))
    robot._robot_events.on_error_reset.wait(timeout=1)
    # Deactivate.
    robot.DeactivateRobot()
    robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_GET_STATUS_ROBOT, '0,0,0,0,0,0,0'))
    # Note: the order of these waits is intentional.
    # The WaitActivated check may fail if message hasn't yet been processed.
    robot.WaitDeactivated(timeout=1)
    with pytest.raises(mdr.TimeoutException):
        robot.WaitActivated(timeout=0)
    # Brake events, driven directly by brake on/off messages.
    robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_BRAKES_OFF, ''))
    robot._robot_events.on_brakes_deactivated.wait(timeout=DEFAULT_TIMEOUT)
    robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_BRAKES_ON, ''))
    robot._robot_events.on_brakes_activated.wait(timeout=DEFAULT_TIMEOUT)
    # Disconnect: the disconnected event is set again.
    with pytest.raises(mdr.TimeoutException):
        robot.WaitDisconnected(timeout=0)
    robot.Disconnect()
    robot.WaitDisconnected()
# Test that robot disconnects automatically on exception when feature is enabled.
def test_disconnect_on_exception(robot: mdr.Robot):
    """SetCheckpoint(0) is invalid; the resulting error must disconnect the
    robot when disconnect_on_exception is enabled, and must not otherwise."""
    # Feature enabled: the invalid checkpoint id triggers a DisconnectError.
    connect_robot_helper(robot, disconnect_on_exception=True)
    with pytest.raises(mdr.DisconnectError):
        robot.SetCheckpoint(0)
    robot.Disconnect()
    # Feature disabled: the same call surfaces the underlying assertion instead.
    connect_robot_helper(robot, disconnect_on_exception=False)
    with pytest.raises(AssertionError):
        robot.SetCheckpoint(0)
# Test that callbacks can be set and are correctly called. Every callback in the RobotCallbacks class is checked.
# If a new callback is added, the test must be updated to trigger the callback.
def test_callbacks(robot: mdr.Robot):
    """Exercise every callback slot in RobotCallbacks, in both execution modes.

    Fix: the loop previously iterated over [True] only, which left the
    `if not run_in_thread: robot.RunCallbacks()` branch dead code; it now
    covers run_callbacks_in_separate_thread=True AND False, and resets the
    recording list each iteration so assertions check one mode at a time.
    """
    # Initialize object which will contain all user-defined callback functions.
    callbacks = mdr.RobotCallbacks()
    # Expect that almost all callbacks will be called...
    expected_callbacks = copy.deepcopy(callbacks.__dict__)
    # ... except on_monitor_message since we're not connecting to monitoring port by default.
    expected_callbacks.pop('on_monitor_message')
    # Names of callbacks (plus the checkpoint id) recorded as they fire.
    called_callbacks = []

    # Record which callback fired. One shared recorder parameterized by name
    # avoids writing a separate function per callback slot; the name is bound
    # via functools.partial just before assignment.
    def test_callback(name):
        called_callbacks.append(name)

    # For each available callback 'slot', assign the recorder with the slot name bound.
    for attr in callbacks.__dict__:
        callbacks.__dict__[attr] = partial(test_callback, name=attr)

    # Checkpoint callbacks receive the checkpoint id; use a dedicated recorder.
    checkpoint_id = 123

    def checkpoint_callback(id):
        called_callbacks.append('on_checkpoint_reached')
        called_callbacks.append(id)

    callbacks.on_checkpoint_reached = checkpoint_callback

    # The two message callbacks also receive an argument (the message itself).
    def command_message_callback(message):
        called_callbacks.append('on_command_message')

    def monitor_message_callback(message):
        called_callbacks.append('on_monitor_message')

    callbacks.on_command_message = command_message_callback
    callbacks.on_monitor_message = monitor_message_callback

    # End of cycle callback is alike on_monitor_message, as it also happens on a monitoring message, but less often
    def end_of_cycle_callback():
        called_callbacks.append('on_end_of_cycle')

    callbacks.on_end_of_cycle = end_of_cycle_callback

    # Check both execution modes: background callback thread and manual RunCallbacks().
    for run_in_thread in [True, False]:
        # Start each iteration from a clean slate so the assertions below
        # validate this mode only.
        called_callbacks.clear()
        # Register all callbacks.
        robot.RegisterCallbacks(callbacks, run_callbacks_in_separate_thread=run_in_thread)
        connect_robot_helper(robot, enable_synchronous_mode=True)
        robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_GET_STATUS_ROBOT, '1,0,0,0,0,0,0'))
        robot.ActivateRobot()
        robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_GET_STATUS_ROBOT, '1,1,0,0,0,0,0'))
        robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_RT_CYCLE_END, '12345'))
        robot.Home()
        robot.GetStatusRobot(synchronous_update=False)
        robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_GET_STATUS_ROBOT, '1,1,0,0,1,0,0'))
        robot.GetStatusGripper(synchronous_update=False)
        robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_GET_STATUS_GRIPPER, '0,0,0,0,0,0'))
        checkpoint_1 = robot.SetCheckpoint(checkpoint_id)
        robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_CHECKPOINT_REACHED, str(checkpoint_id)))
        robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_GET_STATUS_ROBOT, '1,1,0,0,1,0,0'))
        robot.PauseMotion()
        robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_GET_STATUS_ROBOT, '1,1,0,0,0,0,0'))
        robot.ResumeMotion()
        robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_CLEAR_MOTION, ''))
        # Note we don't actually run robot.ClearMotion() here as the command will block in synchronous mode.
        # It is also not necessary for the test.
        robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_OFFLINE_START, ''))
        # Note we don't actually run robot.StartOfflineProgram() here as there is no actual robot and thus
        # no recorded programs
        # It is also not necessary for the test.
        # Simulate end of cycle (detected on MX_ST_GET_POSE monitor message when robot is not 'rt_message_capable')
        robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_GET_POSE, '0.0,0.0,0.0,0.0,0.0,0.0'))
        # Robot enters error state.
        robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_GET_STATUS_ROBOT, '1,1,0,1,0,0,0'))
        robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_GET_STATUS_ROBOT, '1,1,0,0,0,0,0'))
        robot.ResetError()
        # Robot pstop triggered.
        robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_PSTOP, '1'))
        robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_PSTOP, '0'))
        robot.ResetPStop()
        robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_GET_STATUS_ROBOT, '1,1,1,0,0,0,0'))
        robot.ActivateSim()
        robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_GET_STATUS_ROBOT, '1,1,0,0,0,0,0'))
        robot.DeactivateSim()
        robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_EXTTOOL_SIM, '1'))
        robot.SetExtToolSim(mx_def.MX_EXT_TOOL_MEGP25_SHORT)
        robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_EXTTOOL_SIM, '0'))
        robot.SetExtToolSim(mx_def.MX_EXT_TOOL_NONE)
        robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_RT_EXTTOOL_STATUS, '33,1,1,1,0'))
        robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_RT_VALVE_STATE, '34,1,1'))
        robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_RT_GRIPPER_STATE, '35,1,1'))
        robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_RECOVERY_MODE_ON, ''))
        robot.SetRecoveryMode(True)
        robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_RECOVERY_MODE_OFF, ''))
        robot.SetRecoveryMode(False)
        robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_OFFLINE_START, ''))
        robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_GET_STATUS_ROBOT, '0,0,0,0,0,0,0'))
        robot.DeactivateRobot()
        robot.Disconnect()
        # In manual mode the queued callbacks are only dispatched on request.
        if not run_in_thread:
            robot.RunCallbacks()
        robot.UnregisterCallbacks()
        # Check that all callbacks have been called.
        for attr in expected_callbacks:
            assert attr in called_callbacks, f'callback {attr} not called (called={called_callbacks})'
        assert checkpoint_id in called_callbacks
        # Unregistering must have torn down the callback thread (if any).
        assert robot._callback_thread is None
# Test unblocking InterruptableEvent class with exception.
def test_event_with_exception():
    """An InterruptableEvent must support set/wait, timeout, and abort."""
    # A set event returns immediately even with a zero timeout.
    event = mdr.InterruptableEvent()
    event.set()
    event.wait(timeout=0)

    # Once cleared, the same wait must time out.
    event.clear()
    with pytest.raises(mdr.TimeoutException):
        event.wait(timeout=0)

    # An aborted event must raise rather than block.
    aborted_event = mdr.InterruptableEvent()
    aborted_event.abort()
    with pytest.raises(mdr.InterruptException):
        aborted_event.wait(timeout=0)
# Test all motion commands except those in skip_list. Skipped commands do not follow standard motion command format, or
# their arguments cannot be deduced from the function signature.
def test_motion_commands(robot: mdr.Robot):
    """Every Move*/Set* API method must emit a text command whose name and
    argument list mirror the Python call."""
    connect_robot_helper(robot)
    skip_list = [
        'MoveGripper', 'MoveJoints', 'MoveJointsVel', 'MoveJointsRel', 'SetSynchronousMode', 'SetTorqueLimits',
        'SetTorqueLimitsCfg'
    ]
    # Run all move-type commands in API and check that the text_command matches.
    for name in dir(robot):
        if name in skip_list or not (name.startswith('Move') or name.startswith('Set')):
            continue
        method = getattr(robot, name)
        # The public methods are decorated; inspect the wrapped function to
        # count its parameters (argcount includes 'self', hence range(1, ...)).
        num_args = method.__wrapped__.__code__.co_argcount
        test_args = list(range(1, num_args))
        test_args_text = ','.join([str(x) for x in test_args])
        method(*test_args)
        text_command = robot._command_tx_queue.get(block=True, timeout=1)
        # The transmitted command must start with the method name...
        assert text_command.find(name) == 0, 'Method {} does not match text command'.format(name)
        # ... and contain the argument list verbatim.
        assert text_command.find(test_args_text) != -1, 'Method {} args do not match text command'.format(name)
# Test that joint-type moves send the correct command and checks input.
def test_joint_moves(robot: mdr.Robot):
    """MoveJoints / MoveJointsRel / MoveJointsVel must forward their joint
    values verbatim and reject calls with the wrong number of joints."""
    connect_robot_helper(robot)
    fake_joint = fake_data(seed=1, length=6)
    fake_joints_str = fake_string(seed=1, length=6)
    for command_name in ('MoveJoints', 'MoveJointsRel', 'MoveJointsVel'):
        move_method = getattr(robot, command_name)
        # A full set of six joint values must be forwarded as-is.
        move_method(*fake_joint)
        text_command = robot._command_tx_queue.get(block=True, timeout=1)
        assert text_command.find(command_name) == 0
        assert text_command.find(fake_joints_str) != -1
        # Supplying only three joint values must be rejected.
        with pytest.raises(ValueError):
            move_method(1, 2, 3)
# Test get commands with synchronous_update=True.
def test_synchronous_gets(robot: mdr.Robot):
    """Synchronous gets must round-trip through a simulated robot, and the
    same getters must return timestamped data on request."""
    connect_robot_helper(robot)

    # --- GetRtTargetJointPos ---
    expected_commands = ['SyncCmdQueue(1)', 'GetRtTargetJointPos']
    robot_responses = [
        mdr._Message(mx_def.MX_ST_SYNC_CMD_QUEUE, '1'),
        mdr._Message(mx_def.MX_ST_RT_TARGET_JOINT_POS, '1234, 1, 2, 3, 4, 5, 6'),
    ]
    fake_robot = threading.Thread(target=simple_response_handler,
                                  args=(robot._command_tx_queue, robot._command_rx_queue, expected_commands,
                                        robot_responses))
    fake_robot.start()
    # Synchronous get returns the joint values without the timestamp.
    assert robot.GetRtTargetJointPos(synchronous_update=True, timeout=1) == [1, 2, 3, 4, 5, 6]
    # Timestamped variant wraps the same values in a TimestampedData object.
    assert robot.GetRtTargetJointPos(include_timestamp=True,
                                     synchronous_update=False) == mdr.TimestampedData(1234, [1, 2, 3, 4, 5, 6])
    fake_robot.join()

    # --- GetRtTargetCartPos ---
    expected_commands = ['SyncCmdQueue(2)', 'GetRtTargetCartPos']
    robot_responses = [
        mdr._Message(mx_def.MX_ST_SYNC_CMD_QUEUE, '2'),
        mdr._Message(mx_def.MX_ST_RT_TARGET_CART_POS, '2345, 2, 3, 4, 5, 6, 7'),
    ]
    fake_robot = threading.Thread(target=simple_response_handler,
                                  args=(robot._command_tx_queue, robot._command_rx_queue, expected_commands,
                                        robot_responses))
    fake_robot.start()
    assert robot.GetRtTargetCartPos(synchronous_update=True, timeout=1) == [2, 3, 4, 5, 6, 7]
    assert robot.GetRtTargetCartPos(include_timestamp=True,
                                    synchronous_update=False) == mdr.TimestampedData(2345, [2, 3, 4, 5, 6, 7])
    fake_robot.join()

    # Without a fake robot to respond, synchronous gets must time out.
    with pytest.raises(mdr.TimeoutException):
        robot.GetRtTargetJointPos(synchronous_update=True, timeout=0)
    with pytest.raises(mdr.TimeoutException):
        robot.GetRtTargetCartPos(synchronous_update=True, timeout=0)
# Helper functions for generating fake data for simulated functions like GetJoints
def fake_data(seed, length=6):
    """Return a list of `length` copies of `seed`, mimicking a joint/pose array."""
    return [seed for _ in range(length)]


def fake_string(seed, length=6):
    """Return the comma-separated text form of fake_data(seed, length)."""
    return ','.join(str(value) for value in fake_data(seed, length))
# Test that get commands correctly return timestamps.
def test_synchronous_gets_legacy(robot: mdr.Robot):
    """On legacy robots (no real-time monitoring messages), gets must fall
    back to GetJoints/GetPose data and timestamped gets must raise.

    Fix: the two non-timestamped get checks were bare `==` comparisons whose
    results were discarded; they are now real assertions.
    """
    # Use a connected response that indicate a robot that does not support real-time monitoring
    connect_robot_helper(robot, yaml_filename='meca500_r3_v8_3.yml')
    #
    # Test legacy messages:
    #
    robot._monitor_rx_queue.put(mdr._Message(mx_def.MX_ST_GET_JOINTS, fake_string(seed=1)))
    robot._monitor_rx_queue.put(mdr._Message(mx_def.MX_ST_GET_POSE, fake_string(seed=1)))
    # Terminate queue and wait for thread to exit to ensure messages are processed.
    robot._monitor_rx_queue.put(mdr._TERMINATE)
    robot._monitor_handler_thread.join(timeout=5)
    robot._initialize_monitoring_connection()
    # Without RT messages, enabling 'include_timestamp' should raise exception.
    with pytest.raises(mdr.InvalidStateError):
        robot.GetRtTargetJointPos(include_timestamp=True)
    with pytest.raises(mdr.InvalidStateError):
        robot.GetRtTargetCartPos(include_timestamp=True)
    # Non-timestamped gets must return the legacy data.
    # (These were previously missing the 'assert' keyword and checked nothing.)
    assert robot.GetRtTargetJointPos(include_timestamp=False) == fake_data(seed=1)
    assert robot.GetRtTargetCartPos(include_timestamp=False) == fake_data(seed=1)
    assert not robot.GetRobotInfo().rt_message_capable
    # Test synchronous gets without RT messages.
    expected_command = 'GetJoints'
    robot_response = mdr._Message(mx_def.MX_ST_GET_JOINTS, fake_string(seed=2))
    fake_robot = threading.Thread(target=simple_response_handler,
                                  args=(robot._command_tx_queue, robot._command_rx_queue, expected_command,
                                        robot_response))
    fake_robot.start()
    assert robot.GetRtTargetJointPos(synchronous_update=True, timeout=1) == fake_data(seed=2)
    fake_robot.join()
    expected_command = 'GetPose'
    robot_response = mdr._Message(mx_def.MX_ST_GET_POSE, fake_string(seed=2))
    fake_robot = threading.Thread(target=simple_response_handler,
                                  args=(robot._command_tx_queue, robot._command_rx_queue, expected_command,
                                        robot_response))
    fake_robot.start()
    assert robot.GetRtTargetCartPos(synchronous_update=True, timeout=1) == fake_data(seed=2)
    fake_robot.join()
# Test initializing offline programs.
def test_start_offline_program(robot: mdr.Robot):
    """StartOfflineProgram must succeed when the robot reports the program
    started, and raise when the robot reports no such saved program."""
    connect_robot_helper(robot, enable_synchronous_mode=True)
    expected_command = 'StartProgram(1)'

    def launch_fake_robot(response_code):
        # Fake robot answering the start request with the given status code.
        handler = threading.Thread(target=simple_response_handler,
                                   args=(robot._command_tx_queue, robot._command_rx_queue, expected_command,
                                         mdr._Message(response_code, '')))
        handler.start()
        return handler

    # The robot reports that the program has been started successfully.
    fake_robot = launch_fake_robot(mx_def.MX_ST_OFFLINE_START)
    robot.StartOfflineProgram(1, timeout=1)
    fake_robot.join(timeout=1)

    # The robot reports that the program does not exist.
    fake_robot = launch_fake_robot(mx_def.MX_ST_NO_OFFLINE_SAVED)
    with pytest.raises(mdr.InvalidStateError):
        robot.StartOfflineProgram(1, timeout=1)
    fake_robot.join(timeout=1)
# Test monitor-only mode. (No commands can be sent.)
def test_monitor_mode(robot: mdr.Robot):
    """In monitor mode the robot state is updated from monitor messages, but
    motion commands must be rejected."""
    robot._monitor_rx_queue.put(mdr._Message(mx_def.MX_ST_CONNECTED, MECA500_CONNECTED_RESPONSE))
    robot.Connect(TEST_IP,
                  monitor_mode=True,
                  offline_mode=True,
                  disconnect_on_exception=False,
                  enable_synchronous_mode=True)
    robot.WaitConnected(timeout=0)
    # The Meca500 connection response must have been parsed as a 6-joint robot.
    assert robot.GetRobotInfo().num_joints == 6

    # Push one target-joint and one target-cart monitor message with distinct seeds.
    fake_joint = fake_data(seed=1, length=6)
    fake_pose = fake_data(seed=2, length=6)
    robot._monitor_rx_queue.put(
        mdr._Message(mx_def.MX_ST_RT_TARGET_JOINT_POS, '1234, ' + fake_string(seed=1, length=6)))
    robot._monitor_rx_queue.put(
        mdr._Message(mx_def.MX_ST_RT_TARGET_CART_POS, '2345, ' + fake_string(seed=2, length=6)))
    # Terminate queue and wait for thread to exit to ensure messages are processed.
    robot._monitor_rx_queue.put(mdr._TERMINATE)
    robot._monitor_handler_thread.join(timeout=5)

    # The gets must reflect the monitor data without raising.
    assert robot.GetRtTargetJointPos() == fake_joint
    assert robot.GetRtTargetCartPos() == fake_pose
    # Sending a motion command in monitor mode is invalid.
    with pytest.raises(mdr.InvalidStateError):
        robot.MoveJoints(*fake_joint)
# Test the sending and receiving of custom commands.
def test_custom_command(robot: mdr.Robot):
    """SendCustomCommand must transmit the command, capture the expected
    response, and clean up its internal response-event list.

    Fix: the response check was a bare `==` comparison whose result was
    discarded; it is now a real assertion.
    """
    connect_robot_helper(robot)
    expected_command = 'TestCommand'
    robot_response = mdr._Message(mx_def.MX_ST_CMD_SUCCESSFUL, 'TestResponse')
    fake_robot = threading.Thread(target=simple_response_handler,
                                  args=(robot._command_tx_queue, robot._command_rx_queue, expected_command,
                                        robot_response))
    fake_robot.start()
    response_event = robot.SendCustomCommand('TestCommand', expected_responses=[mx_def.MX_ST_CMD_SUCCESSFUL])
    # The event must unblock with the simulated response as its payload.
    assert response_event.wait(timeout=DEFAULT_TIMEOUT) == robot_response
    # Once consumed, no custom-response event may be left registered.
    assert len(robot._custom_response_events) == 0
# Returns a copy of the string with all whitespaces removed
def remove_all_whitespaces(string):
    """Return `string` with every whitespace character deleted."""
    # Splitting on whitespace runs and re-joining drops exactly the \s+ matches.
    return ''.join(re.split(r"\s+", string))
# Compare 2 CSV files (ignoring whitespaces)
# Returns true if equal
def robot_trajectory_files_identical(file_path_1, file_path_2):
    """Return True when both files parse to equal RobotTrajectories objects."""
    first, second = (robot_files.RobotTrajectories.from_file(path) for path in (file_path_1, file_path_2))
    return first == second
# Test the ability to log robot state for rt-monitoring-capable platforms.
# (The legacy, non rt-message-capable variant is covered by test_file_logger_legacy below.)
def test_file_logger(tmp_path, robot: mdr.Robot):
    """Log three cycles of simulated real-time data with FileLogger and
    compare the resulting file against a reference capture.

    Args:
        tmp_path: pytest-provided temporary directory receiving the log file.
        robot: robot fixture, connected by connect_robot_helper().
    """
    connect_robot_helper(robot)
    # Manually set that the robot is rt-message-capable.
    robot._robot_info.rt_message_capable = True
    # Send status message to indicate that the robot is activated and homed, and idle.
    robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_GET_STATUS_ROBOT, '1,1,0,0,0,1,1'))
    robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_GET_REAL_TIME_MONITORING, ''))
    # Start logging with context manager version of logger. record_time is False for comparison with reference file.
    with robot.FileLogger(0.001, file_path=tmp_path, record_time=False):
        robot.MoveJoints(0, -60, 60, 0, 0, 0)
        robot.MoveJoints(0, 0, 0, 0, 0, 0)
        # Simulate three monitoring cycles; each field uses a distinct seed so
        # columns can be told apart in the logged file.
        for i in range(1, 4):
            robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_GET_JOINTS, fake_string(seed=3)))
            robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_GET_POSE, fake_string(seed=4)))
            robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_GET_CONF, fake_string(seed=102, length=3)))
            robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_GET_CONF_TURN, fake_string(seed=103, length=2)))
            robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_RT_TARGET_JOINT_POS, fake_string(seed=3, length=7)))
            robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_RT_TARGET_CART_POS, fake_string(seed=4, length=7)))
            robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_RT_TARGET_JOINT_VEL, fake_string(seed=5, length=7)))
            robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_RT_TARGET_CART_VEL, fake_string(seed=6, length=7)))
            robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_RT_TARGET_CONF, fake_string(seed=7, length=4)))
            robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_RT_TARGET_CONF_TURN, fake_string(seed=8, length=2)))
            robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_RT_JOINT_POS, fake_string(seed=9, length=7)))
            robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_RT_CART_POS, fake_string(seed=10, length=7)))
            robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_RT_JOINT_VEL, fake_string(seed=11, length=7)))
            robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_RT_JOINT_TORQ, fake_string(seed=12, length=7)))
            robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_RT_CART_VEL, fake_string(seed=13, length=7)))
            robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_RT_CONF, fake_string(seed=14, length=4)))
            robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_RT_CONF_TURN, fake_string(seed=15, length=2)))
            robot._command_rx_queue.put(
                mdr._Message(mx_def.MX_ST_RT_ACCELEROMETER, '16,5,' + fake_string(seed=16000, length=3)))
            robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_RT_EXTTOOL_STATUS, fake_string(seed=20, length=5)))
            robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_RT_GRIPPER_STATE, fake_string(seed=21, length=3)))
            robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_RT_VALVE_STATE, fake_string(seed=22, length=3)))
            robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_RT_WRF, fake_string(seed=17, length=7)))
            robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_RT_TRF, fake_string(seed=18, length=7)))
            robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_RT_CHECKPOINT, fake_string(seed=19, length=2)))
            robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_RT_CYCLE_END, str(i * 100)))
    # Simulate response to last "SetRealTimeMonitoring" performed automatically at end of logging
    robot._command_rx_queue.put(mdr._Message(mx_def.MX_ST_GET_REAL_TIME_MONITORING, ''))
    # Terminate queue and wait for thread to exit to ensure messages are processed.
    robot._command_rx_queue.put(mdr._TERMINATE)
    robot._command_response_handler_thread.join(timeout=5)
    # Restart the monitoring connection to ensure the API is in a good state.
    robot._initialize_monitoring_connection()
    # Ensure one log file is created.
    directory = os.listdir(tmp_path)
    assert len(directory) == 1
    log_file_name = directory[0]
    assert log_file_name.startswith('Meca500_R3_v9.147.0')
    log_file_path = os.path.join(tmp_path, log_file_name)
    reference_file_path = os.path.join(os.path.dirname(__file__), 'log_file_reference.zip')
    # Check that the logger output matches the reference file.
    assert robot_trajectory_files_identical(log_file_path, reference_file_path)
    robot.Disconnect()
# Test ability to log robot state for legacy (non rt monitoring message capable) platforms.
# Logging with legacy platforms use system time. To ensure consistency across tests, mock system time call to always
# return the same time (in nanoseconds).
@mock.patch('time.time_ns', mock.MagicMock(return_value=1621277770487091))
def test_file_logger_legacy(tmp_path, robot: mdr.Robot):
    """Log legacy (GetJoints/GetPose) data with FileLogger and compare the
    resulting file against a reference capture.

    Args:
        tmp_path: pytest-provided temporary directory receiving the log file.
        robot: robot fixture; connected with a v8.3 response so the robot is
            treated as not rt-message-capable.
    """
    connect_robot_helper(robot, yaml_filename='meca500_r3_v8_3.yml')
    # This is explicitly set for readability, and is not necessary.
    robot._robot_info.rt_message_capable = False
    # Send status message to indicate that the robot is activated and homed, and idle.
    robot._monitor_rx_queue.put(mdr._Message(mx_def.MX_ST_GET_STATUS_ROBOT, '1,1,0,0,0,1,1'))
    # Start logging with context manager version of logger. Only log the two available fields.
    with robot.FileLogger(
            0.001,
            file_path=tmp_path,
            fields=['TargetJointPos', 'TargetCartPos'],
            record_time=False,
    ):
        robot.MoveJoints(0, -60, 60, 0, 0, 0)
        robot.MoveJoints(0, 0, 0, 0, 0, 0)
        # Three simulated legacy monitoring cycles with distinct seeds.
        for seed in range(1, 4):
            robot._monitor_rx_queue.put(mdr._Message(mx_def.MX_ST_GET_JOINTS, fake_string(seed)))
            robot._monitor_rx_queue.put(mdr._Message(mx_def.MX_ST_GET_POSE, fake_string(seed)))
    # Terminate queue and wait for thread to exit to ensure messages are processed.
    robot._monitor_rx_queue.put(mdr._TERMINATE)
    robot._monitor_handler_thread.join(timeout=5)
    # Restart the monitoring connection to ensure the API is in a good state.
    robot._initialize_monitoring_connection()
    # Ensure one log file is created.
    directory = os.listdir(tmp_path)
    assert len(directory) == 1
    log_file_name = directory[0]
    assert log_file_name.startswith('Meca500_R3_v8.3.0')
    log_file_path = os.path.join(tmp_path, log_file_name)
    reference_file_path = os.path.join(os.path.dirname(__file__), 'legacy_log_file_reference.zip')
    # Check that the logger output matches the reference file.
    assert robot_trajectory_files_identical(log_file_path, reference_file_path)
    robot.Disconnect()
|
tcp_test.py | # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Thomas Jackson <jacksontj.89@gmail.com>`
'''
# Import python libs
from __future__ import absolute_import
import os
import threading
import tornado.gen
import tornado.ioloop
from tornado.testing import AsyncTestCase
import salt.config
import salt.ext.six as six
import salt.utils
import salt.transport.server
import salt.transport.client
import salt.exceptions
# Import Salt Testing libs
from salttesting import TestCase, skipIf
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../')
import integration
# Import Salt libs
from unit.transport.req_test import ReqChannelMixin
from unit.transport.pub_test import PubChannelMixin
# TODO: move to a library?
def get_config_file_path(filename):
    """Return the absolute path of *filename* inside the integration config dir."""
    config_dir = os.path.join(integration.TMP, 'config')
    return os.path.join(config_dir, filename)
class BaseTCPReqCase(TestCase):
    '''
    Test the req server/client pair
    '''
    @classmethod
    def setUpClass(cls):
        # Force both master and minion onto the TCP transport; auto_accept
        # avoids manual key acceptance during the handshake.
        cls.master_opts = salt.config.master_config(get_config_file_path('master'))
        cls.master_opts.update({
            'transport': 'tcp',
            'auto_accept': True,
        })
        cls.minion_opts = salt.config.minion_config(get_config_file_path('minion'))
        cls.minion_opts.update({
            'transport': 'tcp',
            'master_uri': 'tcp://127.0.0.1:{0}'.format(cls.minion_opts['master_port']),
        })
        cls.process_manager = salt.utils.process.ProcessManager(name='ReqServer_ProcessManager')
        cls.server_channel = salt.transport.server.ReqServerChannel.factory(cls.master_opts)
        cls.server_channel.pre_fork(cls.process_manager)
        # The req server runs on a dedicated IOLoop in a daemon thread so the
        # test thread can drive the client side synchronously.
        cls.io_loop = tornado.ioloop.IOLoop()
        cls.io_loop.make_current()
        cls.server_channel.post_fork(cls._handle_payload, io_loop=cls.io_loop)
        cls.server_thread = threading.Thread(target=cls.io_loop.start)
        cls.server_thread.daemon = True
        cls.server_thread.start()
    @classmethod
    def tearDownClass(cls):
        # Stop the loop first so the server thread can exit and be joined.
        cls.io_loop.stop()
        cls.server_thread.join()
        cls.process_manager.kill_children()
        cls.server_channel.close()
        del cls.server_channel
@skipIf(salt.utils.is_darwin(), 'hanging test suite on MacOS')
class ClearReqTestCases(BaseTCPReqCase, ReqChannelMixin):
    '''
    Test all of the clear msg stuff
    '''
    def setUp(self):
        # crypt='clear' exercises the unencrypted request path.
        self.channel = salt.transport.client.ReqChannel.factory(self.minion_opts, crypt='clear')
    @classmethod
    @tornado.gen.coroutine
    def _handle_payload(cls, payload):
        '''
        TODO: something besides echo
        '''
        raise tornado.gen.Return((payload, {'fun': 'send_clear'}))
@skipIf(salt.utils.is_darwin(), 'hanging test suite on MacOS')
class AESReqTestCases(BaseTCPReqCase, ReqChannelMixin):
    '''
    Test the encrypted (AES) request channel.
    '''
    def setUp(self):
        # No crypt= override: the factory default gives the encrypted channel.
        self.channel = salt.transport.client.ReqChannel.factory(self.minion_opts)

    @classmethod
    @tornado.gen.coroutine
    def _handle_payload(cls, payload):
        '''
        TODO: something besides echo
        '''
        raise tornado.gen.Return((payload, {'fun': 'send'}))

    # TODO: make failed returns have a specific framing so we can raise the same exception
    # on encrypted channels
    def test_badload(self):
        '''
        Test a variety of bad requests, make sure that we get some sort of error
        '''
        msgs = ['', [], tuple()]
        for msg in msgs:
            with self.assertRaises(salt.exceptions.AuthenticationError):
                # Return value is irrelevant; we only care that send() raises
                # (the previous `ret = ...` assignment was never used).
                self.channel.send(msg)
class BaseTCPPubCase(AsyncTestCase):
    '''
    Test the req server/client pair
    '''
    @classmethod
    def setUpClass(cls):
        # TCP transport with auto key acceptance, same as the req-channel cases.
        cls.master_opts = salt.config.master_config(get_config_file_path('master'))
        cls.master_opts.update({
            'transport': 'tcp',
            'auto_accept': True,
        })
        cls.minion_opts = salt.config.minion_config(get_config_file_path('minion'))
        cls.minion_opts.update({
            'transport': 'tcp',
            'master_ip': '127.0.0.1',
            'auth_timeout': 1,
            'master_uri': 'tcp://127.0.0.1:{0}'.format(cls.minion_opts['master_port']),
        })
        cls.process_manager = salt.utils.process.ProcessManager(name='ReqServer_ProcessManager')
        cls.server_channel = salt.transport.server.PubServerChannel.factory(cls.master_opts)
        cls.server_channel.pre_fork(cls.process_manager)
        # we also require req server for auth
        cls.req_server_channel = salt.transport.server.ReqServerChannel.factory(cls.master_opts)
        cls.req_server_channel.pre_fork(cls.process_manager)
        cls._server_io_loop = tornado.ioloop.IOLoop()
        cls.req_server_channel.post_fork(cls._handle_payload, io_loop=cls._server_io_loop)
        # NOTE(review): unlike BaseTCPReqCase this thread is not marked daemon;
        # confirm tearDownClass always runs so the process can exit cleanly.
        cls.server_thread = threading.Thread(target=cls._server_io_loop.start)
        cls.server_thread.start()
    @classmethod
    def _handle_payload(cls, payload):
        '''
        TODO: something besides echo
        '''
        return payload, {'fun': 'send_clear'}
    @classmethod
    def tearDownClass(cls):
        cls._server_io_loop.stop()
        cls.server_thread.join()
        cls.process_manager.kill_children()
        cls.req_server_channel.close()
        del cls.req_server_channel
    def setUp(self):
        super(BaseTCPPubCase, self).setUp()
        # Snapshot the IOLoop's registered FD handlers so tearDown can detect leaks.
        self._start_handlers = dict(self.io_loop._handlers)
    def tearDown(self):
        super(BaseTCPPubCase, self).tearDown()
        # Any handler added during the test and not removed is a leaked FD.
        failures = []
        for k, v in six.iteritems(self.io_loop._handlers):
            if self._start_handlers.get(k) != v:
                failures.append((k, v))
        if len(failures) > 0:
            raise Exception('FDs still attached to the IOLoop: {0}'.format(failures))
@skipIf(True, 'Skip until we can devote time to fix this test')
class AsyncPubChannelTest(BaseTCPPubCase, PubChannelMixin):
    '''
    Tests around the publish system
    '''
    # All test methods are inherited from PubChannelMixin; the whole class is
    # currently skipped (see decorator above).
if __name__ == '__main__':
    from integration import run_tests
    # Run each TCP req-channel suite without spinning up a full salt daemon.
    for test_case in (ClearReqTestCases, AESReqTestCases):
        run_tests(test_case, needs_daemon=False)
|
simple_queue.py | from cloudbutton.multiprocessing import Process, SimpleQueue
def f(q):
    """Child-process entry point: push a demo payload onto the shared queue."""
    payload = [42, None, 'hello World']
    q.put(payload)
if __name__ == '__main__':
    q = SimpleQueue()
    p = Process(target=f, args=(q,))
    p.start()
    print(q.get())  # prints "[42, None, 'hello World']"
    p.join()
|
session.py | #
# Copyright (c) 2016-2022 Deephaven Data Labs and Patent Pending
#
import os
import threading
from typing import List
import pyarrow
from bitstring import BitArray
import grpc
from pydeephaven._arrow_flight_service import ArrowFlightService
from pydeephaven._console_service import ConsoleService
from pydeephaven._app_service import AppService
from pydeephaven._session_service import SessionService
from pydeephaven._table_ops import TimeTableOp, EmptyTableOp, MergeTablesOp, FetchTableOp
from pydeephaven._table_service import TableService
from pydeephaven.dherror import DHError
from pydeephaven.proto import ticket_pb2
from pydeephaven.query import Query
from pydeephaven.table import Table
# Field-synchronization modes accepted by Session(sync_fields=...).
NO_SYNC = 0  # never look for tables created by other sessions
SYNC_ONCE = 1  # fetch the server's field list once, at connect time
SYNC_REPEATED = 2  # keep polling for field changes in a background thread
class Session:
    """ A Session object represents a connection to the Deephaven data server. It contains a number of convenience
    methods for asking the server to create tables, import Arrow data into tables, merge tables, run Python scripts, and
    execute queries.

    Session objects can be used in Python with statement so that whatever happens in the with statement block, they
    are guaranteed to be closed upon exit.

    Attributes:
        tables (list[str]): names of the global tables available in the server after running scripts
        is_alive (bool): check if the session is still alive (may refresh the session)
    """

    def __init__(self, host: str = None, port: int = None, never_timeout: bool = True, session_type: str = 'python', sync_fields: int = NO_SYNC):
        """ Initialize a Session object that connects to the Deephaven server

        Args:
            host (str): the host name or IP address of the remote machine, default is 'localhost'
            port (int): the port number that Deephaven server is listening on, default is 10000
            never_timeout (bool, optional): never allow the session to timeout, default is True
            session_type (str, optional): the Deephaven session type. Defaults to 'python'
            sync_fields (int, optional): equivalent to calling `Session.sync_fields()` (see below), default is NO_SYNC

        Sync Options:
            session.NO_SYNC: does not check for existing tables on the server
            session.SYNC_ONCE: equivalent to `Session.sync_fields(repeating=False)`
            session.SYNC_REPEATED: equivalent to `Session.sync_fields(repeating=True)`

        Raises:
            DHError
        """
        self._r_lock = threading.RLock()
        self._last_ticket = 0
        self._ticket_bitarray = BitArray(1024)
        # Fall back to environment variables, then to built-in defaults.
        self.host = host
        if not host:
            self.host = os.environ.get("DH_HOST", "localhost")
        self.port = port
        if not port:
            self.port = int(os.environ.get("DH_PORT", 10000))
        if sync_fields not in (NO_SYNC, SYNC_ONCE, SYNC_REPEATED):
            raise DHError("invalid sync_fields setting")
        self.is_connected = False
        self.session_token = None
        self.grpc_channel = None
        # Lazily-created service stubs (see the corresponding properties below).
        self._session_service = None
        self._table_service = None
        self._grpc_barrage_stub = None
        self._console_service = None
        self._flight_service = None
        self._app_service = None
        self._never_timeout = never_timeout
        self._keep_alive_timer = None
        self._session_type = session_type
        self._sync_fields = sync_fields
        self._list_fields = None
        self._field_update_thread = None
        # Maps (application_id, field_name) -> (type_name_or_None, Table).
        self._fields = {}
        self._connect()

    def __enter__(self):
        if not self.is_connected:
            self._connect()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    @property
    def tables(self):
        """ Names of the global Tables available in the server's 'scope'. """
        with self._r_lock:
            # Keys of self._fields are (application_id, field_name) pairs.
            return [nm for sc, nm in self._fields if sc == 'scope' and self._fields[(sc, nm)][0] == 'Table']

    @property
    def grpc_metadata(self):
        """ gRPC call metadata carrying the current session token. """
        return [(b'deephaven_session_id', self.session_token)]

    @property
    def table_service(self):
        if not self._table_service:
            self._table_service = TableService(self)
        return self._table_service

    @property
    def session_service(self):
        if not self._session_service:
            self._session_service = SessionService(self)
        return self._session_service

    @property
    def console_service(self):
        if not self._console_service:
            self._console_service = ConsoleService(self)
        return self._console_service

    @property
    def flight_service(self):
        if not self._flight_service:
            self._flight_service = ArrowFlightService(self)
        return self._flight_service

    @property
    def app_service(self):
        if not self._app_service:
            self._app_service = AppService(self)
        return self._app_service

    def make_ticket(self, ticket_no=None):
        """ Build an export Ticket protobuf, allocating a new ticket number if none is given. """
        if not ticket_no:
            ticket_no = self.get_ticket()
        ticket_bytes = ticket_no.to_bytes(4, byteorder='little', signed=True)
        # The b'e' prefix marks an export ticket.
        return ticket_pb2.Ticket(ticket=b'e' + ticket_bytes)

    def get_ticket(self):
        """ Allocate the next free internal ticket number (thread-safe). """
        with self._r_lock:
            self._last_ticket += 1
            if self._last_ticket == 2 ** 31 - 1:
                raise DHError("fatal error: out of free internal ticket")
            return self._last_ticket

    def sync_fields(self, repeating: bool):
        """ Check for fields that have been added/deleted by other sessions and add them to the local list

        This will start a new background thread when `repeating=True`.

        Args:
            repeating (bool): Continue to check in the background for new/updated tables

        Raises:
            DHError
        """
        with self._r_lock:
            if self._list_fields is not None:
                return
            self._list_fields = self.app_service.list_fields()
            # The first response carries the current state of the server.
            self._parse_fields_change(next(self._list_fields))
            if repeating:
                self._field_update_thread = threading.Thread(target=self._update_fields)
                self._field_update_thread.daemon = True
                self._field_update_thread.start()
            else:
                if not self._list_fields.cancel():
                    raise DHError("could not cancel ListFields subscription")
                self._list_fields = None

    def _update_fields(self):
        """ Constant loop that checks for any server-side field changes and adds them to the local list """
        try:
            while True:
                fields_change = next(self._list_fields)
                with self._r_lock:
                    self._parse_fields_change(fields_change)
        except Exception as e:
            # A cancelled ListFields stream surfaces as an error object that also
            # implements grpc.Future; treat that as a normal shutdown.
            if isinstance(e, grpc.Future):
                pass
            else:
                raise e

    def _cancel_update_fields(self):
        """ Stop the background field-update thread, if one is running. """
        with self._r_lock:
            if self._field_update_thread is not None:
                self._list_fields.cancel()
                self._field_update_thread.join()
                self._list_fields = None
                self._field_update_thread = None

    def _connect(self):
        """ Establish the gRPC connection, then start keep-alive/field-sync as configured. """
        with self._r_lock:
            self.grpc_channel, self.session_token, self._timeout = self.session_service.connect()
            self.is_connected = True
            if self._never_timeout:
                self._keep_alive()
            if self._sync_fields == SYNC_ONCE:
                self.sync_fields(repeating=False)
            elif self._sync_fields == SYNC_REPEATED:
                self.sync_fields(repeating=True)

    def _keep_alive(self):
        """ Refresh the token and re-arm a timer at half the server timeout (ms). """
        if self._keep_alive_timer:
            self._refresh_token()
        self._keep_alive_timer = threading.Timer(self._timeout / 2 / 1000, self._keep_alive)
        self._keep_alive_timer.daemon = True
        self._keep_alive_timer.start()

    def _refresh_token(self):
        with self._r_lock:
            try:
                self.session_token, self._timeout = self.session_service.refresh_token()
            except DHError:
                self.is_connected = False

    @property
    def is_alive(self):
        with self._r_lock:
            if not self.is_connected:
                return False
            if self._never_timeout:
                return True
            try:
                # BUG FIX: refresh_token() returns a (token, timeout) pair (see
                # _refresh_token above); the previous code assigned the whole
                # tuple to session_token, corrupting the gRPC metadata.
                self.session_token, self._timeout = self.session_service.refresh_token()
                return True
            except DHError:
                self.is_connected = False
                return False

    def close(self) -> None:
        """ Close the Session object if it hasn't timed out already.

        Raises:
            DHError
        """
        with self._r_lock:
            if self.is_connected:
                self._cancel_update_fields()
                self.session_service.close()
                self.grpc_channel.close()
                self.is_connected = False
                self._last_ticket = 0

    def release(self, ticket):
        """ Release a server-side export ticket. """
        self.session_service.release(ticket)

    def _parse_fields_change(self, fields_change):
        """ Merge a FieldsChangeUpdate message into the local field map. """
        if fields_change.created:
            for t in fields_change.created:
                t_type = None if t.typed_ticket.type == '' else t.typed_ticket.type
                self._fields[(t.application_id, t.field_name)] = (t_type, Table(session=self, ticket=t.typed_ticket.ticket))
        if fields_change.updated:
            for t in fields_change.updated:
                t_type = None if t.typed_ticket.type == '' else t.typed_ticket.type
                self._fields[(t.application_id, t.field_name)] = (t_type, Table(session=self, ticket=t.typed_ticket.ticket))
        if fields_change.removed:
            for t in fields_change.removed:
                self._fields.pop((t.application_id, t.field_name), None)

    def _parse_script_response(self, response):
        self._parse_fields_change(response.changes)

    # convenience/factory methods
    def run_script(self, script: str) -> None:
        """ Run the supplied Python script on the server.

        Args:
            script (str): the Python script code

        Raises:
            DHError
        """
        with self._r_lock:
            if self._sync_fields == SYNC_REPEATED:
                self._cancel_update_fields()
            response = self.console_service.run_script(script)
            if self._sync_fields == SYNC_REPEATED:
                # Rebuild the field map from scratch, then resume background sync.
                self._fields = {}
                self._parse_script_response(response)
                self.sync_fields(repeating=True)
            else:
                self._parse_script_response(response)

    def open_table(self, name: str) -> Table:
        """ Open a table in the global scope with the given name on the server.

        Args:
            name (str): the name of the table

        Returns:
            a Table object

        Raises:
            DHError
        """
        with self._r_lock:
            if name not in self.tables:
                raise DHError(f"no table by the name {name}")
            table_op = FetchTableOp()
            return self.table_service.grpc_table_op(self._fields[('scope', name)][1], table_op)

    def bind_table(self, name: str, table: Table) -> None:
        """ Bind a table to the given name on the server so that it can be referenced by that name.

        Args:
            name (str): name for the table
            table (Table): a Table object

        Raises:
            DHError
        """
        with self._r_lock:
            self.console_service.bind_table(table=table, variable_name=name)

    def time_table(self, period: int, start_time: int = None) -> Table:
        """ Create a time table on the server.

        Args:
            period (int): the interval (in nano seconds) at which the time table ticks (adds a row)
            start_time (int, optional): the start time for the time table in nano seconds, default is None (meaning now)

        Returns:
            a Table object

        Raises:
            DHError
        """
        table_op = TimeTableOp(start_time=start_time, period=period)
        return self.table_service.grpc_table_op(None, table_op)

    def empty_table(self, size: int) -> Table:
        """ create an empty table on the server.

        Args:
            size (int): the size of the empty table in number of rows

        Returns:
            a Table object

        Raises:
            DHError
        """
        table_op = EmptyTableOp(size=size)
        return self.table_service.grpc_table_op(None, table_op)

    def import_table(self, data: pyarrow.Table) -> Table:
        """ Import the pyarrow table as a new Deephaven table on the server.

        Deephaven supports most of the Arrow data types. However, if the pyarrow table contains any field with a data
        type not supported by Deephaven, the import operation will fail.

        Args:
            data (pyarrow.Table): a pyarrow Table object

        Returns:
            a Table object

        Raises:
            DHError
        """
        return self.flight_service.import_table(data=data)

    def merge_tables(self, tables: List[Table], order_by: str = None) -> Table:
        """ Merge several tables into one table on the server.

        Args:
            tables (list[Table]): the list of Table objects to merge
            order_by (str, optional): if specified the resultant table will be sorted on this column

        Returns:
            a Table object

        Raises:
            DHError
        """
        table_op = MergeTablesOp(tables=tables, key_column=order_by)
        return self.table_service.grpc_table_op(None, table_op)

    def query(self, table: Table) -> Query:
        """ Create a Query object to define a sequence of operations on a Deephaven table.

        Args:
            table (Table): a Table object

        Returns:
            a Query object

        Raises:
            DHError
        """
        return Query(self, table)
|
report.py | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
import threading
from twitter.common.threading import PeriodicThread
class ReportingError(Exception):
  """Raised when the pants reporting subsystem encounters an error."""
class Report(object):
  """A report of a pants run.

  Tracks in-flight workunits and forwards their log messages and captured
  output to a set of registered reporters.  All public methods are guarded by
  a single lock to support parallel execution.
  """

  # Log levels.
  FATAL = 0
  ERROR = 1
  WARN = 2
  INFO = 3
  DEBUG = 4

  _log_level_name_map = {
    'FATAL': FATAL, 'ERROR': ERROR, 'WARN': WARN, 'WARNING': WARN, 'INFO': INFO, 'DEBUG': DEBUG
  }

  @staticmethod
  def log_level_from_string(s):
    """Map a (case-insensitive) level name to its numeric level; unknown names map to INFO."""
    s = s.upper()
    return Report._log_level_name_map.get(s, Report.INFO)

  def __init__(self):
    # We periodically emit newly gathered output from tool invocations.
    self._emitter_thread = \
      PeriodicThread(target=self.flush, name='output-emitter', period_secs=0.5)
    self._emitter_thread.daemon = True
    # Map from workunit id to workunit.
    self._workunits = {}
    # We report to these reporters (name -> Reporter instance).
    self._reporters = {}
    # We synchronize on this, to support parallel execution.
    self._lock = threading.Lock()

  def open(self):
    """Open all registered reporters and start the periodic output emitter."""
    with self._lock:
      for reporter in self._reporters.values():
        reporter.open()
      self._emitter_thread.start()

  # Note that if you add/remove reporters after open() has been called you have
  # to ensure that their state is set up correctly. Best only to do this with
  # stateless reporters, such as ConsoleReporter.
  def add_reporter(self, name, reporter):
    """Register *reporter* under *name*."""
    with self._lock:
      self._reporters[name] = reporter

  def remove_reporter(self, name):
    """Unregister and return the reporter registered under *name* (KeyError if absent)."""
    with self._lock:
      # pop() does the lookup and delete in one step, matching the old
      # get-then-del behavior (including the KeyError on a missing name).
      return self._reporters.pop(name)

  def start_workunit(self, workunit):
    with self._lock:
      self._workunits[workunit.id] = workunit
      for reporter in self._reporters.values():
        reporter.start_workunit(workunit)

  def log(self, workunit, level, *msg_elements):
    """Log a message.

    Each element of msg_elements is either a message string or a (message, detail) pair.
    """
    with self._lock:
      for reporter in self._reporters.values():
        reporter.handle_log(workunit, level, *msg_elements)

  def end_workunit(self, workunit):
    with self._lock:
      self._notify()  # Make sure we flush everything reported until now.
      for reporter in self._reporters.values():
        reporter.end_workunit(workunit)
      self._workunits.pop(workunit.id, None)

  def flush(self):
    """Emit buffered workunit output (called periodically by the emitter thread)."""
    with self._lock:
      self._notify()

  def close(self):
    """Stop the emitter, emit one final time and close all reporters."""
    self._emitter_thread.stop()
    with self._lock:
      self._notify()  # One final time.
      for reporter in self._reporters.values():
        reporter.close()

  def _notify(self):
    # Notify for output in all workunits. Note that output may be coming in from workunits other
    # than the current one, if work is happening in parallel.
    # Assumes self._lock is held by the caller.
    for workunit in self._workunits.values():
      for label, output in workunit.outputs().items():
        s = output.read()
        if s:
          for reporter in self._reporters.values():
            reporter.handle_output(workunit, label, s)
|
test.py | # -*- coding: utf8 -*-
from contextlib import contextmanager
from functools import wraps
from os.path import exists, join, realpath, dirname, split
import errno
import fcntl
import inspect
import logging
import os
import platform
import pty
import resource
import sh
import signal
import stat
import sys
import tempfile
import time
import unittest
import warnings
# Interpreter-version flags used throughout the suite.
IS_PY3 = sys.version_info[0] == 3
IS_PY2 = not IS_PY3
MINOR_VER = sys.version_info[1]
# unittest.mock only exists on Python 3.
try:
    import unittest.mock
except ImportError:
    HAS_MOCK = False
else:
    HAS_MOCK = True
# we have to use the real path because on osx, /tmp is a symlink to
# /private/tmp, and so assertions that gettempdir() == sh.pwd() will fail
tempdir = realpath(tempfile.gettempdir())
# NOTE(review): "AIX" in this tuple means IS_MACOS is also true on AIX —
# confirm that grouping the two platforms here is intentional.
IS_MACOS = platform.system() in ("AIX", "Darwin")
# these 3 functions are helpers for modifying PYTHONPATH with a module's main
# directory
def append_pythonpath(env, path):
    """Prepend *path* to the PYTHONPATH entry of the mapping *env*."""
    key = "PYTHONPATH"
    existing = [entry for entry in env.get(key, "").split(":") if entry]
    env[key] = ":".join([path] + existing)
def get_module_import_dir(m):
    """Return the directory that must be on sys.path in order to import *m*."""
    src = inspect.getsourcefile(m)
    mod_dir = dirname(src)
    if src.endswith("__init__.py"):
        # Packages are imported from their parent directory.
        mod_dir, _ = split(mod_dir)
    return mod_dir
def append_module_path(env, m):
    """Put module *m*'s import directory at the front of env's PYTHONPATH."""
    module_dir = get_module_import_dir(m)
    append_pythonpath(env, module_dir)
# Python 2/3 compatibility aliases: give both interpreters the same names for
# builtins and the various StringIO/BytesIO flavors used by the tests.
if IS_PY3:
    xrange = range
    unicode = str
    long = int
    from io import StringIO
    ioStringIO = StringIO
    from io import BytesIO as cStringIO
    iocStringIO = cStringIO
else:
    from StringIO import StringIO
    from cStringIO import StringIO as cStringIO
    from io import StringIO as ioStringIO
    from io import BytesIO as iocStringIO
THIS_DIR = dirname(os.path.abspath(__file__))
system_python = sh.Command(sys.executable)
# this is to ensure that our `python` helper here is able to import our local sh
# module, and not the system one
baked_env = os.environ.copy()
append_module_path(baked_env, sh)
python = system_python.bake(_env=baked_env)
# logging.NullHandler exists from Python 2.7; provide a no-op backfill otherwise.
if hasattr(logging, 'NullHandler'):
    NullHandler = logging.NullHandler
else:
    class NullHandler(logging.Handler):
        def handle(self, record):
            pass
        def emit(self, record):
            pass
        def createLock(self):
            self.lock = None
# unittest.skipUnless exists from Python 2.7; fall back to a minimal wrapper
# that replaces the test with a no-op when the condition is false.
skipUnless = getattr(unittest, "skipUnless", None)
if not skipUnless:
    # our stupid skipUnless wrapper for python2.6
    def skipUnless(condition, reason):
        def wrapper(test):
            if condition:
                return test
            else:
                @wraps(test)
                def skip(*args, **kwargs):
                    return
                return skip
        return wrapper
skip_unless = skipUnless
def requires_progs(*progs):
    """Skip decorator requiring every named system program to be resolvable."""
    missing = []
    for prog in progs:
        try:
            sh.Command(prog)
        except sh.CommandNotFound:
            missing.append(prog)
    friendly_missing = ", ".join(missing)
    return skipUnless(len(missing) == 0, "Missing required system programs: %s"
            % friendly_missing)
# Common capability-based skip decorators used throughout the suite.
requires_posix = skipUnless(os.name == "posix", "Requires POSIX")
requires_utf8 = skipUnless(sh.DEFAULT_ENCODING == "UTF-8", "System encoding must be UTF-8")
not_macos = skipUnless(not IS_MACOS, "Doesn't work on MacOS")
requires_py3 = skipUnless(IS_PY3, "Test only works on Python 3")
requires_py35 = skipUnless(IS_PY3 and MINOR_VER >= 5, "Test only works on Python 3.5 or higher")
def requires_poller(poller):
    """Skip decorator enabling a test only for the active select/poll backend."""
    use_select = bool(int(os.environ.get("SH_TESTS_USE_SELECT", "0")))
    cur_poller = "select" if use_select else "poll"
    enabled = cur_poller == poller
    return skipUnless(enabled, "Only enabled for select.%s" % cur_poller)
@contextmanager
def ulimit(key, new_soft):
    """Temporarily set the soft rlimit *key* to *new_soft*, restoring it on exit."""
    old_soft, old_hard = resource.getrlimit(key)
    resource.setrlimit(key, (new_soft, old_hard))
    try:
        yield
    finally:
        # Always restore the original limits, even if the body raised.
        resource.setrlimit(key, (old_soft, old_hard))
def create_tmp_test(code, prefix="tmp", delete=True, **kwargs):
    """ creates a temporary test file that lives on disk, on which we can run
    python with sh.  *code* is first expanded with str.format(**kwargs). """
    tmp = tempfile.NamedTemporaryFile(prefix=prefix, delete=delete)
    formatted = code.format(**kwargs)
    if sys.version_info[0] == 3:
        formatted = formatted.encode("UTF-8")
    tmp.write(formatted)
    tmp.flush()
    # make the file executable so tests can exec it directly
    mode = os.stat(tmp.name).st_mode
    os.chmod(tmp.name, mode | stat.S_IEXEC)
    # we don't explicitly close, because close will remove the file, and we
    # don't want that until the test case is done. so we let the gc close it
    # when it goes out of scope
    return tmp
class BaseTests(unittest.TestCase):
    """Shared assertion helpers plus python2.6 backfills for the suite."""

    def assert_oserror(self, num, fn, *args, **kwargs):
        """Assert that fn(*args, **kwargs) raises OSError with errno *num*."""
        try:
            fn(*args, **kwargs)
        except OSError as e:
            self.assertEqual(e.errno, num)
        else:
            # BUG FIX: previously a call that raised nothing passed silently,
            # which defeats the purpose of this assertion helper.
            self.fail("OSError not raised")

    def assert_deprecated(self, fn, *args, **kwargs):
        """Assert that fn(*args, **kwargs) emits exactly one DeprecationWarning."""
        with warnings.catch_warnings(record=True) as w:
            fn(*args, **kwargs)
        self.assertEqual(len(w), 1)
        self.assertTrue(issubclass(w[-1].category, DeprecationWarning))

    # python2.6 lacks this
    def assertIn(self, needle, haystack):
        s = super(BaseTests, self)
        if hasattr(s, "assertIn"):
            s.assertIn(needle, haystack)
        else:
            self.assertTrue(needle in haystack)

    # python2.6 lacks this
    def assertNotIn(self, needle, haystack):
        s = super(BaseTests, self)
        if hasattr(s, "assertNotIn"):
            s.assertNotIn(needle, haystack)
        else:
            self.assertTrue(needle not in haystack)

    # python2.6 lacks this
    def assertLess(self, a, b):
        s = super(BaseTests, self)
        if hasattr(s, "assertLess"):
            s.assertLess(a, b)
        else:
            self.assertTrue(a < b)

    # python2.6 lacks this
    def assertGreater(self, a, b):
        s = super(BaseTests, self)
        if hasattr(s, "assertGreater"):
            s.assertGreater(a, b)
        else:
            self.assertTrue(a > b)

    # python2.6 lacks this
    def skipTest(self, msg):
        s = super(BaseTests, self)
        if hasattr(s, "skipTest"):
            s.skipTest(msg)
        else:
            return
@requires_posix
class FunctionalTests(BaseTests):
    def setUp(self):
        # Snapshot the environment so each test may mutate os.environ freely.
        self._environ = os.environ.copy()
    def tearDown(self):
        # Restore the environment captured in setUp.
        os.environ = self._environ
    def test_print_command(self):
        # str() of a sh Command resolves to the program's absolute path.
        from sh import ls, which
        actual_location = which("ls")
        out = str(ls)
        self.assertEqual(out, actual_location)
    def test_unicode_arg(self):
        # Non-ASCII arguments must round-trip through the child process.
        from sh import echo
        test = "漢字"
        if not IS_PY3:
            test = test.decode("utf8")
        p = echo(test, _encoding="utf8")
        output = p.strip()
        self.assertEqual(test, output)
    def test_unicode_exception(self):
        # The failing command's non-ASCII argument must appear in the exception text.
        from sh import ErrorReturnCode
        py = create_tmp_test("exit(1)")
        arg = "漢字"
        native_arg = arg
        if not IS_PY3:
            arg = arg.decode("utf8")
        try:
            python(py.name, arg, _encoding="utf8")
        except ErrorReturnCode as e:
            self.assertIn(native_arg, str(e))
        else:
            self.fail("exception wasn't raised")
    def test_pipe_fd(self):
        # A raw file descriptor may be passed as _out to receive stdout.
        py = create_tmp_test("""print("hi world")""")
        read_fd, write_fd = os.pipe()
        python(py.name, _out=write_fd)
        out = os.read(read_fd, 10)
        self.assertEqual(out, b"hi world\n")
    def test_trunc_exc(self):
        # Large stdout/stderr must not break exception construction on failure.
        py = create_tmp_test("""
import sys
sys.stdout.write("a" * 1000)
sys.stderr.write("b" * 1000)
exit(1)
""")
        self.assertRaises(sh.ErrorReturnCode_1, python, py.name)
    def test_number_arg(self):
        # Integer arguments are stringified before being passed to the child.
        py = create_tmp_test("""
from optparse import OptionParser
parser = OptionParser()
options, args = parser.parse_args()
print(args[0])
""")
        out = python(py.name, 3).strip()
        self.assertEqual(out, "3")
    def test_empty_stdin_no_hang(self):
        # A child that reads all of stdin must not hang when _in is "" or None.
        py = create_tmp_test("""
import sys
data = sys.stdin.read()
sys.stdout.write("no hang")
""")
        out = python(py.name, _in="", _timeout=2)
        self.assertEqual(out, "no hang")
        out = python(py.name, _in=None, _timeout=2)
        self.assertEqual(out, "no hang")
    def test_exit_code(self):
        # A nonzero exit code raises the matching ErrorReturnCode_N subclass.
        from sh import ErrorReturnCode_3
        py = create_tmp_test("""
exit(3)
""")
        self.assertRaises(ErrorReturnCode_3, python, py.name)
    def test_patched_glob(self):
        # A non-matching glob result (the literal pattern) passes through as one arg.
        from glob import glob
        py = create_tmp_test("""
import sys
print(sys.argv[1:])
""")
        files = glob("*.faowjefoajweofj")
        out = python(py.name, files).strip()
        self.assertEqual(out, "['*.faowjefoajweofj']")
    @requires_py35
    def test_patched_glob_with_recursive_argument(self):
        # Same as test_patched_glob, with glob's py3.5+ recursive keyword.
        from glob import glob
        py = create_tmp_test("""
import sys
print(sys.argv[1:])
""")
        files = glob("*.faowjefoajweofj", recursive=True)
        out = python(py.name, files).strip()
        self.assertEqual(out, "['*.faowjefoajweofj']")
    def test_exit_code_with_hasattr(self):
        # hasattr() probing on a lazy (_iter=True) command must not swallow
        # the eventual exit-code exception.
        from sh import ErrorReturnCode_3
        py = create_tmp_test("""
exit(3)
""")
        try:
            out = python(py.name, _iter=True)
            # hasattr can swallow exceptions
            hasattr(out, 'something_not_there')
            list(out)
            self.assertEqual(out.exit_code, 3)
            self.fail("Command exited with error, but no exception thrown")
        except ErrorReturnCode_3:
            pass
    def test_exit_code_from_exception(self):
        # The raised exception itself carries the child's exit_code.
        from sh import ErrorReturnCode_3
        py = create_tmp_test("""
exit(3)
""")
        self.assertRaises(ErrorReturnCode_3, python, py.name)
        try:
            python(py.name)
        except Exception as e:
            self.assertEqual(e.exit_code, 3)
    def test_stdin_from_string(self):
        # A plain string given as _in is fed to the child's stdin.
        from sh import sed
        self.assertEqual(sed(_in="one test three", e="s/test/two/").strip(),
                         "one two three")
    def test_ok_code(self):
        # _ok_code (scalar, list or range) suppresses the ErrorReturnCode for
        # the listed exit codes.  ls exits 1 on MacOS, 2 elsewhere, for a
        # missing path.
        from sh import ls, ErrorReturnCode_1, ErrorReturnCode_2
        exc_to_test = ErrorReturnCode_2
        code_to_pass = 2
        if IS_MACOS:
            exc_to_test = ErrorReturnCode_1
            code_to_pass = 1
        self.assertRaises(exc_to_test, ls, "/aofwje/garogjao4a/eoan3on")
        ls("/aofwje/garogjao4a/eoan3on", _ok_code=code_to_pass)
        ls("/aofwje/garogjao4a/eoan3on", _ok_code=[code_to_pass])
        ls("/aofwje/garogjao4a/eoan3on", _ok_code=range(code_to_pass + 1))
    def test_ok_code_none(self):
        # _ok_code=None is accepted for a successful (exit 0) command.
        py = create_tmp_test("exit(0)")
        python(py.name, _ok_code=None)
    def test_ok_code_exception(self):
        # If 0 is NOT in _ok_code, even a successful exit raises ErrorReturnCode_0.
        from sh import ErrorReturnCode_0
        py = create_tmp_test("exit(0)")
        self.assertRaises(ErrorReturnCode_0, python, py.name, _ok_code=2)
    def test_none_arg(self):
        # A None argument is dropped rather than passed as the string "None".
        py = create_tmp_test("""
import sys
print(sys.argv[1:])
""")
        maybe_arg = "some"
        out = python(py.name, maybe_arg).strip()
        self.assertEqual(out, "['some']")
        maybe_arg = None
        out = python(py.name, maybe_arg).strip()
        self.assertEqual(out, "[]")
    def test_quote_escaping(self):
        # Arguments with spaces and embedded quotes must reach the child
        # verbatim, one argv entry per Python argument.
        py = create_tmp_test("""
from optparse import OptionParser
parser = OptionParser()
options, args = parser.parse_args()
print(args)
""")
        out = python(py.name, "one two three").strip()
        self.assertEqual(out, "['one two three']")
        out = python(py.name, "one \"two three").strip()
        self.assertEqual(out, "['one \"two three']")
        out = python(py.name, "one", "two three").strip()
        self.assertEqual(out, "['one', 'two three']")
        out = python(py.name, "one", "two \"haha\" three").strip()
        self.assertEqual(out, "['one', 'two \"haha\" three']")
        out = python(py.name, "one two's three").strip()
        self.assertEqual(out, "[\"one two's three\"]")
        out = python(py.name, 'one two\'s three').strip()
        self.assertEqual(out, "[\"one two's three\"]")
    def test_multiple_pipes(self):
        # Chain three _piped increment filters after a slow producer and check
        # both the transformed output ("andrew" shifted by 3 chars) and that
        # data streams through incrementally (inter-line gaps > .15s).
        import time
        py = create_tmp_test("""
import sys
import os
import time
for l in "andrew":
    sys.stdout.write(l)
    time.sleep(.2)
""")
        inc_py = create_tmp_test("""
import sys
while True:
    letter = sys.stdin.read(1)
    if not letter:
        break
    sys.stdout.write(chr(ord(letter)+1))
""")
        def inc(proc, *args, **kwargs):
            return python(proc, "-u", inc_py.name, *args, **kwargs)
        class Derp(object):
            def __init__(self):
                self.times = []
                self.stdout = []
                self.last_received = None
            def agg(self, line):
                self.stdout.append(line.strip())
                now = time.time()
                if self.last_received:
                    self.times.append(now - self.last_received)
                self.last_received = now
        derp = Derp()
        p = inc(
            inc(
                inc(
                    python("-u", py.name, _piped=True),
                    _piped=True),
                _piped=True),
            _out=derp.agg)
        p.wait()
        self.assertEqual("".join(derp.stdout), "dqguhz")
        self.assertTrue(all([t > .15 for t in derp.times]))
# _in accepts a plain string as stdin.
def test_manual_stdin_string(self):
from sh import tr
out = tr("[:lower:]", "[:upper:]", _in="andrew").strip()
self.assertEqual(out, "ANDREW")
# _in accepts any iterable of chunks as stdin.
def test_manual_stdin_iterable(self):
from sh import tr
test = ["testing\n", "herp\n", "derp\n"]
out = tr("[:lower:]", "[:upper:]", _in=test)
match = "".join([t.upper() for t in test])
self.assertEqual(out, match)
# _in accepts an open file object as stdin.
def test_manual_stdin_file(self):
from sh import tr
import tempfile
test_string = "testing\nherp\nderp\n"
stdin = tempfile.NamedTemporaryFile()
stdin.write(test_string.encode())
stdin.flush()
stdin.seek(0)
out = tr("[:lower:]", "[:upper:]", _in=stdin)
self.assertEqual(out, test_string.upper())
# _in accepts a Queue; a None item signals EOF. Py2/Py3 Queue import
# is handled with the try/except fallback.
def test_manual_stdin_queue(self):
from sh import tr
try:
from Queue import Queue
except ImportError:
from queue import Queue
test = ["testing\n", "herp\n", "derp\n"]
q = Queue()
for t in test:
q.put(t)
q.put(None) # EOF
out = tr("[:lower:]", "[:upper:]", _in=q)
match = "".join([t.upper() for t in test])
self.assertEqual(out, match)
def test_environment(self):
""" tests that environments variables that we pass into sh commands
exist in the environment, and on the sh module """
import os
# this is the environment we'll pass into our commands
env = {"HERP": "DERP"}
# first we test that the environment exists in our child process as
# we've set it
py = create_tmp_test("""
import os
for key in list(os.environ.keys()):
if key != "HERP":
del os.environ[key]
print(dict(os.environ))
""")
out = python(py.name, _env=env).strip()
self.assertEqual(out, "{'HERP': 'DERP'}")
py = create_tmp_test("""
import os, sys
sys.path.insert(0, os.getcwd())
import sh
for key in list(os.environ.keys()):
if key != "HERP":
del os.environ[key]
print(dict(HERP=sh.HERP))
""")
out = python(py.name, _env=env, _cwd=THIS_DIR).strip()
self.assertEqual(out, "{'HERP': 'DERP'}")
# Test that _env also accepts os.environ which is a mapping but not a dict.
os.environ["HERP"] = "DERP"
out = python(py.name, _env=os.environ, _cwd=THIS_DIR).strip()
self.assertEqual(out, "{'HERP': 'DERP'}")
# sh.which returns None for unknown programs, full path for known ones.
def test_which(self):
from sh import which, ls
self.assertEqual(which("fjoawjefojawe"), None)
self.assertEqual(which("ls"), str(ls))
# which() honors an explicit search-path list argument.
def test_which_paths(self):
from sh import which
py = create_tmp_test("""
print("hi")
""")
test_path = dirname(py.name)
_, test_name = os.path.split(py.name)
found_path = which(test_name)
self.assertEqual(found_path, None)
found_path = which(test_name, [test_path])
self.assertEqual(found_path, py.name)
# _close_fds=False leaves parent fds open in the child.
def test_no_close_fds(self):
# guarantee some extra fds in our parent process that don't close on exec. we have to explicitly do this
# because at some point (I believe python 3.4), python started being more stringent with closing fds to prevent
# security vulnerabilities. python 2.7, for example, doesn't set CLOEXEC on tempfile.TemporaryFile()s
#
# https://www.python.org/dev/peps/pep-0446/
tmp = [tempfile.TemporaryFile() for i in range(10)]
for t in tmp:
flags = fcntl.fcntl(t.fileno(), fcntl.F_GETFD)
flags &= ~fcntl.FD_CLOEXEC
fcntl.fcntl(t.fileno(), fcntl.F_SETFD, flags)
py = create_tmp_test("""
import os
print(len(os.listdir("/dev/fd")))
""")
out = python(py.name, _close_fds=False).strip()
# pick some number greater than 4, since it's hard to know exactly how many fds will be open/inherted in the
# child
self.assertGreater(int(out), 7)
for t in tmp:
t.close()
# default behavior: only stdin/stdout/stderr (+/dev/fd itself) survive.
def test_close_fds(self):
# guarantee some extra fds in our parent process that don't close on exec. we have to explicitly do this
# because at some point (I believe python 3.4), python started being more stringent with closing fds to prevent
# security vulnerabilities. python 2.7, for example, doesn't set CLOEXEC on tempfile.TemporaryFile()s
#
# https://www.python.org/dev/peps/pep-0446/
tmp = [tempfile.TemporaryFile() for i in range(10)]
for t in tmp:
flags = fcntl.fcntl(t.fileno(), fcntl.F_GETFD)
flags &= ~fcntl.FD_CLOEXEC
fcntl.fcntl(t.fileno(), fcntl.F_SETFD, flags)
py = create_tmp_test("""
import os
print(os.listdir("/dev/fd"))
""")
out = python(py.name).strip()
self.assertEqual(out, "['0', '1', '2', '3']")
for t in tmp:
t.close()
# _pass_fds whitelists specific fds to inherit while closing the rest.
def test_pass_fds(self):
# guarantee some extra fds in our parent process that don't close on exec. we have to explicitly do this
# because at some point (I believe python 3.4), python started being more stringent with closing fds to prevent
# security vulnerabilities. python 2.7, for example, doesn't set CLOEXEC on tempfile.TemporaryFile()s
#
# https://www.python.org/dev/peps/pep-0446/
tmp = [tempfile.TemporaryFile() for i in range(10)]
for t in tmp:
flags = fcntl.fcntl(t.fileno(), fcntl.F_GETFD)
flags &= ~fcntl.FD_CLOEXEC
fcntl.fcntl(t.fileno(), fcntl.F_SETFD, flags)
last_fd = tmp[-1].fileno()
py = create_tmp_test("""
import os
print(os.listdir("/dev/fd"))
""")
out = python(py.name, _pass_fds=[last_fd]).strip()
inherited = [0, 1, 2, 3, last_fd]
inherited_str = [str(i) for i in inherited]
self.assertEqual(out, str(inherited_str))
for t in tmp:
t.close()
# A command with no arguments runs and returns its output (whoami).
def test_no_arg(self):
import pwd
from sh import whoami
u1 = whoami().strip()
u2 = pwd.getpwuid(os.geteuid())[0]
self.assertEqual(u1, u2)
# _iter and _piped are mutually exclusive special kwargs.
def test_incompatible_special_args(self):
from sh import ls
self.assertRaises(TypeError, ls, _iter=True, _piped=True)
# _env must be a dict of str->str; anything else raises.
def test_invalid_env(self):
from sh import ls
exc = TypeError
# note: py2.6's os module raised ValueError instead of TypeError here
if IS_PY2 and MINOR_VER == 6:
exc = ValueError
self.assertRaises(exc, ls, _env="XXX")
self.assertRaises(exc, ls, _env={"foo": 123})
self.assertRaises(exc, ls, _env={123: "bar"})
# non-zero exit raises the matching ErrorReturnCode_N subclass.
def test_exception(self):
from sh import ErrorReturnCode_2
py = create_tmp_test("""
exit(2)
""")
self.assertRaises(ErrorReturnCode_2, python, py.name)
# a failing upstream process in a pipeline surfaces its exception
# when the downstream output is consumed via _iter...
def test_piped_exception1(self):
from sh import ErrorReturnCode_2
py = create_tmp_test("""
import sys
sys.stdout.write("line1\\n")
sys.stdout.write("line2\\n")
exit(2)
""")
py2 = create_tmp_test("")
def fn():
list(python(python(py.name, _piped=True), "-u", py2.name, _iter=True))
self.assertRaises(ErrorReturnCode_2, fn)
# ...and also when the pipeline is run to completion synchronously.
def test_piped_exception2(self):
from sh import ErrorReturnCode_2
py = create_tmp_test("""
import sys
sys.stdout.write("line1\\n")
sys.stdout.write("line2\\n")
exit(2)
""")
py2 = create_tmp_test("")
def fn():
python(python(py.name, _piped=True), "-u", py2.name)
self.assertRaises(ErrorReturnCode_2, fn)
# missing programs raise ImportError on `from sh import x`, and
# CommandNotFound on attribute access / explicit Command().
def test_command_not_found(self):
from sh import CommandNotFound
def do_import():
from sh import aowjgoawjoeijaowjellll # noqa: F401
self.assertRaises(ImportError, do_import)
def do_import():
import sh
sh.awoefaowejfw
self.assertRaises(CommandNotFound, do_import)
def do_import():
import sh
sh.Command("ofajweofjawoe")
self.assertRaises(CommandNotFound, do_import)
# Command(path) compares equal to the module-level magic import.
def test_command_wrapper_equivalence(self):
from sh import Command, ls, which
self.assertEqual(Command(which("ls")), ls)
def test_doesnt_execute_directories(self):
save_path = os.environ['PATH']
bin_dir1 = tempfile.mkdtemp()
bin_dir2 = tempfile.mkdtemp()
gcc_dir1 = os.path.join(bin_dir1, 'gcc')
gcc_file2 = os.path.join(bin_dir2, 'gcc')
try:
os.environ['PATH'] = os.pathsep.join((bin_dir1, bin_dir2))
# a folder named 'gcc', its executable, but should not be
# discovered by internal which(1)-clone
os.makedirs(gcc_dir1)
# an executable named gcc -- only this should be executed
bunk_header = '#!/bin/sh\necho $*'
with open(gcc_file2, "w") as h:
h.write(bunk_header)
os.chmod(gcc_file2, int(0o755))
import sh
from sh import gcc
if IS_PY3:
self.assertEqual(gcc._path,
gcc_file2.encode(sh.DEFAULT_ENCODING))
else:
self.assertEqual(gcc._path, gcc_file2)
self.assertEqual(gcc('no-error').stdout.strip(),
'no-error'.encode("ascii"))
finally:
os.environ['PATH'] = save_path
if exists(gcc_file2):
os.unlink(gcc_file2)
if exists(gcc_dir1):
os.rmdir(gcc_dir1)
if exists(bin_dir1):
os.rmdir(bin_dir1)
if exists(bin_dir1):
os.rmdir(bin_dir2)
# a short option kwarg with a multi-word value stays one argv token.
def test_multiple_args_short_option(self):
py = create_tmp_test("""
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-l", dest="long_option")
options, args = parser.parse_args()
print(len(options.long_option.split()))
""")
num_args = int(python(py.name, l="one two three")) # noqa: E741
self.assertEqual(num_args, 3)
num_args = int(python(py.name, "-l", "one's two's three's"))
self.assertEqual(num_args, 3)
# same guarantee for long options; False kwargs are omitted entirely.
def test_multiple_args_long_option(self):
py = create_tmp_test("""
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-l", "--long-option", dest="long_option")
options, args = parser.parse_args()
print(len(options.long_option.split()))
""")
num_args = int(python(py.name, long_option="one two three",
nothing=False))
self.assertEqual(num_args, 3)
num_args = int(python(py.name, "--long-option", "one's two's three's"))
self.assertEqual(num_args, 3)
# boolean kwarg True emits the bare short flag; False omits it.
def test_short_bool_option(self):
py = create_tmp_test("""
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-s", action="store_true", default=False, dest="short_option")
options, args = parser.parse_args()
print(options.short_option)
""")
self.assertTrue(python(py.name, s=True).strip() == "True")
self.assertTrue(python(py.name, s=False).strip() == "False")
self.assertTrue(python(py.name).strip() == "False")
# boolean kwarg True emits the bare long flag.
def test_long_bool_option(self):
py = create_tmp_test("""
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-l", "--long-option", action="store_true", default=False, dest="long_option")
options, args = parser.parse_args()
print(options.long_option)
""")
self.assertTrue(python(py.name, long_option=True).strip() == "True")
self.assertTrue(python(py.name).strip() == "False")
# a falsy positional (here `False` from `test and "-n"`) is dropped.
def test_false_bool_ignore(self):
py = create_tmp_test("""
import sys
print(sys.argv[1:])
""")
test = True
self.assertEqual(python(py.name, test and "-n").strip(), "['-n']")
test = False
self.assertEqual(python(py.name, test and "-n").strip(), "[]")
# a finished command's output can feed another command's stdin.
def test_composition(self):
from sh import ls, wc
c1 = int(wc(ls("-A1"), l=True)) # noqa: E741
c2 = len(os.listdir("."))
self.assertEqual(c1, c2)
# same, but streaming via _piped instead of buffering first.
def test_incremental_composition(self):
from sh import ls, wc
c1 = int(wc(ls("-A1", _piped=True), l=True).strip()) # noqa: E741
c2 = len(os.listdir("."))
self.assertEqual(c1, c2)
# short option kwarg expands to "-c <value>".
def test_short_option(self):
from sh import sh
s1 = sh(c="echo test").strip()
s2 = "test"
self.assertEqual(s1, s2)
# kwarg name maps to "--long-option=value".
def test_long_option(self):
py = create_tmp_test("""
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-l", "--long-option", action="store", default="", dest="long_option")
options, args = parser.parse_args()
print(options.long_option.upper())
""")
self.assertTrue(python(py.name, long_option="testing").strip() == "TESTING")
self.assertTrue(python(py.name).strip() == "")
# dict positional args are passed raw (no underscore->hyphen
# translation), unlike kwargs which are translated.
def test_raw_args(self):
py = create_tmp_test("""
from optparse import OptionParser
parser = OptionParser()
parser.add_option("--long_option", action="store", default=None,
dest="long_option1")
parser.add_option("--long-option", action="store", default=None,
dest="long_option2")
options, args = parser.parse_args()
if options.long_option1:
print(options.long_option1.upper())
else:
print(options.long_option2.upper())
""")
self.assertEqual(python(py.name,
{"long_option": "underscore"}).strip(), "UNDERSCORE")
self.assertEqual(python(py.name, long_option="hyphen").strip(), "HYPHEN")
# _long_sep customizes the "=" between option name and value.
def test_custom_separator(self):
py = create_tmp_test("""
import sys
print(sys.argv[1])
""")
opt = {"long-option": "underscore"}
correct = "--long-option=custom=underscore"
out = python(py.name, opt, _long_sep="=custom=").strip()
self.assertEqual(out, correct)
# test baking too
correct = "--long-option=baked=underscore"
python_baked = python.bake(py.name, opt, _long_sep="=baked=")
out = python_baked().strip()
self.assertEqual(out, correct)
# _long_sep=" " emits name and value as two separate argv tokens.
def test_custom_separator_space(self):
py = create_tmp_test("""
import sys
print(str(sys.argv[1:]))
""")
opt = {"long-option": "space"}
correct = ["--long-option", "space"]
out = python(py.name, opt, _long_sep=" ").strip()
self.assertEqual(out, str(correct))
# _long_prefix customizes the leading "--", for both values and flags.
def test_custom_long_prefix(self):
py = create_tmp_test("""
import sys
print(sys.argv[1])
""")
out = python(py.name, {"long-option": "underscore"},
_long_prefix="-custom-").strip()
self.assertEqual(out, "-custom-long-option=underscore")
out = python(py.name, {"long-option": True},
_long_prefix="-custom-").strip()
self.assertEqual(out, "-custom-long-option")
# test baking too
out = python.bake(py.name, {"long-option": "underscore"},
_long_prefix="-baked-")().strip()
self.assertEqual(out, "-baked-long-option=underscore")
out = python.bake(py.name, {"long-option": True},
_long_prefix="-baked-")().strip()
self.assertEqual(out, "-baked-long-option")
# explicit Command(path) objects compose just like magic imports.
def test_command_wrapper(self):
from sh import Command, which
ls = Command(which("ls"))
wc = Command(which("wc"))
c1 = int(wc(ls("-A1"), l=True)) # noqa: E741
c2 = len(os.listdir("."))
self.assertEqual(c1, c2)
# _bg=True returns immediately; wait() blocks until completion.
def test_background(self):
from sh import sleep
import time
start = time.time()
sleep_time = .5
p = sleep(sleep_time, _bg=True)
now = time.time()
self.assertLess(now - start, sleep_time)
p.wait()
now = time.time()
self.assertGreater(now - start, sleep_time)
# with _bg_exc=False the error is deferred until wait().
def test_background_exception(self):
from sh import ls, ErrorReturnCode_1, ErrorReturnCode_2
p = ls("/ofawjeofj", _bg=True, _bg_exc=False) # should not raise
# ls uses a different exit code for "not found" on macOS vs linux
exc_to_test = ErrorReturnCode_2
if IS_MACOS:
exc_to_test = ErrorReturnCode_1
self.assertRaises(exc_to_test, p.wait) # should raise
# `with cmd:` prefixes every command run inside the block.
def test_with_context(self):
from sh import whoami
import getpass
py = create_tmp_test("""
import sys
import os
import subprocess
print("with_context")
subprocess.Popen(sys.argv[1:], shell=False).wait()
""")
cmd1 = python.bake(py.name, _with=True)
with cmd1:
out = whoami()
self.assertIn("with_context", out)
self.assertIn(getpass.getuser(), out)
# the context command may itself take args/options.
def test_with_context_args(self):
from sh import whoami
import getpass
py = create_tmp_test("""
import sys
import os
import subprocess
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-o", "--opt", action="store_true", default=False, dest="opt")
options, args = parser.parse_args()
if options.opt:
subprocess.Popen(args[0], shell=False).wait()
""")
with python(py.name, opt=True, _with=True):
out = whoami()
self.assertTrue(getpass.getuser() == out.strip())
with python(py.name, _with=True):
out = whoami()
self.assertTrue(out == "")
# bytes are accepted for _in and decoded on the way out.
def test_binary_input(self):
py = create_tmp_test("""
import sys
data = sys.stdin.read()
sys.stdout.write(data)
""")
data = b'1234'
out = python(py.name, _in=data)
self.assertEqual(out, "1234")
# _err_to_out merges stderr into the stdout stream, in order.
def test_err_to_out(self):
py = create_tmp_test("""
import sys
import os
sys.stdout.write("stdout")
sys.stdout.flush()
sys.stderr.write("stderr")
sys.stderr.flush()
""")
stdout = python(py.name, _err_to_out=True)
self.assertEqual(stdout, "stdoutstderr")
# when _out is a raw fd, the merged stream goes there and the
# RunningCommand's own stdout stays empty.
def test_err_to_out_and_sys_stdout(self):
py = create_tmp_test("""
import sys
import os
sys.stdout.write("stdout")
sys.stdout.flush()
sys.stderr.write("stderr")
sys.stderr.flush()
""")
master, slave = os.pipe()
stdout = python(py.name, _err_to_out=True, _out=slave)
self.assertEqual(stdout, "")
self.assertEqual(os.read(master, 12), b"stdoutstderr")
# _piped="err" pipes stderr (not stdout) into the next command.
def test_err_piped(self):
py = create_tmp_test("""
import sys
sys.stderr.write("stderr")
""")
py2 = create_tmp_test("""
import sys
while True:
line = sys.stdin.read()
if not line:
break
sys.stdout.write(line)
""")
out = python(python("-u", py.name, _piped="err"), "-u", py2.name)
self.assertEqual(out, "stderr")
# _out=<file obj> redirects stdout; _tee=True additionally keeps an
# internal copy on the RunningCommand.
def test_out_redirection(self):
import tempfile
py = create_tmp_test("""
import sys
import os
sys.stdout.write("stdout")
sys.stderr.write("stderr")
""")
file_obj = tempfile.NamedTemporaryFile()
out = python(py.name, _out=file_obj)
self.assertEqual(len(out), 0)
file_obj.seek(0)
actual_out = file_obj.read()
file_obj.close()
self.assertNotEqual(len(actual_out), 0)
# test with tee
file_obj = tempfile.NamedTemporaryFile()
out = python(py.name, _out=file_obj, _tee=True)
self.assertGreater(len(out), 0)
file_obj.seek(0)
actual_out = file_obj.read()
file_obj.close()
self.assertGreater(len(actual_out), 0)
# same for stderr via _err / _tee="err".
def test_err_redirection(self):
import tempfile
py = create_tmp_test("""
import sys
import os
sys.stdout.write("stdout")
sys.stderr.write("stderr")
""")
file_obj = tempfile.NamedTemporaryFile()
p = python("-u", py.name, _err=file_obj)
file_obj.seek(0)
stderr = file_obj.read().decode()
file_obj.close()
self.assertEqual(p.stdout, b"stdout")
self.assertEqual(stderr, "stderr")
self.assertEqual(len(p.stderr), 0)
# now with tee
file_obj = tempfile.NamedTemporaryFile()
p = python(py.name, _err=file_obj, _tee="err")
file_obj.seek(0)
stderr = file_obj.read().decode()
file_obj.close()
self.assertEqual(p.stdout, b"stdout")
self.assertEqual(stderr, "stderr")
self.assertGreater(len(p.stderr), 0)
# redirecting to a pty fd: output lands on the pty, and is only
# also kept internally when _tee=True.
def test_tty_tee(self):
py = create_tmp_test("""
import sys
sys.stdout.write("stdout")
""")
read, write = pty.openpty()
out = python("-u", py.name, _out=write).stdout
tee = os.read(read, 6)
self.assertEqual(out, b"")
self.assertEqual(tee, b"stdout")
os.close(write)
os.close(read)
read, write = pty.openpty()
out = python("-u", py.name, _out=write, _tee=True).stdout
tee = os.read(read, 6)
self.assertEqual(out, b"stdout")
self.assertEqual(tee, b"stdout")
os.close(write)
os.close(read)
# _err can also be a filename string (opened by sh itself).
def test_err_redirection_actual_file(self):
import tempfile
file_obj = tempfile.NamedTemporaryFile()
py = create_tmp_test("""
import sys
import os
sys.stdout.write("stdout")
sys.stderr.write("stderr")
""")
stdout = python("-u", py.name, _err=file_obj.name).wait()
file_obj.seek(0)
stderr = file_obj.read().decode()
file_obj.close()
self.assertTrue(stdout == "stdout")
self.assertTrue(stderr == "stderr")
# attribute access on a baked command appends a subcommand.
def test_subcommand_and_bake(self):
import getpass
py = create_tmp_test("""
import sys
import os
import subprocess
print("subcommand")
subprocess.Popen(sys.argv[1:], shell=False).wait()
""")
cmd1 = python.bake(py.name)
out = cmd1.whoami()
self.assertIn("subcommand", out)
self.assertIn(getpass.getuser(), out)
# bakes chain: each .bake() appends args in order.
def test_multiple_bakes(self):
py = create_tmp_test("""
import sys
sys.stdout.write(str(sys.argv[1:]))
""")
out = python.bake(py.name).bake("bake1").bake("bake2")()
self.assertEqual("['bake1', 'bake2']", out)
# _arg_preprocess lets a baked command rewrite args/kwargs per call.
def test_arg_preprocessor(self):
py = create_tmp_test("""
import sys
sys.stdout.write(str(sys.argv[1:]))
""")
def arg_preprocess(args, kwargs):
args.insert(0, "preprocessed")
kwargs["a-kwarg"] = 123
return args, kwargs
cmd = python.bake(py.name, _arg_preprocess=arg_preprocess)
out = cmd("arg")
self.assertEqual("['preprocessed', 'arg', '--a-kwarg=123']", out)
# baked-in args precede call-time args in the final command line.
def test_bake_args_come_first(self):
from sh import ls
ls = ls.bake(h=True)
ran = ls("-la").ran
ft = ran.index("-h")
self.assertIn("-la", ran[ft:])
# running the same command twice yields equal results.
def test_output_equivalence(self):
from sh import whoami
iam1 = whoami()
iam2 = whoami()
self.assertEqual(iam1, iam2)
# https://github.com/amoffat/sh/pull/252
# _out=<write fd>: sh must close its copy so the reader sees EOF
# promptly; the SIGALRM guards against a hang.
def test_stdout_pipe(self):
py = create_tmp_test(r"""
import sys
sys.stdout.write("foobar\n")
""")
read_fd, write_fd = os.pipe()
python(py.name, _out=write_fd, u=True)
def alarm(sig, action):
self.fail("Timeout while reading from pipe")
import signal
signal.signal(signal.SIGALRM, alarm)
signal.alarm(3)
data = os.read(read_fd, 100)
self.assertEqual(b"foobar\n", data)
signal.alarm(0)
signal.signal(signal.SIGALRM, signal.SIG_DFL)
# _out=<callable> is invoked once per line by default.
def test_stdout_callback(self):
py = create_tmp_test("""
import sys
import os
for i in range(5): print(i)
""")
stdout = []
def agg(line):
stdout.append(line)
p = python("-u", py.name, _out=agg)
p.wait()
self.assertEqual(len(stdout), 5)
# without wait(), a _bg command has not finished producing output.
def test_stdout_callback_no_wait(self):
import time
py = create_tmp_test("""
import sys
import os
import time
for i in range(5):
print(i)
time.sleep(.5)
""")
stdout = []
def agg(line): stdout.append(line)
python("-u", py.name, _out=agg, _bg=True)
# we give a little pause to make sure that the NamedTemporaryFile
# exists when the python process actually starts
time.sleep(.5)
self.assertNotEqual(len(stdout), 5)
# _out_bufsize=1 => line buffering: one callback per line.
def test_stdout_callback_line_buffered(self):
py = create_tmp_test("""
import sys
import os
for i in range(5): print("herpderp")
""")
stdout = []
def agg(line): stdout.append(line)
p = python("-u", py.name, _out=agg, _out_bufsize=1)
p.wait()
self.assertEqual(len(stdout), 5)
# _out_bufsize=0 => unbuffered: one callback per character.
def test_stdout_callback_line_unbuffered(self):
py = create_tmp_test("""
import sys
import os
for i in range(5): print("herpderp")
""")
stdout = []
def agg(char): stdout.append(char)
p = python("-u", py.name, _out=agg, _out_bufsize=0)
p.wait()
# + 5 newlines
self.assertEqual(len(stdout), len("herpderp") * 5 + 5)
# _out_bufsize=N => fixed-size chunks of N characters.
def test_stdout_callback_buffered(self):
py = create_tmp_test("""
import sys
import os
for i in range(5): sys.stdout.write("herpderp")
""")
stdout = []
def agg(chunk): stdout.append(chunk)
p = python("-u", py.name, _out=agg, _out_bufsize=4)
p.wait()
# 40 chars total / 4-char chunks == 10 callbacks
self.assertEqual(len(stdout), len("herp") / 2 * 5)
# a two-arg callback also receives the process's stdin queue, enabling
# simple interaction (answer a prompt when line "4" arrives).
def test_stdout_callback_with_input(self):
py = create_tmp_test("""
import sys
import os
IS_PY3 = sys.version_info[0] == 3
if IS_PY3: raw_input = input
for i in range(5): print(str(i))
derp = raw_input("herp? ")
print(derp)
""")
def agg(line, stdin):
if line.strip() == "4":
stdin.put("derp\n")
p = python("-u", py.name, _out=agg, _tee=True)
p.wait()
self.assertIn("derp", p)
# returning True from the callback unsubscribes it, but the process
# keeps running (later output still lands on p via _tee).
def test_stdout_callback_exit(self):
py = create_tmp_test("""
import sys
import os
for i in range(5): print(i)
""")
stdout = []
def agg(line):
line = line.strip()
stdout.append(line)
if line == "2":
return True
p = python("-u", py.name, _out=agg, _tee=True)
p.wait()
self.assertIn("4", p)
self.assertNotIn("4", stdout)
# a three-arg callback receives the process object and may terminate
# it; wait() then raises SignalException_SIGTERM.
def test_stdout_callback_terminate(self):
import signal
py = create_tmp_test("""
import sys
import os
import time
for i in range(5):
print(i)
time.sleep(.5)
""")
stdout = []
def agg(line, stdin, process):
line = line.strip()
stdout.append(line)
if line == "3":
process.terminate()
return True
import sh
caught_signal = False
try:
p = python("-u", py.name, _out=agg, _bg=True)
p.wait()
except sh.SignalException_SIGTERM:
caught_signal = True
self.assertTrue(caught_signal)
self.assertEqual(p.process.exit_code, -signal.SIGTERM)
self.assertNotIn("4", p)
self.assertNotIn("4", stdout)
# same as above but with kill()/SIGKILL.
def test_stdout_callback_kill(self):
import signal
py = create_tmp_test("""
import sys
import os
import time
for i in range(5):
print(i)
time.sleep(.5)
""")
stdout = []
def agg(line, stdin, process):
line = line.strip()
stdout.append(line)
if line == "3":
process.kill()
return True
import sh
caught_signal = False
try:
p = python("-u", py.name, _out=agg, _bg=True)
p.wait()
except sh.SignalException_SIGKILL:
caught_signal = True
self.assertTrue(caught_signal)
self.assertEqual(p.process.exit_code, -signal.SIGKILL)
self.assertNotIn("4", p)
self.assertNotIn("4", stdout)
# process.signal() delivers an arbitrary signal; the child's handler
# sets i=42, so output jumps from "3" to "42" and exit code stays 0.
def test_general_signal(self):
from signal import SIGINT
py = create_tmp_test("""
import sys
import os
import time
import signal
i = 0
def sig_handler(sig, frame):
global i
i = 42
signal.signal(signal.SIGINT, sig_handler)
for _ in range(6):
print(i)
i += 1
sys.stdout.flush()
time.sleep(0.5)
""")
stdout = []
def agg(line, stdin, process):
line = line.strip()
stdout.append(line)
if line == "3":
process.signal(SIGINT)
return True
p = python(py.name, _out=agg, _tee=True)
p.wait()
self.assertEqual(p.process.exit_code, 0)
self.assertEqual(p, "0\n1\n2\n3\n42\n43\n")
# _iter=True makes the RunningCommand a line generator.
def test_iter_generator(self):
py = create_tmp_test("""
import sys
import os
import time
for i in range(42):
print(i)
sys.stdout.flush()
""")
out = []
for line in python(py.name, _iter=True):
out.append(int(line.strip()))
self.assertEqual(len(out), 42)
# 0 + 1 + ... + 41 == 861
self.assertEqual(sum(out), 861)
def test_iter_unicode(self):
# issue https://github.com/amoffat/sh/issues/224
# multibyte chars straddling the internal buffer must not blow up
# line splitting
test_string = "\xe4\xbd\x95\xe4\xbd\x95\n" * 150 # len > buffer_s
txt = create_tmp_test(test_string)
for line in sh.cat(txt.name, _iter=True):
break
self.assertLess(len(line), 1024)
# _iter_noblock yields EWOULDBLOCK while no data is ready instead of
# blocking; works for stdout and (with "err") stderr.
def test_nonblocking_iter(self):
from errno import EWOULDBLOCK
py = create_tmp_test("""
import time
import sys
time.sleep(1)
sys.stdout.write("stdout")
""")
count = 0
value = None
for line in python(py.name, _iter_noblock=True):
if line == EWOULDBLOCK:
count += 1
else:
value = line
self.assertGreater(count, 0)
self.assertEqual(value, "stdout")
py = create_tmp_test("""
import time
import sys
time.sleep(1)
sys.stderr.write("stderr")
""")
count = 0
value = None
for line in python(py.name, _iter_noblock="err"):
if line == EWOULDBLOCK:
count += 1
else:
value = line
self.assertGreater(count, 0)
self.assertEqual(value, "stderr")
# _iter="err" iterates stderr lines; stdout stays untouched.
def test_for_generator_to_err(self):
py = create_tmp_test("""
import sys
import os
for i in range(42):
sys.stderr.write(str(i)+"\\n")
""")
out = []
for line in python("-u", py.name, _iter="err"):
out.append(line)
self.assertEqual(len(out), 42)
# verify that nothing is going to stdout
out = []
for line in python("-u", py.name, _iter="out"):
out.append(line)
self.assertEqual(len(out), 0)
# a SIGPIPE death of the upstream in a _piped chain is tolerated, not
# raised, because _piped implies best-effort delivery.
def test_sigpipe(self):
py1 = create_tmp_test("""
import sys
import os
import time
import signal
# by default, python disables SIGPIPE, in favor of using IOError exceptions, so
# let's put that back to the system default where we terminate with a signal
# exit code
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
for letter in "andrew":
time.sleep(0.6)
print(letter)
""")
py2 = create_tmp_test("""
import sys
import os
import time
while True:
line = sys.stdin.readline()
if not line:
break
print(line.strip().upper())
exit(0)
""")
p1 = python("-u", py1.name, _piped="out")
p2 = python(p1, "-u", py2.name)
# SIGPIPE should happen, but it shouldn't be an error, since _piped is
# truthful
self.assertEqual(-p1.exit_code, signal.SIGPIPE)
self.assertEqual(p2.exit_code, 0)
# a _piped upstream feeding an _iter downstream streams line-by-line;
# inter-arrival gaps prove data isn't buffered to completion first.
def test_piped_generator(self):
import time
py1 = create_tmp_test("""
import sys
import os
import time
for letter in "andrew":
time.sleep(0.6)
print(letter)
""")
py2 = create_tmp_test("""
import sys
import os
import time
while True:
line = sys.stdin.readline()
if not line:
break
print(line.strip().upper())
""")
times = []
last_received = None
letters = ""
for line in python(python("-u", py1.name, _piped="out"), "-u",
py2.name, _iter=True):
letters += line.strip()
now = time.time()
if last_received:
times.append(now - last_received)
last_received = now
self.assertEqual("ANDREW", letters)
self.assertTrue(all([t > .3 for t in times]))
# _iter on stdout and a callback on stderr work simultaneously.
def test_generator_and_callback(self):
py = create_tmp_test("""
import sys
import os
for i in range(42):
sys.stderr.write(str(i * 2)+"\\n")
print(i)
""")
stderr = []
def agg(line):
stderr.append(int(line.strip()))
out = []
for line in python("-u", py.name, _iter=True, _err=agg):
out.append(line)
self.assertEqual(len(out), 42)
# sum(2*i for i in range(42)) == 1722
self.assertEqual(sum(stderr), 1722)
# casting a _bg RunningCommand implicitly waits for its output.
# NOTE(review): `long` presumably aliased to int for py3 elsewhere in
# this file — confirm before touching.
def test_cast_bg(self):
py = create_tmp_test("""
import sys
import time
time.sleep(0.5)
sys.stdout.write(sys.argv[1])
""")
self.assertEqual(int(python(py.name, "123", _bg=True)), 123)
self.assertEqual(long(python(py.name, "456", _bg=True)), 456)
self.assertEqual(float(python(py.name, "789", _bg=True)), 789.0)
# baked commands compare equal iff their baked args match.
def test_cmd_eq(self):
py = create_tmp_test("")
cmd1 = python.bake(py.name, "-u")
cmd2 = python.bake(py.name, "-u")
cmd3 = python.bake(py.name)
self.assertEqual(cmd1, cmd2)
self.assertNotEqual(cmd1, cmd3)
def test_fg(self):
py = create_tmp_test("exit(0)")
# notice we're using `system_python`, and not `python`. this is because
# `python` has an env baked into it, and we want `_env` to be None for
# coverage
system_python(py.name, _fg=True)
def test_fg_false(self):
""" https://github.com/amoffat/sh/issues/520 """
py = create_tmp_test("print('hello')")
buf = StringIO()
python(py.name, _fg=False, _out=buf)
self.assertEqual(buf.getvalue(), "hello\n")
def test_fg_true(self):
""" https://github.com/amoffat/sh/issues/520 """
# _fg=True is incompatible with output redirection
py = create_tmp_test("print('hello')")
buf = StringIO()
self.assertRaises(TypeError, python, py.name, _fg=True, _out=buf)
# _fg honors _env, including propagated failure exit codes.
def test_fg_env(self):
py = create_tmp_test("""
import os
code = int(os.environ.get("EXIT", "0"))
exit(code)
""")
env = os.environ.copy()
env["EXIT"] = "3"
self.assertRaises(sh.ErrorReturnCode_3, python, py.name, _fg=True,
_env=env)
# wiring the real std streams manually is an alternative to _fg.
def test_fg_alternative(self):
py = create_tmp_test("exit(0)")
python(py.name, _in=sys.stdin, _out=sys.stdout, _err=sys.stderr)
# _fg still raises ErrorReturnCode on failure.
def test_fg_exc(self):
py = create_tmp_test("exit(1)")
self.assertRaises(sh.ErrorReturnCode_1, python, py.name, _fg=True)
# _out may be a filename string; sh opens and writes it.
def test_out_filename(self):
outfile = tempfile.NamedTemporaryFile()
py = create_tmp_test("print('output')")
python(py.name, _out=outfile.name)
outfile.seek(0)
self.assertEqual(b"output\n", outfile.read())
# accessing .exit_code on a _bg command implicitly waits.
def test_bg_exit_code(self):
py = create_tmp_test("""
import time
time.sleep(1)
exit(49)
""")
p = python(py.name, _ok_code=49, _bg=True)
self.assertEqual(49, p.exit_code)
# _cwd runs the command in the given directory.
def test_cwd(self):
from sh import pwd
from os.path import realpath
self.assertEqual(str(pwd(_cwd="/tmp")), realpath("/tmp") + "\n")
self.assertEqual(str(pwd(_cwd="/etc")), realpath("/etc") + "\n")
# _cwd combined with _fg must restore the parent's cwd afterwards.
def test_cwd_fg(self):
td = realpath(tempfile.mkdtemp())
py = create_tmp_test("""
import sh
import os
from os.path import realpath
orig = realpath(os.getcwd())
print(orig)
sh.pwd(_cwd="{newdir}", _fg=True)
print(realpath(os.getcwd()))
""".format(newdir=td))
orig, newdir, restored = python(py.name).strip().split("\n")
newdir = realpath(newdir)
self.assertEqual(newdir, td)
self.assertEqual(orig, restored)
self.assertNotEqual(orig, newdir)
os.rmdir(td)
# large payloads survive a round-trip through two tr processes.
def test_huge_piped_data(self):
from sh import tr
stdin = tempfile.NamedTemporaryFile()
data = "herpderp" * 4000 + "\n"
stdin.write(data.encode())
stdin.flush()
stdin.seek(0)
out = tr(tr("[:lower:]", "[:upper:]", _in=data), "[:upper:]", "[:lower:]")
self.assertTrue(out == data)
# _tty_in=True gives the child a tty on stdin; the callback answers
# the password prompt and captures the echoed stars.
def test_tty_input(self):
py = create_tmp_test("""
import sys
import os
if os.isatty(sys.stdin.fileno()):
sys.stdout.write("password?\\n")
sys.stdout.flush()
pw = sys.stdin.readline().strip()
sys.stdout.write("%s\\n" % ("*" * len(pw)))
sys.stdout.flush()
else:
sys.stdout.write("no tty attached!\\n")
sys.stdout.flush()
""")
test_pw = "test123"
expected_stars = "*" * len(test_pw)
d = {}
def password_enterer(line, stdin):
line = line.strip()
if not line:
return
if line == "password?":
stdin.put(test_pw + "\n")
elif line.startswith("*"):
d["stars"] = line
return True
pw_stars = python(py.name, _tty_in=True, _out=password_enterer)
pw_stars.wait()
self.assertEqual(d["stars"], expected_stars)
response = python(py.name)
self.assertEqual(response, "no tty attached!\n")
# _tty_out toggles whether the child sees a tty on stdout.
def test_tty_output(self):
py = create_tmp_test("""
import sys
import os
if os.isatty(sys.stdout.fileno()):
sys.stdout.write("tty attached")
sys.stdout.flush()
else:
sys.stdout.write("no tty attached")
sys.stdout.flush()
""")
out = python(py.name, _tty_out=True)
self.assertEqual(out, "tty attached")
out = python(py.name, _tty_out=False)
self.assertEqual(out, "no tty attached")
def test_stringio_output(self):
from sh import echo
out = StringIO()
echo("-n", "testing 123", _out=out)
self.assertEqual(out.getvalue(), "testing 123")
out = cStringIO()
echo("-n", "testing 123", _out=out)
self.assertEqual(out.getvalue().decode(), "testing 123")
out = ioStringIO()
echo("-n", "testing 123", _out=out)
self.assertEqual(out.getvalue(), "testing 123")
out = iocStringIO()
echo("-n", "testing 123", _out=out)
self.assertEqual(out.getvalue().decode(), "testing 123")
def test_stringio_input(self):
from sh import cat
input = StringIO()
input.write("herpderp")
input.seek(0)
out = cat(_in=input)
self.assertEqual(out, "herpderp")
def test_internal_bufsize(self):
from sh import cat
output = cat(_in="a" * 1000, _internal_bufsize=100, _out_bufsize=0)
self.assertEqual(len(output), 100)
output = cat(_in="a" * 1000, _internal_bufsize=50, _out_bufsize=2)
self.assertEqual(len(output), 100)
    def test_change_stdout_buffering(self):
        """change_out_bufsize() can switch a running process's output buffering on the fly."""
        py = create_tmp_test("""
import sys
import os

# this proves that we won't get the output into our callback until we send
# a newline
sys.stdout.write("switch ")
sys.stdout.flush()
sys.stdout.write("buffering\\n")
sys.stdout.flush()

sys.stdin.read(1)
sys.stdout.write("unbuffered")
sys.stdout.flush()

# this is to keep the output from being flushed by the process ending, which
# would ruin our test. we want to make sure we get the string "unbuffered"
# before the process ends, without writing a newline
sys.stdin.read(1)
""")

        d = {
            "newline_buffer_success": False,
            "unbuffered_success": False,
        }

        def interact(line, stdin, process):
            line = line.strip()
            if not line:
                return

            if line == "switch buffering":
                d["newline_buffer_success"] = True
                # from here on, deliver output unbuffered
                process.change_out_bufsize(0)
                stdin.put("a")

            elif line == "unbuffered":
                stdin.put("b")
                d["unbuffered_success"] = True
                return True

        # start with line buffered stdout
        pw_stars = python("-u", py.name, _out=interact, _out_bufsize=1)
        pw_stars.wait()

        self.assertTrue(d["newline_buffer_success"])
        self.assertTrue(d["unbuffered_success"])
def test_callable_interact(self):
py = create_tmp_test("""
import sys
sys.stdout.write("line1")
""")
class Callable(object):
def __init__(self):
self.line = None
def __call__(self, line):
self.line = line
cb = Callable()
python(py.name, _out=cb)
self.assertEqual(cb.line, "line1")
def test_encoding(self):
return self.skipTest("what's the best way to test a different '_encoding' special keyword argument?")
    def test_timeout(self):
        """_timeout kills the process and raises TimeoutException after ~timeout seconds."""
        import sh
        from time import time

        sleep_for = 3
        timeout = 1
        started = time()
        try:
            sh.sleep(sleep_for, _timeout=timeout).wait()
        except sh.TimeoutException as e:
            # the exception records the full command that timed out
            assert 'sleep 3' in e.full_cmd
        else:
            self.fail("no timeout exception")
        elapsed = time() - started

        # we should have stopped at ~1s, well before the 3s sleep would finish
        self.assertLess(abs(elapsed - timeout), 0.5)
    def test_timeout_overstep(self):
        """A process that finishes before _timeout expires is unaffected by it."""
        started = time.time()
        sh.sleep(1, _timeout=5)
        elapsed = time.time() - started
        self.assertLess(abs(elapsed - 1), 0.5)
    def test_timeout_wait(self):
        """wait(timeout=...) raises TimeoutException if the process outlives the timeout."""
        p = sh.sleep(3, _bg=True)
        self.assertRaises(sh.TimeoutException, p.wait, timeout=1)
    def test_timeout_wait_overstep(self):
        """wait(timeout=...) returns normally when the process exits before the timeout."""
        p = sh.sleep(1, _bg=True)
        p.wait(timeout=5)
    def test_timeout_wait_negative(self):
        """A negative timeout passed to wait() is rejected with RuntimeError."""
        p = sh.sleep(3, _bg=True)
        self.assertRaises(RuntimeError, p.wait, timeout=-3)
    def test_binary_pipe(self):
        """Binary data survives being piped from one command into another unmodified."""
        binary = b'\xec;\xedr\xdbF'

        # producer: writes raw bytes on an unbuffered binary stdout
        py1 = create_tmp_test("""
import sys
import os

sys.stdout = os.fdopen(sys.stdout.fileno(), "wb", 0)
sys.stdout.write(b'\\xec;\\xedr\\xdbF')
""")

        # pass-through: copies binary stdin to binary stdout
        py2 = create_tmp_test("""
import sys
import os

sys.stdin = os.fdopen(sys.stdin.fileno(), "rb", 0)
sys.stdout = os.fdopen(sys.stdout.fileno(), "wb", 0)
sys.stdout.write(sys.stdin.read())
""")
        out = python(python(py1.name), py2.name)
        self.assertEqual(out.stdout, binary)
    # designed to trigger the "... (%d more, please see e.stdout)" output
    # of the ErrorReturnCode class
    def test_failure_with_large_output(self):
        """A failing command with lots of output still raises ErrorReturnCode_1."""
        from sh import ErrorReturnCode_1

        py = create_tmp_test("""
print("andrewmoffat" * 1000)
exit(1)
""")
        self.assertRaises(ErrorReturnCode_1, python, py.name)
# designed to check if the ErrorReturnCode constructor does not raise
# an UnicodeDecodeError
def test_non_ascii_error(self):
from sh import ls, ErrorReturnCode
test = "/á"
# coerce to unicode
if IS_PY3:
pass
else:
test = test.decode("utf8")
self.assertRaises(ErrorReturnCode, ls, test)
    def test_no_out(self):
        """_no_out discards stdout internally while stderr is still captured."""
        py = create_tmp_test("""
import sys
sys.stdout.write("stdout")
sys.stderr.write("stderr")
""")
        p = python(py.name, _no_out=True)
        self.assertEqual(p.stdout, b"")
        self.assertEqual(p.stderr, b"stderr")
        self.assertTrue(p.process._pipe_queue.empty())

        # an _out callback likewise prevents stdout from being aggregated
        def callback(line): pass
        p = python(py.name, _out=callback)
        self.assertEqual(p.stdout, b"")
        self.assertEqual(p.stderr, b"stderr")
        self.assertTrue(p.process._pipe_queue.empty())

        # default behavior: stdout is captured and queued
        p = python(py.name)
        self.assertEqual(p.stdout, b"stdout")
        self.assertEqual(p.stderr, b"stderr")
        self.assertFalse(p.process._pipe_queue.empty())
    def test_tty_stdin(self):
        """Input fed through a tty (_tty_in=True) reaches the child intact."""
        py = create_tmp_test("""
import sys
sys.stdout.write(sys.stdin.read())
sys.stdout.flush()
""")
        out = python(py.name, _in="test\n", _tty_in=True)
        self.assertEqual("test\n", out)
    def test_no_err(self):
        """_no_err discards stderr internally while stdout is still captured."""
        py = create_tmp_test("""
import sys
sys.stdout.write("stdout")
sys.stderr.write("stderr")
""")
        p = python(py.name, _no_err=True)
        self.assertEqual(p.stderr, b"")
        self.assertEqual(p.stdout, b"stdout")
        self.assertFalse(p.process._pipe_queue.empty())

        # an _err callback likewise prevents stderr from being aggregated
        def callback(line): pass
        p = python(py.name, _err=callback)
        self.assertEqual(p.stderr, b"")
        self.assertEqual(p.stdout, b"stdout")
        self.assertFalse(p.process._pipe_queue.empty())

        # default behavior: stderr is captured
        p = python(py.name)
        self.assertEqual(p.stderr, b"stderr")
        self.assertEqual(p.stdout, b"stdout")
        self.assertFalse(p.process._pipe_queue.empty())
    def test_no_pipe(self):
        """_no_pipe (or an _out callback) keeps output out of the internal pipe queue."""
        from sh import ls

        # calling a command regular should fill up the pipe_queue
        p = ls()
        self.assertFalse(p.process._pipe_queue.empty())

        # calling a command with a callback should not
        def callback(line): pass
        p = ls(_out=callback)
        self.assertTrue(p.process._pipe_queue.empty())

        # calling a command regular with no_pipe also should not
        p = ls(_no_pipe=True)
        self.assertTrue(p.process._pipe_queue.empty())
    def test_decode_error_handling(self):
        """_decode_errors controls how undecodable output bytes are handled."""
        from functools import partial

        py = create_tmp_test("""
# -*- coding: utf8 -*-
import sys
import os

sys.stdout = os.fdopen(sys.stdout.fileno(), 'wb')

IS_PY3 = sys.version_info[0] == 3
if IS_PY3:
    sys.stdout.write(bytes("te漢字st", "utf8"))
else:
    sys.stdout.write("te漢字st")
""")
        # with strict (default) error handling, forcing ascii must blow up on str()
        fn = partial(python, py.name, _encoding="ascii")
        def s(fn): str(fn())
        self.assertRaises(UnicodeDecodeError, s, fn)

        # with "ignore", the undecodable bytes are silently dropped
        p = python(py.name, _encoding="ascii", _decode_errors="ignore")
        self.assertEqual(p, "test")
    def test_signal_exception(self):
        """terminate() makes wait() raise the matching SignalException_15 (SIGTERM)."""
        from sh import SignalException_15

        def throw_terminate_signal():
            py = create_tmp_test("""
import time
while True: time.sleep(1)
""")
            to_kill = python(py.name, _bg=True)
            to_kill.terminate()
            to_kill.wait()

        self.assertRaises(SignalException_15, throw_terminate_signal)
    def test_signal_group(self):
        """kill() only hits the parent; kill_group() takes out the whole process group."""
        child = create_tmp_test("""
import time
time.sleep(3)
""")

        # the parent launches the child in its own process group and reports both ids
        parent = create_tmp_test("""
import sys
import sh
python = sh.Command(sys.executable)
p = python("{child_file}", _bg=True, _new_session=False)
print(p.pid)
print(p.process.pgid)
p.wait()
""", child_file=child.name)

        def launch():
            p = python(parent.name, _bg=True, _iter=True)
            child_pid = int(next(p).strip())
            child_pgid = int(next(p).strip())
            parent_pid = p.pid
            parent_pgid = p.process.pgid
            return p, child_pid, child_pgid, parent_pid, parent_pgid

        def assert_alive(pid):
            # signal 0 probes for existence without delivering anything
            os.kill(pid, 0)

        def assert_dead(pid):
            self.assert_oserror(errno.ESRCH, os.kill, pid, 0)

        # first let's prove that calling regular SIGKILL on the parent does
        # nothing to the child, since the child was launched in the same process
        # group (_new_session=False) and the parent is not a controlling process
        p, child_pid, child_pgid, parent_pid, parent_pgid = launch()
        assert_alive(parent_pid)
        assert_alive(child_pid)

        p.kill()
        time.sleep(0.1)
        assert_dead(parent_pid)
        assert_alive(child_pid)
        self.assertRaises(sh.SignalException_SIGKILL, p.wait)
        assert_dead(child_pid)

        # now let's prove that killing the process group kills both the parent
        # and the child
        p, child_pid, child_pgid, parent_pid, parent_pgid = launch()
        assert_alive(parent_pid)
        assert_alive(child_pid)

        p.kill_group()
        time.sleep(0.1)
        assert_dead(parent_pid)
        assert_dead(child_pid)
def test_pushd(self):
""" test basic pushd functionality """
child = realpath(tempfile.mkdtemp())
old_wd1 = sh.pwd().strip()
old_wd2 = os.getcwd()
self.assertEqual(old_wd1, old_wd2)
self.assertNotEqual(old_wd1, child)
with sh.pushd(child):
new_wd1 = sh.pwd().strip()
new_wd2 = os.getcwd()
old_wd3 = sh.pwd().strip()
old_wd4 = os.getcwd()
self.assertEqual(old_wd3, old_wd4)
self.assertEqual(old_wd1, old_wd3)
self.assertEqual(new_wd1, child)
self.assertEqual(new_wd2, child)
    def test_pushd_cd(self):
        """ test that pushd works like pushd/popd with built-in cd correctly """
        import sh

        child = realpath(tempfile.mkdtemp())
        try:
            old_wd = os.getcwd()
            with sh.pushd(tempdir):
                self.assertEqual(tempdir, os.getcwd())

                # a cd inside the context must not change where pushd pops back to
                sh.cd(child)
                self.assertEqual(child, os.getcwd())

            self.assertEqual(old_wd, os.getcwd())
        finally:
            os.rmdir(child)
def test_cd_homedir(self):
orig = os.getcwd()
my_dir = os.path.realpath(os.path.expanduser("~")) # Use realpath because homedir may be a symlink
sh.cd()
self.assertNotEqual(orig, os.getcwd())
self.assertEqual(my_dir, os.getcwd())
    def test_non_existant_cwd(self):
        """A _cwd that doesn't exist causes a ForkException from the child setup."""
        from sh import ls

        # sanity check
        non_exist_dir = join(tempdir, "aowjgoahewro")
        self.assertFalse(exists(non_exist_dir))
        self.assertRaises(sh.ForkException, ls, _cwd=non_exist_dir)
    # https://github.com/amoffat/sh/issues/176
    def test_baked_command_can_be_printed(self):
        """str() of a baked command includes its baked arguments."""
        from sh import ls

        ll = ls.bake("-l")
        self.assertTrue(str(ll).endswith("/ls -l"))
    # https://github.com/amoffat/sh/issues/185
    def test_done_callback(self):
        """A _done callback runs the command in the background and receives success/exit_code."""
        import time

        class Callback(object):
            def __init__(self):
                self.called = False
                self.exit_code = None
                self.success = None

            def __call__(self, p, success, exit_code):
                self.called = True
                self.exit_code = exit_code
                self.success = success

        py = create_tmp_test("""
from time import time, sleep
sleep(1)
print(time())
""")

        callback = Callback()
        p = python(py.name, _done=callback, _bg=True)

        # do a little setup to prove that a command with a _done callback is run
        # in the background
        wait_start = time.time()
        p.wait()
        wait_elapsed = time.time() - wait_start

        self.assertTrue(callback.called)
        self.assertLess(abs(wait_elapsed - 1.0), 1.0)
        self.assertEqual(callback.exit_code, 0)
        self.assertTrue(callback.success)
    def test_fork_exc(self):
        """An exception raised in _preexec_fn surfaces as a ForkException."""
        from sh import ForkException

        py = create_tmp_test("")

        def fail():
            raise RuntimeError("nooo")

        self.assertRaises(ForkException, python, py.name, _preexec_fn=fail)
    def test_new_session(self):
        """_new_session controls whether the child becomes its own session/group leader."""
        from threading import Event

        # the child reports its pid, process group id and session id
        py = create_tmp_test("""
import os
import time

pid = os.getpid()
pgid = os.getpgid(pid)
sid = os.getsid(pid)

stuff = [pid, pgid, sid]

print(",".join([str(el) for el in stuff]))
time.sleep(0.5)
""")

        event = Event()

        def handle(line, stdin, p):
            pid, pgid, sid = line.strip().split(",")
            pid = int(pid)
            pgid = int(pgid)
            sid = int(sid)

            # in a new session, the child is both its own session and group leader
            self.assertEqual(p.pid, pid)
            self.assertEqual(pid, pgid)
            self.assertEqual(p.pgid, pgid)
            self.assertEqual(pgid, p.get_pgid())
            self.assertEqual(pid, sid)
            self.assertEqual(sid, pgid)
            self.assertEqual(p.sid, sid)
            self.assertEqual(sid, p.get_sid())

            event.set()

        # new session
        p = python(py.name, _out=handle)
        p.wait()
        self.assertTrue(event.is_set())

        event.clear()

        def handle(line, stdin, p):
            pid, pgid, sid = line.strip().split(",")
            pid = int(pid)
            pgid = int(pgid)
            sid = int(sid)

            test_pid = os.getpgid(os.getpid())

            # without a new session, the child keeps our session but gets its own group
            self.assertEqual(p.pid, pid)
            self.assertNotEqual(test_pid, pgid)
            self.assertEqual(p.pgid, pgid)
            self.assertEqual(pgid, p.get_pgid())
            self.assertNotEqual(pid, sid)
            self.assertNotEqual(sid, pgid)
            self.assertEqual(p.sid, sid)
            self.assertEqual(sid, p.get_sid())

            event.set()

        # no new session
        p = python(py.name, _out=handle, _new_session=False)
        p.wait()
        self.assertTrue(event.is_set())
    def test_done_cb_exc(self):
        """The _done callback fires with success=False when the command fails."""
        from sh import ErrorReturnCode

        class Callback(object):
            def __init__(self):
                self.called = False
                self.success = None

            def __call__(self, p, success, exit_code):
                self.success = success
                self.called = True

        py = create_tmp_test("exit(1)")

        callback = Callback()
        try:
            p = python(py.name, _done=callback, _bg=True)
            p.wait()
        except ErrorReturnCode:
            self.assertTrue(callback.called)
            self.assertFalse(callback.success)
        else:
            self.fail("command should've thrown an exception")
def test_callable_stdin(self):
py = create_tmp_test("""
import sys
sys.stdout.write(sys.stdin.read())
""")
def create_stdin():
state = {"count": 0}
def stdin():
count = state["count"]
if count == 4:
return None
state["count"] += 1
return str(count)
return stdin
out = python(py.name, _in=create_stdin())
self.assertEqual("0123", out)
    def test_stdin_unbuffered_bufsize(self):
        """_in_bufsize=0 sends stdin chunks to the child immediately, unbuffered."""
        from time import sleep

        # this tries to receive some known data and measures the time it takes
        # to receive it. since we're flushing by newline, we should only be
        # able to receive the data when a newline is fed in
        py = create_tmp_test("""
import sys
from time import time

started = time()
data = sys.stdin.read(len("testing"))
waited = time() - started
sys.stdout.write(data + "\\n")
sys.stdout.write(str(waited) + "\\n")

started = time()
data = sys.stdin.read(len("done"))
waited = time() - started
sys.stdout.write(data + "\\n")
sys.stdout.write(str(waited) + "\\n")

sys.stdout.flush()
""")

        def create_stdin():
            yield "test"
            sleep(1)
            yield "ing"
            sleep(1)
            yield "done"

        out = python(py.name, _in=create_stdin(), _in_bufsize=0)
        word1, time1, word2, time2, _ = out.split("\n")

        time1 = float(time1)
        time2 = float(time2)

        # each word completed roughly one sleep after the previous chunk
        self.assertEqual(word1, "testing")
        self.assertLess(abs(1 - time1), 0.5)

        self.assertEqual(word2, "done")
        self.assertLess(abs(1 - time2), 0.5)
    def test_stdin_newline_bufsize(self):
        """_in_bufsize=1 line-buffers stdin: data is only delivered on newline."""
        from time import sleep

        # this tries to receive some known data and measures the time it takes
        # to receive it. since we're flushing by newline, we should only be
        # able to receive the data when a newline is fed in
        py = create_tmp_test("""
import sys
from time import time

started = time()
data = sys.stdin.read(len("testing\\n"))
waited = time() - started
sys.stdout.write(data)
sys.stdout.write(str(waited) + "\\n")

started = time()
data = sys.stdin.read(len("done\\n"))
waited = time() - started
sys.stdout.write(data)
sys.stdout.write(str(waited) + "\\n")

sys.stdout.flush()
""")

        # we'll feed in text incrementally, sleeping strategically before
        # sending a newline. we then measure the amount that we slept
        # indirectly in the child process
        def create_stdin():
            yield "test"
            sleep(1)
            yield "ing\n"
            sleep(1)
            yield "done\n"

        out = python(py.name, _in=create_stdin(), _in_bufsize=1)
        word1, time1, word2, time2, _ = out.split("\n")

        time1 = float(time1)
        time2 = float(time2)

        self.assertEqual(word1, "testing")
        self.assertLess(abs(1 - time1), 0.5)

        self.assertEqual(word2, "done")
        self.assertLess(abs(1 - time2), 0.5)
    def test_custom_timeout_signal(self):
        """_timeout_signal chooses which signal is delivered when _timeout fires."""
        from sh import TimeoutException
        import signal

        py = create_tmp_test("""
import time
time.sleep(3)
""")
        try:
            python(py.name, _timeout=1, _timeout_signal=signal.SIGQUIT)
        except TimeoutException as e:
            # the exit code reflects the signal that killed the process
            self.assertEqual(e.exit_code, signal.SIGQUIT)
        else:
            self.fail("we should have handled a TimeoutException")
    def test_append_stdout(self):
        """Successive runs with the same append-mode _out file handle append, not overwrite."""
        py = create_tmp_test("""
import sys
num = sys.stdin.read()
sys.stdout.write(num)
""")
        append_file = tempfile.NamedTemporaryFile(mode="a+b")
        python(py.name, _in="1", _out=append_file)
        python(py.name, _in="2", _out=append_file)

        append_file.seek(0)
        output = append_file.read()
        self.assertEqual(b"12", output)
    def test_shadowed_subcommand(self):
        """A trailing underscore ("bake_") reaches a subcommand shadowed by a Command method."""
        py = create_tmp_test("""
import sys
sys.stdout.write(sys.argv[1])
""")
        # "bake" the script, then invoke the *subcommand* named "bake" via bake_()
        out = python.bake(py.name).bake_()
        self.assertEqual("bake", out)
    def test_no_proc_no_attr(self):
        """Process attributes like exit_code are unavailable inside the with-context."""
        py = create_tmp_test("")
        with python(py.name) as p:
            self.assertRaises(AttributeError, getattr, p, "exit_code")
    def test_partially_applied_callback(self):
        """functools.partial callbacks work as _out handlers for both callback arities."""
        from functools import partial

        py = create_tmp_test("""
for i in range(10):
    print(i)
""")

        output = []
        # two-arg flavor: (partial arg, line)
        def fn(foo, line):
            output.append((foo, int(line.strip())))

        log_line = partial(fn, "hello")
        python(py.name, _out=log_line)
        self.assertEqual(output, [("hello", i) for i in range(10)])

        output = []
        # four-arg flavor: (partial arg, line, stdin, process)
        def fn(foo, line, stdin, proc):
            output.append((foo, int(line.strip())))

        log_line = partial(fn, "hello")
        python(py.name, _out=log_line)
        self.assertEqual(output, [("hello", i) for i in range(10)])
    # https://github.com/amoffat/sh/issues/266
    def test_grandchild_no_sighup(self):
        """A grandchild process must not receive SIGHUP when its parent exits first."""
        import time

        # child process that will write to a file if it receives a SIGHUP
        child = create_tmp_test("""
import signal
import sys
import time

output_file = sys.argv[1]
with open(output_file, "w") as f:
    def handle_sighup(signum, frame):
        f.write("got signal %d" % signum)
        sys.exit(signum)
    signal.signal(signal.SIGHUP, handle_sighup)

    time.sleep(2)
    f.write("made it!\\n")
""")

        # the parent that will terminate before the child writes to the output
        # file, potentially causing a SIGHUP
        parent = create_tmp_test("""
import os
import time
import sys

child_file = sys.argv[1]
output_file = sys.argv[2]

python_name = os.path.basename(sys.executable)
os.spawnlp(os.P_NOWAIT, python_name, python_name, child_file, output_file)
time.sleep(1) # give child a chance to set up
""")

        output_file = tempfile.NamedTemporaryFile(delete=True)
        python(parent.name, child.name, output_file.name)
        time.sleep(3)

        out = output_file.readlines()[0]
        self.assertEqual(out, b"made it!\n")
    def test_unchecked_producer_failure(self):
        """A failing producer in a direct pipe propagates its exit code to the consumer call."""
        from sh import ErrorReturnCode_2

        producer = create_tmp_test("""
import sys
for i in range(10):
    print(i)
sys.exit(2)
""")

        consumer = create_tmp_test("""
import sys
for line in sys.stdin:
    pass
""")

        direct_pipe = python(producer.name, _piped=True)
        self.assertRaises(ErrorReturnCode_2, python, direct_pipe, consumer.name)
    def test_unchecked_pipeline_failure(self):
        """A producer failure propagates through a multi-stage pipeline."""
        # similar to test_unchecked_producer_failure, but this
        # tests a multi-stage pipeline
        from sh import ErrorReturnCode_2

        producer = create_tmp_test("""
import sys
for i in range(10):
    print(i)
sys.exit(2)
""")

        middleman = create_tmp_test("""
import sys
for line in sys.stdin:
    print("> " + line)
""")

        consumer = create_tmp_test("""
import sys
for line in sys.stdin:
    pass
""")

        producer_normal_pipe = python(producer.name, _piped=True)
        middleman_normal_pipe = python(producer_normal_pipe, middleman.name, _piped=True)
        self.assertRaises(ErrorReturnCode_2, python, middleman_normal_pipe, consumer.name)
@skip_unless(HAS_MOCK, "requires unittest.mock")
class MockTests(BaseTests):
    """Tests that sh commands can be patched with unittest.mock."""

    def test_patch_command_cls(self):
        """Patching the sh.Command class replaces command resolution entirely."""
        def fn():
            cmd = sh.Command("afowejfow")
            return cmd()

        @unittest.mock.patch("sh.Command")
        def test(Command):
            Command().return_value = "some output"
            return fn()

        self.assertEqual(test(), "some output")
        # once the patch is gone, the bogus command fails to resolve again
        self.assertRaises(sh.CommandNotFound, fn)

    def test_patch_command(self):
        """Individual dynamic commands on the sh module can be patched with create=True."""
        def fn():
            return sh.afowejfow()

        @unittest.mock.patch("sh.afowejfow", create=True)
        def test(cmd):
            cmd.return_value = "some output"
            return fn()

        self.assertEqual(test(), "some output")
        self.assertRaises(sh.CommandNotFound, fn)
class MiscTests(BaseTests):
    """Assorted tests that don't fit the other suites."""

    def test_pickling(self):
        """ErrorReturnCode exceptions survive a pickle round-trip intact."""
        import pickle

        py = create_tmp_test("""
import sys
sys.stdout.write("some output")
sys.stderr.write("some error")
exit(1)
""")
        try:
            python(py.name)
        except sh.ErrorReturnCode as e:
            restored = pickle.loads(pickle.dumps(e))
            self.assertEqual(restored.stdout, b"some output")
            self.assertEqual(restored.stderr, b"some error")
            self.assertEqual(restored.exit_code, 1)
        else:
            self.fail("Didn't get an exception")

    @requires_poller("poll")
    def test_fd_over_1024(self):
        """Commands still work when open fds exceed select()'s 1024 limit (needs poll)."""
        py = create_tmp_test("""print("hi world")""")

        with ulimit(resource.RLIMIT_NOFILE, 2048):
            cutoff_fd = 1024
            pipes = []
            # open pipes until some fd passes the cutoff
            for i in xrange(cutoff_fd):
                master, slave = os.pipe()
                pipes.append((master, slave))
                if slave >= cutoff_fd:
                    break

            python(py.name)

            for master, slave in pipes:
                os.close(master)
                os.close(slave)

    def test_args_deprecated(self):
        """sh.args() is deprecated and raises/warns accordingly."""
        self.assertRaises(DeprecationWarning, sh.args, _env={})

    def test_percent_doesnt_fail_logging(self):
        """ test that a command name doesn't interfere with string formatting in
        the internal loggers """
        py = create_tmp_test("""
print("cool")
""")
        python(py.name, "%")
        python(py.name, "%%")
        python(py.name, "%%%")

    # TODO
    # for some reason, i can't get a good stable baseline measured in this test
    # on osx. so skip it for now if osx
    @not_macos
    @requires_progs("lsof")
    def test_no_fd_leak(self):
        """Repeated command runs must not leak file descriptors, for any tty/err combo."""
        import sh
        import os
        from itertools import product

        # options whose combinations can possibly cause fd leaks
        kwargs = {
            "_tty_out": (True, False),
            "_tty_in": (True, False),
            "_err_to_out": (True, False),
        }

        def get_opts(possible_values):
            # yield every combination of the option values as a kwargs dict
            all_opts = []
            for opt, values in possible_values.items():
                opt_collection = []
                all_opts.append(opt_collection)
                for val in values:
                    pair = (opt, val)
                    opt_collection.append(pair)

            for combo in product(*all_opts):
                opt_dict = {}
                for key, val in combo:
                    opt_dict[key] = val
                yield opt_dict

        test_pid = os.getpid()

        def get_num_fds():
            # count our open character devices and pipes via lsof
            lines = sh.lsof(p=test_pid).strip().split("\n")

            def test(line):
                line = line.upper()
                return "CHR" in line or "PIPE" in line

            lines = [line for line in lines if test(line)]
            return len(lines) - 1

        py = create_tmp_test("")

        def test_command(**opts):
            python(py.name, **opts)

        # make sure our baseline is stable.. we can remove this
        test_command()
        baseline = get_num_fds()
        for i in xrange(10):
            test_command()
        num_fds = get_num_fds()
        self.assertEqual(baseline, num_fds)

        for opts in get_opts(kwargs):
            for i in xrange(2):
                test_command(**opts)
                num_fds = get_num_fds()
                self.assertEqual(baseline, num_fds, (baseline, num_fds, opts))

    def test_pushd_thread_safety(self):
        """Concurrent pushd contexts in different threads must not interfere."""
        import threading
        import time

        temp1 = realpath(tempfile.mkdtemp())
        temp2 = realpath(tempfile.mkdtemp())

        try:
            results = [None, None]

            def fn1():
                with sh.pushd(temp1):
                    time.sleep(0.2)
                    results[0] = realpath(os.getcwd())

            def fn2():
                time.sleep(0.1)
                with sh.pushd(temp2):
                    results[1] = realpath(os.getcwd())
                    time.sleep(0.3)

            t1 = threading.Thread(name="t1", target=fn1)
            t2 = threading.Thread(name="t2", target=fn2)
            t1.start()
            t2.start()
            t1.join()
            t2.join()

            # each thread saw its own directory despite the overlap
            self.assertEqual(results, [temp1, temp2])
        finally:
            os.rmdir(temp1)
            os.rmdir(temp2)

    def test_stdin_nohang(self):
        """A pipe-backed stdin that stays open must not hang the command."""
        py = create_tmp_test("""
print("hi")
""")
        read, write = os.pipe()
        stdin = os.fdopen(read, "r")
        python(py.name, _in=stdin)

    @requires_utf8
    def test_unicode_path(self):
        """Commands whose path contains non-ascii characters stringify cleanly."""
        from sh import Command

        python_name = os.path.basename(sys.executable)
        py = create_tmp_test("""#!/usr/bin/env {0}
# -*- coding: utf8 -*-
print("字")
""".format(python_name), prefix="字", delete=False)

        try:
            py.close()
            os.chmod(py.name, int(0o755))
            cmd = Command(py.name)

            # all of these should behave just fine
            str(cmd)
            repr(cmd)
            unicode(cmd)

            running = cmd()
            str(running)
            repr(running)
            unicode(running)

            str(running.process)
            repr(running.process)
            unicode(running.process)
        finally:
            os.unlink(py.name)

    # https://github.com/amoffat/sh/issues/121
    def test_wraps(self):
        """functools.wraps can decorate an sh command without blowing up."""
        from sh import ls
        wraps(ls)(lambda f: True)

    def test_signal_exception_aliases(self):
        """ proves that signal exceptions with numbers and names are equivalent
        """
        import signal
        import sh

        sig_name = "SignalException_%d" % signal.SIGQUIT
        sig = getattr(sh, sig_name)
        from sh import SignalException_SIGQUIT

        self.assertEqual(sig, SignalException_SIGQUIT)

    def test_change_log_message(self):
        """_log_msg customizes the message written to sh's internal logger."""
        py = create_tmp_test("""
print("cool")
""")

        def log_msg(cmd, call_args, pid=None):
            return "Hi! I ran something"

        buf = StringIO()
        handler = logging.StreamHandler(buf)
        logger = logging.getLogger("sh")
        logger.setLevel(logging.INFO)
        try:
            logger.addHandler(handler)
            python(py.name, "meow", "bark", _log_msg=log_msg)
        finally:
            logger.removeHandler(handler)

        loglines = buf.getvalue().split("\n")
        self.assertTrue(loglines, "Log handler captured no messages?")
        self.assertTrue(loglines[0].startswith("Hi! I ran something"))

    # https://github.com/amoffat/sh/issues/273
    def test_stop_iteration_doesnt_block(self):
        """ proves that calling next() on a stopped iterator doesn't
        hang. """
        py = create_tmp_test("""
print("cool")
""")
        p = python(py.name, _iter=True)
        for i in range(100):
            try:
                next(p)
            except StopIteration:
                pass

    # https://github.com/amoffat/sh/issues/195
    def test_threaded_with_contexts(self):
        """Per-thread `with command:` contexts must not leak into other threads."""
        import threading
        import time

        py = create_tmp_test("""
import sys
a = sys.argv
res = (a[1], a[3])
sys.stdout.write(repr(res))
""")
        p1 = python.bake("-u", py.name, 1)
        p2 = python.bake("-u", py.name, 2)

        results = [None, None]

        def f1():
            with p1:
                time.sleep(1)
                results[0] = str(system_python("one"))

        def f2():
            with p2:
                results[1] = str(system_python("two"))

        t1 = threading.Thread(target=f1)
        t1.start()

        t2 = threading.Thread(target=f2)
        t2.start()

        t1.join()
        t2.join()

        correct = [
            "('1', 'one')",
            "('2', 'two')",
        ]
        self.assertEqual(results, correct)

    # https://github.com/amoffat/sh/pull/292
    def test_eintr(self):
        """A signal interrupting wait() (EINTR) must be retried, not raised."""
        import signal

        def handler(num, frame): pass
        signal.signal(signal.SIGALRM, handler)

        py = create_tmp_test("""
import time
time.sleep(2)
""")
        p = python(py.name, _bg=True)
        signal.alarm(1)
        p.wait()
class StreamBuffererTests(unittest.TestCase):
    """Exercises sh.StreamBufferer in its three buffering modes."""

    def test_unbuffered(self):
        """With bufsize 0, every processed chunk passes straight through."""
        from sh import StreamBufferer

        bufferer = StreamBufferer(0)
        for chunk in (b"test", b"one", b""):
            self.assertEqual(bufferer.process(chunk), [chunk])
        self.assertEqual(bufferer.flush(), b"")

    def test_newline_buffered(self):
        """With bufsize 1, output is released one complete line at a time."""
        from sh import StreamBufferer

        bufferer = StreamBufferer(1)
        self.assertEqual(bufferer.process(b"testing\none\ntwo"), [b"testing\n", b"one\n"])
        self.assertEqual(bufferer.process(b"\nthree\nfour"), [b"two\n", b"three\n"])
        # the trailing partial line only comes out on flush
        self.assertEqual(bufferer.flush(), b"four")

    def test_chunk_buffered(self):
        """With bufsize N, output is released in fixed N-byte chunks."""
        from sh import StreamBufferer

        bufferer = StreamBufferer(10)
        self.assertEqual(bufferer.process(b"testing\none\ntwo"), [b"testing\non"])
        self.assertEqual(bufferer.process(b"\nthree\n"), [b"e\ntwo\nthre"])
        self.assertEqual(bufferer.flush(), b"e\n")
@requires_posix
class ExecutionContextTests(unittest.TestCase):
    """Tests for sh() execution contexts: module clones with baked default kwargs."""

    def test_basic(self):
        """sh(_out=...) returns a context whose commands use the baked kwargs."""
        import sh
        out = StringIO()
        _sh = sh(_out=out)
        _sh.echo("-n", "TEST")
        self.assertEqual("TEST", out.getvalue())

    def test_no_interfere1(self):
        """Importing a command from a context doesn't change the real sh module."""
        import sh
        out = StringIO()
        _sh = sh(_out=out)  # noqa: F841
        from _sh import echo
        echo("-n", "TEST")
        self.assertEqual("TEST", out.getvalue())

        # Emptying the StringIO
        out.seek(0)
        out.truncate(0)

        # plain sh.echo must NOT write into the context's buffer
        sh.echo("-n", "KO")
        self.assertEqual("", out.getvalue())

    def test_no_interfere2(self):
        """Commands imported before a context is created keep their defaults."""
        import sh
        out = StringIO()
        from sh import echo
        _sh = sh(_out=out)  # noqa: F841
        echo("-n", "TEST")
        self.assertEqual("", out.getvalue())

    def test_no_bad_name(self):
        """Rebinding the name 'sh' itself to a context is rejected with RuntimeError."""
        out = StringIO()

        def fn():
            import sh
            sh = sh(_out=out)

        self.assertRaises(RuntimeError, fn)

    def test_set_in_parent_function(self):
        """A context bound in an enclosing scope works inside nested functions."""
        import sh
        out = StringIO()
        _sh = sh(_out=out)

        def nested1():
            _sh.echo("-n", "TEST1")

        def nested2():
            import sh
            sh.echo("-n", "TEST2")

        nested1()
        nested2()
        # only the context's echo wrote into the buffer
        self.assertEqual("TEST1", out.getvalue())

    def test_reimport_no_interfere(self):
        """Reimporting a context by its variable name yields the same context."""
        import sh
        out = StringIO()
        _sh = sh(_out=out)
        import _sh  # this reimport '_sh' from the eponymous local variable
        _sh.echo("-n", "TEST")
        self.assertEqual("TEST", out.getvalue())

    def test_command_with_baked_call_args(self):
        # Test that sh.Command() knows about baked call args
        import sh
        _sh = sh(_ok_code=1)
        self.assertEqual(sh.Command._call_args['ok_code'], 0)
        self.assertEqual(_sh.Command._call_args['ok_code'], 1)

    def test_importer_detects_module_name(self):
        """The import hook resolves any local variable bound to a context."""
        import sh
        _sh = sh()
        omg = _sh  # noqa: F841
        from omg import cat  # noqa: F401

    def test_importer_only_works_with_sh(self):
        """The import hook ignores local variables that are not sh contexts."""
        def unallowed_import():
            _os = os  # noqa: F841
            from _os import path  # noqa: F401

        self.assertRaises(ImportError, unallowed_import)

    def test_reimport_from_cli(self):
        # The REPL and CLI both need special handling to create an execution context that is safe to
        # reimport
        if IS_PY3:
            cmdstr = '; '.join(('import sh, io, sys',
                                'out = io.StringIO()',
                                '_sh = sh(_out=out)',
                                'import _sh',
                                '_sh.echo("-n", "TEST")',
                                'sys.stderr.write(out.getvalue())',
                                ))
        else:
            cmdstr = '; '.join(('import sh, StringIO, sys',
                                'out = StringIO.StringIO()',
                                '_sh = sh(_out=out)',
                                'import _sh',
                                '_sh.echo("-n", "TEST")',
                                'sys.stderr.write(out.getvalue())',
                                ))

        err = StringIO()
        python('-c', cmdstr, _err=err)
        self.assertEqual('TEST', err.getvalue())
if __name__ == "__main__":
    # capture everything; a NullHandler keeps it quiet unless another handler is attached
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    root.addHandler(NullHandler())

    test_kwargs = {}
    # failfast is not supported by python 2.6's unittest
    if IS_PY2 and MINOR_VER != 6:
        test_kwargs["failfast"] = True
    test_kwargs["verbosity"] = 2

    # if we're running a specific test, we can let unittest framework figure out
    # that test and run it itself. it will also handle setting the return code
    # of the process if any tests error or fail
    if len(sys.argv) > 1:
        unittest.main(**test_kwargs)

    # otherwise, it looks like we want to run all the tests
    else:
        suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
        # verbosity was already set above; the old duplicate assignment and the
        # no-op try/finally wrapper have been removed
        result = unittest.TextTestRunner(**test_kwargs).run(suite)
        if not result.wasSuccessful():
            exit(1)
|
update_repository_manager.py | """
Determine if installed tool shed repositories have updates available in their respective tool sheds.
"""
import logging
import threading
from sqlalchemy import false
from galaxy import util
from galaxy.tool_shed.util.repository_util import get_tool_shed_status_for_installed_repository
from galaxy.tool_shed.util.shed_util_common import clean_dependency_relationships
from galaxy.util.tool_shed.common_util import get_tool_shed_url_from_tool_shed_registry
from galaxy.util.tool_shed.encoding_util import tool_shed_decode
log = logging.getLogger(__name__)
class UpdateRepositoryManager:
    def __init__(self, app):
        """Wire up the periodic tool shed update check if enabled in config.

        :param app: the Galaxy application instance; supplies config, the
            install model context and the application stack.
        """
        self.app = app
        self.context = self.app.install_model.context
        # Ideally only one Galaxy server process should be able to check for repository updates.
        if self.app.config.enable_tool_shed_check:
            self.running = True
            self.sleeper = Sleeper()
            self.restarter = threading.Thread(target=self.__restarter)
            self.restarter.daemon = True
            # start the background thread only after the server process has forked
            self.app.application_stack.register_postfork_function(self.restarter.start)
            # config expresses the check interval in hours; the sleeper wants seconds
            self.seconds_to_sleep = int(app.config.hours_between_check * 3600)
def get_update_to_changeset_revision_and_ctx_rev(self, repository):
"""Return the changeset revision hash to which the repository can be updated."""
changeset_revision_dict = {}
tool_shed_url = get_tool_shed_url_from_tool_shed_registry(self.app, str(repository.tool_shed))
params = dict(
name=str(repository.name),
owner=str(repository.owner),
changeset_revision=str(repository.installed_changeset_revision),
)
pathspec = ["repository", "get_changeset_revision_and_ctx_rev"]
try:
encoded_update_dict = util.url_get(
tool_shed_url,
auth=self.app.tool_shed_registry.url_auth(tool_shed_url),
pathspec=pathspec,
params=params,
)
if encoded_update_dict:
update_dict = tool_shed_decode(encoded_update_dict)
includes_data_managers = update_dict.get("includes_data_managers", False)
includes_datatypes = update_dict.get("includes_datatypes", False)
includes_tools = update_dict.get("includes_tools", False)
includes_tools_for_display_in_tool_panel = update_dict.get(
"includes_tools_for_display_in_tool_panel", False
)
includes_tool_dependencies = update_dict.get("includes_tool_dependencies", False)
includes_workflows = update_dict.get("includes_workflows", False)
has_repository_dependencies = update_dict.get("has_repository_dependencies", False)
has_repository_dependencies_only_if_compiling_contained_td = update_dict.get(
"has_repository_dependencies_only_if_compiling_contained_td", False
)
changeset_revision = update_dict.get("changeset_revision", None)
ctx_rev = update_dict.get("ctx_rev", None)
changeset_revision_dict["includes_data_managers"] = includes_data_managers
changeset_revision_dict["includes_datatypes"] = includes_datatypes
changeset_revision_dict["includes_tools"] = includes_tools
changeset_revision_dict[
"includes_tools_for_display_in_tool_panel"
] = includes_tools_for_display_in_tool_panel
changeset_revision_dict["includes_tool_dependencies"] = includes_tool_dependencies
changeset_revision_dict["includes_workflows"] = includes_workflows
changeset_revision_dict["has_repository_dependencies"] = has_repository_dependencies
changeset_revision_dict[
"has_repository_dependencies_only_if_compiling_contained_td"
] = has_repository_dependencies_only_if_compiling_contained_td
changeset_revision_dict["changeset_revision"] = changeset_revision
changeset_revision_dict["ctx_rev"] = ctx_rev
except Exception as e:
log.debug(
f"Error getting change set revision for update from the tool shed for repository '{repository.name}': {str(e)}"
)
changeset_revision_dict["includes_data_managers"] = False
changeset_revision_dict["includes_datatypes"] = False
changeset_revision_dict["includes_tools"] = False
changeset_revision_dict["includes_tools_for_display_in_tool_panel"] = False
changeset_revision_dict["includes_tool_dependencies"] = False
changeset_revision_dict["includes_workflows"] = False
changeset_revision_dict["has_repository_dependencies"] = False
changeset_revision_dict["has_repository_dependencies_only_if_compiling_contained_td"] = False
changeset_revision_dict["changeset_revision"] = None
changeset_revision_dict["ctx_rev"] = None
return changeset_revision_dict
def __restarter(self):
    """Background loop: periodically refresh tool-shed status for installed repositories."""
    log.info("Update repository manager restarter starting up...")
    while self.running:
        # Ask the Tool Shed about every non-deleted installed repository. The
        # returned status covers items like newer installable revisions,
        # current revision updates, whether the installed revision is the
        # latest installable one, and whether the repository is deprecated.
        repositories = self.context.query(self.app.install_model.ToolShedRepository).filter(
            self.app.install_model.ToolShedRepository.table.c.deleted == false()
        )
        for repository in repositories:
            # An empty status dict is coerced to None so it is stored as NULL.
            latest_status = get_tool_shed_status_for_installed_repository(self.app, repository) or None
            if latest_status != repository.tool_shed_status:
                repository.tool_shed_status = latest_status
                self.context.flush()
        self.sleeper.sleep(self.seconds_to_sleep)
    log.info("Update repository manager restarter shutting down...")
def shutdown(self):
    """Stop the restarter loop, if the periodic tool-shed check is enabled."""
    if not self.app.config.enable_tool_shed_check:
        return
    self.running = False
    self.sleeper.wake()
def update_repository_record(self, repository, updated_metadata_dict, updated_changeset_revision, updated_ctx_rev):
    """
    Update a tool_shed_repository database record with new information retrieved from the
    Tool Shed. This happens when updating an installed repository to a new changeset revision.
    """
    repository.metadata_ = updated_metadata_dict
    tool_shed_url = get_tool_shed_url_from_tool_shed_registry(self.app, repository.tool_shed)
    # Remove dependency relationships that no longer apply under the new metadata.
    clean_dependency_relationships(self.app, updated_metadata_dict, repository, tool_shed_url)
    # Update the repository.changeset_revision column in the database.
    repository.changeset_revision = updated_changeset_revision
    repository.ctx_rev = updated_ctx_rev
    # Update the repository.tool_shed_status column in the database.
    tool_shed_status_dict = get_tool_shed_status_for_installed_repository(self.app, repository)
    if tool_shed_status_dict:
        repository.tool_shed_status = tool_shed_status_dict
    else:
        # An empty status dict is stored as NULL rather than as an empty dict.
        repository.tool_shed_status = None
    self.app.install_model.context.add(repository)
    self.app.install_model.context.flush()
    # Refresh so the caller sees database-populated state on the returned object.
    self.app.install_model.context.refresh(repository)
    return repository
class Sleeper:
    """
    Provides a 'sleep' method that sleeps for a number of seconds *unless* the wake
    method is called (from a different thread).
    """

    def __init__(self):
        self.condition = threading.Condition()

    def sleep(self, seconds):
        """Block for up to ``seconds`` seconds; return early if wake() is called."""
        # ``with`` guarantees the condition's lock is released even if wait()
        # raises, unlike the original manual acquire()/release() pairing.
        with self.condition:
            self.condition.wait(seconds)

    def wake(self):
        """Interrupt a concurrent sleep() call so it returns immediately."""
        with self.condition:
            self.condition.notify()
|
0001.py | # -*- coding: utf-8 -*-
import LINETCR
from LINETCR.lib.curve.ttypes import *
from datetime import datetime
import time, random, sys, ast, re, os, io, json, subprocess, threading, string, codecs, requests, ctypes, urllib, urllib2, urllib3, wikipedia, tempfile
from bs4 import BeautifulSoup
from urllib import urlopen
from io import StringIO
from threading import Thread
from gtts import gTTS
from googletrans import Translator
# Primary client session ("cl") and one helper/kicker session ("ki1").
# SECURITY NOTE(review): auth tokens are hard-coded in source; they should be
# loaded from the environment or a config file instead.
# NOTE(review): cl and ki1 log in with the *same* token, so they are the same
# account — confirm whether ki1 was meant to use its own credentials.
cl = LINETCR.LINE()
cl.login(token="EqWUCQVhPZFFZ6GrDBo0.OkriOxgFfKMW2FOdq9cL4a.7xJAO5R+MOIqHomz35m5csPkRoN3ExblHe2/12E3xcY=")
cl.loginResult()
ki1 = LINETCR.LINE()
ki1.login(token="EqWUCQVhPZFFZ6GrDBo0.OkriOxgFfKMW2FOdq9cL4a.7xJAO5R+MOIqHomz35m5csPkRoN3ExblHe2/12E3xcY=")
ki1.loginResult()
# Additional kicker accounts ki2..ki10 are disabled; later code still references
# them (ki2.getGroup etc.) and will raise NameError if those paths execute.
#ki2 = LINETCR.LINE()
#ki2.login(token="Eoanj57G8zVmnx1Lbcq0.+ZViS46tqNx/xW3+/xvqWa.7OEC6oc00qyDxg4oyNPkxUPR6cp4NQtisWXcXsisb04=")
#ki2.loginResult()
#ki3 = LINETCR.LINE()
#ki3.login(token="EoTaHjbFSm5Q4uSw9nD8.TDGC/4U6kG8mrZHlHb4Xsa.5GnU4dc+9495v9xtFbXwBQbj8Mkh1Tf9eo/QyyHDPQA=")
#ki3.loginResult()
#ki4 = LINETCR.LINE()
#ki4.login(token="Eov6BKJXHfTv7j3CRHU5.aOeodX8FNJewWK5vf5P9fq.LnPl7aQDvd/bjbou+L2w1lvh/ioHsSUQYDfd/swfrZA=")
#ki4.loginResult()
#ki5 = LINETCR.LINE()
#ki5.login(token="EoD4mpmx79uzg09jbML1.efbujRuyG0q0ahbGcSYguq.DxbEGtvtTXAKEePt4fLUXH8jRjh7VG+5CUM0fAPAWAw=")
#ki5.loginResult()
#ki6 = LINETCR.LINE()
#ki6.login(token="EoxwAg1N3lSmzFfZ0RX3.7f74kMOPgNIBSGT6+sawqW.zNL95ZZiregvvKd9pBWeCNQEvFK8pQVNb3TtBibAGEQ=")
#ki6.loginResult()
#ki7 = LINETCR.LINE()
#ki7.login(token="Eokv7n7uoq4tsmQWREkf.PqUHiCpv2pBtz8Q0TEIqxW.AgPVyVnLoG7UHyzHyQX/ICM4cQapp9zRSY2uGr95du8=")
#ki7.loginResult()
#ki8 = LINETCR.LINE()
#ki8.login(token="EoxWhUuk78IXHPBgYgbe.nMGqEcQDlP6dAl/gilNatG.90ay26S0VfNPti2ZmKGDVlu6FJ3ivCsIUSVDa6kmBEA=")
#ki8.loginResult()
#ki9 = LINETCR.LINE()
#ki9.login(token="EoluRsFVIBC6WfNecGja.XeTFQ55WYwSmXh4n0wOhcG.Zl36VJU8INIcSFmmXsMSXyUZW+gbjmQTgE6LxBQpCa4=")
#ki9.loginResult()
#ki10 = LINETCR.LINE()
#ki10.login(token="EoQW0fWuribDJDJRBTCa.XIBZSHhwVEi2szZJfJwo/G.0Vu2NC0IMsGRgSttKg/vSJz9ngGwx/lGkaqdUqt1GgM=")
#ki10.loginResult()
print "login success"
# Python 2 only: force the default str encoding to UTF-8 so the Thai/emoji
# help text below can be handled; reload(sys) restores setdefaultencoding.
reload(sys)
sys.setdefaultencoding('utf-8')
helpMessage ="""By:™✾แมวเป้✍Գန້さັএπັஞ➢
─┅═✥Clone SelfBot Team✥═┅─
🇹🇭 『คท』= แสดงคอนแทรกเรา
🇹🇭 『ไอดี』= แสดงไอดีเรา
🇹🇭 『เชิญ』= ดึงคนด้วยคท
🇹🇭 『พูด 』= สั่งสิริพูดตามที่พิม
🇹🇭 『มิด』= ดูมิดของเรา
🇹🇭 『ร่าง』= โชว์ร่างคิกเกอร์ของเรา
🇹🇭 『ของขวัญ』= ส่งของขวัญปลอม
🇹🇭 『มิด @』= ดูมิดคนอื่น
🇹🇭 『ขอเพลง 』= ขอเพลงจากยูทูป
🇹🇭 『บุก』= สั่งร่างคิกเกอร์เข้า
🇹🇭 『ออก』= สั่งร่างคิกเกอร์ออก
🇹🇭 『Tl: text』= สร้างชื่อใวรัส
🇹🇭 『Auto join: on/off』= เข้า/ไม่ กลุ่มเอง
🇹🇭 『Auto add: on/off』= รับ/ไม่ เพื่อนเอง
🇹🇭 『ออกแชท: ไม่ออกแชท』= เข้า/ไม่ แชทรวม
🇹🇭 『Clock: on/off』= เปิด/ปิด ชื่อเวลา
🇹🇭 『Up』= อัพเวลา
🇹🇭 『ขอลิ้ง』= ขอลิ้งห้อง
🇹🇭 『กลุ่ม』= เชคกลุ่ม
🇹🇭 『เพื่อนทั้งหมด』= รายชื่อเพื่อนเรา
🇹🇭 『บลอค』= เชคว่าเราบลอคใครมั่ง
🇹🇭 『แทก』= แทกทั้งห้อง
🇹🇭 『มึงตาย』= ลงใวรัส แอนดรอยจะค้าง เด้งออก‼️‼️‼
🇹🇭 『ลบรัน』= ลบห้องรัน
──┅═✥===========✥═┅──
──┅═✥===========✥═┅──
🇨🇦 『ชื่อ 』= แสดงชื่อเรา
🇨🇦 『Gn: text 』= เปลี่ยนชื่อกลุ่ม
🇨🇦 『นน』= เชคคนแอบอ่าน
🇨🇦 『ออ』= เชคคนอ่าน
🇨🇦 『ป้องกันหมด』= เปิดป้องกันทั้งหมด
🇨🇦 『ปิดป้องกันหมด』= ปิดป้องกันทั้งหมด
🇨🇦 『เชคค่า』= ตรวดสอบตั้งค่า
🇨🇦 『Link on/off』= เปิด/ปิดไลค์
🇨🇦 『Spam on/off』= รันแชต
🇨🇦 『เทส』= เชคบอท
🇨🇦 『Myginfo』
🇨🇦 『Gurl』
🇨🇦 『Glist』
🇨🇦 『ยูทูป 』= เปิดยูทูป
🇨🇦 『Phet: Tag』
🇨🇦 『Gcancel:』
🇨🇦 『Masuk Join』
🇨🇦 『Sa:yang』
🇨🇦 『Beb』
🇨🇦 『Cinta』
🇨🇦 『Sayang: 』
🇨🇦 『P:ulang』
🇨🇦 『Ban @』= แบน
🇨🇦 『Uban @』= แก้แบน
🇨🇦 『เชคดำ』= ดูว่าใครติดแบน
🇨🇦 『ล้างดำ』= ลบคนรายชื่อแบน
🇨🇦 『Comment :』
🇨🇦 『Banlist』
🇨🇦 『Cekban』
🇹🇭 『Clear ban』
🇹🇭 『Kill @ Fuck @』= เตะ
🇹🇭 『Speed / Sp』= เชคความใว
🇹🇭 『Hack @2@3@4』= ขโมยรูป
🇹🇭 『Ambilin @』
🇹🇭 『Sampul @』
🇹🇭 『แปลงร่าง @』=ก๊อป
🇹🇭 『กลับ』= กลับร่างเดิม
🇹🇭 『Keluar :@』
🇹🇭 『music』
🇹🇭 『.reboot』
🇹🇭 『Wikipedia』
🇹🇭 『Cleanse』
🇹🇭 『Bs』= เชคความใวคิกเกอร์
🇹🇭 『P1-P36 link on/off』
──┅═✥===========✥═┅──
👿 『Key』
👿 『Qr on/off』
👿 『Backup on/off』
👿 『Protect On/off』
👿 『Namelock On/off』
─┅═✥ᵀᴴᴬᴵᴸᴬᴺᴰ✥═┅─
[By:✾แมวเป้✍Գန້さັএπັஞ➢
──┅═✥============✥═┅──"""
helpMessage2 ="""
╔═════════════════
║ ✟ New function ✟
╠═════════════════
╠➩〘Help protect〙
╠➩〘Help self〙
╠➩〘Help grup〙
╠➩〘Help set〙
╠➩〘Help media〙
╠➩〘Speed〙
╠➩〘Status〙
╚═════════════════
╔═════════════════
║ ✟ New function ✟
╠═════════════════
╠➩〘Protect on/off〙
╠➩〘Qr on/off〙
╠➩〘Invit on/off〙
╠➩〘Cancel on/off〙
╚═════════════════
╔═════════════════
║ ✟ New function ✟
╠═════════════════
╠➩〘Me〙
╠➩〘Myname: 〙
╠➩〘Mybio: 〙
╠➩〘Myname〙
╠➩〘Mybio〙
╠➩〘Mypict〙
╠➩〘Mycover〙
╠➩〘My,copy @〙
╠➩〘Mybackup〙
╠➩〘Getgrup image〙
╠➩〘Getmid @〙
╠➩〘Getprofile @〙
╠➩〘Getcontact @〙
╠➩〘Getinfo @〙
╠➩〘Getname @〙
╠➩〘Getbio @〙
╠➩〘Getpict @〙
╠➩〘Getcover @〙
╠➩〘Mention〙
╠➩〘Lurk on/off〙
╠➩〘Lurkers〙
╠➩〘Mimic on/off〙
╠➩〘Micadd @〙
╠➩〘Micdel @〙
╠═════════════════
║ ✟ New function ✟
╠═════════════════
╠➩〘Contact on/off〙
╠➩〘Autojoin on/off〙
╠➩〘Autoleave on/off〙
╠➩〘Autoadd on/off〙
╠➩〘Like me〙
╠➩〘Like friend〙
╠➩〘Like on〙
╠➩〘Respon on/off〙
╠➩〘Read on/off〙
╠➩〘Simisimi on/off〙
╠═════════════════
║ ✟ New function ✟
╠═════════════════
╠➩〘Link on/off〙
╠➩〘Url〙
╠➩〘Cancel〙
╠➩〘Gcreator〙
╠➩〘Ki'ck @〙
╠➩〘Ulti @〙
╠➩〘Cancel〙
╠➩〘Gname: 〙
╠➩〘Gbroadcast: 〙
╠➩〘Cbroadcast: 〙
╠➩〘Infogrup〙
╠➩〘Gruplist〙
╠➩〘Friendlist〙
╠➩〘Blocklist〙
╠➩〘Ba'n @〙
╠➩〘U'nban @〙
╠➩〘Clearban〙
╠➩〘Banlist〙
╠➩〘Contactban〙
╠➩〘Midban〙
╠═════════════════
║ ✟ New function ✟
╠═════════════════
╠➩〘Kalender〙
╠➩〘tr-id 〙
╠➩〘tr-en 〙
╠➩〘tr-jp 〙
╠➩〘tr-ko 〙
╠➩〘say-id 〙
╠➩〘say-en 〙
╠➩〘say-jp 〙
╠➩〘say-ko 〙
╠➩〘profileig 〙
╠➩〘checkdate 〙
╚═════════════════
"""
helpMessage3 ="""
╔══════════════════════
║ 🇹🇭เปิด/ปิดข้อความต้อนรับ🇹🇭
╠══════════════════════
║🔥 Hhx1 on ➠เปิดข้อความต้อนรับ
║🔥 Hhx1 off ➠ปิดข้อความต้อนรับ
║🔥 Hhx2 on ➠เปิดข้อความออกกลุ่ม
║🔥 Hhx2 off ➠เปิดข้อความออกกลุ่ม
║🔥 Hhx3 on ➠เปิดข้อความคนลบ
║🔥 Hhx3 off ➠เปิดข้อความคนลบ
║🔥 Mbot on ➠เปิดเเจ้งเตือนบอท
║🔥 Mbot off ➠ปิดเเจ้งเตือนบอท
║🔥 M on ➠เปิดเเจ้งเตือนตนเอง
║🔥 M off ➠ปิดเเจ้งเตือนตนเอง
║🔥 Tag on ➠เปิดกล่าวถึงเเท็ค
║🔥 Tag off ➠ปิดกล่าวถึงเเท็ค
║🔥 Kicktag on ➠เปิดเตะคนเเท็ค
║🔥 Kicktag off ➠ปิดเตะคนเเท็ค
╚══════════════════════
╔══════════════════════
║ 🇹🇭โหมดตั้งค่าข้อความ🇹🇭
╠══════════════════════
║🔥 Hhx1˓: ➠ไส่ข้อความต้อนรับ
║🔥 Hhx2˓: ➠ไส่ข้อความออกจากกลุ่ม
║🔥 Hhx3˓: ➠ไส่ข้อความเมื่อมีคนลบ
╚══════════════════════
╔══════════════════════
║ 🇹🇭โหมดเช็คตั้งค่าข้อความ🇹🇭
╠══════════════════════
║🔥 Hhx1 ➠เช็คข้อความต้อนรับ
║🔥 Hhx2 ➠เช็คข้อความคนออก
║🔥 Hhx3 ➠เช็คข้อความคนลบ
╚══════════════════════
"""
helpMessage4 ="""
╔══════════════════════
║ By:✾แมวเป้✍Գန້さັএπັஞ➢
╠══════════════════════
║🇹🇭 เช็คแอด/เชคแอด ➠เช็คแอดมินกลุ่ม
║🇹🇭 ยกเลิก ➠ร่างเรายกเลิกค้างเชิญทั้งหมด
║🇹🇭 ยกเลิก1 ➠คิกเกอร์ยกเลิกค้างเชิญทั้งหมด
║🇹🇭 ข้อมูลเปิด ➠ดูข้อมูลตอนส่งคอนแทค
║🇹🇭 ข้อมูลปิด ➠ปิดดูข้อมูลตอนส่งคอนแทค
║🇹🇭 เตะแม่ง ➠สั่งคิกเกอร์บินห้อง
╚══════════════════════
"""
# Active client pool: the primary account plus the one enabled kicker.
KAC=[cl,ki1]
mid = cl.getProfile().mid
Amid1 = ki1.getProfile().mid
# mids for the disabled kicker accounts (ki2..ki10 are commented out above,
# yet Amid2..Amid10 are still referenced later — NameError if those paths run).
#Amid2 = ki2.getProfile().mid
#Amid3 = ki3.getProfile().mid
#Amid4 = ki4.getProfile().mid
#Amid5 = ki5.getProfile().mid
#Amid6 = ki6.getProfile().mid
#Amid7 = ki7.getProfile().mid
#Amid8 = ki8.getProfile().mid
#Amid9 = ki9.getProfile().mid
#Amid10 = ki10.getProfile().mid
# Mutable runtime state for the protection features.
protectname = []
protecturl = []
protection = []
autocancel = {}
autoinvite = []
autoleaveroom = []
targets = []
# NOTE(review): ``mid`` was already assigned above with the same expression —
# this second assignment is redundant.
mid = cl.getProfile().mid
Bots = ["ua0a448a1719f1649b0d9fa0343d0a5e0",Amid1]
# NOTE(review): module-level name ``self`` shadows the conventional method
# parameter name; confusing but harmless at module scope.
self = ["ua0a448a1719f1649b0d9fa0343d0a5e0",Amid1]
# Hard-coded owner/admin mid, duplicated under several aliases.
admin = "ua0a448a1719f1649b0d9fa0343d0a5e0"
admsa = "ua0a448a1719f1649b0d9fa0343d0a5e0"
owner = "ua0a448a1719f1649b0d9fa0343d0a5e0"
adminMID = "ua0a448a1719f1649b0d9fa0343d0a5e0"
Creator="ua0a448a1719f1649b0d9fa0343d0a5e0"
# Central feature-flag / settings dictionary consumed by bot().
wait = {
    "alwayRead":False,
    "detectMention":True,
    "kickMention":False,
    "steal":False,
    'pap':{},
    'invite':{},
    "spam":{},
    'contact':False,
    'autoJoin':True,
    'autoCancel':{"on":True, "members":1},
    'leaveRoom':True,
    'timeline':True,
    'autoAdd':False,
    'message':"Thanks for add Me By:™ചচ✾ъπ່७✾ざণاعနัю❍ีざနT",
    "lang":"JP",
    "comment":"AutoLike by Phet",
    "commentOn":False,
    "acommentOn":False,
    "bcommentOn":False,
    "ccommentOn":False,
    "Protectcancl":False,
    "pautoJoin":False,
    "commentBlack":{},
    "wblack":False,
    "dblack":False,
    "clock":False,
    "cName":"™ചচ✾ъπ່७✾ざণاعနัю❍ีざန",
    "likeOn":False,
    # NOTE(review): "pname" appears twice in this literal (False here, {} a
    # few lines below); the later {} value silently wins.
    "pname":False,
    "blacklist":{},
    "whitelist":{},
    "wblacklist":False,
    "dblacklist":False,
    "qr":False,
    "Backup":False,
    "protectionOn":False,
    "winvite":False,
    "ainvite":False,
    "binvite":False,
    "protect":False,
    "cancelprotect":False,
    "inviteprotect":False,
    "linkprotect":False,
    "Hhx1":False,
    "Hhx2":False,
    "Hhx3":False,
    "Notifed":False,
    "Notifedbot":False,
    "atjointicket":False,
    "pnharfbot":{},
    "pname":{},
    "pro_name":{},
    "posts":False,
    }
# Read-receipt tracking state.
wait2 = {
    "readPoint":{},
    "readMember":{},
    "setTime":{},
    "ROM":{}
    }
# Mimic (message-copying) feature state.
mimic = {
    "copy":False,
    "copy2":False,
    "status":False,
    "target":{}
    }
settings = {
    "simiSimi":{}
    }
res = {
    'num':{},
    'us':{},
    'au':{},
    }
# NOTE(review): setTime is immediately rebound to the dict inside wait2, so
# the first empty-dict assignment is dead.
setTime = {}
setTime = wait2['setTime']
# Bot start time, used for uptime reporting.
mulai = time.time()
blacklistFile='blacklist.txt'
pendinglistFile='pendinglist.txt'
# Snapshot of the primary profile so the original name/status/picture can be
# restored later ("Mybackup").
contact = cl.getProfile()
mybackup = cl.getProfile()
mybackup.displayName = contact.displayName
mybackup.statusMessage = contact.statusMessage
mybackup.pictureStatus = contact.pictureStatus
#contact = ki1.getProfile()
#backup = ki1.getProfile()
#backup.displayName = contact.displayName
#backup.statusMessage = contact.statusMessage
#backup.pictureStatus = contact.pictureStatus
#contact = ki2.getProfile()
#backup = ki2.getProfile()
#backup.displayName = contact.displayName
#backup.statusMessage = contact.statusMessage
#backup.pictureStatus = contact.pictureStatus
#contact = ki3.getProfile()
#backup = ki3.getProfile()
#backup.displayName = contact.displayName
#backup.statusMessage = contact.statusMessage
#backup.pictureStatus = contact.pictureStatus
#contact = ki4.getProfile()
#backup = ki4.getProfile()
#backup.displayName = contact.displayName
#backup.statusMessage = contact.statusMessage
#backup.pictureStatus = contact.pictureStatus
#contact = ki5.getProfile()
#backup = ki5.getProfile()
#backup.displayName = contact.displayName
#backup.statusMessage = contact.statusMessage
#backup.pictureStatus = contact.pictureStatus
#contact = ki6.getProfile()
#backup = ki6.getProfile()
#backup.displayName = contact.displayName
#backup.statusMessage = contact.statusMessage
#backup.pictureStatus = contact.pictureStatus
#contact = ki7.getProfile()
#backup = ki7.getProfile()
#backup.displayName = contact.displayName
#backup.statusMessage = contact.statusMessage
#backup.pictureStatus = contact.pictureStatus
#contact = ki8.getProfile()
#backup = ki8.getProfile()
#backup.displayName = contact.displayName
#backup.statusMessage = contact.statusMessage
#backup.pictureStatus = contact.pictureStatus
#contact = ki9.getProfile()
#backup = ki9.getProfile()
#backup.displayName = contact.displayName
#backup.statusMessage = contact.statusMessage
#backup.pictureStatus = contact.pictureStatus
#contact = ki10.getProfile()
#backup = ki10.getProfile()
#backup.displayName = contact.displayName
#backup.statusMessage = contact.statusMessage
#backup.pictureStatus = contact.pictureStatus
def restart_program():
    """Replace the current process with a fresh run of this script."""
    # os.execl never returns: the running interpreter image is replaced
    # in-place, re-running the same script with the same arguments.
    os.execl(sys.executable, sys.executable, *sys.argv)
def sendImageWithUrl(self, to_, url):
    """Download the image at ``url`` into a temp file and send it to ``to_``.

    Raises Exception('Download image failure.') on a non-200 response, and
    re-raises anything sendImage() raises.
    """
    # Local import: shutil is used here but never imported at module level
    # (the original would have raised NameError on shutil.copyfileobj).
    import shutil
    # Fix: the original called bare ``randint`` (only ``random`` is imported).
    path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), random.randint(0, 9))
    r = requests.get(url, stream=True)
    if r.status_code == 200:
        # Fix: open in binary mode ('wb'); text mode ('w') corrupts image
        # bytes on platforms with newline translation. ``with`` also closes
        # the handle, which the original leaked.
        with open(path, 'wb') as f:
            shutil.copyfileobj(r.raw, f)
    else:
        raise Exception('Download image failure.')
    # The original try/except that immediately re-raised was a no-op.
    self.sendImage(to_, path)
def yt(query):
    """Search YouTube for ``query`` and return 'youtu.be...' links for plain videos.

    Playlist links (containing '&list=') are skipped. An empty query falls
    back to a default search term.
    """
    results = []
    with requests.session() as sess:
        if query == "":
            query = "S1B tanysyz"
        sess.headers['user-agent'] = 'Mozilla/5.0'
        resp = sess.get('http://www.youtube.com/results', params={'search_query': query})
        soup = BeautifulSoup(resp.content, 'html5lib')
        for anchor in soup.select('.yt-lockup-title > a[title]'):
            href = anchor['href']
            # Keep only direct watch links; drop playlist entries.
            if '&list=' not in href and 'watch?v' in href:
                results.append('youtu.be' + href.replace('watch?v=', ''))
    return results
def _images_get_next_item(s):
start_line = s.find('rg_di')
if start_line == -1: #If no links are found then give an error!
end_quote = 0
link = "no_links"
return link, end_quote
else:
start_line = s.find('"class="rg_meta"')
start_content = s.find('"ou"',start_line+90)
end_content = s.find(',"ow"',start_content-90)
content_raw = str(s[start_content+6:end_content-1])
return content_raw, end_content
# Getting all links with the help of '_images_get_next_item'
def _images_get_all_items(page):
    """Collect every image link in ``page`` by repeatedly scanning forward."""
    links = []
    while True:
        link, end_content = _images_get_next_item(page)
        if link == "no_links":
            return links
        links.append(link)
        # Throttle so subsequent image downloads are not fired too quickly.
        time.sleep(0.1)
        # Resume scanning just past the item that was consumed.
        page = page[end_content:]
def upload_tempimage(client):
    '''
    Upload a picture of a kitten. We don't ship one, so get creative!
    '''
    # NOTE(review): ``album`` and ``image_path`` are not defined anywhere in
    # this file — calling this function will raise NameError. Confirm where
    # these globals were meant to come from.
    config = {
        'album': album,
        'name': 'bot auto upload',
        'title': 'bot auto upload',
        'description': 'bot auto upload'
    }
    print("Uploading image... ")
    # presumably ``client`` is an imgur-style client exposing
    # upload_from_path() — TODO confirm.
    image = client.upload_from_path(image_path, config=config, anon=False)
    print("Done")
    print()
def summon(to, nama):
    """Send a message to chat ``to`` that @-mentions every mid in ``nama``."""
    aa = ""  # accumulates the JSON MENTIONEES entries
    bb = ""  # accumulates one "@x" placeholder line per mention
    # strt/akh are character offsets into msg.text where each "@x" placeholder
    # sits; 14 skips the decorative header line. NOTE(review): these offsets
    # assume the exact box-drawing template built below — verify if the
    # template text is ever changed.
    strt = int(14)
    akh = int(14)
    nm = nama
    for mm in nm:
        akh = akh + 2
        # One MENTIONEES entry per target: {"S": start, "E": end, "M": mid}.
        aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},"""
        strt = strt + 6
        akh = akh + 4
        bb += "\xe2\x95\xa0 @x \n"
    # Drop the trailing comma left by the loop.
    aa = (aa[:int(len(aa)-1)])
    msg = Message()
    msg.to = to
    # Box-drawing frame (UTF-8 byte escapes) around the placeholder lines.
    msg.text = "\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\n"+bb+"\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90"
    msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+aa+']}','EMTVER':'4'}
    print "[Command] Tag All"
    try:
        cl.sendMessage(msg)
    except Exception as error:
        print error
def waktu(secs):
    """Format a duration in seconds as 'HH Jam MM Menit SS Detik' (Indonesian)."""
    hours, remainder = divmod(secs, 3600)
    mins, secs = divmod(remainder, 60)
    return '%02d Jam %02d Menit %02d Detik' % (hours, mins, secs)
def cms(string, commands):
    """Return True if ``string`` exactly matches one of ``commands``.

    The original iterated a list of prefix markers (+ @ / > ; ^ % $ ...) but
    never used the marker in the comparison, so every command was just tested
    for plain equality 13 times over. That dead loop is removed here; the
    observable behavior (exact match only) is unchanged. If prefix-triggered
    commands were the intent, the comparison would need to be
    ``string == marker + command`` instead.
    """
    return string in commands
def sendMessage(self, messageObject):
    # Thin wrapper: forward a pre-built message object to the Talk client
    # (first argument 0 is the sequence number).
    # NOTE(review): shadowed later in this file by another module-level
    # ``sendMessage(to, text, ...)`` definition.
    return self.Talk.client.sendMessage(0,messageObject)
def sendText(self, Tomid, text):
    """Send a plain text message to ``Tomid`` via the Talk client."""
    message = Message()
    message.to = Tomid
    message.text = text
    # First argument 0 is the sequence number.
    return self.Talk.client.sendMessage(0, message)
def sendImage(self, to_, path):
    """Send the image file at ``path`` to chat ``to_`` via LINE's object store.

    A placeholder message is registered first; the file bytes are then
    uploaded against the resulting message id. Raises Exception when the
    upload endpoint does not answer 201.
    """
    M = Message(to=to_, text=None, contentType = 1)
    M.contentMetadata = None
    M.contentPreview = None
    M2 = self._client.sendMessage(0,M)
    M_id = M2.id
    # Fix: the original opened the file twice (once just to measure its size)
    # and never closed either handle. Use the OS-reported size and a single
    # context-managed handle instead.
    with open(path, 'rb') as fh:
        files = {
            'file': fh,
        }
        params = {
            'name': 'media',
            'oid': M_id,
            'size': os.path.getsize(path),
            'type': 'image',
            'ver': '1.0',
        }
        data = {
            'params': json.dumps(params)
        }
        r = self.post_content('https://obs-sg.line-apps.com/talk/m/upload.nhn', data=data, files=files)
    if r.status_code != 201:
        raise Exception('Upload image failure.')
    return True
def sendImage2(self, to_, path):
    """Send the image file at ``path`` to chat ``to_`` via the naver upload host.

    Variant of sendImage: registers the placeholder without a sequence number
    and posts through the module-level ``cl`` client rather than ``self``
    (NOTE(review): confirm the ``cl`` usage is intentional). Raises Exception
    when the upload endpoint does not answer 201.
    """
    M = Message(to=to_,contentType = 1)
    M.contentMetadata = None
    M.contentPreview = None
    M_id = self._client.sendMessage(M).id
    # Fix: the original opened the file twice (once just to measure its size)
    # and never closed either handle. Use the OS-reported size and a single
    # context-managed handle instead.
    with open(path, 'rb') as fh:
        files = {
            'file': fh,
        }
        params = {
            'name': 'media',
            'oid': M_id,
            'size': os.path.getsize(path),
            'type': 'image',
            'ver': '1.0',
        }
        data = {
            'params': json.dumps(params)
        }
        r = cl.post_content('https://os.line.naver.jp/talk/m/upload.nhn', data=data, files=files)
    if r.status_code != 201:
        raise Exception('Upload image failure.')
    return True
def sendMessage(to, text, contentMetadata={}, contentType=0):
    # NOTE(review): this shadows the earlier ``sendMessage(self, messageObject)``
    # definition above. It builds a Message and bumps a per-chat counter but
    # never actually sends or returns it — confirm whether a send call is missing.
    # NOTE(review): ``profile`` and ``messageReq`` are not defined in this file;
    # calling this raises NameError unless they exist at runtime.
    # NOTE(review): mutable default ``contentMetadata={}`` is shared across calls.
    mes = Message()
    mes.to, mes.from_ = to, profile.mid
    mes.text = text
    mes.contentType, mes.contentMetadata = contentType, contentMetadata
    # Per-chat message counter; initialized to -1 so the first send counts as 0.
    if to not in messageReq:
        messageReq[to] = -1
    messageReq[to] += 1
def NOTIFIED_READ_MESSAGE(op):
    """Record the display name of a member who read a message in a tracked chat.

    op.param1 is the chat id, op.param2 the reader's mid. Only chats present
    in wait2['readPoint'] are tracked; each reader is recorded at most once.
    """
    try:
        if op.param1 not in wait2['readPoint']:
            return
        name = cl.getContact(op.param2).displayName
        if name not in wait2['readMember'][op.param1]:
            wait2['readMember'][op.param1] += "\n・" + name
            wait2['ROM'][op.param1][op.param2] = "・" + name
    except:
        # Best-effort, as in the original: any lookup failure is ignored.
        pass
def bot(op):
try:
if op.type == 0:
return
if op.type == 5:
if wait["autoAdd"] == True:
cl.findAndAddContactsByMid(op.param1)
if (wait["message"] in [""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
if op.type == 11:
if op.param3 == '1':
if op.param1 in wait['pname']:
try:
G = cl.getGroup(op.param1)
except:
try:
G = ki1.getGroup(op.param1)
except:
try:
G = ki2.getGroup(op.param1)
except:
try:
G = ki3.getGroup(op.param1)
except:
try:
G = ki4.getGroup(op.param1)
except:
try:
G = ki5.getGroup(op.param1)
except:
pass
G.name = wait['pro_name'][op.param1]
try:
cl.updateGroup(G)
except:
try:
ki1.updateGroup(G)
except:
try:
ki2.updateGroup(G)
except:
try:
ki2.updateGroup(G)
except:
try:
ki3.updateGroup(G)
except:
try:
ki4.updateGroup(G)
except:
pass
if op.param2 in ken:
pass
else:
try:
ki1.kickoutFromGroup(op.param1,[op.param2])
except:
try:
ki1.kickoutFromGroup(op.param1,[op.param2])
except:
try:
ki2.kickoutFromGroup(op.param1,[op.param2])
except:
try:
ki3.kickoutFromGroup(op.param1,[op.param2])
except:
try:
ki4.kickoutFromGroup(op.param1,[op.param2])
except:
pass
cl.sendText(op.param1,"Group Name Lock")
ki1.sendText(op.param1,"Haddeuh dikunci Pe'a")
ki2.sendText(op.param1,"Wekawekaweka (Har Har)")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
if op.type == 13:
if op.param3 in mid:
if op.param2 in mid:
G = cl.getGroup(op.param1)
G.preventJoinByTicket = False
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(op.param1)
if op.param3 in mid:
if op.param2 in Amid1:
G = ki1.getGroup(op.param1)
G.preventJoinByTicket = False
ki1.updateGroup(X)
Ti = ki1.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki1.updateGroup(X)
Ti = ki1.reissueGroupTicket(op.param1)
if op.param3 in mid:
if op.param2 in Amid2:
X = ki2.getGroup(op.param1)
X.preventJoinByTicket = False
ki2.updateGroup(X)
Ti = ki2.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
ki2.updateGroup(X)
Ti = ki2.reissueGroupTicket(op.param1)
if op.param3 in mid:
if op.param2 in Amid3:
X = ki3.getGroup(op.param1)
X.preventJoinByTicket = False
ki3.updateGroup(X)
Ti = ki3.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
ki3.updateGroup(X)
Ti = ki3.reissueGroupTicket(op.param1)
if op.param3 in mid:
if op.param2 in Amid4:
G = ki4.getGroup(op.param1)
G.preventJoinByTicket = False
ki4.updateGroup(X)
Ti = ki4.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki4.updateGroup(X)
Ti = ki4.reissueGroupTicket(op.param1)
if op.param3 in mid:
if op.param2 in Amid5:
G = ki5.getGroup(op.param1)
G.preventJoinByTicket = False
ki5.updateGroup(X)
Ti = ki5.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki5.updateGroup(X)
Ti = ki5.reissueGroupTicket(op.param1)
if op.param3 in mid:
if op.param2 in Amid6:
G = ki6.getGroup(op.param1)
G.preventJoinByTicket = False
ki6.updateGroup(X)
Ti = ki6.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki6.updateGroup(X)
Ti = ki6.reissueGroupTicket(op.param1)
if op.param3 in mid:
if op.param2 in Amid7:
G = ki7.getGroup(op.param1)
G.preventJoinByTicket = False
ki7.updateGroup(X)
Ti = ki7.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki7.updateGroup(X)
Ti = ki7.reissueGroupTicket(op.param1)
if op.param3 in mid:
if op.param2 in Amid8:
G = ki8.getGroup(op.param1)
G.preventJoinByTicket = False
ki8.updateGroup(X)
Ti = ki8.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki8.updateGroup(X)
Ti = ki8.reissueGroupTicket(op.param1)
if op.param3 in mid:
if op.param2 in Amid9:
G = ki9.getGroup(op.param1)
G.preventJoinByTicket = False
ki9.updateGroup(X)
Ti = ki9.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki9.updateGroup(X)
Ti = ki9.reissueGroupTicket(op.param1)
if op.param3 in mid:
if op.param2 in Amid10:
G = ki10.getGroup(op.param1)
G.preventJoinByTicket = False
ki10.updateGroup(X)
Ti = ki10.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki10.updateGroup(X)
Ti = ki10.reissueGroupTicket(op.param1)
if op.param3 in Amid1:
if op.param2 in Amid2:
X = ki2.getGroup(op.param1)
X.preventJoinByTicket = False
ki2.updateGroup(X)
Ti = ki1.reissueGroupTicket(op.param1)
ki1.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
ki2.updateGroup(X)
Ti = ki2.reissueGroupTicket(op.param1)
if op.param3 in Amid2:
if op.param2 in Amid3:
X = ki3.getGroup(op.param1)
X.preventJoinByTicket = False
ki3.updateGroup(X)
Ti = ki2.reissueGroupTicket(op.param1)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
ki3.updateGroup(X)
Ti = ki3.reissueGroupTicket(op.param1)
if op.param3 in Amid3:
if op.param2 in Amid4:
X = ki4.getGroup(op.param1)
X.preventJoinByTicket = False
ki4.updateGroup(X)
Ti = ki4.reissueGroupTicket(op.param1)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
ki4.updateGroup(X)
Ti = ki4.reissueGroupTicket(op.param1)
if op.param3 in Amid4:
if op.param2 in Amid5:
X = ki5.getGroup(op.param1)
X.preventJoinByTicket = False
ki5.updateGroup(X)
Ti = ki5.reissueGroupTicket(op.param1)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
ki5.updateGroup(X)
Ti = ki5.reissueGroupTicket(op.param1)
if op.param3 in Amid5:
if op.param2 in Amid6:
X = ki6.getGroup(op.param1)
X.preventJoinByTicket = False
ki6.updateGroup(X)
Ti = ki6.reissueGroupTicket(op.param1)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
ki6.updateGroup(X)
Ti = ki6.reissueGroupTicket(op.param1)
if op.param3 in Amid6:
if op.param2 in Amid7:
X = ki7.getGroup(op.param1)
X.preventJoinByTicket = False
ki7.updateGroup(X)
Ti = ki7.reissueGroupTicket(op.param1)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
ki.updateGroup(X)
Ti = ki7.reissueGroupTicket(op.param1)
if op.param3 in Amid7:
if op.param2 in Amid8:
X = ki8.getGroup(op.param1)
X.preventJoinByTicket = False
ki8.updateGroup(X)
Ti = ki8.reissueGroupTicket(op.param1)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
ki8.updateGroup(X)
Ti = ki8.reissueGroupTicket(op.param1)
if op.param3 in Amid8:
if op.param2 in Amid9:
X = ki9.getGroup(op.param1)
X.preventJoinByTicket = False
ki9.updateGroup(X)
Ti = ki9.reissueGroupTicket(op.param1)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
ki9.updateGroup(X)
Ti = ki9.reissueGroupTicket(op.param1)
if op.param3 in Amid9:
if op.param2 in Amid10:
X = ki10.getGroup(op.param1)
X.preventJoinByTicket = False
ki7.updateGroup(X)
Ti = ki10.reissueGroupTicket(op.param1)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
ki10.updateGroup(X)
Ti = ki10.reissueGroupTicket(op.param1)
if op.param3 in Amid10:
if op.param2 in Amid1:
X = ki.getGroup(op.param1)
X.preventJoinByTicket = False
ki.updateGroup(X)
Ti = ki.reissueGroupTicket(op.param1)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
ki.updateGroup(X)
Ti = ki.reissueGroupTicket(op.param1)
#===========================================
if op.type == 32:
if not op.param2 in Bots:
if wait["protectionOn"] == True:
try:
klist=[ki1,ki2,ki3,ki4,ki5,ki6,ki7,ki8,ki9,ki10]
kicker = random.choice(klist)
G = kicker.getGroup(op.param1)
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1, [op.param3])
except Exception, e:
print e
if op.type == 13:
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
cl.sendText(op.param1, "Your invitation was declined\n\n[SELFBOT PHET HACK BOT]]\n\nhttp://line.me/ti/p/09T2waRE7l")
else:
cl.acceptGroupInvitation(op.param1)
cl.sendText(op.param1, "Your invitation was declined\n\n[SELFBOT PHET HACK BOT]]\n\nhttp://line.me/ti/p/09T2waRE7l")
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
cl.cancelGroupInvitation(op.param1, matched_list)
if Amid1 in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
ki1.rejectGroupInvitation(op.param1)
else:
ki1.acceptGroupInvitation(op.param1)
else:
ki1.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
ki1.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
ki1.cancelGroupInvitation(op.param1, matched_list)
if Amid2 in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
ki2.rejectGroupInvitation(op.param1)
else:
ki2.acceptGroupInvitation(op.param1)
else:
ki2.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
ki2.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
ki2.cancelGroupInvitation(op.param1, matched_list)
if op.type == 11:
if not op.param2 in Bots:
if wait["qr"] == True:
try:
klist=[ki1,ki2,ki3,ki4,ki5,ki6,ki7,ki8,ki9,ki10]
kicker = random.choice(klist)
G = kicker.getGroup(op.param1)
G.preventJoinByTicket = True
kicker.updateGroup(G)
except Exception, e:
print e
if op.type == 11:
if not op.param2 in Bots:
if wait["protectionOn"] == True:
try:
klist=[ki1,ki2,ki3,ki4,ki5,ki6,ki7,ki8,ki9,ki10]
kicker = random.choice(klist)
G = kicker.getGroup(op.param1)
G.preventJoinByTicket = True
kicker.updateGroup(G)
kicker.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = True
kicker.updateGroup(G)
except Exception, e:
print e
if op.type == 13:
G = cl.getGroup(op.param1)
I = G.creator
if not op.param2 in Bots:
if wait["protectionOn"] == True:
klist=[ki1,ki2,ki3,ki4,ki5,ki6,ki7,ki8,ki9,ki10]
kicker = random.choice(klist)
G = kicker.getGroup(op.param1)
if G is not None:
gInviMids = [contact.mid for contact in G.invitee]
kicker.cancelGroupInvitation(op.param1, gInviMids)
if op.type == 19:
if not op.param2 in Bots:
try:
gs = ki1.getGroup(op.param1)
gs = ki2.getGroup(op.param1)
targets = [op.param2]
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
pass
except Exception, e:
print e
if not op.param2 in Bots:
if wait["Backup"] == True:
try:
random.choice(KAC).inviteIntoGroup(op.param1, [op.param3])
except Exception, e:
print e
if not op.param2 in Bots:
if wait["protectionOn"] == True:
try:
klist=[ki1,ki2,ki3,ki4,ki5,ki6,ki7,ki8,ki9,ki10]
kicker = random.choice(klist)
G = kicker.getGroup(op.param1)
G.preventJoinByTicket = False
kicker.updateGroup(G)
invsend = 0
Ticket = kicker.reissueGroupTicket(op.param1)
kl1.acceptGroupInvitationByTicket(op.param1,Ticket)
time.sleep(0.1)
X = kicker.getGroup(op.param1)
X.preventJoinByTicket = True
kl1.kickoutFromGroup(op.param1,[op.param2])
kicker.kickoutFromGroup(op.param1,[op.param2])
kl1.leaveGroup(op.param1)
kicker.updateGroup(X)
except Exception, e:
print e
if not op.param2 in Bots:
try:
gs = ki1.getGroup(op.param1)
gs = ki2.getGroup(op.param1)
targets = [op.param2]
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
pass
except Exception, e:
print e
if not op.param2 in Bots:
if wait["Backup"] == True:
try:
random.choice(KAC).inviteIntoGroup(op.param1, [op.param3])
except Exception, e:
print e
if op.type == 19:
    # OPERATION 19 (second pass): one of OUR accounts is the kicked member
    # (op.param3 contains the removed mid).  Recovery procedure, cloned once
    # per account: counter-kick the offender, reopen the group's join ticket
    # from a surviving account, re-join every bot via the ticket, close the
    # ticket, and blacklist the offender unless whitelisted.
    # NOTE(review): each clone differs only in which account leads the
    # recovery; ki1 accepts the ticket twice in every clone.
    if mid in op.param3:
        # Main account (cl) was kicked; ki1 leads the recovery.
        if op.param2 in Bots:
            pass
        try:
            ki1.kickoutFromGroup(op.param1,[op.param2])
        except:
            try:
                random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
            except:
                print ("client Kick regulation or Because it does not exist in the group、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
                # Blacklist unless whitelisted; the first `if ... pass` is a
                # no-op — the else binds to the whitelist check only.
                if op.param2 in wait["blacklist"]:
                    pass
                if op.param2 in wait["whitelist"]:
                    pass
                else:
                    wait["blacklist"][op.param2] = True
        G = ki1.getGroup(op.param1)
        G.preventJoinByTicket = False
        ki1.updateGroup(G)
        Ti = ki1.reissueGroupTicket(op.param1)
        cl.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki1.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki1.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki2.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki3.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki4.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki5.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki6.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki7.acceptGroupInvitationByTicket(op.param1,Ti)
        ki8.acceptGroupInvitationByTicket(op.param1,Ti)
        ki9.acceptGroupInvitationByTicket(op.param1,Ti)
        ki10.acceptGroupInvitationByTicket(op.param1,Ti)
        X = cl.getGroup(op.param1)
        X.preventJoinByTicket = True
        cl.updateGroup(X)
        Ti = cl.reissueGroupTicket(op.param1)
        if op.param2 in wait["blacklist"]:
            pass
        if op.param2 in wait["whitelist"]:
            pass
        else:
            wait["blacklist"][op.param2] = True
    if Amid1 in op.param3:
        # ki1 was kicked; ki2 leads the recovery.
        if op.param2 in Bots:
            pass
        try:
            ki2.kickoutFromGroup(op.param1,[op.param2])
        except:
            try:
                random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
            except:
                print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
                if op.param2 in wait["blacklist"]:
                    pass
                if op.param2 in wait["whitelist"]:
                    pass
                else:
                    wait["blacklist"][op.param2] = True
        X = ki2.getGroup(op.param1)
        X.preventJoinByTicket = False
        ki2.updateGroup(X)
        Ti = ki2.reissueGroupTicket(op.param1)
        cl.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki1.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki1.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki2.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki3.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki4.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki5.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki6.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki7.acceptGroupInvitationByTicket(op.param1,Ti)
        ki8.acceptGroupInvitationByTicket(op.param1,Ti)
        ki9.acceptGroupInvitationByTicket(op.param1,Ti)
        ki10.acceptGroupInvitationByTicket(op.param1,Ti)
        X = ki1.getGroup(op.param1)
        X.preventJoinByTicket = True
        ki1.updateGroup(X)
        Ticket = ki1.reissueGroupTicket(op.param1)
        if op.param2 in wait["blacklist"]:
            pass
        if op.param2 in wait["whitelist"]:
            pass
        else:
            wait["blacklist"][op.param2] = True
    if Amid2 in op.param3:
        # ki2 was kicked; ki3 leads the recovery.
        if op.param2 in Bots:
            pass
        try:
            ki3.kickoutFromGroup(op.param1,[op.param2])
        except:
            try:
                random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
            except:
                print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
                if op.param2 in wait["blacklist"]:
                    pass
                if op.param2 in wait["whitelist"]:
                    pass
                else:
                    wait["blacklist"][op.param2] = True
        X = ki3.getGroup(op.param1)
        X.preventJoinByTicket = False
        ki3.updateGroup(X)
        Ti = ki3.reissueGroupTicket(op.param1)
        cl.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki1.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki1.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki2.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki3.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki4.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki5.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki6.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki7.acceptGroupInvitationByTicket(op.param1,Ti)
        ki8.acceptGroupInvitationByTicket(op.param1,Ti)
        ki9.acceptGroupInvitationByTicket(op.param1,Ti)
        ki10.acceptGroupInvitationByTicket(op.param1,Ti)
        X = ki2.getGroup(op.param1)
        X.preventJoinByTicket = True
        ki2.updateGroup(X)
        Ticket = ki2.reissueGroupTicket(op.param1)
        if op.param2 in wait["blacklist"]:
            pass
        if op.param2 in wait["whitelist"]:
            pass
        else:
            wait["blacklist"][op.param2] = True
    if Amid3 in op.param3:
        # ki3 was kicked; ki4 leads the recovery.
        if op.param2 in Bots:
            pass
        try:
            ki4.kickoutFromGroup(op.param1,[op.param2])
        except:
            try:
                random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
            except:
                print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
                if op.param2 in wait["blacklist"]:
                    pass
                if op.param2 in wait["whitelist"]:
                    pass
                else:
                    wait["blacklist"][op.param2] = True
        X = ki4.getGroup(op.param1)
        X.preventJoinByTicket = False
        ki4.updateGroup(X)
        Ti = ki4.reissueGroupTicket(op.param1)
        cl.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki1.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki1.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki2.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki3.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki4.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki5.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki6.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki7.acceptGroupInvitationByTicket(op.param1,Ti)
        ki8.acceptGroupInvitationByTicket(op.param1,Ti)
        ki9.acceptGroupInvitationByTicket(op.param1,Ti)
        ki10.acceptGroupInvitationByTicket(op.param1,Ti)
        X = ki3.getGroup(op.param1)
        X.preventJoinByTicket = True
        ki3.updateGroup(X)
        Ticket = ki3.reissueGroupTicket(op.param1)
        if op.param2 in wait["blacklist"]:
            pass
        if op.param2 in wait["whitelist"]:
            pass
        else:
            wait["blacklist"][op.param2] = True
    if Amid4 in op.param3:
        # ki4 was kicked; ki5 leads the recovery.
        if op.param2 in Bots:
            pass
        try:
            ki5.kickoutFromGroup(op.param1,[op.param2])
        except:
            try:
                random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
            except:
                print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
                if op.param2 in wait["blacklist"]:
                    pass
                if op.param2 in wait["whitelist"]:
                    pass
                else:
                    wait["blacklist"][op.param2] = True
        X = ki5.getGroup(op.param1)
        X.preventJoinByTicket = False
        ki5.updateGroup(X)
        Ti = ki5.reissueGroupTicket(op.param1)
        cl.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki1.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki1.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki2.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki3.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki4.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki5.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki6.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki7.acceptGroupInvitationByTicket(op.param1,Ti)
        ki8.acceptGroupInvitationByTicket(op.param1,Ti)
        ki9.acceptGroupInvitationByTicket(op.param1,Ti)
        ki10.acceptGroupInvitationByTicket(op.param1,Ti)
        X = ki4.getGroup(op.param1)
        X.preventJoinByTicket = True
        ki4.updateGroup(X)
        Ticket = ki4.reissueGroupTicket(op.param1)
        if op.param2 in wait["blacklist"]:
            pass
        if op.param2 in wait["whitelist"]:
            pass
        else:
            wait["blacklist"][op.param2] = True
    if Amid5 in op.param3:
        # ki5 was kicked; ki6 leads the recovery.
        if op.param2 in Bots:
            pass
        try:
            ki6.kickoutFromGroup(op.param1,[op.param2])
        except:
            try:
                random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
            except:
                print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
                if op.param2 in wait["blacklist"]:
                    pass
                if op.param2 in wait["whitelist"]:
                    pass
                else:
                    wait["blacklist"][op.param2] = True
        X = ki6.getGroup(op.param1)
        X.preventJoinByTicket = False
        ki6.updateGroup(X)
        Ti = ki6.reissueGroupTicket(op.param1)
        cl.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki1.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki1.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki2.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki3.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki4.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki5.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki6.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki7.acceptGroupInvitationByTicket(op.param1,Ti)
        ki8.acceptGroupInvitationByTicket(op.param1,Ti)
        ki9.acceptGroupInvitationByTicket(op.param1,Ti)
        ki10.acceptGroupInvitationByTicket(op.param1,Ti)
        X = ki5.getGroup(op.param1)
        X.preventJoinByTicket = True
        ki5.updateGroup(X)
        Ticket = ki5.reissueGroupTicket(op.param1)
        if op.param2 in wait["blacklist"]:
            pass
        if op.param2 in wait["whitelist"]:
            pass
        else:
            wait["blacklist"][op.param2] = True
    if Amid6 in op.param3:
        # ki6 was kicked; ki7 leads the recovery.
        if op.param2 in Bots:
            pass
        try:
            ki7.kickoutFromGroup(op.param1,[op.param2])
        except:
            try:
                random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
            except:
                print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
                if op.param2 in wait["blacklist"]:
                    pass
                if op.param2 in wait["whitelist"]:
                    pass
                else:
                    wait["blacklist"][op.param2] = True
        X = ki7.getGroup(op.param1)
        X.preventJoinByTicket = False
        ki7.updateGroup(X)
        Ti = ki7.reissueGroupTicket(op.param1)
        cl.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki1.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki1.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki2.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki3.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki4.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki5.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki6.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki7.acceptGroupInvitationByTicket(op.param1,Ti)
        ki8.acceptGroupInvitationByTicket(op.param1,Ti)
        ki9.acceptGroupInvitationByTicket(op.param1,Ti)
        ki10.acceptGroupInvitationByTicket(op.param1,Ti)
        X = ki6.getGroup(op.param1)
        X.preventJoinByTicket = True
        ki6.updateGroup(X)
        Ticket = ki6.reissueGroupTicket(op.param1)
        if op.param2 in wait["blacklist"]:
            pass
        if op.param2 in wait["whitelist"]:
            pass
        else:
            wait["blacklist"][op.param2] = True
    if Amid7 in op.param3:
        # ki7 was kicked; ki8 leads the recovery.
        if op.param2 in Bots:
            pass
        try:
            ki8.kickoutFromGroup(op.param1,[op.param2])
        except:
            try:
                random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
            except:
                print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
                if op.param2 in wait["blacklist"]:
                    pass
                if op.param2 in wait["whitelist"]:
                    pass
                else:
                    wait["blacklist"][op.param2] = True
        X = ki8.getGroup(op.param1)
        X.preventJoinByTicket = False
        ki8.updateGroup(X)
        Ti = ki8.reissueGroupTicket(op.param1)
        cl.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki1.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki1.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki2.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki3.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki4.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki5.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki6.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki7.acceptGroupInvitationByTicket(op.param1,Ti)
        ki8.acceptGroupInvitationByTicket(op.param1,Ti)
        ki9.acceptGroupInvitationByTicket(op.param1,Ti)
        ki10.acceptGroupInvitationByTicket(op.param1,Ti)
        X = ki7.getGroup(op.param1)
        X.preventJoinByTicket = True
        ki7.updateGroup(X)
        Ticket = ki7.reissueGroupTicket(op.param1)
        if op.param2 in wait["blacklist"]:
            pass
        if op.param2 in wait["whitelist"]:
            pass
        else:
            wait["blacklist"][op.param2] = True
    if Amid8 in op.param3:
        # ki8 was kicked; ki9 leads the recovery.
        if op.param2 in Bots:
            pass
        try:
            ki9.kickoutFromGroup(op.param1,[op.param2])
        except:
            try:
                random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
            except:
                print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
                if op.param2 in wait["blacklist"]:
                    pass
                if op.param2 in wait["whitelist"]:
                    pass
                else:
                    wait["blacklist"][op.param2] = True
        X = ki9.getGroup(op.param1)
        X.preventJoinByTicket = False
        ki9.updateGroup(X)
        Ti = ki9.reissueGroupTicket(op.param1)
        cl.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki1.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki1.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki2.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki3.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki4.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki5.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki6.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki7.acceptGroupInvitationByTicket(op.param1,Ti)
        ki8.acceptGroupInvitationByTicket(op.param1,Ti)
        ki9.acceptGroupInvitationByTicket(op.param1,Ti)
        ki10.acceptGroupInvitationByTicket(op.param1,Ti)
        X = ki8.getGroup(op.param1)
        X.preventJoinByTicket = True
        ki8.updateGroup(X)
        Ticket = ki8.reissueGroupTicket(op.param1)
        if op.param2 in wait["blacklist"]:
            pass
        if op.param2 in wait["whitelist"]:
            pass
        else:
            wait["blacklist"][op.param2] = True
    if Amid9 in op.param3:
        # ki9 was kicked; ki10 leads the recovery.
        if op.param2 in Bots:
            pass
        try:
            ki10.kickoutFromGroup(op.param1,[op.param2])
        except:
            try:
                random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
            except:
                print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
                if op.param2 in wait["blacklist"]:
                    pass
                if op.param2 in wait["whitelist"]:
                    pass
                else:
                    wait["blacklist"][op.param2] = True
        X = ki10.getGroup(op.param1)
        X.preventJoinByTicket = False
        ki10.updateGroup(X)
        Ti = ki10.reissueGroupTicket(op.param1)
        cl.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki1.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki1.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki2.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki3.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki4.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki5.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki6.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki7.acceptGroupInvitationByTicket(op.param1,Ti)
        ki8.acceptGroupInvitationByTicket(op.param1,Ti)
        ki9.acceptGroupInvitationByTicket(op.param1,Ti)
        ki10.acceptGroupInvitationByTicket(op.param1,Ti)
        X = ki9.getGroup(op.param1)
        X.preventJoinByTicket = True
        ki9.updateGroup(X)
        Ticket = ki9.reissueGroupTicket(op.param1)
        if op.param2 in wait["blacklist"]:
            pass
        if op.param2 in wait["whitelist"]:
            pass
        else:
            wait["blacklist"][op.param2] = True
    if Amid10 in op.param3:
        # ki10 was kicked; ki1 leads the recovery (wraps around).
        if op.param2 in Bots:
            pass
        try:
            ki1.kickoutFromGroup(op.param1,[op.param2])
        except:
            try:
                random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
            except:
                print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
                if op.param2 in wait["blacklist"]:
                    pass
                if op.param2 in wait["whitelist"]:
                    pass
                else:
                    wait["blacklist"][op.param2] = True
        X = ki1.getGroup(op.param1)
        X.preventJoinByTicket = False
        ki1.updateGroup(X)
        Ti = ki1.reissueGroupTicket(op.param1)
        cl.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki1.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki1.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki2.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki3.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki4.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki5.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki6.acceptGroupInvitationByTicket(op.param1,Ti)
        time.sleep(0.01)
        ki7.acceptGroupInvitationByTicket(op.param1,Ti)
        ki8.acceptGroupInvitationByTicket(op.param1,Ti)
        ki9.acceptGroupInvitationByTicket(op.param1,Ti)
        ki10.acceptGroupInvitationByTicket(op.param1,Ti)
        X = ki10.getGroup(op.param1)
        X.preventJoinByTicket = True
        ki10.updateGroup(X)
        Ticket = ki10.reissueGroupTicket(op.param1)
        if op.param2 in wait["blacklist"]:
            pass
        if op.param2 in wait["whitelist"]:
            pass
        else:
            wait["blacklist"][op.param2] = True
if op.type == 13:
    # OPERATION 13: group invitation received.  If we (mid) are among the
    # invitees, auto-accept when "pautoJoin" is enabled, otherwise reject.
    if mid in op.param3:
        if wait["pautoJoin"] == True:
            cl.acceptGroupInvitation(op.param1)
        else:
            cl.rejectGroupInvitation(op.param1)
if op.type == 22:
    # OPERATION 22: invited into a chat room — leave it when "leaveRoom" is set.
    if wait["leaveRoom"] == True:
        cl.leaveRoom(op.param1)
if op.type == 24:
    # OPERATION 24: room event (same auto-leave behaviour as type 22).
    if wait["leaveRoom"] == True:
        cl.leaveRoom(op.param1)
if op.type == 26:
    # OPERATION 26: message received (first pass).
    msg = op.message
    if msg.toType == 0:
        # Direct chat — reply to the sender.
        msg.to = msg.from_
    if msg.from_ == mid:
        # Command sent from our own account:
        # "join:<groupId>:<ticket>" joins a group by ticket and then
        # closes the ticket.
        if "join:" in msg.text:
            list_ = msg.text.split(":")
            try:
                cl.acceptGroupInvitationByTicket(list_[1],list_[2])
                G = cl.getGroup(list_[1])
                G.preventJoinByTicket = True
                cl.updateGroup(G)
            except:
                cl.sendText(msg.to, "error")
    if msg.toType == 1:
        # Message in a room — auto-leave when enabled.
        if wait["leaveRoom"] == True:
            cl.leaveRoom(msg.to)
    if msg.contentType == 16:
        # Timeline post share — auto-like it.  The slices extract the post
        # author mid and post id from the postEndUrl.
        url = msg.contentMetadata["postEndUrl"]
        cl.like(url[25:58], url[66:], likeType=1001)
#-----------------------------------------------
#if op.type == 17:
# group = cl.getGroup(op.param1)
# cb = Message()
# cb.to = op.param1
# cb.text = cl.getContact(op.param2).displayName +"\n🌟ยินดีต้อนรับเข้าสู่🌟\n👉"+group.name
# cl.sendMessage(cb)
#if op.type == 15:
# group = cl.getGroup(op.param1)
# cb = Message()
# cb.to = op.param1
# cb.text = cl.getContact(op.param2).displayName + "\n😭😭ไปแล้วหรอคิดถึงก็กลับมา\n"+group.name+"ใหม่นะ😢"
# cl.sendMessage(cb)
#------------------------------------------------------------------------------------
if op.type == 26:
    # OPERATION 26 (second pass): chatbot relay, mention detection,
    # contact "steal", and auto-read.
    msg = op.message
    if msg.to in settings["simiSimi"]:
        # SimiSimi mode: forward the text to an external chatbot API and
        # relay its reply.
        if settings["simiSimi"][msg.to] == True:
            if msg.text is not None:
                text = msg.text
                r = requests.get("http://api.ntcorp.us/chatbot/v1/?text=" + text.replace(" ","+") + "&key=beta1.nt")
                data = r.text
                data = json.loads(data)
                if data['status'] == 200:
                    if data['result']['result'] == 100:
                        cl.sendText(msg.to, "[ChatBOT] " + data['result']['response'].encode('utf-8'))
    # NOTE(review): `'MENTION' in msg.contentMetadata.keys() != None` is a
    # chained comparison — (membership test) and (keys() != None); the second
    # half is always true, so this works only by accident.
    if 'MENTION' in msg.contentMetadata.keys() != None:
        if wait["detectMention"] == True:
            # Reply when any of our bot accounts is @-mentioned.
            contact = cl.getContact(msg.from_)
            cName = contact.displayName
            balas = [""]
            ret_ = "เรียกทำไม เดี๋ยวมา " + random.choice(balas)
            name = re.findall(r'@(\w+)', msg.text)
            mention = ast.literal_eval(msg.contentMetadata['MENTION'])
            mentionees = mention['MENTIONEES']
            for mention in mentionees:
                if mention['M'] in Bots:
                    cl.sendText(msg.to,ret_)
                    break
    if 'MENTION' in msg.contentMetadata.keys() != None:
        if wait["kickMention"] == True:
            # Kick anyone who @-mentions a bot account, after a random reply.
            contact = cl.getContact(msg.from_)
            cName = contact.displayName
            balas = ["Dont Tag Me!! Im Busy",cName + " Ngapain Ngetag?",cName + " Nggak Usah Tag-Tag! Kalo Penting Langsung Pc Aja","-_-","Alin lagi off", cName + " Kenapa Tag saya?","SPAM PC aja " + cName, "Jangan Suka Tag gua " + cName, "Kamu siapa " + cName + "?", "Ada Perlu apa " + cName + "?","Tenggelamkan tuh yang suka tag pake BOT","Tersummon -_-"]
            ret_ = "[Auto Respond] " + random.choice(balas)
            name = re.findall(r'@(\w+)', msg.text)
            mention = ast.literal_eval(msg.contentMetadata['MENTION'])
            mentionees = mention['MENTIONEES']
            for mention in mentionees:
                if mention['M'] in Bots:
                    cl.sendText(msg.to,ret_)
                    cl.kickoutFromGroup(msg.to,[msg.from_])
                    break
    if msg.contentType == 13:
        # "Steal" mode: a shared contact card triggers dumping that user's
        # profile (name, mid, bio, picture, cover) into the chat.
        if wait["steal"] == True:
            _name = msg.contentMetadata["displayName"]
            copy = msg.contentMetadata["mid"]
            groups = cl.getGroup(msg.to)
            pending = groups.invitee
            targets = []
            for s in groups.members:
                if _name in s.displayName:
                    print "[Target] Stealed"
                    break
                else:
                    targets.append(copy)
            if targets == []:
                pass
            else:
                for target in targets:
                    try:
                        cl.findAndAddContactsByMid(target)
                        contact = cl.getContact(target)
                        cu = cl.channel.getCover(target)
                        path = str(cu)
                        image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
                        cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + msg.contentMetadata["mid"] + "\n\nBio :\n" + contact.statusMessage)
                        cl.sendText(msg.to,"Profile Picture " + contact.displayName)
                        cl.sendImageWithUrl(msg.to,image)
                        cl.sendText(msg.to,"Cover " + contact.displayName)
                        cl.sendImageWithUrl(msg.to,path)
                        wait["steal"] = False
                        break
                    except:
                        pass
    if wait["alwayRead"] == True:
        # Auto-mark every incoming message as read.
        if msg.toType == 0:
            cl.sendChatChecked(msg.from_,msg.id)
        else:
            cl.sendChatChecked(msg.to,msg.id)
if op.type == 25:
    # OPERATION 25: message sent by our own account — first command dispatcher.
    msg = op.message
    if msg.contentType == 13:
        # A contact card was sent while one of the pending-action flags is
        # set; the card's mid is the action's target.
        if wait["wblack"] == True:
            # Add to comment-blacklist.
            if msg.contentMetadata["mid"] in wait["commentBlack"]:
                cl.sendText(msg.to,"already")
                wait["wblack"] = False
            else:
                wait["commentBlack"][msg.contentMetadata["mid"]] = True
                wait["wblack"] = False
                cl.sendText(msg.to,"decided not to comment")
        elif wait["dblack"] == True:
            # Remove from comment-blacklist.
            if msg.contentMetadata["mid"] in wait["commentBlack"]:
                del wait["commentBlack"][msg.contentMetadata["mid"]]
                cl.sendText(msg.to,"Done deleted")
                wait["dblack"] = False
            else:
                wait["dblack"] = False
                cl.sendText(msg.to,"It is not in the black list")
        elif wait["wblacklist"] == True:
            # Add to kick-blacklist.
            if msg.contentMetadata["mid"] in wait["blacklist"]:
                cl.sendText(msg.to,"Done already")
                wait["wblacklist"] = False
            else:
                wait["blacklist"][msg.contentMetadata["mid"]] = True
                wait["wblacklist"] = False
                cl.sendText(msg.to,"Done done aded")
        elif wait["dblacklist"] == True:
            # Remove from kick-blacklist.
            if msg.contentMetadata["mid"] in wait["blacklist"]:
                del wait["blacklist"][msg.contentMetadata["mid"]]
                cl.sendText(msg.to,"Done deleted")
                wait["dblacklist"] = False
            else:
                wait["dblacklist"] = False
                cl.sendText(msg.to,"It is not in the black list")
        elif wait["contact"] == True:
            # Dump the shared contact's profile details.
            msg.contentType = 0
            cl.sendText(msg.to,msg.contentMetadata["mid"])
            if 'displayName' in msg.contentMetadata:
                contact = cl.getContact(msg.contentMetadata["mid"])
                try:
                    cu = cl.channel.getCover(msg.contentMetadata["mid"])
                except:
                    cu = ""
                cl.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
            else:
                contact = cl.getContact(msg.contentMetadata["mid"])
                try:
                    cu = cl.channel.getCover(msg.contentMetadata["mid"])
                except:
                    cu = ""
                cl.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
    elif msg.contentType == 16:
        # Timeline share: echo the post URL when "timeline" mode is on.
        if wait["timeline"] == True:
            msg.contentType = 0
            if wait["lang"] == "JP":
                msg.text = "post URL\n" + msg.contentMetadata["postEndUrl"]
            else:
                msg.text = "URL→\n" + msg.contentMetadata["postEndUrl"]
            cl.sendText(msg.to,msg.text)
    elif msg.text is None:
        return
    elif msg.text in ["Help","คำสั่ง"]:
        # Help menus (pages 1-4).
        print "\nHelp pick up..."
        if wait["lang"] == "JP":
            cl.sendText(msg.to, helpMessage + "")
        else:
            cl.sendText(msg.to,helpt)
    elif msg.text in ["Help2","คำสั่ง2"]:
        print "\nHelp pick up..."
        if wait["lang"] == "JP":
            cl.sendText(msg.to, helpMessage2 + "")
        else:
            cl.sendText(msg.to,helpt)
    elif msg.text in ["Help3","คำสั่ง3"]:
        print "\nHelp pick up..."
        if wait["lang"] == "JP":
            cl.sendText(msg.to, helpMessage3 + "")
        else:
            cl.sendText(msg.to,helpt)
    elif msg.text in ["Help4","คำสั่ง4"]:
        print "\nHelp pick up..."
        if wait["lang"] == "JP":
            cl.sendText(msg.to, helpMessage4 + "")
        else:
            cl.sendText(msg.to,helpt)
    elif ("Gn:" in msg.text):
        # "Gn:<name>" — rename the current group.
        if msg.toType == 2:
            X = cl.getGroup(msg.to)
            X.name = msg.text.replace("Gn:","")
            cl.updateGroup(X)
        else:
            cl.sendText(msg.to,"It can't be used besides the group.")
    elif "Kick:" in msg.text:
        # "Kick:<mid>" — kick by mid using a random account.
        # NOTE(review): the prefix is replaced with a SPACE, so `midd` keeps
        # a leading blank — verify the API tolerates that.
        midd = msg.text.replace("Kick:"," ")
        klist=[ki7,ki6,ki5,ki1,cl]
        kicker = random.choice(klist)
        kicker.kickoutFromGroup(msg.to,[midd])
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["winvite"] == True:
if msg.from_ == admin:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
cl.sendText(msg.to,"-> " + _name + " was here")
break
elif invite in wait["blacklist"]:
cl.sendText(msg.to,"Sorry, " + _name + " On Blacklist")
cl.sendText(msg.to,"Call my daddy to use command !, \n➡Unban: " + invite)
break
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
cl.inviteIntoGroup(msg.to,[target])
cl.sendText(msg.to,"Done Invite : \n➡" + _name)
wait["winvite"] = False
break
except:
try:
ki1.findAndAddContactsByMid(invite)
ki1.inviteIntoGroup(op.param1,[invite])
wait["winvite"] = False
except:
cl.sendText(msg.to,"Negative, Error detected")
wait["winvite"] = False
break
if msg.contentType == 13:
if wait['ainvite'] == True:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
ki1.sendText(msg.to, _name + " สมาชิกอยู่ในกลุ่มเเล้ว")
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
ki1.findAndAddContactsByMid(target)
ki1.inviteIntoGroup(msg.to,[target])
ki1.sendText(msg.to,"Invite " + _name)
wait['ainvite'] = False
break
except:
ki1.sendText(msg.to,"Error")
wait['ainvite'] = False
break
if msg.contentType == 13:
if wait['binvite'] == True:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
ki2.sendText(msg.to, _name + " สมาชิกอยู่ในกลุ่มเเล้ว")
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
ki2.findAndAddContactsByMid(target)
ki2.inviteIntoGroup(msg.to,[target])
ki2.sendText(msg.to,"Invite " + _name)
wait['binvite'] = False
break
except:
ki2.sendText(msg.to,"Error")
wait['binvite'] = False
break
elif "Contact" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.to}
cl.sendMessage(msg)
elif msg.text.lower() == 'hack bot':
msg.contentType = 13
msg.contentMetadata = {'mid': Amid1}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid2}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid3}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid4}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid5}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid6}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid7}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid8}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid9}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid10}
cl.sendMessage(msg)
elif msg.text.lower() == 'ร่าง':
msg.contentType = 13
msg.contentMetadata = {'mid': Amid1}
ki1.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid2}
ki2.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid3}
ki3.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid4}
ki4.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid5}
ki5.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid6}
ki6.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid7}
ki7.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid8}
ki8.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid9}
ki9.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid10}
ki10.sendMessage(msg)
elif "คท" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
elif "vdo:" in msg.text.lower():
if msg.toType == 2:
query = msg.text.split(":")
try:
if len(query) == 3:
isi = yt(query[2])
hasil = isi[int(query[1])-1]
cl.sendText(msg.to, hasil)
else:
isi = yt(query[1])
cl.sendText(msg.to, isi[0])
except Exception as e:
cl.sendText(msg.to, str(e))
elif 'ยูทูป ' in msg.text:
try:
textToSearch = (msg.text).replace('ยูทูป ', "").strip()
query = urllib.quote(textToSearch)
url = "https://www.youtube.com/results?search_query=" + query
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, "html.parser")
results = soup.find(attrs={'class':'yt-uix-tile-link'})
cl.sendText(msg.to,'https://www.youtube.com' + results['href'])
except:
cl.sendText(msg.to,"Could not find it")
#========================================
elif "Hack3 @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Hack3 @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
cl.sendImageWithURL(msg.to, path)
except:
pass
print "[Command]dp executed"
elif "Hack-mid:" in msg.text:
umid = msg.text.replace("Hack-mid:","")
contact = cl.getContact(umid)
try:
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
except:
image = "https://www.1and1.co.uk/digitalguide/fileadmin/DigitalGuide/Teaser/not-found-t.jpg"
try:
cl.sendImageWithURL(msg.to,image)
except Exception as error:
cl.sendText(msg.to,(error))
pass
elif "Hack2 " in msg.text:
if msg.toType == 2:
msg.contentType = 0
steal0 = msg.text.replace("Hack2 ","")
steal1 = steal0.lstrip()
steal2 = steal1.replace("@","")
steal3 = steal2.rstrip()
_name = steal3
group = cl.getGroup(msg.to)
targets = []
for g in group.members:
if _name == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Gak da orange")
else:
for target in targets:
try:
contact = cl.getContact(target)
try:
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
except:
image = "https://www.1and1.co.uk/digitalguide/fileadmin/DigitalGuide/Teaser/not-found-t.jpg"
try:
cl.sendImageWithURL(msg.to,image)
except Exception as error:
cl.sendText(msg.to,(error))
pass
except:
cl.sendText(msg.to,"Error!")
break
else:
cl.sendText(msg.to,"Tidak bisa dilakukan di luar grup")
#===============================================
elif msg.text in ["55"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "100",
"STKPKGID": "1",
"STKVER": "100" }
ki1.sendMessage(msg)
ki2.sendMessage(msg)
elif msg.text in ["Lol"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "10",
"STKPKGID": "1",
"STKVER": "100" }
ki1.sendMessage(msg)
ki2.sendMessage(msg)
elif "youname " in msg.text.lower():
txt = msg.text.replace("youname ", "")
cl.kedapkedip(msg.to,txt)
print "[Command] Kedapkedip"
elif "Bl " in msg.text:
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Done Banned")
print "[Command] Bannad"
except:
pass
#----------------------------------------------------------------------------
#------------------------------- UNBAN BY TAG -------------------------------
elif "Wl " in msg.text:
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Done Unbanned")
print "[Command] Unbannad"
except:
pass
# elif msg.from_ in mimic["target"] and mimic["status"] == True and mimic["target"][msg.from_] == True:
# text = msg.text
# if text is not None:
# cl.sendText(msg.to,text)
# else:
# if msg.contentType == 7:
# msg.contentType = 7
# msg.text = None
# msg.contentMetadata = {
# "STKID": "6",
# "STKPKGID": "1",
# "STKVER": "100" }
# cl.sendMessage(msg)
# elif msg.contentType == 13:
# msg.contentType = 13
# msg.contentMetadata = {'mid': msg.contentMetadata["mid"]}
# cl.sendMessage(msg)
elif "Mimic:" in msg.text:
if msg.from_ in admin:
cmd = msg.text.replace("Mimic:","")
if cmd == "on":
if mimic["status"] == False:
mimic["status"] = True
cl.sendText(msg.to,"Mimic on\n\nเปิดการเลียนเเบบ")
else:
cl.sendText(msg.to,"Mimic already on\n\nเปิดการเลียนเเบบ")
elif cmd == "off":
if mimic["status"] == True:
mimic["status"] = False
cl.sendText(msg.to,"Mimic off\n\nปิดการเลียนเเบบ")
else:
cl.sendText(msg.to,"Mimic already off\n\nปิดการเลียนเเบบ")
elif "add:" in cmd:
target0 = msg.text.replace("Mimic:add:","")
target1 = target0.lstrip()
target2 = target1.replace("@","")
target3 = target2.rstrip()
_name = target3
gInfo = cl.getGroup(msg.to)
targets = []
for a in gInfo.members:
if _name == a.displayName:
targets.append(a.mid)
if targets == []:
cl.sendText(msg.to,"No targets\n\nเกิดผิดพลาด")
else:
for target in targets:
try:
mimic["target"][target] = True
cl.sendText(msg.to,"Success added target")
cl.sendMessageWithMention(msg.to,target)
break
except:
cl.sendText(msg.to,"โปรเเกรมเลียนเเบบทำงาน")
break
elif "del:" in cmd:
target0 = msg.text.replace("Mimic:del:","")
target1 = target0.lstrip()
target2 = target1.replace("@","")
target3 = target2.rstrip()
_name = target3
gInfo = cl.getGroup(msg.to)
targets = []
for a in gInfo.members:
if _name == a.displayName:
targets.append(a.mid)
if targets == []:
cl.sendText(msg.to,"No targets\n\nเกิดข้อผิดพลาด")
else:
for target in targets:
try:
del mimic["target"][target]
cl.sendText(msg.to,"Success deleted target")
cl.sendMessageWithMention(msg.to,target)
break
except:
cl.sendText(msg.to,"คุณลบการเลียนเเบบผู้ใช้นี้")
break
elif cmd == "list":
if mimic["target"] == {}:
cl.sendText(msg.to,"No target")
else:
lst = "<<List Target>>"
total = len(mimic["target"])
for a in mimic["target"]:
if mimic["target"][a] == True:
stat = "On"
else:
stat = "Off"
lst += "\n-> " + cl.getContact(a).displayName + " | " + stat
cl.sendText(msg.to,lst + "\nTotal: " + total)
#----------------------------------------------------------------------------
elif msg.text.lower() in ["botkill"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
cl.sendText(msg.to,"There was no blacklist user")
return
for jj in matched_list:
ki1.kickoutFromGroup(msg.to,[jj])
pass
elif msg.text.lower() in ["แอดมิน","mee"]:
msg.contentType = 13
adm = 'u0ffe4a5e9e4e06d8f67d5fa50fecf41f'
msg.contentMetadata = {'mid': adm}
cl.sendMessage(msg)
cl.sendText(msg.to,"Add Line https://line.me/ti/p/AUQfKOI4vv")
elif msg.text in ["ของขวัญ","Gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '1'}
msg.text = None
cl.sendMessage(msg)
        #VPS STUFF - VPS NEEDED TO RUN THIS COMMAND :)
        # "vps"/"kernel": admin-only; report the host kernel via `uname -svmo`.
        elif msg.text in ["vps","kernel","Vps"]:
            if msg.from_ in admin:
                # argv list with shell=False default, so no shell-injection risk
                botKernel = subprocess.Popen(["uname","-svmo"], stdout=subprocess.PIPE).communicate()[0]
                cl.sendText(msg.to, botKernel)
                print "[Command]Kernel executed"
            else:
                cl.sendText(msg.to,"Command denied.")
                cl.sendText(msg.to,"Admin permission required.")
                print "[Error]Command denied - Admin permission required"
        # "เช็คแอด": send the group creator's contact card.
        elif msg.text.lower() in ["เช็คแอด","เชคแอด"]:
            try:
                group = cl.getGroup(msg.to)
                GS = group.creator.mid
                M = Message()
                M.to = msg.to
                M.contentType = 13  # contact card
                M.contentMetadata = {'mid': GS}
                cl.sendMessage(M)
            except:
                # Fall back to the first listed member when no creator is set.
                # NOTE(review): if getGroup itself raised, `group` is unbound
                # here and this except block raises NameError — confirm intent.
                W = group.members[0].mid
                M = Message()
                M.to = msg.to
                M.contentType = 13
                M.contentMetadata = {'mid': W}
                cl.sendMessage(M)
                cl.sendText(msg.to,"old user")
        # "ขอเพลง <query>": scrape YouTube search results and reply with the
        # first video link found.
        elif 'ขอเพลง ' in msg.text:
            try:
                textToSearch = (msg.text).replace('ขอเพลง ', "").strip()
                query = urllib.quote(textToSearch)
                url = "https://www.youtube.com/results?search_query=" + query
                response = urllib2.urlopen(url)
                html = response.read()
                soup = BeautifulSoup(html, "html.parser")
                results = soup.find(attrs={'class':'yt-uix-tile-link'})
                cl.sendText(msg.to,'https://www.youtube.com' + results['href'])
            except:
                cl.sendText(msg.to,"Could not find it")
        # "#set": start a read-receipt checkpoint for this chat; "#read"
        # later reports readers relative to this message id.
        elif "#set" in msg.text:
            cl.sendText(msg.to, "Let's see who lazy to type")
            try:
                # drop any stale checkpoint for this chat first
                del wait2['readPoint'][msg.to]
                del wait2['readMember'][msg.to]
            except:
                pass
            wait2['readPoint'][msg.to] = msg.id
            wait2['readMember'][msg.to] = ""
            wait2['setTime'][msg.to] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            wait2['ROM'][msg.to] = {}
            print wait2
elif "#read" in msg.text:
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
print rom
chiya += rom[1] + "\n"
cl.sendText(msg.to, "people who reading%s\n is this\n\n\nDate and time I started it:\n[%s]" % (wait2['readMember'][msg.to],setTime[msg.to]))
else:
cl.sendText(msg.to, "read point not set\nReading point setting you send it it will send an esxisting one")
        # "Myginfoid": list every joined group as "[name]:gid".
        elif msg.text in ["Myginfoid"]:
            gid = cl.getGroupIdsJoined()
            g = ""
            for i in gid:
                g += "[%s]:%s\n" % (cl.getGroup(i).name,i)
            cl.sendText(msg.to,g)
        # "P1 invite": arm helper bot ki to invite the next contact card sent.
        elif msg.text in ["P1 invite","P1 Invite"]:
            wait["ainvite"] = True
            ki.sendText(msg.to,"Send Contact")
        # "P2 invite": arm helper bot kk the same way.
        elif msg.text in ["P2 invite","P2 Invite"]:
            wait["binvite"] = True
            kk.sendText(msg.to,"Send Contact")
        #==================================================
        # "#ประกาศ:<text>": broadcast <text> to every group the bot is in.
        elif "#ประกาศ:" in msg.text:
            bctxt = msg.text.replace("#ประกาศ:", "")
            a = cl.getGroupIdsJoined()
            for manusia in a:
                cl.sendText(manusia, (bctxt))
        # "bann": list the display names of all blocked contacts.
        elif msg.text.lower() == 'bann':
            blockedlist = cl.getBlockedContactIds()
            cl.sendText(msg.to, "Please wait...")
            kontak = cl.getContacts(blockedlist)
            num=1
            msgs="User Blocked List\n"
            for ids in kontak:
                msgs+="\n%i. %s" % (num, ids.displayName)
                num=(num+1)
            msgs+="\n\nTotal %i blocked user(s)" % len(kontak)
            cl.sendText(msg.to, msgs)
        # "#หำ1:<name>": rename helper bot ki (display names max 20 chars).
        elif "#หำ1:" in msg.text:
            string = msg.text.replace("#หำ1:","")
            if len(string.decode('utf-8')) <= 20:
                profile = ki.getProfile()
                profile.displayName = string
                ki.updateProfile(profile)
elif msg.text in ["มาหำ","#Kicker","#kicker","Kicker","kicker","•••","โม่"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki1.acceptGroupInvitationByTicket(msg.to,Ticket)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
ki6.acceptGroupInvitationByTicket(msg.to,Ticket)
ki7.acceptGroupInvitationByTicket(msg.to,Ticket)
ki8.acceptGroupInvitationByTicket(msg.to,Ticket)
ki9.acceptGroupInvitationByTicket(msg.to,Ticket)
ki10.acceptGroupInvitationByTicket(msg.to,Ticket)
ki1.sendText(msg.to,"[Clone Selfbot team]")
ki2.sendText(msg.to,"[Do not think will try.]")
ki3.sendText(msg.to,"[Clone Selfbot team]")
ki1.sendText(msg.to,"Hello " + str(ginfo.name) + "\n\n[By:™ചচ✾ъπ່७✾ざণاعနัю❍ีざန]")
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki1.updateGroup(G)
print "kicker ok"
G.preventJoinByTicket(G)
ki1.updateGroup(G)
elif msg.text in ["บุก"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki1.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki6.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki7.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki8.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki9.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki10.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki1.updateGroup(G)
print "kicker ok"
G.preventJoinByTicket(G)
ki1.updateGroup(G)
        # "ออก"/"Bye"/…: each helper bot says goodbye and leaves the group.
        elif msg.text in ["ออก","บอทออก","Bye","#bye"]:
            if msg.toType == 2:  # group chat only
                ginfo = cl.getGroup(msg.to)
                try:
                    ki1.sendText(msg.to,"Bye~Bye " + str(ginfo.name) + "\n\n[By:™ചচ✾ъπ່७✾ざণاعနัю❍ีざန]")
                    ki1.leaveGroup(msg.to)
                    ki2.sendText(msg.to,"Bye~Bye " + str(ginfo.name) + "\n\n[By:™ചচ✾ъπ່७✾ざণاعနัю❍ีざန]")
                    ki2.leaveGroup(msg.to)
                    ki3.sendText(msg.to,"Bye~Bye " + str(ginfo.name) + "\n\n[By:™ചচ✾ъπ່७✾ざণاعနัю❍ีざန]")
                    ki3.leaveGroup(msg.to)
                    ki4.sendText(msg.to,"Bye~Bye " + str(ginfo.name) + "\n\n[By:™ചচ✾ъπ່७✾ざণاعနัю❍ีざန]")
                    ki4.leaveGroup(msg.to)
                    ki5.sendText(msg.to,"Bye~Bye " + str(ginfo.name) + "\n\n[By:™ചচ✾ъπ່७✾ざণاعနัю❍ีざန]")
                    ki5.leaveGroup(msg.to)
                    ki6.sendText(msg.to,"Bye~Bye " + str(ginfo.name) + "\n\n[By:™ചচ✾ъπ່७✾ざণاعနัю❍ีざန]")
                    ki6.leaveGroup(msg.to)
                    ki7.sendText(msg.to,"Bye~Bye " + str(ginfo.name) + "\n\n[By:™ചচ✾ъπ່७✾ざণاعနัю❍ีざန]")
                    ki7.leaveGroup(msg.to)
                    ki8.sendText(msg.to,"Bye~Bye " + str(ginfo.name) + "\n\n[By:™ചচ✾ъπ່७✾ざণاعနัю❍ีざန]")
                    ki8.leaveGroup(msg.to)
                    ki9.sendText(msg.to,"Bye~Bye " + str(ginfo.name) + "\n\n[By:™ചচ✾ъπ່७✾ざণاعနัю❍ีざန]")
                    ki9.leaveGroup(msg.to)
                    ki10.sendText(msg.to,"Bye~Bye " + str(ginfo.name) + "\n\n[By:™ചচ✾ъπ່७✾ざণاعနัю❍ีざန]")
                    ki10.leaveGroup(msg.to)
                except:
                    # a bot that is not in the group raises; stop quietly
                    pass
        # "#byeall": all helper bots leave silently (no goodbye message).
        elif msg.text.lower() == '#byeall':
            if msg.toType == 2:
                ginfo = cl.getGroup(msg.to)
                try:
                    ki1.leaveGroup(msg.to)
                    ki2.leaveGroup(msg.to)
                    ki3.leaveGroup(msg.to)
                    ki4.leaveGroup(msg.to)
                    ki5.leaveGroup(msg.to)
                    ki6.leaveGroup(msg.to)
                    ki7.leaveGroup(msg.to)
                    ki8.leaveGroup(msg.to)
                    ki9.leaveGroup(msg.to)
                    ki10.leaveGroup(msg.to)
                except:
                    pass
        # "#v10": send the (Thai) command-reference help text.
        elif "#v10" in msg.text:
            cl.sendText(msg.to,"""[SELFBOT PHET HACK BOT]\n\n
Phet Tema Hack Bot
คำสั่งบอท siri
คำนี้เป็นการล็อกห้องสั่งแล้วทุกคนจะทำอะไรไม่ได้นอกจากเจ้าของห้องทำได้คนเดียวเช่น•เปิดลิงค์•เชิญเพื่อน•เปลี่ยนรูปกลุ่ม•เปลี่ยนชื่อกลุ่มไรแบบนี้• บอทจะไม่เตะเเอทมินทุกกรณี
มีตั้งเเต่ชุดบอท 12-37 บอท
ชุดล๊อกห้อง
ล๊อกกันรันสติ๊กเกอร์
Set:StampLimitation:on
ล๊อกชื่อกลุ่ม
Set:changenamelock:on
ล๊อกการเชิญของสมาชิก
Set:blockinvite:on
ล๊อกแอทมินกลุ่ม
Set:ownerlock:on
ล๊อกรูปกลุ่ม
Set:iconlock:on
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
set:changeowner
เปลี่ยนเจ้าของห้องสั่งแล้วส่งคอลแทคคนที่จะเป็นเจ้าของห้องคนต่อไปลงในกลุ่ม
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
set:addblacklist
บัญชีดำแบ็คลิสคนไม่ให้เข้ากลุ่มสั่งแล้วส่งคอลแทคคนที่เราจะแบ็คลิสลงในกลุ่ม
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
set:addwhitelist
บัญชีขาวแก้ดำสั่งแล้วส่งคอลแทคคนที่เราจะแก้แบ๊คลิสลงในกลุ่ม
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
Set:blockinvite:off ปลดล็อกการเชิญ
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
Set:blockinvite:on ล็อกการเชิญ
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
Siri:inviteurl เปิดลิงค์
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
Siri:DenyURLInvite ปิดลิงค์
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
Siri:cancelinvite ยกเลิกค้างเชิญสั่ง2ครั้ง
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
Siri:groupcreator เช็คเจ้าของบ้านตัวจริง
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
Siri:extracreator เช็คเจ้าของบ้านคนสำรอง
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
set:changeextraowner
เพิ่มเจ้าของบ้านคนที2หรือเรียกคนสำรองสั่งแล้วส่งคอลแทคคนที่จะเป็นคนสำรองลงในกลุ่ม
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
Set:turncreator
สลับให้เจ้าของบ้านคนที่2เป็นตัวจริง
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
ดูคนอ่าน
สั่งตั้งค่าก่อนแล้วค่อยสั่งอ่านคน
Setlastpoint ตั้งค่า
Viewlastseen สั่งอ่าน
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
สนใจติดต่อที่
http://line.me/ti/p/fcpea251
By:✾แมวเป้✍Գန້さັএπັஞ➢
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
""")
        #==================================================
        # "Invite": admin-only; arm cl to invite the next contact card sent.
        elif msg.text in ["Invite"]:
            if msg.from_ in admin:
                wait["winvite"] = True
                cl.sendText(msg.to,"send contact")
        # "เชิญ": Thai alias for "Invite".
        elif msg.text in ["เชิญ"]:
            if msg.from_ in admin:
                wait["winvite"] = True
                cl.sendText(msg.to,"send contact")
        # "Invite off": disarm the invite-on-contact mode.
        elif msg.text in ["Invite off"]:
            if msg.from_ in admin:
                wait["winvite"] = False
                cl.sendText(msg.to,"Done..")
        # "Bot1 invite contact": arm helper bot ki1 the same way.
        elif msg.text in ["Bot1 invite contact","1เชิญ"]:
            if msg.from_ in admin:
                wait["ainvite"] = True
                ki1.sendText(msg.to,"send contact")
        # "Bot2 invite contact": arm helper bot ki2 the same way.
        elif msg.text in ["Bot2 invite contact","2เชิญ"]:
            if msg.from_ in admin:
                wait["binvite"] = True
                ki2.sendText(msg.to,"send contact")
        # "Ktc @user": kick each mentioned member, re-invite them, then cancel
        # the invitation (leaves them removed with a withdrawn invite).
        elif ("Ktc " in msg.text):
            targets = []
            key = eval(msg.contentMetadata["MENTION"])
            # NOTE(review): this bare expression only probes that at least one
            # mentionee exists (raises otherwise); its value is discarded.
            key["MENTIONEES"] [0] ["M"]
            for x in key["MENTIONEES"]:
                targets.append(x["M"])
            for target in targets:
                try:
                    cl.kickoutFromGroup(msg.to,[target])
                    cl.inviteIntoGroup(msg.to,[target])
                    cl.cancelGroupInvitation(msg.to,[target])
                except:
                    cl.sendText(msg.to,"Error")
        # "123zzz…<mid>": treat the trailing 33 chars as a mid, add the
        # contact, and invite it into this group.
        elif '123zzz' in msg.text.lower():
            key = msg.text[-33:]
            cl.findAndAddContactsByMid(key)
            cl.inviteIntoGroup(msg.to, [key])
            contact = cl.getContact(key)
        # "ยกเลิก3": cancel every pending group invitation in one call.
        elif msg.text in ["ยกเลิก3"]:
            if msg.toType == 2:
                X = cl.getGroup(msg.to)
                if X.invitee is not None:
                    gInviMids = [contact.mid for contact in X.invitee]
                    cl.cancelGroupInvitation(msg.to, gInviMids)
                else:
                    if wait["lang"] == "JP":
                        cl.sendText(msg.to,"No one is inviting。")
                    else:
                        cl.sendText(msg.to,"Sorry, nobody absent")
            else:
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Can not be used outside the group")
                else:
                    cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["บอทยกเลิก3"]:
if msg.toType == 2:
klist=[ki1,ki2,ki3,ki4,ki5,ki6,ki7]
kicker = random.choice(klist)
G = kicker.getGroup(msg.to)
if G.invitee is not None:
gInviMids = [contact.mid for contact in G.invitee]
kicker.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
kicker.sendText(msg.to,"No one is inviting")
else:
kicker.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
kicker.sendText(msg.to,"Can not be used outside the group")
else:
kicker.sendText(msg.to,"Not for use less than group")
elif msg.text in ["#Link on"]:
if msg.toType == 2:
uye = random.choice(KAC)
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
uye.updateGroup(X)
if wait["lang"] == "JP":
uye.sendText(msg.to,"done")
else:
uye.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
uye.sendText(msg.to,"Can not be used outside the group")
else:
uye.sendText(msg.to,"Not for use less than group")
        # "Link on": open the group's join-by-ticket link (main account).
        elif msg.text in ["Link on"]:
            if msg.toType == 2:
                X = cl.getGroup(msg.to)
                X.preventJoinByTicket = False
                cl.updateGroup(X)
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"done")
                else:
                    cl.sendText(msg.to,"already open")
            else:
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Can not be used outside the group")
                else:
                    cl.sendText(msg.to,"Not for use less than group")
        # "Link off": close the group's join-by-ticket link.
        elif msg.text in ["Link off"]:
            if msg.toType == 2:
                X = cl.getGroup(msg.to)
                X.preventJoinByTicket = True
                cl.updateGroup(X)
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"done")
                else:
                    cl.sendText(msg.to,"already close")
            else:
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Can not be used outside the group")
                else:
                    cl.sendText(msg.to,"Not for use less than group")
        # "ginfo": summarize the group (creator, member/invite counts) and
        # send the creator's contact card.
        elif msg.text.lower() == 'ginfo':
            ginfo = cl.getGroup(msg.to)
            try:
                gCreator = ginfo.creator.displayName
            except:
                gCreator = "Error"
            if wait["lang"] == "JP":
                if ginfo.invitee is None:
                    sinvitee = "0"
                else:
                    sinvitee = str(len(ginfo.invitee))
                msg.contentType = 13  # contact card of the creator
                msg.contentMetadata = {'mid': ginfo.creator.mid}
                cl.sendText(msg.to,"[Nama]\n" + str(ginfo.name) + "\n[Group Id]\n" + msg.to + "\n\n[Group Creator]\n" + gCreator + "\n\nAnggota:" + str(len(ginfo.members)) + "\nInvitation:" + sinvitee + "")
                cl.sendMessage(msg)
        # "!Glist"/"Myginfo": list joined groups with their member counts.
        elif msg.text in ["!Glist","Myginfo"]:
            gs = cl.getGroupIdsJoined()
            L = "☫『 Groups List 』☫\n"
            for i in gs:
                L += "[⭐] %s \n" % (cl.getGroup(i).name + " | [ " + str(len (cl.getGroup(i).members)) + " ]")
            cl.sendText(msg.to, L + "\nTotal Group : [ " + str(len(gs)) +" ]")
        # "Selfbot": send the bot's own contact card and a banner.
        elif msg.text in ["Selfbot"]:
            msg.contentType = 13
            msg.contentMetadata = {'mid': mid}
            cl.sendMessage(msg)
            cl.sendText(msg.to,"[SELFBOT PHET HACK BOT]")
        # "ไอดี": reply with this chat's id.
        elif "ไอดี" == msg.text:
            key = msg.to
            cl.sendText(msg.to, key)
        # "Hack @user": reveal the first mentioned user's mid.
        elif ("Hack " in msg.text):
            key = eval(msg.contentMetadata["MENTION"])
            key1 = key["MENTIONEES"][0]["M"]
            mi = cl.getContact(key1)
            cl.sendText(msg.to,"Mid:" + key1)
        # "Mid:<mid>": send the contact card for the given mid.
        elif "Mid:" in msg.text:
            mmid = msg.text.replace("Mid:","")
            msg.contentType = 13
            msg.contentMetadata = {"mid":mmid}
            cl.sendMessage(msg)
        # "Phet Keyy": send the kicker-command cheat sheet banner.
        elif "Phet Keyy" in msg.text:
            cl.sendText(msg.to,""" [{PHET HACK BOT}] \n\n key Only Kicker \n\n[Kb1 in]\n[1Aditname:]\n[B Cancel]\n[kick @]\n[Ban @]\n[kill]\n[BotChat]\n[Respons]\n[Pb1 Gift]\n[Pb1 bye]\n\n
❦❧〖฿❂Ŧ〗☞ᵀËÄM ທஇລ❂ق B❂T✓
❦❧ ᵀËÄM ℓℓπ้ी૪ B❂T ✓
❦❧ ᵀËÄM ທஇລ❂قB❂T ✓
☠Ҝŋ β☢ȶȶ ƿℓαÿєᴿ☠
✍ Ŧ€₳M ж Ħ₳ʗҜ฿❂Ŧ ✈
Ŧ€₳M ✍ ທஇລ❂قীள้௭ิњ ✈
☢Ŧ€₳M≈ನန้ণএ≈฿❂Ŧ☢
・⋆ ざঝণのঝ ⋆ ・
♤ のю४ণধபӘທ ♤
🇹?? ฿ΘŧŧĽÎη℮Ŧђάίłάήđ 🇹🇭
[By.🐯 हईທຮຮๅજईह 🐯]
[By.β•`BF.บั้ม•`]
[By.Gυ Tєʌм HʌcκBoт]
[By.❦〖Ᵽɧëȶ〗☞ᵀËÄM ທஇລ❂ق B❂T✓]
""")
        # "ยกเลิก": cancel every pending invitation one mid at a time.
        elif msg.text.lower() == 'ยกเลิก':
            if msg.toType == 2:
                group = cl.getGroup(msg.to)
                gMembMids = [contact.mid for contact in group.invitee]
                for _mid in gMembMids:
                    cl.cancelGroupInvitation(msg.to,[_mid])
                cl.sendText(msg.to,"🇹🇭ทำการยกเลิกค้างเชิญหมดเรียบร้อยแล้ว🇹🇭")
        # "ยกเลิก1": same, but helper bot ki1 does the cancelling.
        elif msg.text.lower() == 'ยกเลิก1':
            if msg.toType == 2:
                group = cl.getGroup(msg.to)
                gMembMids = [contact.mid for contact in group.invitee]
                for _mid in gMembMids:
                    ki1.cancelGroupInvitation(msg.to,[_mid])
                ki1.sendText(msg.to,"🇹🇭ทำการยกเลิกค้างเชิญหมดเรียบร้อยแล้ว🇹🇭")
                cl.sendText(msg.to,"🇹🇭ลูกน้องเรายกเลิกให้ทันใจมั๊ย🇹🇭")
        # "Me @<displayName>": send the contact card of each member whose
        # display name matches exactly.
        elif "Me @" in msg.text:
            msg.contentType = 13
            _name = msg.text.replace("Me @","")
            _nametarget = _name.rstrip(' ')
            gs = cl.getGroup(msg.to)
            for g in gs.members:
                if _nametarget == g.displayName:
                    msg.contentMetadata = {'mid': g.mid}
                    cl.sendMessage(msg)
                else:
                    pass
elif "#cb" in msg.text:
nk0 = msg.text.replace("#cb","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"😏")
pass
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"😏")
except:
cl.sendText(msg.to,"😏")
elif "#Banall" in msg.text:
nk0 = msg.text.replace("#Banall","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Target Locked")
except:
cl.sendText(msg.to,"Error")
elif "#Unbanall" in msg.text:
nk0 = msg.text.replace("#Unbanall","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Target Unlocked")
except:
cl.sendText(msg.to,"Error")
        # "mid": reply with the bot account's own mid.
        elif msg.text in ["mid","Mid","มิด"]:
            cl.sendText(msg.to,mid)
        # "กลุ่ม": Thai group-info report (name, gid, creator, picture link,
        # member/invite counts, join-link state).
        elif msg.text == "กลุ่ม":
            if msg.toType == 2:
                ginfo = cl.getGroup(msg.to)
                try:
                    gCreator = ginfo.creator.displayName
                except:
                    gCreator = "ไม่พบผู้สร้างกลุ่ม"
                if wait["lang"] == "JP":
                    if ginfo.invitee is None:
                        sinvitee = "0"
                    else:
                        sinvitee = str(len(ginfo.invitee))
                    if ginfo.preventJoinByTicket == True:
                        u = "[ปิด]"  # join link closed
                    else:
                        u = "[เปิด]"  # join link open
                    cl.sendText(msg.to,"[ชื่อของกลุ่ม]:\n" + str(ginfo.name) + "\n[Gid]:\n" + msg.to + "\n[ผู้สร้างกลุ่ม:]\n" + gCreator + "\n[ลิ้งค์รูปกลุ่ม]:\nhttp://dl.profile.line.naver.jp/0hnKqOolu-MWRMNh1YC39OM3BzPwk7GCAsIll6UGxjbgdlDn4zd1d9UWozOgdjVXI3dFArAGoxb1Ay/" + ginfo.pictureStatus + "\n[จำนวนสมาชิก]:" + str(len(ginfo.members)) + "คน\n[จำนวนค้างเชิญ]:" + sinvitee + "คน\n[สถานะลิ้งค์]:" + u + "URL [By: เพชร ทีมทดลองบอท]")
                else:
                    cl.sendText(msg.to,"Nama Gourp:\n" + str(ginfo.name) + "\nGid:\n" + msg.to + "\nCreator:\n" + gCreator + "\nProfile:\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus)
            else:
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Can not be used outside the group")
                else:
                    cl.sendText(msg.to,"Not for use less than group")
elif "Bot1@@" in msg.text:
group = cl.getGroup(msg.to)
k = len(group.members)//100
for j in xrange(k+1):
msg = Message(to=msg.to)
txt = u''
s=0
d=[]
for i in group.members[j*200 : (j+1)*200]:
d.append({"S":str(s), "E" :str(s+8), "M":i.mid})
s += 9
txt += u'@Krampus\n'
msg.text = txt
msg.contentMetadata = {u'MENTION':json.dumps({"MENTIONEES":d})}
ki1.sendMessage(msg)
        # "Bot?"/"เทส": liveness check — each helper bot replies in turn.
        elif msg.text in ["Bot?","เทส"]:
            ki1.sendText(msg.to,"Bot 1 🇹🇭")
            ki2.sendText(msg.to,"Bot 2 🇹🇭")
            ki3.sendText(msg.to,"Bot 3 🇹🇭")
            ki4.sendText(msg.to,"Bot 4 🇹🇭")
            ki5.sendText(msg.to,"Bot 5 🇹🇭")
            ki6.sendText(msg.to,"Bot 6 🇹🇭")
            ki7.sendText(msg.to,"Bot 7 🇹🇭")
            ki8.sendText(msg.to,"Bot 8 🇹🇭")
            ki9.sendText(msg.to,"Bot 9 🇹🇭")
            ki10.sendText(msg.to,"Bot 10 🇹🇭")
        # test
        # "Phet Say <text>": every helper bot repeats <text>.
        elif "Phet Say " in msg.text:
            bctxt = msg.text.replace("Phet Say ","")
            ki1.sendText(msg.to,(bctxt))
            ki2.sendText(msg.to,(bctxt))
            ki3.sendText(msg.to,(bctxt))
            ki4.sendText(msg.to,(bctxt))
            ki5.sendText(msg.to,(bctxt))
            ki6.sendText(msg.to,(bctxt))
            ki7.sendText(msg.to,(bctxt))
            ki8.sendText(msg.to,(bctxt))
            ki9.sendText(msg.to,(bctxt))
            ki10.sendText(msg.to,(bctxt))
        # "All mid": each helper bot posts its own mid (Amid1..Amid10 are
        # module-level constants defined elsewhere).
        elif "All mid" == msg.text:
            ki1.sendText(msg.to,Amid1)
            ki2.sendText(msg.to,Amid2)
            ki3.sendText(msg.to,Amid3)
            ki4.sendText(msg.to,Amid4)
            ki5.sendText(msg.to,Amid5)
            ki6.sendText(msg.to,Amid6)
            ki7.sendText(msg.to,Amid7)
            ki8.sendText(msg.to,Amid8)
            ki9.sendText(msg.to,Amid9)
            ki10.sendText(msg.to,Amid10)
        # "Protect:on": enable the group-protection flag.
        elif msg.text in ["Protect:on","Protect on","เปิดป้องกัน"]:
            if wait["protectionOn"] == True:
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Already on\n\n"+ datetime.today().strftime('%H:%M:%S'))
                else:
                    cl.sendText(msg.to,"Protection On\n\n"+ datetime.today().strftime('%H:%M:%S'))
            else:
                wait["protectionOn"] = True
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Protection On\n\n"+ datetime.today().strftime('%H:%M:%S'))
                else:
                    cl.sendText(msg.to,"Already on\n\n"+ datetime.today().strftime('%H:%M:%S'))
        # "Qr:off": disable QR/join-link protection.
        elif msg.text in ["Qr:off","Qr off"]:
            if wait["qr"] == False:
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
                else:
                    cl.sendText(msg.to,"Protection QR PRO Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
            else:
                wait["qr"] = False
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Protection QR PRO Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
                else:
                    cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
        # "Qr:on": enable QR/join-link protection.
        elif msg.text in ["Qr:on","Qr on"]:
            if wait["qr"] == True:
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Already on\n\n"+ datetime.today().strftime('%H:%M:%S'))
                else:
                    cl.sendText(msg.to,"Protection QR PRO On\n\n"+ datetime.today().strftime('%H:%M:%S'))
            else:
                wait["qr"] = True
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Protection QR PRO On\n\n"+ datetime.today().strftime('%H:%M:%S'))
                else:
                    cl.sendText(msg.to,"Already on")
        # "Protect:off": disable the group-protection flag.
        elif msg.text in ["Protect:off","Protect off","ปิดป้องกัน"]:
            if wait["protectionOn"] == False:
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
                else:
                    cl.sendText(msg.to,"Protection Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
            else:
                wait["protectionOn"] = False
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Protection Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
                else:
                    cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
        # "Namelock:on": lock this group's name to its current value.
        elif "Namelock:on" in msg.text:
            if msg.to in wait['pname']:
                cl.sendText(msg.to,"Done..")
            else:
                cl.sendText(msg.to,"bone..")
                wait['pname'][msg.to] = True
                # remember the current name so it can be restored on change
                wait['pro_name'][msg.to] = cl.getGroup(msg.to).name
        # "Namelock:off": release the group-name lock.
        elif "Namelock:off" in msg.text:
            if msg.to in wait['pname']:
                cl.sendText(msg.to,"Done..")
                del wait['pname'][msg.to]
            else:
                cl.sendText(msg.to,"bone..")
        # "Blockinvite:on": auto-cancel invitations in this group.
        elif "Blockinvite:on" == msg.text:
            gid = msg.to
            autocancel[gid] = "poni"
            cl.sendText(msg.to,"Done..")
        # "Blockinvite:off": stop auto-cancelling invitations here.
        elif "Blockinvite:off" == msg.text:
            try:
                del autocancel[msg.to]
                cl.sendText(msg.to,"Done..")
            except:
                pass
        # "Cn: <name>": rename the main account (max 20 characters).
        elif "Cn: " in msg.text:
            string = msg.text.replace("Cn: ","")
            if len(string.decode('utf-8')) <= 20:
                profile = cl.getProfile()
                profile.displayName = string
                cl.updateProfile(profile)
                cl.sendText(msg.to,"Name " + string + " Done Bosqu")
        # "invite:on": admin-only; arm cl to invite the next contact card.
        elif msg.text in ["invite:on"]:
            if msg.from_ in admin:
                wait["winvite"] = True
                cl.sendText(msg.to,"send contact")
        # "Mc @user": reveal the first mentioned user's mid.
        elif "Mc " in msg.text:
            key = eval(msg.contentMetadata["MENTION"])
            key1 = key["MENTIONEES"][0]["M"]
            cl.sendText(msg.to,"Mc: " + key1)
        # "Mc: <mid>": every helper bot sends that mid's contact card.
        elif "Mc: " in msg.text:
            mmid = msg.text.replace("Mc: ","")
            msg.contentType = 13
            msg.contentMetadata = {"mid":mmid}
            ki1.sendMessage(msg)
            ki2.sendMessage(msg)
            ki3.sendMessage(msg)
            ki4.sendMessage(msg)
            ki5.sendMessage(msg)
            ki6.sendMessage(msg)
            ki7.sendMessage(msg)
            ki8.sendMessage(msg)
            ki9.sendMessage(msg)
            ki10.sendMessage(msg)
        # "Contact on": enable contact-inspection mode.
        elif msg.text in ["K on","Contact:on","Contact on","K:on","ข้อมูลเปิด"]:
            if wait["contact"] == True:
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Sudah on Bosqu")
                else:
                    cl.sendText(msg.to,"Ok Bosqu")
            else:
                wait["contact"] = True
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"เปิดตรวจสอบข้อมูล")
                else:
                    cl.sendText(msg.to,"Ok Bosqu")
        # "contact v": admin-only; a random helper asks for a contact card.
        elif msg.text in ["contact v"]:
            if msg.from_ in admin:
                wait["winvite"] = True
                random.choice(KAC).sendText(msg.to,"send contact")
        # "Contact off": disable contact-inspection mode.
        elif msg.text in ["K:off","Contact:off","Contact off","K off","ข้อมูลปิด"]:
            if wait["contact"] == False:
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Sudah off Bosqu")
                else:
                    cl.sendText(msg.to,"Ok Bosqu ")
            else:
                wait["contact"] = False
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"ปิดตรวจสอบข้อมูล")
                else:
                    cl.sendText(msg.to,"Ok Bosqu")
        # "Auto join on": auto-accept group invitations.
        elif msg.text in ["Auto join on","Join on","Join:on","Auto join:on","Poin on"]:
            if wait["autoJoin"] == True:
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Sudah on Bosqu")
                else:
                    cl.sendText(msg.to,"Ok Bosqu")
            else:
                wait["autoJoin"] = True
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Sudah on Bosqu")
                else:
                    cl.sendText(msg.to,"Ok Bosqu")
        # "Auto join off": stop auto-accepting group invitations.
        elif msg.text in ["Join off","Auto join off","Auto join:off","Join:off","Poin off"]:
            if wait["autoJoin"] == False:
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Sudah off Bosqu")
                else:
                    cl.sendText(msg.to,"Ok Bosqu")
            else:
                wait["autoJoin"] = False
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Sudah off Bosqu")
                else:
                    cl.sendText(msg.to,"Ok Bosqu")
elif "Gcancel:" in msg.text:
try:
strnum = msg.text.replace("Gcancel:","")
if strnum == "off":
wait["autoCancel"]["on"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invitation refused turned off\nTo turn on please specify the number of people and send")
else:
cl.sendText(msg.to,"关了邀请拒绝。要时开请指定人数发送")
else:
num = int(strnum)
wait["autoCancel"]["on"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,strnum + " The group of people and below decided to automatically refuse invitation")
else:
cl.sendText(msg.to,strnum + "使人以下的小组用自动邀请拒绝")
except:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Value is wrong")
else:
cl.sendText(msg.to,"Bizarre ratings")
        # "Leave on": auto-leave 1:1 chat rooms.
        elif msg.text in ["Leave:on","Auto leave on","Auto leave:on","Leave on","ออกแชท"]:
            if wait["leaveRoom"] == True:
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"already on")
                else:
                    cl.sendText(msg.to,"done")
            else:
                wait["leaveRoom"] = True
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"done")
                else:
                    cl.sendText(msg.to,"要了开。")
        # "Leave off": stop auto-leaving chat rooms.
        elif msg.text in ["Leave:off","Auto leave off","Auto leave:off","Leave off","ไม่ออกแชท"]:
            if wait["leaveRoom"] == False:
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"already off")
                else:
                    cl.sendText(msg.to,"done")
            else:
                wait["leaveRoom"] = False
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"done")
                else:
                    cl.sendText(msg.to,"already")
        # "Share on": enable timeline sharing.
        elif msg.text in ["共有:オン","Share on","Share:on"]:
            if wait["timeline"] == True:
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"already on")
                else:
                    cl.sendText(msg.to,"done")
            else:
                wait["timeline"] = True
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"done")
                else:
                    cl.sendText(msg.to,"要了开。")
        # "Share off": disable timeline sharing.
        elif msg.text in ["共有:オフ","Share off","Share:off"]:
            if wait["timeline"] == False:
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"already off")
                else:
                    cl.sendText(msg.to,"done")
            else:
                wait["timeline"] = False
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"done")
                else:
                    cl.sendText(msg.to,"要了关断。")
        # "Auto like:on": enable auto-liking of timeline posts.
        # NOTE(review): unlike the other toggles in this file these two
        # branches only reply when lang == "JP" and the flag assignment sits
        # under an else whose pairing looks suspect — confirm the intended
        # structure against e.g. the "Com on"/"Com off" handlers.
        elif msg.text in ["Auto like:on","Like on"]:
            if wait["likeOn"] == True:
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Done。")
            else:
                wait["likeOn"] = True
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Already。")
        # "Auto like:off": disable auto-liking of timeline posts.
        elif msg.text in ["Like off","Auto like:off"]:
            if wait["likeOn"] == False:
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Done。")
            else:
                wait["likeOn"] = False
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Already。")
        #========================================
        #========================================
        # "เชคค่า": dump the current on/off state of every feature flag,
        # then send the admin contact card.
        elif msg.text in ["เชคค่า"]:
            print "Setting pick up..."
            md = "By:™✾แมวเป้✍Գန້さັএπັஞ➢\n\n"
            if wait["likeOn"] == True: md+=" Auto like : on \n"
            else:md+=" Auto like : off \n"
            if wait["alwayRead"] == True: md+=" Read : on \n"
            else:md+=" Read : off \n"
            if wait["detectMention"] == True: md+=" Autorespon : on \n"
            else:md+=" Autorespon : off \n"
            if wait["kickMention"] == True: md+=" Autokick: on \n"
            else:md+=" Autokick : off \n"
            if wait["Notifed"] == True: md+=" Notifed : on \n"
            else:md+=" Notifed : off \n"
            if wait["Notifedbot"] == True: md+=" Notifedbot : on \n"
            else:md+=" Notifedbot : off \n"
            if wait["acommentOn"] == True: md+=" Hhx1 : on \n"
            else:md+=" Hhx1 : off \n"
            if wait["bcommentOn"] == True: md+=" Hhx2 : on \n"
            else:md+=" Hhx2 : off \n"
            if wait["ccommentOn"] == True: md+=" Hhx3 : on \n"
            else:md+=" Hhx3 : off \n"
            if wait["Protectcancl"] == True: md+=" Cancel : on \n"
            else:md+=" Cancel : off \n"
            if wait["winvite"] == True: md+=" Invite : on \n"
            else:md+=" Invite : off \n"
            # NOTE(review): wait["pname"] is used as a per-group dict
            # elsewhere, so `== True` here is always False — confirm.
            if wait["pname"] == True: md+=" Namelock : on \n"
            else:md+=" Namelock : off \n"
            if wait["contact"] == True: md+=" Contact : on \n"
            else: md+=" Contact : off \n"
            if wait["autoJoin"] == True: md+=" Auto join : on \n"
            else: md +=" Auto join : off \n"
            if wait["autoCancel"]["on"] == True:md+=" Group cancel :" + str(wait["autoCancel"]["members"]) + " \n"
            else: md+= " Group cancel : off \n"
            if wait["leaveRoom"] == True: md+=" Auto leave : on \n"
            else: md+=" Auto leave : off \n"
            if wait["timeline"] == True: md+=" Share : on \n"
            else:md+=" Share : off \n"
            if wait["clock"] == True: md+=" Clock Name : on \n"
            else:md+=" Clock Name : off \n"
            if wait["autoAdd"] == True: md+=" Auto add : on \n"
            else:md+=" Auto add : off \n"
            if wait["commentOn"] == True: md+=" Comment : on \n"
            else:md+=" Comment : off \n"
            if wait["Backup"] == True: md+=" Backup : on \n"
            else:md+=" Backup : off \n"
            if wait["qr"] == True: md+=" Protect QR : on \n"
            else:md+=" Protect QR : off \n"
            cl.sendText(msg.to,md)
            msg.contentType = 13
            # admsa: admin mid constant defined elsewhere in the file
            msg.contentMetadata = {'mid': admsa}
            cl.sendMessage(msg)
        #========================================
        #------------------------------------------------
        # "Gcreator:inv": admin-only; add and re-invite the group's creator.
        elif msg.text in ["Gcreator:inv","เชิญเเอดมิน"]:
            if msg.from_ in admin:
                ginfo = cl.getGroup(msg.to)
                gCreator = ginfo.creator.mid
                try:
                    cl.findAndAddContactsByMid(gCreator)
                    cl.inviteIntoGroup(msg.to,[gCreator])
                    print "success inv gCreator"
                except:
                    # best effort: creator may already be present/blocked
                    pass
        #-----------------------------------------------
        # "Backup on": re-invite members who are kicked (backup mode).
        elif msg.text in ["Backup:on","Backup on","เปิดการเชิญกลับ"]:
            if wait["Backup"] == True:
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Sudah on Bos\n\n"+ datetime.today().strftime('%H:%M:%S'))
                else:
                    cl.sendText(msg.to,"Backup On\n\n"+ datetime.today().strftime('%H:%M:%S'))
            else:
                wait["Backup"] = True
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Backup On\n\n"+ datetime.today().strftime('%H:%M:%S'))
                else:
                    cl.sendText(msg.to,"Sudah on Bos\n\n"+ datetime.today().strftime('%H:%M:%S'))
        # "Backup off": disable backup mode.
        elif msg.text in ["Backup:off","Backup off","ปิดการเชิญกลับ"]:
            if wait["Backup"] == False:
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Sudah off Bos\n\n"+ datetime.today().strftime('%H:%M:%S'))
                else:
                    cl.sendText(msg.to,"Backup Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
            else:
                wait["Backup"] = False
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Backup Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
                else:
                    cl.sendText(msg.to,"Sudah off Bos\n\n"+ datetime.today().strftime('%H:%M:%S'))
        # "Reject"/"ลบรัน": decline every pending group invitation (spam).
        elif msg.text in ["Reject","ลบรัน"]:
            gid = cl.getGroupIdsInvited()
            for i in gid:
                cl.rejectGroupInvitation(i)
            if wait["lang"] == "JP":
                cl.sendText(msg.to,"Semua Spam Undangan Telah Di Tolak")
            else:
                cl.sendText(msg.to,"拒绝了全部的邀请。")
        # "Y1 rgroup": helper bot ki declines its pending invitations.
        elif msg.text in ["Y1 rgroups","Y1 rgroup"]:
            gid = ki.getGroupIdsInvited()
            for i in gid:
                ki.rejectGroupInvitation(i)
            if wait["lang"] == "JP":
                ki.sendText(msg.to,"Bot All invitations is clean")
            else:
                ki.sendText(msg.to,"拒绝了全部的邀请。")
        # "Add on": auto-add anyone who adds/messages the bot.
        elif msg.text in ["Add:on","Auto add on","Auto add:on","Add on"]:
            if wait["autoAdd"] == True:
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Sudah on Bosqu")
                else:
                    cl.sendText(msg.to,"Ok Bosqu")
            else:
                wait["autoAdd"] = True
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Ok Bosqu")
                else:
                    cl.sendText(msg.to,"Sudah on Bosqu")
        # "Add off": disable auto-add.
        elif msg.text in ["Add:off","Auto add off","Auto add:off","Add off"]:
            if wait["autoAdd"] == False:
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Sudah off Bosqu")
                else:
                    cl.sendText(msg.to,"Ok Bosqu")
            else:
                wait["autoAdd"] = False
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Ok Bosqu")
                else:
                    cl.sendText(msg.to,"Sudah off Bosqu")
        #========================================
        #========================================
        # "Message set:<text>": set the auto-add greeting message.
        elif "Message set:" in msg.text:
            wait["message"] = msg.text.replace("Message set:","")
            cl.sendText(msg.to,"message changed\n\n"+ datetime.today().strftime('%H:%M:%S'))
        # "Add message: <text>": same setter, alternate spelling.
        elif "Add message: " in msg.text:
            wait["message"] = msg.text.replace("Add message: ","")
            if wait["lang"] == "JP":
                cl.sendText(msg.to,"message changed\n\n"+ datetime.today().strftime('%H:%M:%S'))
            else:
                cl.sendText(msg.to,"done。\n\n"+ datetime.today().strftime('%H:%M:%S'))
        # "Message"/"Com": show the current greeting message.
        elif msg.text in ["Message","Com"]:
            if wait["lang"] == "JP":
                cl.sendText(msg.to,"message change to\n\n" + wait["message"])
            else:
                cl.sendText(msg.to,"The automatic appending information is set as follows。\n\n" + wait["message"])
        # "Coms set:<text>": set the auto-comment text (non-empty only).
        elif "Coms set:" in msg.text:
            c = msg.text.replace("Coms set:","")
            if c in [""," ","\n",None]:
                cl.sendText(msg.to,"String that can not be changed")
            else:
                wait["comment"] = c
                cl.sendText(msg.to,"changed\n\n" + c)
        # "Add comment: <text>": same setter, alternate spelling.
        elif "Add comment: " in msg.text:
            c = msg.text.replace("Add comment: ","")
            if c in [""," ","\n",None]:
                cl.sendText(msg.to,"String that can not be changed")
            else:
                wait["comment"] = c
                cl.sendText(msg.to,"changed\n\n" + c)
        # "Com on": enable auto-commenting.
        elif msg.text in ["Com on","Comment:on"]:
            if wait["commentOn"] == True:
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Done")
                else:
                    cl.sendText(msg.to,"Already on")
            else:
                wait["commentOn"] = True
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Done")
                else:
                    cl.sendText(msg.to,"Already on")
        # "Com off": disable auto-commenting.
        elif msg.text in ["Com off","Comment:off"]:
            if wait["commentOn"] == False:
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Done")
                else:
                    cl.sendText(msg.to,"Already off")
            else:
                wait["commentOn"] = False
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Done")
                else:
                    cl.sendText(msg.to,"Already off")
        # "Comment"/"Coms": show the current auto-comment text.
        elif msg.text in ["Comment","Coms"]:
            cl.sendText(msg.to,"message changed to\n\n" + str(wait["comment"]))
        # "Hhx1": show the welcome message template.
        elif msg.text in ["HHX1","Hhx1"]:
            cl.sendText(msg.to,"[เช็คข้อความต้อนรับของคุณ]\n\n" + str(wait["acomment"]))
        # "Hhx2": show the member-left message template.
        elif msg.text in ["HHX2","Hhx2"]:
            cl.sendText(msg.to,"[เช็คข้อความกล่าวถึงคนออกจากกลุ่ม]\n\n" + str(wait["bcomment"]))
        # "Hhx3": show the member-removed message template.
        elif msg.text in ["HHX3","Hhx3"]:
            cl.sendText(msg.to,"[เช็คข้อความกล่าวถึงคนลบสมาชิก]\n\n" + str(wait["ccomment"]))
        # "Hhx1:<text>": set the welcome message template (non-empty only).
        elif "Hhx1:" in msg.text:
            c = msg.text.replace("Hhx1:","")
            if c in [""," ","\n",None]:
                cl.sendText(msg.to,"เกิดข้อผิดพลาด..!!")
            else:
                wait["acomment"] = c
                cl.sendText(msg.to,"➠ ตั้งค่าข้อความต้อนรับ🇹🇭👌\n\n" + c)
        # "Hhx2:<text>": set the member-left template.
        elif "Hhx2:" in msg.text:
            c = msg.text.replace("Hhx2:","")
            if c in [""," ","\n",None]:
                cl.sendText(msg.to,"เกิดข้อผิดพลาด..!!")
            else:
                wait["bcomment"] = c
                cl.sendText(msg.to,"➠ ตั้งค่าข้อความกล่าวถึงคนออกจากกลุ่ม🇹🇭👌\n\n" + c)
        # "Hhx3:<text>": set the member-removed template.
        elif "Hhx3:" in msg.text:
            c = msg.text.replace("Hhx3:","")
            if c in [""," ","\n",None]:
                cl.sendText(msg.to,"เกิดข้อผิดพลาด..!!")
            else:
                wait["ccomment"] = c
                cl.sendText(msg.to,"➠ ตั้งค่าข้อความกล่าวถึงคนลบสมาชิก🇹🇭👌\n\n" + c)
elif msg.text in ["Hhx1 on"]:
if wait["acommentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ เปิดข้อความต้อนรับเเล้ว🇹🇭👌")
else:
cl.sendText(msg.to,"Already on")
else:
wait["acommentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ เปิดข้อความต้อนรับเเล้ว🇹🇭👌")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Hhx2 on"]:
if wait["bcommentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ เปิดข้อความกล่าวถึงคนออกจากกลุ่ม🇹🇭👌")
else:
cl.sendText(msg.to,"Already on")
else:
wait["bcommentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ เปิดข้อความกล่าวถึงคนออกจากกลุ่ม🇹🇭👌")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Hhx3 on"]:
if wait["ccommentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ เปิดข้อความกล่าวถึงคนลบสมาชิก🇹🇭👌")
else:
cl.sendText(msg.to,"Already on")
else:
wait["ccommentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ เปิดข้อความกล่าวถึงคนลบสมาชิก🇹🇭👌")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Hhx1 off"]:
if wait["acommentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ ปิดข้อความต้อนรับเเล้ว🇹🇭👌")
else:
cl.sendText(msg.to,"Already off")
else:
wait["acommentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ ปิดข้อความต้อนรับเเล้ว🇹🇭👌")
else:
cl.sendText(msg.to,"Already off")
elif msg.text in ["Hhx2 off"]:
if wait["bcommentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ ปิดข้อความกล่าวถึงคนออกจากกลุ่ม🇹🇭👌")
else:
cl.sendText(msg.to,"Already off")
else:
wait["bcommentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ ปิดข้อความกล่าวถึงคนออกจากกลุ่ม🇹🇭👌")
else:
cl.sendText(msg.to,"Already off")
elif msg.text in ["Hhx3 off"]:
if wait["ccommentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ ปิดข้อความกล่าวถึงคนลบสมาชิก🇹🇭👌")
else:
cl.sendText(msg.to,"Already off")
else:
wait["ccommentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ ปิดข้อความกล่าวถึงคนลบสมาชิก🇹🇭👌")
else:
cl.sendText(msg.to,"Already off")
elif msg.text in ["Gurl"]:
if msg.toType == 2:
uye = random.choice(KAC)
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
uye.updateGroup(x)
gurl = uye.reissueGroupTicket(msg.to)
uye.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
uye.sendText(msg.to,"Can not be used outside the group")
else:
uye.sendText(msg.to,"Not for use less than group")
elif "Ambil QR: " in msg.text:
if msg.toType == 2:
gid = msg.text.replace("Ambil QR: ","")
gurl = cl.reissueGroupTicket(gid)
cl.sendText(msg.to,"line://ti/g/" + gurl)
else:
cl.sendText(msg.to,"Not for use less than group")
elif "Y1 gurl: " in msg.text:
if msg.toType == 2:
gid = msg.text.replace("Y1 gurl: ","")
x = ki.getGroup(gid)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
ki.updateGroup(x)
gurl = ki.reissueGroupTicket(gid)
ki.sendText(msg.to,"line://ti/g/" + gurl)
else:
ki.sendText(msg.to,"Not for use less than group")
elif "Y2 gurl: " in msg.text:
if msg.toType == 2:
gid = msg.text.replace("Y2 gurl: ","")
x = kk.getGroup(gid)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
kk.updateGroup(x)
gurl = kk.reissueGroupTicket(gid)
kk.sendText(msg.to,"line://ti/g/" + gurl)
else:
kk.sendText(msg.to,"Not for use less than group")
#========================================
elif msg.text in ["Comment bl "]:
wait["wblack"] = True
cl.sendText(msg.to,"add to comment bl")
elif msg.text in ["Comment wl "]:
wait["dblack"] = True
cl.sendText(msg.to,"wl to comment bl")
elif msg.text in ["Comment bl confirm"]:
if wait["commentBlack"] == {}:
cl.sendText(msg.to,"confirmed")
else:
cl.sendText(msg.to,"Blacklist s")
mc = ""
for mi_d in wait["commentBlack"]:
mc += "・" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif msg.text in ["Clock:on","Clock on","Jam on","Jam:on"]:
if wait["clock"] == True:
cl.sendText(msg.to,"already on")
else:
wait["clock"] = True
now2 = datetime.now()
nowT = datetime.strftime(now2,"༺%H:%M༻")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"done")
elif msg.text in ["Clock:off","Clock off","Jam off","Jam:off"]:
if wait["clock"] == False:
cl.sendText(msg.to,"already off")
else:
wait["clock"] = False
cl.sendText(msg.to,"done")
elif "Cc: " in msg.text:
n = msg.text.replace("Cc: ","")
if len(n.decode("utf-8")) > 13:
cl.sendText(msg.to,"changed")
else:
wait["cName"] = n
cl.sendText(msg.to,"Changed to:\n\n" + n)
elif msg.text in ["Up"]:
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"༺%H:%M༻")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"Refresh to update")
else:
cl.sendText(msg.to,"Please turn on the name clock")
#========================================
elif "Hack3 @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Hack3 @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
cl.sendImageWithUrl(msg.to, path)
except:
pass
print "[Command]dp executed"
elif "Hack2mid:" in msg.text:
umid = msg.text.replace("Hack2mid:","")
contact = cl.getContact(umid)
try:
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
except:
image = "https://www.1and1.co.uk/digitalguide/fileadmin/DigitalGuide/Teaser/not-found-t.jpg"
try:
cl.sendImageWithUrl(msg.to,image)
except Exception as error:
cl.sendText(msg.to,(error))
pass
elif "Hack2 " in msg.text:
if msg.toType == 2:
msg.contentType = 0
steal0 = msg.text.replace("Hack2 ","")
steal1 = steal0.lstrip()
steal2 = steal1.replace("@","")
steal3 = steal2.rstrip()
_name = steal3
group = cl.getGroup(msg.to)
targets = []
for g in group.members:
if _name == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Gak da orange")
else:
for target in targets:
try:
contact = cl.getContact(target)
try:
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
except:
image = "https://www.1and1.co.uk/digitalguide/fileadmin/DigitalGuide/Teaser/not-found-t.jpg"
try:
cl.sendImageWithUrl(msg.to,image)
except Exception as error:
cl.sendText(msg.to,(error))
pass
except:
cl.sendText(msg.to,"Error!")
break
else:
cl.sendText(msg.to,"Tidak bisa dilakukan di luar grup")
#===============================================
elif msg.text in ["Sp","sp","Speed","speed"]:
cl.sendText(msg.to, "ประมวลผลความเร็ว....")
start = time.time()
time.sleep(0.001)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif msg.text in ["Bs","bs","bot speed","Bot speed","Bot Speed"]:
ki1.sendText(msg.to, "ประมวลผลความเร็ว....")
start = time.time()
time.sleep(0.001)
elapsed_time = time.time() - start
ki1.sendText(msg.to, "%sseconds" % (elapsed_time))
ki2.sendText(msg.to, "%sseconds" % (elapsed_time))
ki3.sendText(msg.to, "%sseconds" % (elapsed_time))
ki4.sendText(msg.to, "%sseconds" % (elapsed_time))
ki5.sendText(msg.to, "%sseconds" % (elapsed_time))
ki6.sendText(msg.to, "%sseconds" % (elapsed_time))
ki7.sendText(msg.to, "%sseconds" % (elapsed_time))
ki8.sendText(msg.to, "%sseconds" % (elapsed_time))
ki9.sendText(msg.to, "%sseconds" % (elapsed_time))
ki10.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif msg.text in ["Keybot"]:
ki.sendText(msg.to, "[SELFBOT PHET HACK BOT]\n\n❂͜͡☆➣ Namelock on\n❂͜͡☆➣ Namelock off\n❂͜͡☆➣ Blockinvite on\n❂͜͡☆➣ Blockinvite off\n❂͜͡☆➣ Backup on\n❂͜͡☆➣ Backup off\n\n[By.เพชร ทีมทดลองบอท]")
#========================================
elif msg.text in ["Botbb"]:
try:
ki1.updateDisplayPicture(backup.pictureStatus)
ki1.updateProfile(backup)
ki2.updateDisplayPicture(backup.pictureStatus)
ki2.updateProfile(backup)
ki3.updateDisplayPicture(backup.pictureStatus)
ki3.updateProfile(backup)
ki4.updateDisplayPicture(backup.pictureStatus)
ki4.updateProfile(backup)
ki5.updateDisplayPicture(backup.pictureStatus)
ki5.updateProfile(backup)
ki6.updateDisplayPicture(backup.pictureStatus)
ki6.updateProfile(backup)
ki7.updateDisplayPicture(backup.pictureStatus)
ki7.updateProfile(backup)
ki8.updateDisplayPicture(backup.pictureStatus)
ki8.updateProfile(backup)
ki9.updateDisplayPicture(backup.pictureStatus)
ki9.updateProfile(backup)
ki10.updateDisplayPicture(backup.pictureStatus)
ki10.updateProfile(backup)
cl.sendText(msg.to, "Backup Sukses Bosqu")
except Exception as e:
cl.sendText(msg.to, str (e))
elif msg.text in ["คืน"]:
try:
cl.updateDisplayPicture(mybackup.pictureStatus)
cl.updateProfile(mybackup)
cl.sendText(msg.to, "🇹🇭คืนร่างเดิมแล้ว🇹🇭")
except Exception as e:
cl.sendText(msg.to, str (e))
#=================================================
elif msg.text == "#mid on":
cl.sendText(msg.to, "Done..")
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
now2 = datetime.now()
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
wait2['ROM'][msg.to] = {}
print wait2
elif msg.text == "#mid off":
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
print rom
chiya += rom[1] + "\n"
cl.sendText(msg.to, "%s\n\n%s\nReadig point creation:\n [%s]\n" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
else:
cl.sendText(msg.to, "Ketik Lurking dulu dudul Baru bilang result Point.")
#========================================
#-------------------Fungsi spam finish----------------------------
elif "Hackginfo" in msg.text:
if msg.from_ in admin:
group = cl.getGroup(msg.to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
cl.sendImageWithUrl(msg.to,path)
elif "#Turn off bots" in msg.text:
if msg.from_ in admsa:
try:
import sys
sys.exit()
except:
pass
#-----------------------------------------------
elif msg.text in ["Url","ลิ้ง"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
cl.updateGroup(x)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendText(msg.to,"[SELFBO PHET HACK BOT]\n\nline://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Notifed on","เปิดแจ้งเตือน","M on"]:
if msg.from_ in admin:
if wait["Notifed"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Notifed On\n\nเปิดเเจ้งเเตือนของคุณเเล้ว")
else:
cl.sendText(msg.to,"Done\n\nเปิดเเจ้งเเตือนของคุณเเล้ว")
else:
wait["Notifed"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Notifed On\n\nเปิดเเจ้งเเตือนของคุณเเล้ว")
else:
cl.sendText(msg.to,"Done\n\nเปิดเเจ้งเเตือนของคุณเเล้ว")
elif msg.text in ["Notifed off","ปิดแจ้งเตือน","M off"]:
if msg.from_ in admin:
if wait["Notifed"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Notifed Off\n\nปิดเเจ้งเเตือนของคุณเเล้ว")
else:
cl.sendText(msg.to,"Done\n\nปิดเเจ้งเเตือนของคุณเเล้ว")
else:
wait["Notifed"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Notifed Off\n\nปิดเเจ้งเเตือนของคุณเเล้ว")
else:
cl.sendText(msg.to,"Done\n\nปิดเเจ้งเเตือนของคุณเเล้ว")
elif msg.text in ["Notifedbot on","เปิดเเจ้งเตือนบอท","Mbot on"]:
if msg.from_ in admin:
if wait["Notifedbot"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"All bot Notifed On\n\nเปิดเเจ้งเเตือนบอทเเล้ว")
else:
cl.sendText(msg.to,"Done\n\nเปิดเเจ้งเเตือนบอทเเล้ว")
else:
wait["Notifedbot"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"All bot Notifed On\n\nเปิดเเจ้งเเตือนบอทเเล้ว")
else:
cl.sendText(msg.to,"Done\n\nเปิดเเจ้งเเตือนบอทเเล้ว")
elif msg.text in ["Notifedbot off","ปิดแจ้งเตือนบอท","Mbot off"]:
if msg.from_ in admin:
if wait["Notifedbot"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"All bot Notifed Off\n\nปิดเเจ้งเเตือนบอทเเล้ว")
else:
cl.sendText(msg.to,"Done\n\nปิดเเจ้งเเตือนบอทเเล้ว")
else:
wait["Notifedbot"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"All bot Notifed Off\n\nปิดเเจ้งเเตือนบอทเเล้ว")
else:
cl.sendText(msg.to,"Done\n\nปิดเเจ้งเเตือนบอทเเล้ว")
#=================================================
elif "Pea " in msg.text:
if msg.from_ in admin:
txt = msg.text.split(" ")
jmlh = int(txt[2])
teks = msg.text.replace("Pea "+str(txt[1])+" "+str(jmlh)+ " ","")
tulisan = jmlh * (teks+"\n")
#Keke cantik <3
if txt[1] == "on":
if jmlh <= 10000:
for x in range(jmlh):
cl.sendText(msg.to, teks)
else:
cl.sendText(msg.to, "Out of range! ")
elif txt[1] == "off":
if jmlh <= 10000:
cl.sendText(msg.to, tulisan)
else:
cl.sendText(msg.to, "Out of range! ")
#-----------------------------------------------
elif "มิด @" in msg.text:
_name = msg.text.replace("มิด @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
cl.sendText(msg.to, g.mid)
else:
pass
#-------------------------------------------------
elif msg.text in ["เปิดป้องกันหมด","ป้องกันหมด"]:
cl.sendText(msg.to,"Clone SelfBot Team")
cl.sendText(msg.to,"Please wait......")
cl.sendText(msg.to,"Turn on all protection")
cl.sendText(msg.to,"Qr:on")
cl.sendText(msg.to,"Backup:on")
cl.sendText(msg.to,"Read:on")
cl.sendText(msg.to,"Respon:on")
cl.sendText(msg.to,"Responkick:on")
cl.sendText(msg.to,"Protect:on")
cl.sendText(msg.to,"Namelock:on")
cl.sendText(msg.to,"Blockinvite:on")
elif msg.text in ["ปิดป้องกันหมด","ไม่ป้องกันเลย"]:
cl.sendText(msg.to,"Clone SelfBot Team")
cl.sendText(msg.to,"Please wait......")
cl.sendText(msg.to,"Turn off all protection")
cl.sendText(msg.to,"Qr:off")
cl.sendText(msg.to,"Backup:off")
cl.sendText(msg.to,"Read:off")
cl.sendText(msg.to,"Respon:off")
cl.sendText(msg.to,"Responkick:off")
cl.sendText(msg.to,"Protect:off")
cl.sendText(msg.to,"Namelock:off")
cl.sendText(msg.to,"Blockinvite:off")
cl.sendText(msg.to,"Link off")
elif msg.text in ["ทีมงาน","ทีมทดลองบอท"]:
msg.contentType = 13
cl.sendText(msg.to, "[SELFBOT PHET HACK BOT]\n\n[☢Ŧ€₳M≈ನန้ণএ≈฿❂Ŧ☢]\n[By.ทีมงานทีมทดลองบอท]")
cl.sendText(msg.to, "ผู้จัดการทีมงาน:🐯हईທຮຮๅજईह🐯")
msg.contentMetadata = {'mid': 'u820d01252fdcf2a539fa194bcfc3400e'}
cl.sendMessage(msg)
cl.sendText(msg.to, "รองผู้จัดการทีมงาน:β•`BF.บั้ม•`")
msg.contentMetadata = {'mid': 'u49974a7c78af9f3a8fec3e1dc7c646a9'}
cl.sendMessage(msg)
cl.sendText(msg.to, "ประธานใหญ่:เพชร ทีมทดลองบอท")
msg.contentMetadata = {'mid': 'u00f827ce6641038d7c9b6704a9777dfa'}
cl.sendMessage(msg)
cl.sendText(msg.to, "ประธาน:ᴳᴜ ᵀᴇᵃᴍ ᴴa̴ᶜᴋ ᴮᴏᵀ")
msg.contentMetadata = {'mid': 'u6eb517fae5d8de8d1845325e995196a7'}
cl.sendMessage(msg)
cl.sendText(msg.to, "รองประธาน:💫ীန้ສقัπั௭❁💫")
msg.contentMetadata = {'mid': 'u765bec541d4f21cf0afdceb69b4b2ebd'}
cl.sendMessage(msg)
cl.sendText(msg.to, "รปภ.:✍Ŧ€₳M☬ж☬Ħ₳ʗҜ฿❂Ŧ✈๛")
msg.contentMetadata = {'mid': 'u409892727431e6e682114336a3be2784'}
cl.sendMessage(msg)
cl.sendText(msg.to, "ตัวเเทนสมาชิก:🍃🍁NothingEid🍁🍃")
msg.contentMetadata = {'mid': 'ue9e8dbdbfa31491ddc82ed73950b45f0'}
cl.sendMessage(msg)
cl.sendText(msg.to, "ตัวเเทนสมาชิก:Ĵöɱ💎Sтɪcκєʀᴸᶤᶰᵉ")
msg.contentMetadata = {'mid': 'u76be42d134b394580644e1eed2bed029'}
cl.sendMessage(msg)
#========================================
elif msg.text in ["มึงตาย","()"]:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.to+"',"}
cl.sendMessage(msg)
elif 'เตะแม่ง' in msg.text:
if msg.toType == 2:
print "Kickall ok"
_name = msg.text.replace("เตะแม่ง","")
gs = ki1.getGroup(msg.to)
gs = ki2.getGroup(msg.to)
gs = ki3.getGroup(msg.to)
gs = ki4.getGroup(msg.to)
gs = ki5.getGroup(msg.to)
gs = ki6.getGroup(msg.to)
gs = ki7.getGroup(msg.to)
gs = ki8.getGroup(msg.to)
gs = ki9.getGroup(msg.to)
gs = ki10.getGroup(msg.to)
ki1.sendText(msg.to, "Hello all...😁😁 {}")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found.")
# ki.sendText(msg.to,"Not found.")
else:
for target in targets:
if not target in Bots:
try:
klist=[ki1,ki2,ki3,ki4,ki5,ki5,ki6,ki7,ki8,ki9,ki10]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
pass
# ki3.sendText(msg,to,"Nuke Finish")
# ki2.sendText(msg,to,"
elif msg.text.lower() == '#rebootbotall':
if msg.toType == 2:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
cl.sendText(msg.to,"waitting...")
ki1.leaveGroup(msg.to)
ki2.leaveGroup(msg.to)
ki3.leaveGroup(msg.to)
ki4.leaveGroup(msg.to)
ki5.leaveGroup(msg.to)
ki6.leaveGroup(msg.to)
ki7.leaveGroup(msg.to)
ki8.leaveGroup(msg.to)
ki9.leaveGroup(msg.to)
ki10.leaveGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki1.acceptGroupInvitationByTicket(msg.to,Ticket)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
ki6.acceptGroupInvitationByTicket(msg.to,Ticket)
ki7.acceptGroupInvitationByTicket(msg.to,Ticket)
ki8.acceptGroupInvitationByTicket(msg.to,Ticket)
ki9.acceptGroupInvitationByTicket(msg.to,Ticket)
ki10.acceptGroupInvitationByTicket(msg.to,Ticket)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki1.updateGroup(G)
print "kicker ok"
G.preventJoinByTicket(G)
ki1.updateGroup(G)
elif msg.text.lower() == '#boot#':
if msg.toType == 2:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
cl.sendText(msg.to,"waitting...")
ki1.leaveGroup(msg.to)
ki2.leaveGroup(msg.to)
ki3.leaveGroup(msg.to)
ki4.leaveGroup(msg.to)
ki5.leaveGroup(msg.to)
ki6.leaveGroup(msg.to)
ki7.leaveGroup(msg.to)
ki8.leaveGroup(msg.to)
ki9.leaveGroup(msg.to)
ki10.leaveGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki1.acceptGroupInvitationByTicket(msg.to,Ticket)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
ki6.acceptGroupInvitationByTicket(msg.to,Ticket)
ki7.acceptGroupInvitationByTicket(msg.to,Ticket)
ki8.acceptGroupInvitationByTicket(msg.to,Ticket)
ki9.acceptGroupInvitationByTicket(msg.to,Ticket)
ki10.acceptGroupInvitationByTicket(msg.to,Ticket)
ki1.leaveGroup(msg.to)
ki2.leaveGroup(msg.to)
ki3.leaveGroup(msg.to)
ki4.leaveGroup(msg.to)
ki5.leaveGroup(msg.to)
ki6.leaveGroup(msg.to)
ki7.leaveGroup(msg.to)
ki8.leaveGroup(msg.to)
ki9.leaveGroup(msg.to)
ki10.leaveGroup(msg.to)
ki1.acceptGroupInvitationByTicket(msg.to,Ticket)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
ki6.acceptGroupInvitationByTicket(msg.to,Ticket)
ki7.acceptGroupInvitationByTicket(msg.to,Ticket)
ki8.acceptGroupInvitationByTicket(msg.to,Ticket)
ki9.acceptGroupInvitationByTicket(msg.to,Ticket)
ki10.acceptGroupInvitationByTicket(msg.to,Ticket)
ki1.leaveGroup(msg.to)
ki2.leaveGroup(msg.to)
ki3.leaveGroup(msg.to)
ki4.leaveGroup(msg.to)
ki5.leaveGroup(msg.to)
ki6.leaveGroup(msg.to)
ki7.leaveGroup(msg.to)
ki8.leaveGroup(msg.to)
ki9.leaveGroup(msg.to)
ki10.leaveGroup(msg.to)
ki1.acceptGroupInvitationByTicket(msg.to,Ticket)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
ki6.acceptGroupInvitationByTicket(msg.to,Ticket)
ki7.acceptGroupInvitationByTicket(msg.to,Ticket)
ki8.acceptGroupInvitationByTicket(msg.to,Ticket)
ki9.acceptGroupInvitationByTicket(msg.to,Ticket)
ki10.acceptGroupInvitationByTicket(msg.to,Ticket)
ki1.leaveGroup(msg.to)
ki2.leaveGroup(msg.to)
ki3.leaveGroup(msg.to)
ki4.leaveGroup(msg.to)
ki5.leaveGroup(msg.to)
ki6.leaveGroup(msg.to)
ki7.leaveGroup(msg.to)
ki8.leaveGroup(msg.to)
ki9.leaveGroup(msg.to)
ki10.leaveGroup(msg.to)
ki1.acceptGroupInvitationByTicket(msg.to,Ticket)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
ki6.acceptGroupInvitationByTicket(msg.to,Ticket)
ki7.acceptGroupInvitationByTicket(msg.to,Ticket)
ki8.acceptGroupInvitationByTicket(msg.to,Ticket)
ki9.acceptGroupInvitationByTicket(msg.to,Ticket)
ki10.acceptGroupInvitationByTicket(msg.to,Ticket)
ki1.leaveGroup(msg.to)
ki2.leaveGroup(msg.to)
ki3.leaveGroup(msg.to)
ki4.leaveGroup(msg.to)
ki5.leaveGroup(msg.to)
ki6.leaveGroup(msg.to)
ki7.leaveGroup(msg.to)
ki8.leaveGroup(msg.to)
ki9.leaveGroup(msg.to)
ki10.leaveGroup(msg.to)
ki1.acceptGroupInvitationByTicket(msg.to,Ticket)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
ki6.acceptGroupInvitationByTicket(msg.to,Ticket)
ki7.acceptGroupInvitationByTicket(msg.to,Ticket)
ki8.acceptGroupInvitationByTicket(msg.to,Ticket)
ki9.acceptGroupInvitationByTicket(msg.to,Ticket)
ki10.acceptGroupInvitationByTicket(msg.to,Ticket)
ki1.leaveGroup(msg.to)
ki2.leaveGroup(msg.to)
ki3.leaveGroup(msg.to)
ki4.leaveGroup(msg.to)
ki5.leaveGroup(msg.to)
ki6.leaveGroup(msg.to)
ki7.leaveGroup(msg.to)
ki8.leaveGroup(msg.to)
ki9.leaveGroup(msg.to)
ki10.leaveGroup(msg.to)
ki1.acceptGroupInvitationByTicket(msg.to,Ticket)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
ki6.acceptGroupInvitationByTicket(msg.to,Ticket)
ki7.acceptGroupInvitationByTicket(msg.to,Ticket)
ki8.acceptGroupInvitationByTicket(msg.to,Ticket)
ki9.acceptGroupInvitationByTicket(msg.to,Ticket)
ki10.acceptGroupInvitationByTicket(msg.to,Ticket)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki1.updateGroup(G)
print "kicker ok"
G.preventJoinByTicket(G)
ki1.updateGroup(G)
elif msg.text in ["Kill"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
random.choice(KAC).sendText(msg.to,"Fuck You")
return
for jj in matched_list:
try:
klist=[ki1,ki2,ki3,ki4,ki5,ki5,ki6,ki7,ki8,ki9,ki10]
kicker = random.choice(klist)
kicker.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
elif ("PK4 " in msg.text):
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
ki6.kickoutFromGroup(msg.to,[target])
except:
ki6.sendText(msg.to,"Error")
elif "KK2 " in msg.text:
nk0 = msg.text.replace("KK2 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
gs.preventJoinByTicket = False
cl.updateGroup(gs)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
ki2.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
ki2.leaveGroup(msg.to)
gs = cl.getGroup(msg.to)
gs.preventJoinByTicket = True
cl.updateGroup(gs)
gs.preventJoinByTicket(gs)
cl.updateGroup(gs)
elif "KK1 " in msg.text:
nk0 = msg.text.replace("KK1 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
gs.preventJoinByTicket = False
cl.updateGroup(gs)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki1.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
ki1.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
ki1.leaveGroup(msg.to)
gs = cl.getGroup(msg.to)
gs.preventJoinByTicket = True
cl.updateGroup(gs)
gs.preventJoinByTicket(gs)
cl.updateGroup(gs)
#-----------------------------------------------------------
elif "contactjoin:" in msg.text:
    # Create an album with a random name and the given mid via the
    # channel API; the raw API response (or error) is posted back.
    try:
        source_str = 'abcdefghijklmnopqrstuvwxyz1234567890@:;./_][!&%$#)(=~^|'
        name = "".join([random.choice(source_str) for x in xrange(10)])
        amid = msg.text.replace("contactjoin:","")
        cl.sendText(msg.to,str(cl.channel.createAlbumF(msg.to,name,amid)))
    except Exception as e:
        try:
            cl.sendText(msg.to,str(e))
        except:
            pass
elif ("PK2 " in msg.text):
    # Kick each @-mentioned member via helper bot ki2.
    # NOTE(review): eval() on message metadata executes attacker-supplied
    # text — json.loads would be the safe equivalent.
    targets = []
    key = eval(msg.contentMetadata["MENTION"])
    key["MENTIONEES"][0]["M"]
    for x in key["MENTIONEES"]:
        targets.append(x["M"])
    for target in targets:
        try:
            ki2.kickoutFromGroup(msg.to,[target])
        except:
            ki2.sendText(msg.to,"Error")
elif ("PK3 " in msg.text):
    # Kick each @-mentioned member via helper bot ki5 (same eval caveat).
    targets = []
    key = eval(msg.contentMetadata["MENTION"])
    key["MENTIONEES"][0]["M"]
    for x in key["MENTIONEES"]:
        targets.append(x["M"])
    for target in targets:
        try:
            ki5.kickoutFromGroup(msg.to,[target])
        except:
            ki5.sendText(msg.to,"Error")
elif "Phet@@" in msg.text:
    # Mention-all: batches of 100 members per message, each tagged with
    # a 9-character placeholder ("@Krampus\n" = offsets 0-8) whose spans
    # are mapped to member mids in the MENTION metadata.
    group = cl.getGroup(msg.to)
    k = len(group.members)//100
    for j in xrange(k+1):
        msg = Message(to=msg.to)
        txt = u''
        s=0
        d=[]
        for i in group.members[j*100 : (j+1)*100]:
            d.append({"S":str(s), "E" :str(s+8), "M":i.mid})
            s += 9
            txt += u'@Krampus\n'
        msg.text = txt
        msg.contentMetadata = {u'MENTION':json.dumps({"MENTIONEES":d})}
        cl.sendMessage(msg)
elif "รวม" in msg.text:
    # Thai alias of "Phet@@": identical mention-all implementation.
    group = cl.getGroup(msg.to)
    k = len(group.members)//100
    for j in xrange(k+1):
        msg = Message(to=msg.to)
        txt = u''
        s=0
        d=[]
        for i in group.members[j*100 : (j+1)*100]:
            d.append({"S":str(s), "E" :str(s+8), "M":i.mid})
            s += 9
            txt += u'@Krampus\n'
        msg.text = txt
        msg.contentMetadata = {u'MENTION':json.dumps({"MENTIONEES":d})}
        cl.sendMessage(msg)
elif ("เตะ " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
except:
cl.sendText(msg.to,"Error")
elif "Blacklist @" in msg.text:
_name = msg.text.replace("Blacklist @","")
_kicktarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _kicktarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Success Boss")
except:
cl.sendText(msg.to,"error")
elif "Ban @" in msg.text:
if msg.toType == 2:
print "[BL]ok"
_name = msg.text.replace("Ban @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found.")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Success Masuk daftar orang bejat Boss")
except:
cl.sendText(msg.to,"Error")
elif "Unban @" in msg.text:
if msg.toType == 2:
print "[WL]ok"
_name = msg.text.replace("Unban @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found.")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Sudah di keluarkan dari daftar bejat Boss")
except:
cl.sendText(msg.to,"There was no blacklist user")
elif msg.text in ["Clear ban","ล้างดำ"]:
wait["blacklist"] = {}
cl.sendText(msg.to,"clear")
elif msg.text in ["Ban"]:
wait["wblacklist"] = True
cl.sendText(msg.to,"send contact to ban")
elif msg.text in ["Unban"]:
wait["dblacklist"] = True
cl.sendText(msg.to,"send contact to ban")
elif msg.text in ["Banlist","Mcheck"]:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"Nothing double thumbs up")
else:
cl.sendText(msg.to,"Daftar Banlist")
mc = "[⎈]Blacklist [⎈]\n"
for mi_d in wait["blacklist"]:
mc += "[✗] " + cl.getContact(mi_d).displayName + " \n"
cl.sendText(msg.to, mc + "")
elif msg.text in ["Me ban","Cekban","Mcheck mid"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
cocoa = "[⎈]Mid Blacklist [⎈]"
for mm in matched_list:
cocoa += "\n" + mm + "\n"
cl.sendText(msg.to,cocoa + "")
#=============================================
# --- Simple per-feature on/off toggles -----------------------------------
elif msg.text in ["Simisimi on","Simisimi:on"]:
    # Enable the SimSimi auto-chat for this room only.
    settings["simiSimi"][msg.to] = True
    cl.sendText(msg.to,"Success activated simisimi")
elif msg.text in ["Simisimi off","Simisimi:off"]:
    settings["simiSimi"][msg.to] = False
    cl.sendText(msg.to,"Success deactive simisimi")
elif msg.text in ["Read on","Read:on"]:
    # Global: automatically mark messages as read ("auto sider").
    wait['alwayRead'] = True
    cl.sendText(msg.to,"Auto Sider ON")
elif msg.text in ["Read off","Read:off"]:
    wait['alwayRead'] = False
    cl.sendText(msg.to,"Auto Sider OFF")
elif msg.text in ["Tag on","Autorespon:on","Respon on","Respon:on"]:
    # Global: auto-reply when this account is @-mentioned.
    wait["detectMention"] = True
    cl.sendText(msg.to,"Auto Respon ON")
elif msg.text in ["Tag off","Autorespon:off","Respon off","Respon:off"]:
    wait["detectMention"] = False
    cl.sendText(msg.to,"Auto Respon OFF")
elif msg.text in ["Kicktag on","Autokick:on","Responkick on","Responkick:on"]:
    # Global: auto-kick anyone who @-mentions this account.
    wait["kickMention"] = True
    cl.sendText(msg.to,"Auto Kick ON")
elif msg.text in ["Kicktag off","Autokick:off","Responkick off","Responkick:off"]:
    wait["kickMention"] = False
    cl.sendText(msg.to,"Auto Kick OFF")
elif msg.text in ["Cancel on","cancel on"]:
    # Admin-only: auto-cancel all incoming group invitations.  Both
    # branches send the same replies, so prior state is not reflected.
    if msg.from_ in admin:
        if wait["Protectcancl"] == True:
            if wait["lang"] == "JP":
                cl.sendText(msg.to,"Cancel Semua Undangan On")
            else:
                cl.sendText(msg.to,"done")
        else:
            wait["Protectcancl"] = True
            if wait["lang"] == "JP":
                cl.sendText(msg.to,"Cancel Semua Undangan On")
            else:
                cl.sendText(msg.to,"done")
elif msg.text in ["Cancel off","cancel off"]:
    # Admin-only: disable invitation auto-cancel.
    if msg.from_ in admin:
        if wait["Protectcancl"] == False:
            if wait["lang"] == "JP":
                cl.sendText(msg.to,"Cancel Semua Undangan Off")
            else:
                cl.sendText(msg.to,"done")
        else:
            wait["Protectcancl"] = False
            if wait["lang"] == "JP":
                cl.sendText(msg.to,"Cancel Semua Undangan Off")
            else:
                cl.sendText(msg.to,"done")
#==============================================================================#
#==============================================================================#
elif "Phackmid:" in msg.text:
    # Given a raw mid: post that user's contact card, then their name,
    # bio, profile picture and cover photo.
    saya = msg.text.replace("Phackmid:","")
    msg.contentType = 13
    msg.contentMetadata = {"mid":saya}
    cl.sendMessage(msg)
    contact = cl.getContact(saya)
    cu = cl.channel.getCover(saya)
    path = str(cu)
    image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
    try:
        cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage)
        cl.sendText(msg.to,"Profile Picture " + contact.displayName)
        # NOTE(review): sendImageWithURL vs sendImageWithUrl — two
        # different spellings are used; confirm both exist on the client.
        cl.sendImageWithURL(msg.to,image)
        cl.sendText(msg.to,"Cover " + contact.displayName)
        cl.sendImageWithUrl(msg.to,path)
    except:
        pass
elif "#Phackgid:" in msg.text:
    # Given a group id: if this account is a member of that group, post
    # its summary (name, id, url-join state, member counts), the
    # creator's contact card and the group picture.
    saya = msg.text.replace("#Phackgid:","")
    gid = cl.getGroupIdsJoined()
    for i in gid:
        h = cl.getGroup(i).id
        group = cl.getGroup(i)
        if h == saya:
            try:
                creator = group.creator.mid
                msg.contentType = 13
                msg.contentMetadata = {'mid': creator}
                md = "Nama Grup :\n" + group.name + "\n\nID Grup :\n" + group.id
                if group.preventJoinByTicket is False: md += "\n\nKode Url : Diizinkan"
                else: md += "\n\nKode Url : Diblokir"
                if group.invitee is None: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : 0 Orang"
                else: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : " + str(len(group.invitee)) + " Orang"
                cl.sendText(msg.to,md)
                cl.sendMessage(msg)
                cl.sendImageWithUrl(msg.to,"http://dl.profile.line.naver.jp/"+ group.pictureStatus)
            except:
                creator = "Error"
elif msg.text in ["Friendlist","เช็คเพื่อนทั้งหมด","เพื่อนทั้งหมด","Fyall"]:
    # Post a numbered list of every friend on the account.
    contactlist = cl.getAllContactIds()
    kontak = cl.getContacts(contactlist)
    num=1
    msgs="═════════List Friend═════════"
    for ids in kontak:
        msgs+="\n[%i] %s" % (num, ids.displayName)
        num=(num+1)
    msgs+="\n═════════List Friend═════════\n\nTotal Friend : %i" % len(kontak)
    cl.sendText(msg.to, msgs)
elif msg.text in ["Memlist","Nameall"]:
    # Post a numbered list of every member of the current group.
    kontak = cl.getGroup(msg.to)
    group = kontak.members
    num=1
    msgs="═════════List Member═════════-"
    for ids in group:
        msgs+="\n[%i] %s" % (num, ids.displayName)
        num=(num+1)
    msgs+="\n═════════List Member═════════\n\nTotal Members : %i" % len(group)
    cl.sendText(msg.to, msgs)
elif "Friendinfo: " in msg.text:
saya = msg.text.replace('Friendinfo: ','')
gid = cl.getAllContactIds()
for i in gid:
h = cl.getContact(i).displayName
contact = cl.getContact(i)
cu = cl.channel.getCover(i)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
if h == saya:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage)
cl.sendText(msg.to,"Profile Picture " + contact.displayName)
cl.sendImageWithURL(msg.to,image)
cl.sendText(msg.to,"Cover " + contact.displayName)
cl.sendImageWithUrl(msg.to,path)
elif "#Friendpict:" in msg.text:
saya = msg.text.replace('#Friendpict:','')
gid = cl.getAllContactIds()
for i in gid:
h = cl.getContact(i).displayName
gna = cl.getContact(i)
if h == saya:
cl.sendImageWithUrl(msg.to,"http://dl.profile.line.naver.jp/"+ gna.pictureStatus)
elif msg.text in ["Blocklist","บลอค","Pbann"]:
blockedlist = cl.getBlockedContactIds()
kontak = cl.getContacts(blockedlist)
num=1
msgs="═════════List Blocked═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n═════════List Blocked═════════\n\nTotal Blocked : %i" % len(kontak)
cl.sendText(msg.to, msgs)
elif msg.text in ["#Myginfoall"]:
gruplist = cl.getGroupIdsJoined()
kontak = cl.getGroups(gruplist)
num=1
msgs="═════════List Grup═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.name)
num=(num+1)
msgs+="\n═════════List Grup═════════\n\nTotal Grup : %i" % len(kontak)
cl.sendText(msg.to, msgs)
elif msg.text in ["#Myginfogidall"]:
gruplist = cl.getGroupIdsJoined()
kontak = cl.getGroups(gruplist)
num=1
msgs="═════════List GrupMid═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.id)
num=(num+1)
msgs+="\n═════════List GrupMid═════════\n\nTotal Grup : %i" % len(kontak)
cl.sendText(msg.to, msgs)
elif "1991258ชื่อกลุ่ม" in msg.text:
saya = msg.text.replace('1991258ชื่อกลุ่ม','')
gid = cl.getGroup(msg.to)
cl.sendText(msg.to, "[Nama Grup : ]\n" + gid.name)
elif "Gid" in msg.text:
saya = msg.text.replace('Gid','')
gid = cl.getGroup(msg.to)
cl.sendText(msg.to, "[ID Grup : ]\n" + gid.id)
elif msg.text in ["#Meginfoall"]:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "%s\n" % (cl.getGroup(i).name +" ? ["+str(len(cl.getGroup(i).members))+"]")
cl.sendText(msg.to,"-- List Groups --\n\n"+ h +"\nTotal groups =" +" ["+str(len(gid))+"]")
elif msg.text in ["tag","Tag","แทก"]:
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
nm1, nm2, nm3, nm4, nm5, jml = [], [], [], [], [], len(nama)
if jml <= 100:
summon(msg.to, nama)
if jml > 100 and jml < 200:
for i in range(0, 99):
nm1 += [nama[i]]
summon(msg.to, nm1)
for j in range(100, len(nama)-1):
nm2 += [nama[j]]
summon(msg.to, nm2)
if jml > 200 and jml < 500:
for i in range(0, 99):
nm1 += [nama[i]]
summon(msg.to, nm1)
for j in range(100, 199):
nm2 += [nama[j]]
summon(msg.to, nm2)
for k in range(200, 299):
nm3 += [nama[k]]
summon(msg.to, nm3)
for l in range(300, 399):
nm4 += [nama[l]]
summon(msg.to, nm4)
for m in range(400, len(nama)-1):
nm5 += [nama[m]]
summon(msg.to, nm5)
if jml > 500:
print "Terlalu Banyak Men 500+"
cnt = Message()
cnt.text = "จำนวนสมาชิกห้องนี้ 👉 :\n" + str(jml) + " คน"
cnt.to = msg.to
cl.sendMessage(cnt)
elif "lurk on" == msg.text.lower():
if msg.to in wait2['readPoint']:
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
del wait2['setTime'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('%H:%M:%S')
wait2['ROM'][msg.to] = {}
with open('sider.json', 'w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
cl.sendText(msg.to,"Lurking already on\nเปิดการอ่านอัตโนมัต")
else:
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
del wait2['setTime'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('%H:%M:%S')
wait2['ROM'][msg.to] = {}
with open('sider.json', 'w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
cl.sendText(msg.to, "เปิดการอ่านอัตโนมัต\nSet reading point:\n" + datetime.now().strftime('%H:%M:%S'))
print wait2
elif "lurk off" == msg.text.lower():
if msg.to not in wait2['readPoint']:
cl.sendText(msg.to,"Lurking already off\nปิดการอ่านอัตโนมัต")
else:
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
del wait2['setTime'][msg.to]
except:
pass
cl.sendText(msg.to, "ปิดการอ่านอัตโนมัต\nDelete reading point:\n" + datetime.now().strftime('%H:%M:%S'))
elif "lurkers" == msg.text.lower():
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
cl.sendText(msg.to, "Lurkers:\nNone")
else:
chiya = []
for rom in wait2["ROM"][msg.to].items():
chiya.append(rom[1])
cmem = cl.getContacts(chiya)
zx = ""
zxc = ""
zx2 = []
xpesan = 'Lurkers:\n'
for x in range(len(cmem)):
xname = str(cmem[x].displayName)
pesan = ''
pesan2 = pesan+"@a\n"
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':cmem[x].mid}
zx2.append(zx)
zxc += pesan2
msg.contentType = 0
print zxc
msg.text = xpesan+ zxc + "\nLurking time: %s\nCurrent time: %s"%(wait2['setTime'][msg.to],datetime.now().strftime('%H:%M:%S'))
lol ={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}
print lol
msg.contentMetadata = lol
try:
cl.sendMessage(msg)
except Exception as error:
print error
pass
else:
cl.sendText(msg.to, "Lurking has not been set.")
elif msg.text in ["เปิดอ่าน","R on","ตั้งเวลา","นน"]:
cl.sendText(msg.to,"lurk on")
elif msg.text in ["ปิดอ่าน","R off"]:
cl.sendText(msg.to,"lurk off")
elif msg.text in ["อ่าน","Ry","ออ"]:
cl.sendText(msg.to,"lurkers")
elif msg.text in ["Ry20"]:
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"llurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
elif ("Micadd " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
mimic["target"][target] = True
cl.sendText(msg.to,"Target ditambahkan!")
break
except:
cl.sendText(msg.to,"Fail !")
break
elif ("Micdel " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del mimic["target"][target]
cl.sendText(msg.to,"Target dihapuskan!")
break
except:
cl.sendText(msg.to,"Fail !")
break
elif msg.text in ["Miclist","Heckmic"]:
if mimic["target"] == {}:
cl.sendText(msg.to,"nothing")
else:
mc = "Target mimic user\n"
for mi_d in mimic["target"]:
mc += "• "+cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif "Mimic target " in msg.text:
if mimic["copy"] == True:
siapa = msg.text.replace("Mimic target ","")
if siapa.rstrip(' ') == "me":
mimic["copy2"] = "me"
cl.sendText(msg.to,"Mimic change to me")
elif siapa.rstrip(' ') == "target":
mimic["copy2"] = "target"
cl.sendText(msg.to,"Mimic change to target")
else:
cl.sendText(msg.to,"I dont know")
elif "Phetmic " in msg.text:
cmd = msg.text.replace("Phetmic ","")
if cmd == "on":
if mimic["status"] == False:
mimic["status"] = True
cl.sendText(msg.to,"Reply Message on")
else:
cl.sendText(msg.to,"Sudah on")
elif cmd == "off":
if mimic["status"] == True:
mimic["status"] = False
cl.sendText(msg.to,"Reply Message off")
else:
cl.sendText(msg.to,"Sudah off")
elif "Setimage: " in msg.text:
wait["pap"] = msg.text.replace("Setimage: ","")
cl.sendText(msg.to, "Pap telah di Set")
elif msg.text in ["Papimage","Papim","Pap"]:
cl.sendImageWithUrl(msg.to,wait["pap"])
elif "Setvideo: " in msg.text:
wait["pap"] = msg.text.replace("Setvideo: ","")
cl.sendText(msg.to,"Video Has Ben Set To")
elif msg.text in ["Papvideo","Papvid"]:
cl.sendVideoWithUrl(msg.to,wait["pap"])
#==============================================================================#
elif msg.text in ["Sk"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "100",
"STKPKGID": "1",
"STKVER": "100" }
ki1.sendMessage(msg)
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "10",
"STKPKGID": "1",
"STKVER": "100" }
ki1.sendMessage(msg)
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "9",
"STKPKGID": "1",
"STKVER": "100" }
ki1.sendMessage(msg)
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "7",
"STKPKGID": "1",
"STKVER": "100" }
ki1.sendMessage(msg)
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "6",
"STKPKGID": "1",
"STKVER": "100" }
ki1.sendMessage(msg)
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "4",
"STKPKGID": "1",
"STKVER": "100" }
ki1.sendMessage(msg)
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "3",
"STKPKGID": "1",
"STKVER": "100" }
ki1.sendMessage(msg)
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "110",
"STKPKGID": "1",
"STKVER": "100" }
ki1.sendMessage(msg)
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "101",
"STKPKGID": "1",
"STKVER": "100" }
ki1.sendMessage(msg)
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "247",
"STKPKGID": "3",
"STKVER": "100" }
ki1.sendMessage(msg)
        elif msg.text.lower() == 'mymid':
            # Reply with the owner account's mid.
            cl.sendText(msg.to,mid)
        elif "Timeline: " in msg.text:
            # Create a timeline post and reply with its deep link.
            tl_text = msg.text.replace("Timeline: ","")
            cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
        elif "Myname: " in msg.text:
            # Change the account display name.
            # NOTE(review): the 10000000000-char limit is effectively no limit.
            string = msg.text.replace("Myname: ","")
            if len(string.decode('utf-8')) <= 10000000000:
                profile = cl.getProfile()
                profile.displayName = string
                cl.updateProfile(profile)
                cl.sendText(msg.to,"Changed " + string + "")
        elif "Mybio: " in msg.text:
            # Change the account status message (bio).
            string = msg.text.replace("Mybio: ","")
            if len(string.decode('utf-8')) <= 10000000000:
                profile = cl.getProfile()
                profile.statusMessage = string
                cl.updateProfile(profile)
                cl.sendText(msg.to,"Changed " + string)
        elif msg.text in ["Myname","Mename"]:
            h = cl.getContact(mid)
            cl.sendText(msg.to,"===[DisplayName]===\n" + h.displayName)
        elif msg.text in ["Mybio","Mey1"]:
            h = cl.getContact(mid)
            cl.sendText(msg.to,"===[StatusMessage]===\n" + h.statusMessage)
        elif msg.text in ["Mypict","Mey2"]:
            # Own avatar as an image.
            h = cl.getContact(mid)
            cl.sendImageWithUrl(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
        elif msg.text in ["Myvid","Mey3"]:
            # Own avatar sent through the video pipeline (for animated avatars).
            h = cl.getContact(mid)
            cl.sendVideoWithUrl(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
        elif msg.text in ["Urlpict","Mey4"]:
            h = cl.getContact(mid)
            cl.sendText(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
        elif msg.text in ["Mycover","Mey5"]:
            # Own cover image; getCover() returns the URL (stringified here).
            h = cl.getContact(mid)
            cu = cl.channel.getCover(mid)
            path = str(cu)
            cl.sendImageWithUrl(msg.to, path)
        elif msg.text in ["Urlcover","Mey6"]:
            h = cl.getContact(mid)
            cu = cl.channel.getCover(mid)
            path = str(cu)
            cl.sendText(msg.to, path)
        elif "Getmid @" in msg.text:
            # Reply with the mid of each member whose display name matches
            # exactly the text after the '@'.
            _name = msg.text.replace("Getmid @","")
            _nametarget = _name.rstrip(' ')
            gs = cl.getGroup(msg.to)
            for g in gs.members:
                if _nametarget == g.displayName:
                    cl.sendText(msg.to, g.mid)
                else:
                    pass
elif "#22Getinfo" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\nHeader :\n" + str(cu))
except:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\n" + str(cu))
elif "Ph4" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage)
except:
cl.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage)
elif "Ph2" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
except:
cl.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
elif "mh2" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
try:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage)
cl.sendText(msg.to,"Profile Picture " + contact.displayName)
cl.sendImageWithUrl(msg.to,image)
cl.sendText(msg.to,"Cover " + contact.displayName)
cl.sendImageWithUrl(msg.to,path)
except:
pass
elif "#picall" in msg.text:
nk0 = msg.text.replace("#picall","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"!!..ผิดพลาด")
pass
else:
for target in targets:
try:
contact = cl.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendImageWithUrl(msg.to, path)
except Exception as e:
raise e
elif "#pictall" in msg.text:
nk0 = msg.text.replace("#pictall","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"!!..ผิดพลาด")
pass
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
path = str(cu)
cl.sendImageWithUrl(msg.to, path)
except Exception as e:
raise e
elif "#phethackall" in msg.text:
nk0 = msg.text.replace("#phethackall","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"!!..ผิดพลาด")
pass
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage)
cl.sendText(msg.to,"Profile Picture " + contact.displayName)
cl.sendImageWithUrl(msg.to,image)
cl.sendText(msg.to,"Cover " + contact.displayName)
cl.sendImageWithUrl(msg.to, path)
except Exception as e:
raise e
elif "Ph3vdo @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Ph3vdo @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendVideoWithUrl(msg.to, path)
except Exception as e:
raise e
print "[Command]dp executed"
elif "Ph3url @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Ph3url @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendText(msg.to, path)
except Exception as e:
raise e
print "[Command]dp executed"
elif "2url @" in msg.text:
print "[Command]cover executing"
_name = msg.text.replace("2url @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
cl.sendImageWithUrl(msg.to, path)
except Exception as e:
raise e
print "[Command]cover executed"
elif "Ph2url @" in msg.text:
print "[Command]cover executing"
_name = msg.text.replace("Ph2url @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
cl.sendText(msg.to, path)
except Exception as e:
raise e
print "[Command]cover executed"
elif "แปลงร่าง @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("แปลงร่าง @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
cl.CloneContactProfile(target)
cl.sendText(msg.to, "แปลงร่างสำเร็จ")
except Exception as e:
print e
elif msg.text in ["Mybb"]:
try:
cl.updateDisplayPicture(backup.pictureStatus)
cl.updateProfile(backup)
cl.sendText(msg.to, "Refreshed.")
except Exception as e:
cl.sendText(msg.to, str(e))
elif "Botcopy @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("Botcopy @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
ki1.CloneContactProfile(target)
ki1.sendText(msg.to, "Copied.")
ki2.CloneContactProfile(target)
ki2.sendText(msg.to, "Copied.")
ki3.CloneContactProfile(target)
ki3.sendText(msg.to, "Copied.")
ki4.CloneContactProfile(target)
ki4.sendText(msg.to, "Copied.")
ki5.CloneContactProfile(target)
ki5.sendText(msg.to, "Copied.")
ki6.CloneContactProfile(target)
ki6.sendText(msg.to, "Copied.")
ki7.CloneContactProfile(target)
ki7.sendText(msg.to, "Copied.")
ki8.CloneContactProfile(target)
ki8.sendText(msg.to, "Copied.")
ki9.CloneContactProfile(target)
ki9.sendText(msg.to, "Copied.")
ki10.CloneContactProfile(target)
ki10.sendText(msg.to, "Copied.")
except Exception as e:
print e
#==============================================================================#
elif "[Auto Respond]" in msg.text:
cl.sendImageWithUrl(msg.to, "http://dl.profile.line.naver.jp/0hlGvN3GXvM2hLNx8goPtMP3dyPQU8GSIgJVUpCTpiPVtiA3M2clJ-C2hia11mUn04cAJ-DWljOVBj")
elif "Fancytext: " in msg.text:
txt = msg.text.replace("Fancytext: ", "")
cl.kedapkedip(msg.to,txt)
print "[Command] Kedapkedip"
elif "Tx: " in msg.text:
txt = msg.text.replace("Tx: ", "")
cl.kedapkedip(msg.to,txt)
print "[Command] Kedapkedip"
elif "Bx: " in msg.text:
txt = msg.text.replace("Bx: ", "")
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
print "[Command] Kedapkedip"
elif "Tx10: " in msg.text:
txt = msg.text.replace("Tx10: ", "")
cl.kedapkedip(msg.to,txt)
cl.kedapkedip(msg.to,txt)
cl.kedapkedip(msg.to,txt)
cl.kedapkedip(msg.to,txt)
cl.kedapkedip(msg.to,txt)
cl.kedapkedip(msg.to,txt)
cl.kedapkedip(msg.to,txt)
cl.kedapkedip(msg.to,txt)
cl.kedapkedip(msg.to,txt)
cl.kedapkedip(msg.to,txt)
cl.kedapkedip(msg.to,txt)
print "[Command] Kedapkedip"
elif "Tr-id " in msg.text:
isi = msg.text.replace("Tr-id ","")
translator = Translator()
hasil = translator.translate(isi, dest='id')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "Tr-en " in msg.text:
isi = msg.text.replace("Tr-en ","")
translator = Translator()
hasil = translator.translate(isi, dest='en')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "Tr-ar" in msg.text:
isi = msg.text.replace("Tr-ar ","")
translator = Translator()
hasil = translator.translate(isi, dest='ar')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "Tr-jp" in msg.text:
isi = msg.text.replace("Tr-jp ","")
translator = Translator()
hasil = translator.translate(isi, dest='ja')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "Tr-ko" in msg.text:
isi = msg.text.replace("Tr-ko ","")
translator = Translator()
hasil = translator.translate(isi, dest='ko')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "Id@en" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'en'
kata = msg.text.replace("Id@en ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO ENGLISH----\n" + "" + result + "\n------SUKSES-----")
elif "En@id" in msg.text:
bahasa_awal = 'en'
bahasa_tujuan = 'id'
kata = msg.text.replace("En@id ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----FROM EN----\n" + "" + kata + "\n----TO ID----\n" + "" + result + "\n------SUKSES-----")
elif "Id@jp" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'ja'
kata = msg.text.replace("Id@jp ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO JP----\n" + "" + result + "\n------SUKSES-----")
elif "Jp@id" in msg.text:
bahasa_awal = 'ja'
bahasa_tujuan = 'id'
kata = msg.text.replace("Jp@id ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----FROM JP----\n" + "" + kata + "\n----TO ID----\n" + "" + result + "\n------SUKSES-----")
elif "Id@th" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'th'
kata = msg.text.replace("Id@th ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO TH----\n" + "" + result + "\n------SUKSES-----")
elif "Th@id" in msg.text:
bahasa_awal = 'th'
bahasa_tujuan = 'id'
kata = msg.text.replace("Th@id ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----FROM TH----\n" + "" + kata + "\n----TO ID----\n" + "" + result + "\n------SUKSES-----")
elif "Id@jp" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'ja'
kata = msg.text.replace("Id@jp ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO JP----\n" + "" + result + "\n------SUKSES-----")
elif "Id@ar" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'ar'
kata = msg.text.replace("Id@ar ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO AR----\n" + "" + result + "\n------SUKSES-----")
elif "Ar@id" in msg.text:
bahasa_awal = 'ar'
bahasa_tujuan = 'id'
kata = msg.text.replace("Ar@id ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----FROM AR----\n" + "" + kata + "\n----TO ID----\n" + "" + result + "\n------SUKSES-----")
elif "Id@ko" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'ko'
kata = msg.text.replace("Id@ko ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO KO----\n" + "" + result + "\n------SUKSES-----")
elif "Ko@id" in msg.text:
bahasa_awal = 'ko'
bahasa_tujuan = 'id'
kata = msg.text.replace("Ko@id ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----FROM KO----\n" + "" + kata + "\n----TO ID----\n" + "" + result + "\n------SUKSES-----")
        elif msg.text.lower() == 'welcome':
            # Greet the room by name, in text plus a Thai gTTS voice clip.
            # NOTE(review): sendAudioWithUrl is given the LOCAL path
            # 'hasil.mp3', not a URL -- confirm the client accepts paths here.
            ginfo = cl.getGroup(msg.to)
            cl.sendText(msg.to,"Selamat Datang Di Grup " + str(ginfo.name))
            jawaban1 = ("ยินดีต้อนรับเข้าสู่กลุ่ม " + str(ginfo.name))
            cl.sendText(msg.to,"Owner Grup " + str(ginfo.name) + " :\n" + ginfo.creator.displayName )
            tts = gTTS(text=jawaban1, lang='th')
            tts.save('hasil.mp3')
            cl.sendAudioWithUrl(msg.to,'hasil.mp3')
        elif "Say-id " in msg.text:
            # Text-to-speech in Indonesian via gTTS.
            say = msg.text.replace("Say-id ","")
            lang = 'id'
            tts = gTTS(text=say, lang=lang)
            tts.save("hasil.mp3")
            cl.sendAudioWithUrl(msg.to,"hasil.mp3")
        elif "Say-en " in msg.text:
            say = msg.text.replace("Say-en ","")
            lang = 'en'
            tts = gTTS(text=say, lang=lang)
            tts.save("hasil.mp3")
            cl.sendAudioWithUrl(msg.to,"hasil.mp3")
        elif "Say-jp " in msg.text:
            say = msg.text.replace("Say-jp ","")
            lang = 'ja'
            tts = gTTS(text=say, lang=lang)
            tts.save("hasil.mp3")
            cl.sendAudioWithUrl(msg.to,"hasil.mp3")
        elif "Say-ar " in msg.text:
            say = msg.text.replace("Say-ar ","")
            lang = 'ar'
            tts = gTTS(text=say, lang=lang)
            tts.save("hasil.mp3")
            cl.sendAudioWithUrl(msg.to,"hasil.mp3")
        elif "Say-ko " in msg.text:
            say = msg.text.replace("Say-ko ","")
            lang = 'ko'
            tts = gTTS(text=say, lang=lang)
            tts.save("hasil.mp3")
            cl.sendAudioWithUrl(msg.to,"hasil.mp3")
        elif "Kapan " in msg.text:
            # Magic-8-ball style "when?" answer, spoken in Indonesian.
            tanya = msg.text.replace("Kapan ","")
            jawab = ("kapan kapan","besok","satu abad lagi","Hari ini","Tahun depan","Minggu depan","Bulan depan","Sebentar lagi")
            jawaban = random.choice(jawab)
            tts = gTTS(text=jawaban, lang='id')
            tts.save('tts.mp3')
            cl.sendAudioWithUrl(msg.to,'tts.mp3')
        elif "Apakah " in msg.text:
            # Magic-8-ball style yes/no answer, spoken in Indonesian.
            tanya = msg.text.replace("Apakah ","")
            jawab = ("Ya","Tidak","Mungkin","Bisa jadi")
            jawaban = random.choice(jawab)
            tts = gTTS(text=jawaban, lang='id')
            tts.save('tts.mp3')
            cl.sendAudioWithUrl(msg.to,'tts.mp3')
        elif '#dy ' in msg.text:
            # YouTube search: scrape the results page (Python 2 urllib2 +
            # BeautifulSoup) and send the first hit as a video.
            try:
                textToSearch = (msg.text).replace('#dy ', "").strip()
                query = urllib.quote(textToSearch)
                url = "https://www.youtube.com/results?search_query=" + query
                response = urllib2.urlopen(url)
                html = response.read()
                soup = BeautifulSoup(html, "html.parser")
                results = soup.find(attrs={'class':'yt-uix-tile-link'})
                ght = ('https://www.youtube.com' + results['href'])
                cl.sendVideoWithUrl(msg.to, ght)
            except:
                cl.sendText(msg.to,"Could not find it")
        elif 'mp4 ' in msg.text:
            # Same YouTube scrape under the 'mp4 ' trigger (substring match,
            # so any message containing "mp4 " fires this).
            try:
                textToSearch = (msg.text).replace('mp4 ',"").strip()
                query = urllib.quote(textToSearch)
                url = "https://www.youtube.com/results?search_query=" + query
                response = urllib2.urlopen(url)
                html = response.read()
                soup = BeautifulSoup(html, "html.parser")
                results = soup.find(attrs={'class':'yt-uix-tile-link'})
                ght = ('https://www.youtube.com' + results['href'])
                cl.sendVideoWithUrl(msg.to, ght)
            except:
                cl.sendText(msg.to, "Could not find it")
        elif "Lirik " in msg.text:
            # Fetch song lyrics from the joox proxy API; song[0]=title,
            # song[5]=lyrics (per observed indexing below).
            try:
                songname = msg.text.lower().replace("Lirik ","")
                params = {'songname': songname}
                r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
                data = r.text
                data = json.loads(data)
                for song in data:
                    hasil = 'Lyric Lagu ('
                    hasil += song[0]
                    hasil += ')\n\n'
                    hasil += song[5]
                    cl.sendText(msg.to, hasil)
            except Exception as wak:
                cl.sendText(msg.to, str(wak))
        elif "/vk " in msg.text:
            # Thai-language Wikipedia summary (title, one sentence, URL).
            try:
                wiki = msg.text.lower().replace("/vk ","")
                wikipedia.set_lang("th")
                pesan="Title ("
                pesan+=wikipedia.page(wiki).title
                pesan+=")\n\n"
                pesan+=wikipedia.summary(wiki, sentences=1)
                pesan+="\n"
                pesan+=wikipedia.page(wiki).url
                cl.sendText(msg.to, pesan)
            except:
                # Fallback: just send the article URL if the summary fails.
                try:
                    pesan="Over Text Limit! Please Click link\n"
                    pesan+=wikipedia.page(wiki).url
                    cl.sendText(msg.to, pesan)
                except Exception as e:
                    cl.sendText(msg.to, str(e))
elif "Music " in msg.text:
try:
songname = msg.text.lower().replace("Music ","")
params = {'songname': songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
hasil = 'This is Your Music\n'
hasil += 'Judul : ' + song[0]
hasil += '\nDurasi : ' + song[1]
hasil += '\nLink Download : ' + song[4]
cl.sendText(msg.to, hasil)
cl.sendText(msg.to, "Please Wait for audio...")
cl.sendAudioWithUrl(msg.to, song[4])
except Exception as njer:
cl.sendText(msg.to, str(njer))
elif "#Image " in msg.text:
search = msg.text.replace("Image ","")
url = 'https://www.google.com/search?espv=2&biw=1366&bih=667&tbm=isch&oq=kuc&aqs=mobile-gws-lite.0.0l5&q=' + search
raw_html = (download_page(url))
items = []
items = items + (_images_get_all_items(raw_html))
path = random.choice(items)
print path
try:
cl.sendImageWithUrl(msg.to,path)
except:
pass
elif "#ค้นหารูปภาพ:" in msg.text:
search = msg.text.replace("ค้นหารูปภาพ:","")
url = 'https://www.google.com/search?espv=2&biw=1366&bih=667&tbm=isch&oq=kuc&aqs=mobile-gws-lite.0.0l5&q=' + search
raw_html = (download_page(url))
items = []
items = items + (_images_get_all_items(raw_html))
path = random.choice(items)
print path
try:
cl.sendImageWithUrl(msg.to,path)
except:
pass
elif "#Profileig " in msg.text:
try:
instagram = msg.text.replace("#Profileig ","")
response = requests.get("https://www.instagram.com/"+instagram+"?__a=1")
data = response.json()
namaIG = str(data['user']['full_name'])
bioIG = str(data['user']['biography'])
mediaIG = str(data['user']['media']['count'])
verifIG = str(data['user']['is_verified'])
usernameIG = str(data['user']['username'])
followerIG = str(data['user']['followed_by']['count'])
profileIG = data['user']['profile_pic_url_hd']
privateIG = str(data['user']['is_private'])
followIG = str(data['user']['follows']['count'])
link = "Link: " + "https://www.instagram.com/" + instagram
text = "Name : "+namaIG+"\nUsername : "+usernameIG+"\nBiography : "+bioIG+"\nFollower : "+followerIG+"\nFollowing : "+followIG+"\nPost : "+mediaIG+"\nVerified : "+verifIG+"\nPrivate : "+privateIG+"" "\n" + link
cl.sendImageWithUrl(msg.to, profileIG)
cl.sendText(msg.to, str(text))
except Exception as e:
cl.sendText(msg.to, str(e))
elif "Checkdate " in msg.text:
tanggal = msg.text.replace("Checkdate ","")
r=requests.get('https://script.google.com/macros/exec?service=AKfycbw7gKzP-WYV2F5mc9RaR7yE3Ve1yN91Tjs91hp_jHSE02dSv9w&nama=ervan&tanggal='+tanggal)
data=r.text
data=json.loads(data)
lahir = data["data"]["lahir"]
usia = data["data"]["usia"]
ultah = data["data"]["ultah"]
zodiak = data["data"]["zodiak"]
cl.sendText(msg.to,"============ I N F O R M A S I ============\n"+"Date Of Birth : "+lahir+"\nAge : "+usia+"\nUltah : "+ultah+"\nZodiak : "+zodiak+"\n============ I N F O R M A S I ============")
elif msg.text in ["Kalender","Time","Waktu"]:
timeNow = datetime.now()
timeHours = datetime.strftime(timeNow,"(%H:%M)")
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
inihari = datetime.today()
hr = inihari.strftime('%A')
bln = inihari.strftime('%m')
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
for k in range(0, len(bulan)):
if bln == str(k): blan = bulan[k-1]
rst = hasil + ", " + inihari.strftime('%d') + " - " + blan + " - " + inihari.strftime('%Y') + "\nJam : [ " + inihari.strftime('%H:%M:%S') + " ]"
cl.sendText(msg.to, rst)
#==============================================================================#
elif msg.text.lower() == 'ifconfig':
botKernel = subprocess.Popen(["ifconfig"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO NetStat===")
elif msg.text.lower() == 'system':
botKernel = subprocess.Popen(["df","-h"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO SYSTEM===")
elif msg.text.lower() == 'kernel':
botKernel = subprocess.Popen(["uname","-srvmpio"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO KERNEL===")
elif msg.text.lower() == 'cpu':
botKernel = subprocess.Popen(["cat","/proc/cpuinfo"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO CPU===")
elif msg.text in ["Pmcheck","เชคดำ","เช็คดำ"]:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"Tidak Ada Blacklist")
else:
cl.sendText(msg.to,"Daftar Banlist")
num=1
msgs="══════════List Blacklist═════════"
for mi_d in wait["blacklist"]:
msgs+="\n[%i] %s" % (num, cl.getContact(mi_d).displayName)
num=(num+1)
msgs+="\n══════════List Blacklist═════════\n\nTotal Blacklist : %i" % len(wait["blacklist"])
cl.sendText(msg.to, msgs)
elif msg.text in ["Mcheckcontact","Cb"]:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"Tidak Ada Blacklist")
else:
cl.sendText(msg.to,"Daftar Blacklist")
h = ""
for i in wait["blacklist"]:
h = cl.getContact(i)
M = Message()
M.to = msg.to
M.contentType = 13
M.contentMetadata = {'mid': i}
cl.sendMessage(M)
elif msg.text in ["Midban","Mid ban"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
num=1
cocoa = "══════════List Blacklist═════════"
for mm in matched_list:
cocoa+="\n[%i] %s" % (num, mm)
num=(num+1)
cocoa+="\n═════════List Blacklist═════════\n\nTotal Blacklist : %i" % len(matched_list)
cl.sendText(msg.to,cocoa)
elif msg.text.lower() == '1kill':
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
ki1.sendText(msg.to,"Tidak ada Daftar Blacklist")
return
for jj in matched_list:
try:
ki1.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
#==============================================#
elif msg.text in ["in on"]:
if msg.from_ in admin:
if wait["pautoJoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["pautoJoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["in off"]:
if msg.from_ in admin:
if wait["pautoJoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["pautoJoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
elif "Hack4" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to,"[name]\n" + contact.displayName + "\n[mid]\n" + contact.mid + "\n[statusmessage]\n" + contact.statusMessage + "\n[profilePicture]\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[homePicture]\n" + str(cu))
except:
cl.sendText(msg.to,"[name]\n" + contact.displayName + "\n[mid]\n" + contact.mid + "\n[statusmessage]\n" + contact.statusMessage + "\n[homePicture]\n" + str(cu))
#=============================================
elif msg.text in ["!Sp"]:
start = time.time()
cl.sendText(msg.to, "Waiting...")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sTamii Server" % (elapsed_time))
# ----------------- BAN MEMBER BY TAG 2TAG ATAU 10TAG MEMBER
elif ("Bl " in msg.text):
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Succes Banned Bos")
except:
pass
#-------------Fungsi Respon Start---------------------#
elif msg.text in ["#Cinvite"]:
if msg.from_ in admin:
wait["winvite"] = True
cl.sendText(msg.to,"send contact 😉")
elif "Gift @" in msg.text:
_name = msg.text.replace("Gift @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
msg.contentType = 2
msg.contentMetadata={'PRDID': '89131c1a-e549-4bd5-9e60-e24de0d2e252',
'PRDTYPE': 'THEME',
'MSGTPL': '10'}
msg.text = None
cl.sendMessage(msg,g)
cl.sendText(msg.to, "Done...")
elif msg.text in ["Mchecky"]:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"nothing")
else:
cl.sendText(msg.to,"Blacklist user\nมีบัญชีดำของคุณอยู่กลุ่มนี้")
xname = ""
for mi_d in wait["blacklist"]:
xname = cl.getContact(mi_d).displayName + ""
xlen = str(len(xname)+1)
msg.contentType = 0
msg.text = "@"+xname+" "
msg.contentMetadata ={'MENTION':'{"MENTIONEES":[{"S":"0","E":'+json.dumps(xlen)+',"M":'+json.dumps(mm)+'}]}','EMTVER':'4'}
try:
cl.sendMessage(msg)
except Exception as error:
print error
elif "มอง" in msg.text:
group = cl.getGroup(msg.to)
k = len(group.members)//100
for j in xrange(k+1):
msg = Message(to=msg.to)
txt = u''
s=0
d=[]
for i in group.members[j*100 : (j+1)*100]:
d.append({"S":str(s), "E" :str(s+8), "M":i.mid})
s += 9
txt += "@Krampus\n"
msg.text = txt
msg.contentMetadata = {u'MENTION':json.dumps({"MENTIONEES":d})}
cl.sendMessage(msg)
elif msg.text in ["Name me","Men","ชื่อ"]:
G = cl.getProfile()
X = G.displayName
cl.sendText(msg.to,X)
elif "siri " in msg.text.lower():
query = msg.text.lower().replace("siri ","")
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'https://google-translate-proxy.herokuapp.com/api/tts'
params = {'language': 'th', 'speed': '1', 'query': query}
r = s.get(url, params=params)
mp3 = r.url
cl.sendAudioWithUrl(msg.to, mp3)
elif "siri:" in msg.text.lower():
query = msg.text.lower().replace("siri:","")
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'https://google-translate-proxy.herokuapp.com/api/tts'
params = {'language': 'th', 'speed': '1', 'query': query}
r = s.get(url, params=params)
mp3 = r.url
cl.sendAudioWithUrl(msg.to, mp3)
elif "siri-en " in msg.text.lower():
query = msg.text.lower().replace("siri-en ","")
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'https://google-translate-proxy.herokuapp.com/api/tts'
params = {'language': 'en', 'speed': '1', 'query': query}
r = s.get(url, params=params)
mp3 = r.url
cl.sendAudioWithUrl(msg.to, mp3)
elif "พูด " in msg.text.lower():
query = msg.text.lower().replace("พูด ","")
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'https://google-translate-proxy.herokuapp.com/api/tts'
params = {'language': 'th', 'speed': '1', 'query': query}
r = s.get(url, params=params)
mp3 = r.url
cl.sendAudioWithUrl(msg.to, mp3)
elif msg.text in ["1in","Bot1 in"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki1.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki1.updateGroup(G)
print "kickers_Ok"
G.preventJoinByTicket(G)
ki1.updateGroup(G)
elif msg.text in ["2in","Bot2 in"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki2.updateGroup(G)
print "kickers_Ok"
G.preventJoinByTicket(G)
ki2.updateGroup(G)
elif msg.text in ["3in","Bot3 in"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki3.updateGroup(G)
print "kickers_Ok"
G.preventJoinByTicket(G)
ki3.updateGroup(G)
elif msg.text in ["4in","Bot4 in"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki4.updateGroup(G)
print "kickers_Ok"
G.preventJoinByTicket(G)
ki4.updateGroup(G)
elif msg.text in ["5in","Bot5 in"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki5.updateGroup(G)
print "kickers_Ok"
G.preventJoinByTicket(G)
ki5.updateGroup(G)
elif msg.text in ["6in","Bot6 in"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki6.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki6.updateGroup(G)
print "kickers_Ok"
G.preventJoinByTicket(G)
ki6.updateGroup(G)
elif msg.text in ["7in","Bot7 in"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki7.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki7.updateGroup(G)
print "kickers_Ok"
G.preventJoinByTicket(G)
ki7.updateGroup(G)
elif msg.text in ["8in","Bot8 in"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki8.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki8.updateGroup(G)
print "kickers_Ok"
G.preventJoinByTicket(G)
ki8.updateGroup(G)
elif msg.text in ["9in","Bot9 in"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki9.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki9.updateGroup(G)
print "kickers_Ok"
G.preventJoinByTicket(G)
ki9.updateGroup(G)
elif msg.text in ["10in","Bot10 in"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki10.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki10.updateGroup(G)
print "kickers_Ok"
G.preventJoinByTicket(G)
ki10.updateGroup(G)
elif '/w ' in msg.text.lower():
try:
wiki = msg.text.lower().replace("/w ","")
wikipedia.set_lang("th")
pesan="Wikipedia : "
pesan+=wikipedia.page(wiki).title
pesan+="\n\n"
pesan+=wikipedia.summary(wiki, sentences=1)
pesan+="\n"
pesan+=wikipedia.page(wiki).url
cl.sendText(msg.to, pesan)
except:
try:
pesan="Text Terlalu Panjang Silahkan Click link di bawah ini\n"
pesan+=wikipedia.page(wiki).url
cl.sendText(msg.to, pesan)
except Exception as e:
cl.sendText(msg.to, str(e))
elif "/go " in msg.text:
tanggal = msg.text.replace("/go ","")
r=requests.get('https://script.google.com/macros/exec?service=AKfycbw7gKzP-WYV2F5mc9RaR7yE3Ve1yN91Tjs91hp_jHSE02dSv9w&nama=ervan&tanggal='+tanggal)
data=r.text
data=json.loads(data)
lahir = data["data"]["lahir"]
usia = data["data"]["usia"]
ultah = data["data"]["ultah"]
zodiak = data["data"]["zodiak"]
cl.sendText(msg.to,"Tanggal Lahir : "+lahir+"\n\nUmur : "+usia+"\n\nUltah : "+ultah+"\n\nZodiak : "+zodiak)
elif "declined" in msg.text:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
cl.leaveGroup(msg.to)
except:
pass
elif "[Auto] " in msg.text:
msg.contentType = 13
_name = msg.text.replace("[Auto] ","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
msg.contentMetadata = {'mid': g.mid}
cl.sendMessage(msg)
else:
pass
elif "☜ʕ•ﻌ•ʔ " in msg.text:
msg.contentType = 13
_name = msg.text.replace("☜ʕ•ﻌ•ʔ ","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
msg.contentMetadata = {'mid': g.mid}
cl.sendMessage(msg)
else:
pass
if op.type == 25:
msg = op.message
if msg.text.lower() in ["pheytcg fgtagg all"]:
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
nm1, nm2, nm3, nm4, nm5, jml = [], [], [], [], [], len(nama)
if jml <= 100:
mention(msg.to, nama)
if jml > 100 and jml < 200:
for i in range(0, 100):
nm1 += [nama[i]]
mention(msg.to, nm1)
for j in range(101, len(nama)):
nm2 += [nama[j]]
mention(msg.to, nm2)
if jml > 200 and jml < 300:
for i in range(0, 100):
nm1 += [nama[i]]
mention(msg.to, nm1)
for j in range(101, 200):
nm2 += [nama[j]]
mention(msg.to, nm2)
for k in range(201, len(nama)):
nm3 += [nama[k]]
mention(msg.to, nm3)
if jml > 300 and jml < 400:
for i in range(0, 100):
nm1 += [nama[i]]
mention(msg.to, nm1)
for j in range(101, 200):
nm2 += [nama[j]]
mention(msg.to, nm2)
for k in range(201, 300):
nm3 += [nama[k]]
mention(msg.to, nm3)
for l in range(301, len(nama)):
nm4 += [nama[l]]
mention(msg.to, nm4)
if jml > 400 and jml < 500:
for i in range(0, 100):
nm1 += [nama[i]]
mention(msg.to, nm1)
for j in range(101, 200):
nm2 += [nama[j]]
mention(msg.to, nm2)
for k in range(201, 300):
nm3 += [nama[k]]
mention(msg.to, nm3)
for l in range(301, 400):
nm4 += [nama[l]]
mention(msg.to, nm4)
for h in range(401, len(nama)):
nm5 += [nama[h]]
mention(msg.to, nm5)
if jml > 500:
cl.sendText(msg.to,'Member melebihi batas.')
cnt = Message()
cnt.text = "PHET TAG DONE : " + str(jml) + " Members"
cnt.to = msg.to
cl.sendMessage(cnt)
if op.type == 26:
msg = op.message
if msg.text.lower() in ["1123"]:
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
nm1, nm2, nm3, nm4, nm5, jml = [], [], [], [], [], len(nama)
if jml <= 100:
mention(msg.to, nama)
if jml > 100 and jml < 200:
for i in range(0, 100):
nm1 += [nama[i]]
mention(msg.to, nm1)
for j in range(101, len(nama)):
nm2 += [nama[j]]
mention(msg.to, nm2)
if jml > 200 and jml < 300:
for i in range(0, 100):
nm1 += [nama[i]]
mention(msg.to, nm1)
for j in range(101, 200):
nm2 += [nama[j]]
mention(msg.to, nm2)
for k in range(201, len(nama)):
nm3 += [nama[k]]
mention(msg.to, nm3)
if jml > 300 and jml < 400:
for i in range(0, 100):
nm1 += [nama[i]]
mention(msg.to, nm1)
for j in range(101, 200):
nm2 += [nama[j]]
mention(msg.to, nm2)
for k in range(201, 300):
nm3 += [nama[k]]
mention(msg.to, nm3)
for l in range(301, len(nama)):
nm4 += [nama[l]]
mention(msg.to, nm4)
if jml > 400 and jml < 500:
for i in range(0, 100):
nm1 += [nama[i]]
mention(msg.to, nm1)
for j in range(101, 200):
nm2 += [nama[j]]
mention(msg.to, nm2)
for k in range(201, 300):
nm3 += [nama[k]]
mention(msg.to, nm3)
for l in range(301, 400):
nm4 += [nama[l]]
mention(msg.to, nm4)
for h in range(401, len(nama)):
nm5 += [nama[h]]
mention(msg.to, nm5)
if jml > 500:
cl.sendText(msg.to,'Member melebihi batas.')
cnt = Message()
cnt.text = "PHET TAG DONE : " + str(jml) + " Members"
cnt.to = msg.to
cl.sendMessage(cnt)
elif msg.text in ["แทก","tag","Tag"]:
cl.sendText(msg.to,"แทก")
elif msg.text in ["ใวรัส","ไว้รัส","ค้าง"]:
cl.sendText(msg.to,"มึงตาย")
elif msg.text in ["เป้","พี่เป้","แมวเป้"]:
cl.sendText(msg.to,"🇹🇭เรียกทำไมพ่องตายหรอ🇹🇭")
elif msg.text in ["me","คท","Me"]:
cl.sendText(msg.to,"🇹🇭เชคจังเชลมึงเนาหรอ🇹🇭")
elif msg.text in ["555","5555","5555"]:
cl.sendText(msg.to,"🇹🇭ขำไมบ้าหรอคับ🇹🇭")
elif msg.text in ["กำ","สัส","สาส","ควย","เหี้ย"]:
cl.sendText(msg.to,"🇹🇭พ่องตายแม่ตาย🇹🇭")
elif msg.text in ["บิน","ขอนะ","เตะ"]:
cl.sendText(msg.to,"จัดมาเลยครับ")
cl.sendText(msg.to,"ป้องกันหมด")
elif "." == msg.text.lower():
if msg.to in wait2['readPoint']:
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
del wait2['setTime'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('%H:%M:%S')
wait2['ROM'][msg.to] = {}
with open('sider.json', 'w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
cl.sendText(msg.to,"Lurking already on\nเปิดการอ่านอัตโนมัตกรุณาพิมพ์ ➠ ..")
else:
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
del wait2['setTime'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('%H:%M:%S')
wait2['ROM'][msg.to] = {}
with open('sider.json', 'w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
cl.sendText(msg.to, "โปรเเกรมเปิดการอ่านอัตโนมัต\nSet reading point:\n" + datetime.now().strftime('%H:%M:%S'))
print wait2
elif "/ปิดการอ่าน" == msg.text.lower():
if msg.to not in wait2['readPoint']:
cl.sendText(msg.to,"Lurking already off\nปิดการอ่านอัตโนมัต")
else:
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
del wait2['setTime'][msg.to]
except:
pass
cl.sendText(msg.to, "ปิดการอ่านอัตโนมัต\nDelete reading point:\n" + datetime.now().strftime('%H:%M:%S'))
elif ".." == msg.text.lower():
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
cl.sendText(msg.to, "SELFBOT PHET HACK BOT\n\nLurkers:\nNone")
else:
chiya = []
for rom in wait2["ROM"][msg.to].items():
chiya.append(rom[1])
cmem = cl.getContacts(chiya)
zx = ""
zxc = ""
zx2 = []
xpesan = 'Lurkers:\n'
for x in range(len(cmem)):
xname = str(cmem[x].displayName)
pesan = ''
pesan2 = pesan+"@a\n"
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':cmem[x].mid}
zx2.append(zx)
zxc += pesan2
msg.contentType = 0
print zxc
msg.text = xpesan+ zxc + "\nLurking time: %s\nCurrent time: %s"%(wait2['setTime'][msg.to],datetime.now().strftime('%H:%M:%S'))
lol ={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}
print lol
msg.contentMetadata = lol
try:
cl.sendMessage(msg)
except Exception as error:
print error
pass
else:
cl.sendText(msg.to, "กรุณาตั้งเวลาการอ่านใหม่อีกครั้งโปรดพิมพ์ ➠ .")
elif msg.from_ in mimic["target"] and mimic["status"] == True and mimic["target"][msg.from_] == True:
text = msg.text
if text is not None:
cl.sendText(msg.to, "[อัตโนมัติ]: " + text)
else:
if msg.contentType == 7:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "6",
"STKPKGID": "1", "STKVER": "100" }
cl.sendMessage(msg)
if op.type == 15:
if wait["Notifed"] == True:
if op.param2 in Bots:
return
cl.sendText(op.param1,cl.getContact(op.param2).displayName + "\n เเล้วพบใหม่นะ ")
print "MEMBER OUT GROUP"
if op.type == 17:
if wait["Notifed"] == True:
if op.param2 in Bots:
return
cl.sendText(op.param1,cl.getContact(op.param2).displayName + " ☜ʕ•ﻌ•ʔ ")
cl.sendText(op.param1, " ยินดีต้อนรับครับ \n สวัสดีครับผม \n อย่าลืมปิดเสียงการเเจ้งเตือนด้วยนะ \n\n[By.เพชร ทีมทดลองบอท]")
print "MEMBER HAS JOIN THE GROUP"
if op.type == 19:
if wait["Notifed"] == True:
if op.param2 in Bots:
return
cl.sendText(op.param1,cl.getContact(op.param2).displayName + "\n ไม่น่าจะจุกเท่าไหร่หรอก ")
print "MEMBER HAS KICKOUT FROM THE GROUP"
if op.type == 15:
if wait["Notifedbot"] == True:
if op.param2 in Bots:
return
ki1.sendText(op.param1,cl.getContact(op.param2).displayName + "\n\n Bye~bye ")
ki2.sendText(op.param1,cl.getContact(op.param2).displayName + "\n\n Bye~bye ")
print "MEMBER OUT GROUP"
if op.type == 17:
if wait["Notifedbot"] == True:
if op.param2 in Bots:
return
ki1.sendText(op.param1,cl.getContact(op.param2).displayName + "\n\n\n\n[By. เพชร ทีมทดลองบอท]")
print "MEMBER HAS JOIN THE GROUP"
if op.type == 19:
if wait["Notifedbot"] == True:
if op.param2 in Bots:
return
ki1.sendText(op.param1,cl.getContact(op.param2).displayName + "\n ไม่น่าจะจุกเท่าไหร่หรอก ")
ki2.sendText(op.param1,cl.getContact(op.param2).displayName + "\n ไม่น่าจะจุกเท่าไหร่หรอก ")
print "MEMBER HAS KICKOUT FROM THE GROUP"
if op.type == 15:
if wait["bcommentOn"] == True:
if op.param2 in Bots:
return
cl.sendText(op.param1,cl.getContact(op.param2).displayName + "\n" + str(wait["bcomment"]))
print "MEMBER OUT GROUP"
if op.type == 17:
if wait["acommentOn"] == True:
if op.param2 in Bots:
return
cl.sendText(op.param1,cl.getContact(op.param2).displayName + "\n" + str(wait["acomment"]))
print "MEMBER HAS JOIN THE GROUP"
if op.type == 19:
if wait["ccommentOn"] == True:
if op.param2 in Bots:
return
cl.sendText(op.param1,cl.getContact(op.param2).displayName + "\n" + str(wait["ccomment"]))
print "MEMBER HAS KICKOUT FROM THE GROUP"
if op.type == 13:
if wait["Protectcancl"] == True:
if op.param2 not in Bots:
group = cl.getGroup(op.param1)
gMembMids = [contact.mid for contact in group.invitee]
random.choice(KAC).cancelGroupInvitation(op.param1, gMembMids)
if op.param3 == "1":
if op.param1 in protectname:
group = cl.getGroup(op.param1)
try:
group.name = wait["pro_name"][op.param1]
cl.updateGroup(group)
cl.sendText(op.param1, "Groupname protect now")
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except Exception as e:
print e
pass
if op.type == 55:
try:
if op.param1 in wait2['readPoint']:
if op.param2 in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += op.param2
wait2['ROM'][op.param1][op.param2] = op.param2
with open('sider.json', 'w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
else:
pass
except:
pass
if op.type == 59:
print op
except Exception as error:
print error
def autolike():
    """Background worker: auto-like (and optionally auto-comment on) new
    timeline posts, polling forever on a daemon thread.

    Reads the module-level ``cl`` client and ``wait`` settings dict.
    Gives up with ``sys.exit(0)`` after 50 failed polling attempts.
    """
    count = 1
    while True:
        try:
            for posts in cl.activity(1)["result"]["posts"]:
                if posts["postInfo"]["liked"] is False:
                    if wait["likeOn"] == True:
                        # NOTE(review): 1001 appears to be a reaction-type
                        # code expected by the LINE API — confirm.
                        cl.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
                    if wait["commentOn"] == True:
                        # Skip authors on the comment blacklist.
                        if posts["userInfo"]["writerMid"] not in wait["commentBlack"]:
                            cl.comment(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], wait["comment"])
        except Exception:
            # Was a bare "except:"; narrowed so SystemExit/KeyboardInterrupt
            # are not swallowed. Failures count toward the give-up limit.
            count += 1
            if count == 50:
                sys.exit(0)
        # Throttle the poll loop — the original spun with no delay and
        # hammered the timeline API at 100% CPU.
        time.sleep(1)
# Start the timeline auto-like worker in the background; daemon=True so the
# thread does not keep the process alive at shutdown.
thread1 = threading.Thread(target=autolike)
thread1.daemon = True
thread1.start()
def a2():
    """Return True while the caller should keep waiting; False once the
    current minute is an exact multiple of ten (:00, :10, ..., :50).

    Used as a busy-wait predicate by the clock display-name updater.
    """
    now2 = datetime.now()
    nowT = datetime.strftime(now2, "%M")
    # BUG FIX: "%M" yields a two-character minute string, so the original
    # slice nowT[14:] was always "" — the membership test could never match
    # and the function unconditionally returned True. Compare the minute
    # string directly instead.
    if nowT in ["10", "20", "30", "40", "50", "00"]:
        return False
    else:
        return True
def nameUpdate():
    """Background worker: while wait["clock"] is enabled, append the current
    time to the bot's display name, refreshing every 10 minutes.

    Reads the module-level ``cl`` client and ``wait`` settings dict.
    """
    while True:
        try:
            if wait["clock"] == True:
                now2 = datetime.now()
                nowT = datetime.strftime(now2,"༺%H:%M༻")
                profile = cl.getProfile()
                profile.displayName = wait["cName"] + nowT
                cl.updateProfile(profile)
        except Exception:
            # Narrowed from a bare "except:"; errors are best-effort ignored.
            pass
        # Sleep OUTSIDE the clock check: the original only slept after a
        # successful update, so with the clock disabled (or on an exception
        # raised before the sleep) this loop spun at 100% CPU.
        time.sleep(600)
# Start the clock display-name updater in the background; daemon=True so the
# thread does not keep the process alive at shutdown.
thread2 = threading.Thread(target=nameUpdate)
thread2.daemon = True
thread2.start()
# Main long-poll loop: fetch up to 5 pending operations per call, advance the
# local revision cursor past each one, and dispatch it to the bot() handler.
while True:
    try:
        Ops = cl.fetchOps(cl.Poll.rev, 5)
    except EOFError:
        # NOTE(review): EOFError is treated here as a stale/invalid stored
        # revision — confirm against the client library's behavior.
        raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
    for Op in Ops:
        if (Op.type != OpType.END_OF_OPERATION):
            cl.Poll.rev = max(cl.Poll.rev, Op.revision)
            bot(Op)
|
adc.py | # Copyright 2019 The OpenRadar Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import codecs
import socket
import struct
from enum import Enum
import threading
import numpy as np
import time
from multiprocessing import Process, Queue
class CMD(Enum):
    """Command codes accepted by the capture-card FPGA.

    Each member's value is the command code as a little-endian hex string,
    and ``str(member)`` yields that code directly for message assembly.
    """
    RESET_FPGA_CMD_CODE = '0100'
    RESET_AR_DEV_CMD_CODE = '0200'
    CONFIG_FPGA_GEN_CMD_CODE = '0300'
    CONFIG_EEPROM_CMD_CODE = '0400'
    RECORD_START_CMD_CODE = '0500'
    RECORD_STOP_CMD_CODE = '0600'
    PLAYBACK_START_CMD_CODE = '0700'
    PLAYBACK_STOP_CMD_CODE = '0800'
    SYSTEM_CONNECT_CMD_CODE = '0900'
    SYSTEM_ERROR_CMD_CODE = '0a00'
    CONFIG_PACKET_DATA_CMD_CODE = '0b00'
    CONFIG_DATA_MODE_AR_DEV_CMD_CODE = '0c00'
    INIT_FPGA_PLAYBACK_CMD_CODE = '0d00'
    READ_FPGA_VERSION_CMD_CODE = '0e00'

    def __str__(self):
        # Values are already strings, so return the code as-is.
        return self.value
# MESSAGE = codecs.decode(b'5aa509000000aaee', 'hex')
# Framing fields for configuration messages (hex-string form).
CONFIG_HEADER = '5aa5'
CONFIG_STATUS = '0000'
CONFIG_FOOTER = 'aaee'
# STATIC
MAX_PACKET_SIZE = 1514
# Raw ADC payload bytes per UDP packet (used by the streamer below to track
# frame offsets).
BYTES_IN_PACKET = 1456
# BYTES_IN_PACKET = 1462
# Pre-built command messages (header + cmd code + payload + footer).
# NOTE(review): payload meanings are opaque here — confirm against the
# capture-card protocol documentation.
CMD_FPGA = '5AA50300060001010102031EAAEE'
CMD_RECORD = '5AA50B000600BE05350C0000AAEE'
CMD_START_RECORD = '5AA509000000AAEE'
CMD_START_RECORD_2 = 'd'
CMD_STOP_RECORD = ''
class AdcDataStreamer():
    def __init__(self, data_recv_cfg, bytes_in_frame, timeout=1, udp_raw_data=False, udp_raw_data_dir=None,
                 log_error_func=print, log_warning_func=print, log_info_func=print):
        """Open and bind the UDP data socket and set up per-frame buffers.

        data_recv_cfg: (host, port) address the data socket binds to
            (presumably where the capture card streams to — confirm).
        bytes_in_frame: size of one complete radar frame in bytes.
        timeout: socket timeout (seconds) applied while streaming.
        udp_raw_data: when True, raw UDP payloads are also written to the
            file opened at udp_raw_data_dir.
        log_error_func / log_warning_func / log_info_func: callables used
            for diagnostic output (default: print).
        """
        self.data_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
        # Bind data socket to fpga
        print("DCA IP:", data_recv_cfg)
        self.data_socket.bind(data_recv_cfg)
        self.timeout = timeout
        self.bytes_in_frame = bytes_in_frame
        # print("bytes in frame:", bytes_in_frame)
        self.uint16_in_frame = bytes_in_frame // 2
        self.uint16_in_packet = BYTES_IN_PACKET // 2
        # startup stays True until the beginning of a frame has been located.
        self.startup = True
        self.running = True
        # Write cursor (in bytes) into the frame currently being assembled.
        self.frame_byte_idx = 0
        self.backup_file = open(udp_raw_data_dir, "wb+") if udp_raw_data else None
        # self.ret_frame = np.zeros(self.uint16_in_frame, dtype=np.int16)
        # self.ret_frame = np.zeros(self.bytes_in_frame, dtype=bytes)
        # self.ret_frame = bytearray(self.bytes_in_frame)
        self.ret_frame = np.zeros(self.bytes_in_frame, dtype=np.uint8)
        # Bookkeeping for detecting dropped packets between reads.
        self.last_byte_count = 0
        self.last_packet_num = 0
        self.last_frame_byte_idx = 0
        self.lost_packages = 0
        self.log_error_func = log_error_func
        self.log_warning_func = log_warning_func
        self.log_info_func = log_info_func
        print("Bytes in Frame:", self.bytes_in_frame)
def is_set_up(self, timeout=1):
try:
self.data_socket.settimeout(timeout)
packet_num, byte_count, packet_data = self._read_data_packet()
return True
except socket.timeout as e:
return False
def retry_streaming(self):
self.log_warning_func("Retrying for new udp data...")
while not self.is_set_up(timeout=60):
self.log_warning_func("No data...")
packet_num, byte_count, packet_data = self._read_data_packet()
return packet_num, byte_count, packet_data
def stream(self, data_queue, time_queue):
# while self.running:
# self.data_socket.settimeout(2)
# all_data = []
# print("Start")
# while True:
# try:
# p_num, b_count, data = self._read_data_packet()
# print(data)
# all_data.extend(data)
# except Exception as e:
# print(e)
# data_queue.put(all_data)
# self.running = False
# return all_data
ret_frame = np.zeros(self.bytes_in_frame, dtype=np.uint8)
time_read = time.time()
while self.running:
while self.startup: # Wait for start of next frame
self.data_socket.settimeout(self.timeout)
try:
packet_num, byte_count, packet_data = self._read_data_packet()
except Exception as e:
self.log_error_func(e)
packet_num, byte_count, packet_data = self.retry_streaming()
self.last_byte_count = byte_count
self.last_packet_num = packet_num
if (byte_count + BYTES_IN_PACKET) % self.bytes_in_frame < BYTES_IN_PACKET: # got first bytes of new frame
# self.frame_byte_idx = (byte_count % self.bytes_in_frame) // 2 or BYTES_IN_PACKET // 2 # old
self.frame_byte_idx = ((byte_count + BYTES_IN_PACKET) % self.bytes_in_frame) or len(packet_data)
# if self.frame_byte_idx > len(packet_data):
# self.frame_byte_idx = len(packet_data)
# print("FrameByteIdx 1", self.frame_byte_idx)
# print("Range: ", 0, " ", self.frame_byte_idx)
# self.ret_frame[0:self.frame_byte_idx] = packet_data[-self.frame_byte_idx:]
ret_frame[0:self.frame_byte_idx] = packet_data[len(packet_data) - self.frame_byte_idx:]
# print(packet_data[len(packet_data) - self.frame_byte_idx:20])
self.startup = False
# break
self.data_socket.settimeout(self.timeout)
try:
packet_num, byte_count, packet_data = self._read_data_packet()
except Exception as e:
self.log_error_func(e)
packet_num, byte_count, packet_data = self.retry_streaming()
# print(packet_num, byte_count, len(packet_data))
# new_byte_count = byte_count - self.last_byte_count
# new_byte_idx = new_byte_count // 2
new_byte_idx = len(packet_data)
# print(new_byte_idx)
if self.last_packet_num + 1 == packet_num: # check if packets are being dropped
# if self.frame_byte_idx + new_byte_idx >= self.uint16_in_frame: # old
# print(self.frame_byte_idx + new_byte_idx, self.bytes_in_frame)
if self.frame_byte_idx + new_byte_idx >= self.bytes_in_frame: # got a whole frame, put it in queue
# overshoot_idx = self.frame_byte_idx + new_byte_idx - self.uint16_in_frame # old
# print("FrameByteIdx 2", self.frame_byte_idx)
# print("Range: ", self.frame_byte_idx, " end")
ret_frame[self.frame_byte_idx:] = \
packet_data[:self.bytes_in_frame - self.frame_byte_idx]
# packet_data[:self.uint16_in_frame - self.frame_byte_idx] # old
data_queue.put(ret_frame.tobytes())
time_queue.put(time_read)
added_bytes = self.bytes_in_frame - self.frame_byte_idx
self.frame_byte_idx = new_byte_idx - added_bytes
# to_add = new_byte_idx - self.frame_byte_idx
# overshoot_idx = self.frame_byte_idx + new_byte_idx - self.bytes_in_frame
# self.frame_byte_idx = new_byte_idx - overshoot_idx # new_byte_count - (self.bytes_in_frame // 2 - self.frame_byte_idx)
# print("FrameByteIdx 3", self.frame_byte_idx)
# print("Range: ", 0, " ", self.frame_byte_idx)
if self.frame_byte_idx > 0:
ret_frame[:self.frame_byte_idx] = packet_data[-self.frame_byte_idx:]
# else:
# self.frame_byte_idx = BYTES_IN_PACKET
time_read = time.time()
else:
# print("FrameByteIdx 4", self.frame_byte_idx)
# print("Range: ", self.frame_byte_idx, " ", self.frame_byte_idx + new_byte_idx)
ret_frame[self.frame_byte_idx:self.frame_byte_idx + new_byte_idx] = packet_data
self.frame_byte_idx += new_byte_idx
else:
self.lost_packages += 1
self.log_warning_func("Lost package count: {}".format(self.lost_packages))
self.startup = True
self.last_byte_count = byte_count
self.last_packet_num = packet_num
self.last_frame_byte_idx= self.frame_byte_idx
def setup(self):
pass
def _read_data_packet(self):
"""Helper function to read in a single ADC packet via UDP
Returns:
int: Current packet number, byte count of data that has already been read, raw ADC data in current packet
"""
data, addr = self.data_socket.recvfrom(MAX_PACKET_SIZE) # TODO handle timeouts?
packet_num = struct.unpack('<1l', data[:4])[0]
byte_count = struct.unpack('>Q', b'\x00\x00' + data[4:10][::-1])[0]
# packet_data = data[10:]
packet_data = np.frombuffer(data[10:], dtype=np.uint8)
if self.backup_file is not None: # backup raw UDP traffic
self.backup_file.write(packet_data)
# packet_data = np.frombuffer(data[10:], dtype=np.int16)
return packet_num, byte_count, packet_data
def close(self):
self.data_socket.close()
class DCA1000:
    """Software interface to the DCA1000 EVM board via ethernet.

    Attributes:
        static_ip (str): IP to receive data from the FPGA
        adc_ip (str): IP to send configuration commands to the FPGA
        data_port (int): Port that the FPGA is using to send data
        config_port (int): Port that the FPGA is using to read configuration commands from

    General steps are as follows:
        1. Power cycle DCA1000 and XWR1xxx sensor
        2. Open mmWaveStudio and setup normally until tab SensorConfig or use lua script
        3. Make sure to connect mmWaveStudio to the board via ethernet
        4. Start streaming data
        5. Read in frames using class

    Examples:
        >>> dca = DCA1000()
        >>> adc_data = dca.read(timeout=.1)
        >>> frame = dca.organize(adc_data, 128, 4, 256)
    """

    def __init__(self, static_ip='192.168.33.30', adc_ip='192.168.33.180', timeout=1,
                 data_port=4098, config_port=4096, num_loops_per_frame=16, num_rx=4, num_tx=3, num_adc_samples=240,
                 udp_raw_data=0, udp_raw_data_dir=None,
                 log_error_func=print, log_warning_func=print, log_info_func=print):
        # Frame geometry -> total payload size of one radar frame in bytes.
        adc_params = {'chirps': num_loops_per_frame,
                      'rx': num_rx,
                      'tx': num_tx,
                      'samples': num_adc_samples,
                      'IQ': 2,
                      'bytes': 2}
        self.bytes_in_frame = (adc_params['chirps'] * adc_params['rx'] * adc_params['tx'] *
                               adc_params['IQ'] * adc_params['samples'] * adc_params['bytes'])
        self.bytes_in_frame_clipped = (self.bytes_in_frame // BYTES_IN_PACKET) * BYTES_IN_PACKET
        self.packets_in_frame = self.bytes_in_frame / BYTES_IN_PACKET
        self.packets_in_frame_clipped = self.bytes_in_frame // BYTES_IN_PACKET
        self.uint16_in_packet = BYTES_IN_PACKET // 2
        self.uint16_in_frame = self.bytes_in_frame // 2
        # Configuration and data endpoints.
        self.cfg_dest = (adc_ip, config_port)
        self.cfg_recv = (static_ip, config_port)
        self.data_recv = (static_ip, data_port)
        self.config_socket = socket.socket(socket.AF_INET,
                                           socket.SOCK_DGRAM,
                                           socket.IPPROTO_UDP)
        # The data socket lives inside the streamer, which binds data_recv.
        self.adc_data_streamer = AdcDataStreamer(self.data_recv, self.bytes_in_frame, timeout=timeout,
                                                 udp_raw_data=udp_raw_data, udp_raw_data_dir=udp_raw_data_dir,
                                                 log_error_func=log_error_func, log_warning_func=log_warning_func,
                                                 log_info_func=log_info_func)
        # Bind config socket to fpga
        self.config_socket.bind(self.cfg_recv)
        self.data = []
        self.packet_count = []
        self.byte_count = []
        self.frame_buff = []
        self.curr_buff = None
        self.last_frame = None
        self.lost_packets = None
        self.producer = None
        self.data_queue = None
        self.time_queue = None

    def setup(self):
        """Delegate any extra setup to the streamer (currently a no-op)."""
        self.adc_data_streamer.setup()

    def start_streaming(self, data_queue, time_queue=None):
        """Start a producer process filling *data_queue*/*time_queue*.

        Args:
            data_queue: Queue receiving raw frame bytes.
            time_queue: Optional queue receiving read timestamps. Fix: a
                fresh bounded Queue is created per call; the old mutable
                default `Queue(30)` was evaluated once at import time and
                shared by every caller.
        """
        if time_queue is None:
            time_queue = Queue(30)
        self.data_queue = data_queue
        self.time_queue = time_queue
        self.producer = Process(target=self.adc_data_streamer.stream, args=(data_queue, time_queue,))
        self.producer.start()

    def read_adc(self):
        """Return the next (frame bytes, timestamp) pair from the producer."""
        return self.data_queue.get(), self.time_queue.get()

    def configure(self):
        """Initializes and connects to the FPGA

        Returns:
            None
        """
        # SYSTEM_CONNECT_CMD_CODE: 5a a5 09 00 00 00 aa ee
        print(self._send_command(CMD.SYSTEM_CONNECT_CMD_CODE))
        # READ_FPGA_VERSION_CMD_CODE: 5a a5 0e 00 00 00 aa ee
        print(self._send_command(CMD.READ_FPGA_VERSION_CMD_CODE))
        # CONFIG_FPGA_GEN_CMD_CODE
        # NOTE(review): this body equals the packet-data body below, not the
        # fpga-config body '01020102031e' suggested by CMD_FPGA -- verify.
        print(self._send_command(CMD.CONFIG_FPGA_GEN_CMD_CODE, '0600', 'c005350c0000'))
        # CONFIG_PACKET_DATA_CMD_CODE: 5a a5 0b 00 06 00 c0 05 35 0c 00 00 aa ee
        print(self._send_command(CMD.CONFIG_PACKET_DATA_CMD_CODE, '0600', 'c005350c0000'))

    def close(self):
        """Closes the sockets that are used for receiving and sending data

        Returns:
            None
        """
        self.adc_data_streamer.data_socket.close()
        self.config_socket.close()

    def read(self, timeout=30):
        """Read one full frame synchronously (without the producer process).

        Args:
            timeout (float): Time to wait for a packet before moving on.

        Returns:
            Full frame as int16 array once a frame boundary is hit.
        """
        # Fix: the data socket moved into AdcDataStreamer; this class no
        # longer has a `self.data_socket` attribute.
        data_socket = self.adc_data_streamer.data_socket
        data_socket.settimeout(timeout)
        # Frame buffer
        ret_frame = np.zeros(self.uint16_in_frame, dtype=np.int16)
        frame_byte_idx = 0
        packets_read = 1
        # Wait for the start of the next frame.
        while True:
            packet_num, byte_count, packet_data = self._read_data_packet()
            # NOTE(review): 1514/1500 look like hard-coded stand-ins for
            # BYTES_IN_PACKET -- confirm against the capture format.
            if byte_count % self.bytes_in_frame < 1514:
                frame_byte_idx = byte_count % 1500
                ret_frame[0:frame_byte_idx] = packet_data[-frame_byte_idx:]
                break
        # Read in the rest of the frame.
        while True:
            packet_num, byte_count, packet_data = self._read_data_packet()
            packets_read += 1
            if byte_count % self.bytes_in_frame_clipped == 0:
                self.lost_packets = self.packets_in_frame_clipped - packets_read
                return ret_frame
            curr_idx = ((packet_num - 1) % self.packets_in_frame_clipped)
            try:
                ret_frame[curr_idx * self.uint16_in_packet:(curr_idx + 1) * self.uint16_in_packet] = packet_data
            except Exception:
                # Short/odd packet that does not fit its slot; drop it.
                pass
            if packets_read > self.packets_in_frame_clipped:
                packets_read = 0

    def _send_command(self, cmd, length='0000', body='', timeout=4):
        """Send one framed command to the FPGA and return its raw response.

        Args:
            cmd (CMD): Command code to send to the FPGA
            length (str): Length of the body of the command (if any), hex
            body (str): Body information of the command, hex
            timeout (int): Seconds to wait for socket data until timeout

        Returns:
            Response message ('' when the read timed out)
        """
        self.config_socket.settimeout(timeout)
        resp = ''
        msg = codecs.decode(''.join((CONFIG_HEADER, str(cmd), length, body, CONFIG_FOOTER)), 'hex')
        try:
            self.config_socket.sendto(msg, self.cfg_dest)
            resp, addr = self.config_socket.recvfrom(MAX_PACKET_SIZE)
        except socket.timeout as e:
            print(e)
        return resp

    def _send_custom_command(self, cmd, timeout=4):
        """Send a fully pre-framed hex command string to the FPGA.

        Args:
            cmd (str): Complete command as hex (header and footer included)
            timeout (int): Seconds to wait for socket data until timeout

        Returns:
            Response message ('' when the read timed out)
        """
        self.config_socket.settimeout(timeout)
        resp = ''
        msg = codecs.decode(cmd, 'hex')
        try:
            print(self.config_socket.sendto(msg, self.cfg_dest))
            resp, addr = self.config_socket.recvfrom(MAX_PACKET_SIZE)
        except socket.timeout as e:
            print(e)
        return resp

    def _read_data_packet(self):
        """Read a single ADC packet via UDP.

        Returns:
            tuple: (packet number, running byte count, int16 payload array)
        """
        # Fix: use the streamer's bound data socket (self.data_socket is gone).
        data, addr = self.adc_data_streamer.data_socket.recvfrom(MAX_PACKET_SIZE)
        packet_num = struct.unpack('<1l', data[:4])[0]
        # 6-byte little-endian counter, zero-extended into a big-endian Q.
        byte_count = struct.unpack('>Q', b'\x00\x00' + data[4:10][::-1])[0]
        packet_data = np.frombuffer(data[10:], dtype=np.int16)  # TODO
        return packet_num, byte_count, packet_data

    def _listen_for_error(self):
        """Block on the config port and report a stop message if one arrives.

        Returns:
            None
        """
        self.config_socket.settimeout(None)
        msg = self.config_socket.recvfrom(MAX_PACKET_SIZE)
        if msg == b'5aa50a000300aaee':
            print('stopped:', msg)

    def _stop_stream(self):
        """Send the record-stop command to the FPGA.

        Returns:
            Response message
        """
        return self._send_command(CMD.RECORD_STOP_CMD_CODE)

    @staticmethod
    def organize(raw_frame, num_chirps, num_rx, num_samples, num_loops_per_frame, num_tx):
        """Reorganizes raw ADC data into a full frame.

        Fix: restored @staticmethod (it was commented out), so the
        documented `dca.organize(...)` call works again.

        Args:
            raw_frame (ndarray): int16 data to format
            num_chirps: Number of chirps included in the frame
            num_rx: Number of receivers used in the frame
            num_samples: Number of ADC samples included in each chirp
            num_loops_per_frame: Chirp loops per frame
            num_tx: Number of TX antennas

        Returns:
            ndarray: complex frame of shape (num_chirps, num_rx, num_samples);
            when num_chirps != num_tx * num_loops_per_frame, every
            num_tx_data-th chirp (0-based offset num_tx_data-1) is dropped.
        """
        ret = np.zeros(len(raw_frame) // 2, dtype=complex)
        # De-interleave IQ: samples arrive as 4 real then 4 imaginary values.
        ret[0::4] = raw_frame[0::8] + 1j * raw_frame[4::8]
        ret[1::4] = raw_frame[1::8] + 1j * raw_frame[5::8]
        ret[2::4] = raw_frame[2::8] + 1j * raw_frame[6::8]
        ret[3::4] = raw_frame[3::8] + 1j * raw_frame[7::8]
        ret = ret.reshape((num_chirps, num_samples, num_rx))
        if num_chirps != num_tx * num_loops_per_frame:  # only use n-TX antennas
            num_tx_data = num_chirps // num_loops_per_frame
            ret = np.delete(ret, slice(num_tx_data-1, None, num_tx_data), 0)
        ret = ret.transpose((0, 2, 1))
        return ret
|
util.py | import wx
import os
import re
import time
import base64
import calendar
import urllib2
import urlparse
import threading
# not using in live bus
#import feedparser
from htmlentitydefs import name2codepoint
from settings import settings
def set_icon(window):
    """Attach the application icon (all bundled sizes) to *window*."""
    bundle = wx.IconBundle()
    bundle.AddIcon(wx.Icon('icons/16.png', wx.BITMAP_TYPE_PNG))
    bundle.AddIcon(wx.Icon('icons/24.png', wx.BITMAP_TYPE_PNG))
    bundle.AddIcon(wx.Icon('icons/32.png', wx.BITMAP_TYPE_PNG))
    bundle.AddIcon(wx.Icon('icons/48.png', wx.BITMAP_TYPE_PNG))
    bundle.AddIcon(wx.Icon('icons/256.png', wx.BITMAP_TYPE_PNG))
    window.SetIcons(bundle)
def start_thread(func, *args):
    """Run ``func(*args)`` on a daemon thread and return the started thread.

    Daemon so a hung worker never blocks interpreter shutdown; uses the
    ``daemon`` attribute instead of the deprecated ``setDaemon()``.
    """
    thread = threading.Thread(target=func, args=args)
    thread.daemon = True
    thread.start()
    return thread
def scale_bitmap(bitmap, width, height, color):
    """Return *bitmap* scaled to width x height over a solid *color* background.

    A negative width or height keeps the corresponding original dimension;
    the bitmap is returned unchanged when no scaling is needed.
    """
    bw, bh = bitmap.GetWidth(), bitmap.GetHeight()
    if bw == width and bh == height:
        return bitmap
    if width < 0:
        width = bw
    if height < 0:
        height = bh
    # Composite onto a solid-colour buffer first so transparent regions pick
    # up the requested background colour before scaling.
    buffer = wx.EmptyBitmap(bw, bh)
    dc = wx.MemoryDC(buffer)
    dc.SetBackground(wx.Brush(color))
    dc.Clear()
    dc.DrawBitmap(bitmap, 0, 0, True)
    image = wx.ImageFromBitmap(buffer)
    image = image.Scale(width, height, wx.IMAGE_QUALITY_HIGH)
    result = wx.BitmapFromImage(image)
    return result
def menu_item(menu, label, func, icon=None, kind=wx.ITEM_NORMAL):
    """Create a menu item, optionally bind *func* and set *icon*, append it
    to *menu*, and return it."""
    item = wx.MenuItem(menu, -1, label, kind=kind)
    if func:
        menu.Bind(wx.EVT_MENU, func, id=item.GetId())
    if icon:
        item.SetBitmap(wx.Bitmap(icon))
    menu.AppendItem(item)
    return item
def select_choice(choice, data):
    """Select the entry of *choice* whose client data equals *data*;
    clear the selection (wx.NOT_FOUND) when no entry matches."""
    for index in range(choice.GetCount()):
        if choice.GetClientData(index) == data:
            choice.Select(index)
            return
    choice.Select(wx.NOT_FOUND)
def get_top_window(window):
    """Follow GetParent() links upward and return the topmost window,
    or None when *window* is already None/falsy."""
    top = None
    current = window
    while current:
        top = current
        current = current.GetParent()
    return top
def get(obj, key, default):
    """Mapping lookup that falls back to *default* for missing OR falsy values."""
    found = obj.get(key, None)
    if found:
        return found
    return default
def abspath(path):
    """Return *path* as an absolute file:/// URL using forward slashes."""
    absolute = os.path.abspath(path).replace('\\', '/')
    return 'file:///' + absolute
def parse(url, username=None, password=None, etag=None, modified=None):
    """Fetch and parse a feed URL (feedparser disabled in this build).

    The user agent, proxy handler and optional URL credentials are prepared,
    but the feedparser call is commented out, so this currently always
    returns False.
    """
    agent = settings.USER_AGENT
    handlers = [get_proxy()]
    if username and password:
        url = insert_credentials(url, username, password)
    # not using in live bus.
    #return feedparser.parse(url, etag=etag, modified=modified, agent=agent, handlers=handlers)
    return False
def is_valid_feed(data):
    """Return a truthy value when a feedparser result has entries, a title,
    or a link -- i.e. it parsed as something feed-shaped."""
    entries = get(data, 'entries', [])
    title = get(data.feed, 'title', '')
    link = get(data.feed, 'link', '')
    return entries or title or link
def insert_credentials(url, username, password):
    """Return *url* with username:password embedded in its network location,
    replacing any credentials already present."""
    parts = list(urlparse.urlsplit(url))
    host = parts[1]
    if '@' in host:
        # Drop existing credentials up to the first '@'.
        host = host.split('@', 1)[1]
    parts[1] = '%s:%s@%s' % (username, password, host)
    return urlparse.urlunsplit(tuple(parts))
def encode_password(password):
    """Obfuscate *password* with base64; empty/None passes through as None."""
    if not password:
        return None
    return base64.b64encode(password)
def decode_password(password):
    """Reverse encode_password(); None for empty input or undecodable data."""
    if not password:
        return None
    try:
        return base64.b64decode(password)
    except Exception:
        return None
def get_proxy():
    """Build a urllib2 ProxyHandler from the application settings.

    Uses the user-configured proxy URL when set, the system-configured
    proxy otherwise, and no proxy at all when proxying is disabled.
    """
    if settings.USE_PROXY:
        # The stored proxy URL is base64-obfuscated like passwords.
        url = decode_password(settings.PROXY_URL)
        if url:
            # User-configured Proxy
            map = {
                'http': url,
                'https': url,
            }
            proxy = urllib2.ProxyHandler(map)
        else:
            # Windows-configured Proxy
            proxy = urllib2.ProxyHandler()
    else:
        # No Proxy
        proxy = urllib2.ProxyHandler({})
    return proxy
def find_themes():
    """Return available theme names (currently hard-coded to ['default']).

    The directory scan below is intentionally unreachable until more themes
    ship; see the TODO.
    """
    return ['default'] # TODO: more themes!
    result = []
    names = os.listdir('themes')
    for name in names:
        if name.startswith('.'):
            continue
        path = os.path.join('themes', name)
        if os.path.isdir(path):
            result.append(name)
    return result
def guess_polling_interval(entries):
    """Pick a feed polling interval from the spacing of its entries.

    Uses half the mean gap between consecutive entry timestamps, snapped
    down to the largest preset interval that fits; falls back to the
    configured default for fewer than two entries or a zero mean gap.
    """
    if len(entries) < 2:
        return settings.DEFAULT_POLLING_INTERVAL
    timestamps = []
    for entry in entries:
        # Entries without a parsed date count as "now".
        timestamp = calendar.timegm(get(entry, 'date_parsed', time.gmtime()))
        timestamps.append(timestamp)
    timestamps.sort()
    # Gaps (seconds) between consecutive entries.
    durations = [b - a for a, b in zip(timestamps, timestamps[1:])]
    mean = sum(durations) / len(durations)
    # Preset intervals from one minute up to one day.
    choices = [
        60,
        60*5,
        60*10,
        60*15,
        60*30,
        60*60,
        60*60*2,
        60*60*4,
        60*60*8,
        60*60*12,
        60*60*24,
    ]
    desired = mean / 2
    if desired == 0:
        interval = settings.DEFAULT_POLLING_INTERVAL
    elif desired < choices[0]:
        interval = choices[0]
    else:
        interval = max(choice for choice in choices if choice <= desired)
    return interval
def time_since(t):
    """Return a human-readable "<n> <unit>" string for the time elapsed
    since epoch timestamp *t* (clamped at zero for future timestamps).

    Fix: uses floor division so the unit math is correct on Python 3 too
    (plain `/` yielded floats there, producing e.g. '1 minutes').
    """
    t = int(t)
    now = int(time.time())
    seconds = max(now - t, 0)
    if seconds == 1:
        return '1 second'
    if seconds < 60:
        return '%d seconds' % seconds
    minutes = seconds // 60
    if minutes == 1:
        return '1 minute'
    if minutes < 60:
        return '%d minutes' % minutes
    hours = minutes // 60
    if hours == 1:
        return '1 hour'
    if hours < 24:
        return '%d hours' % hours
    days = hours // 24
    if days == 1:
        return '1 day'
    return '%d days' % days
def split_time(seconds):
    """Reduce *seconds* to (interval, unit index) where the unit index is
    0=seconds, 1=minutes, 2=hours, 3=days.

    Fix: uses floor division so the result stays integral on Python 3
    (plain `/` yielded floats there).
    """
    if seconds < 60:
        return seconds, 0
    minutes = seconds // 60
    if minutes < 60:
        return minutes, 1
    hours = minutes // 60
    days = hours // 24
    # Only report days when the duration is an exact number of days.
    if days and hours % 24 == 0:
        return days, 3
    return hours, 2
def split_time_str(seconds):
    """Format *seconds* as a human-readable string, e.g. '5 minutes'."""
    interval, units = split_time(seconds)
    unit_names = ('second', 'minute', 'hour', 'day')
    suffix = '' if interval == 1 else 's'
    return '%d %s' % (interval, unit_names[units] + suffix)
def pretty_name(name):
    """Title-case an underscore_name and put a space before each digit run."""
    title = ' '.join(word.title() for word in name.split('_'))
    chars = []
    previous = '0'  # digit sentinel: a leading digit gets no extra space
    for ch in title:
        if ch.isdigit() and not previous.isdigit():
            chars.append(' ')
        chars.append(ch)
        previous = ch
    return ''.join(chars)
def replace_entities1(text):
    """Replace numeric character references (e.g. &#123;) with characters.

    NOTE(review): relies on Python 2's unichr(); under Python 3 the
    NameError is swallowed by the broad except and entities pass through.
    """
    entity = re.compile(r'&#(\d+);')
    def func(match):
        try:
            return unichr(int(match.group(1)))
        except Exception:
            # Invalid codepoint: keep the raw entity text unchanged.
            return match.group(0)
    return entity.sub(func, text)
def replace_entities2(text):
    """Replace named HTML entities (e.g. &amp;) with their characters.

    NOTE(review): relies on Python 2's unichr(); under Python 3 the
    NameError is swallowed by the broad except and entities pass through.
    """
    entity = re.compile(r'&([a-zA-Z]+);')
    def func(match):
        try:
            return unichr(name2codepoint[match.group(1)])
        except Exception:
            # Unknown entity name: keep the raw entity text unchanged.
            return match.group(0)
    return entity.sub(func, text)
def remove_markup(text):
    """Replace every HTML/XML tag in *text* with a single space."""
    return re.sub(r'<[^>]+>', ' ', text)
def format(text, max_length=400):
    """Clean feed text for display: decode entities, strip tags, collapse
    whitespace, and truncate to at most *max_length* characters.

    The cleaning passes repeat until the text stops changing, so nested or
    double-encoded entities are fully resolved.
    """
    previous = ''
    while text != previous:
        previous = text
        text = replace_entities1(text)
        text = replace_entities2(text)
        text = remove_markup(text)
        text = ' '.join(text.split())
    if len(text) > max_length:
        text = text[:max_length].strip()
        # Drop the (possibly cut-off) final word and mark the truncation.
        text = text.split()[:-1]
        text.append('[...]')
        text = ' '.join(text)
    return text
|
generate_traffic.py | import requests
import argparse
import threading
import time
import json
import random
import uuid
# Shared configuration, populated in the __main__ block below.
# NOTE(review): `global` statements at module level are no-ops; they are
# kept here purely to document which names the workers share.
global url
global keycloak
global username
global password
global bearer_token
global bad_request_rate
global match_filter_rate
# IDs of resources created so far, shared (unsynchronized) by the workers.
bridges = []
processors = []
def create_bridge():
    """Worker loop: POST a new bridge every 30 seconds.

    With probability `bad_request_rate` an existing bridge id is reused as
    the name to provoke an error; a 401 triggers re-authentication and an
    immediate retry, and a 201 records the new bridge id.
    """
    while True:
        if (random.random() < bad_request_rate and bridges): # with probability `bad_request_rate` create a bad request
            print('[bridge] Creating a bad request')
            body = {'name': bridges[0]}
        else: #create a valid request
            print('[bridge] Creating a valid request')
            body = {'name': str(uuid.uuid4())}
        response = requests.post(url + '/api/v1/bridges', data=json.dumps(body),
                                 headers={'Content-type': 'application/json', 'Authorization': 'Bearer {0}'.format(get_bearer_token())})
        if (response.status_code == 401):
            authenticate()
            continue
        elif(response.status_code == 201):
            bridges.append(json.loads(response.text)['id'])
        print('[bridge] response status code: {0}'.format(response.status_code))
        print('[bridge] sleeping for 30 seconds')
        time.sleep(30)
def create_processor():
    """Worker loop: POST a processor onto a random bridge every 20 seconds.

    With probability `bad_request_rate` an invalid body is sent to provoke
    an error; a 401 triggers re-authentication and an immediate retry, and
    a 201 records the new processor id.
    """
    while True:
        if (len(bridges) == 0):
            # Fix: back off briefly instead of busy-spinning while no
            # bridge exists yet (the bare `continue` pegged a CPU core).
            time.sleep(1)
            continue
        if (random.random() < bad_request_rate):  # with probability `bad_request_rate` create a bad request
            print('[processor] Creating a bad request')
            body = {'name': 'crashhhhhhhhhhh'}
        else:  # create a valid request
            print('[processor] Creating a valid request')
            body = {'name': str(uuid.uuid4()),
                    'action': {'name': 'myAction', 'parameters': {'topic': 'demoTopic'}, 'type': 'KafkaTopic'},
                    'filters': [{'key': 'data.api', 'type': 'StringEquals', 'value': 'PutBlockList'}],
                    'transformationTemplate': '{\'api\': \'{data.api}\''
                    }
        bridge = random.choice(bridges)
        response = requests.post(url + '/api/v1/bridges/' + bridge + '/processors', data=json.dumps(body),
                                 headers={'Content-type': 'application/json', 'Authorization': 'Bearer {0}'.format(get_bearer_token())})
        if (response.status_code == 401):
            authenticate()
            continue
        elif(response.status_code == 201):
            processors.append(json.loads(response.text)['id'])
        print('[processor] response status code: {0}'.format(response.status_code))
        print('[processor] sleeping for 20 seconds')
        time.sleep(20)
def ingress():
    """Worker loop: POST a CloudEvent to a random bridge ingress every 10 s.

    With probability `bad_request_rate` a non-CloudEvent body is sent to
    provoke an error; a 401 triggers re-authentication.
    """
    while True:
        if (len(processors) == 0):
            # Fix: back off briefly instead of busy-spinning while nothing
            # consumes events yet (the bare `continue` pegged a CPU core).
            time.sleep(1)
            continue
        if (random.random() < bad_request_rate):  # with probability `bad_request_rate` create a bad request
            print('[ingress] Creating a bad request')
            body = {'name': 'crashhhhhhhhhhh with a non valid cloud event'}
        else:  # create a valid request
            print('[ingress] Creating a valid request')
            body = get_cloud_event()
        bridge = random.choice(bridges)
        response = requests.post(url + '/ingress/events/' + bridge, data=json.dumps(body),
                                 headers={'Content-type': 'application/json', 'Authorization': 'Bearer {0}'.format(get_bearer_token())})
        if (response.status_code == 401):
            authenticate()
        else:
            print('[ingress] response status code: {0}'.format(response.status_code))
        # Fix: log before sleeping so the message describes what happens next.
        print('[ingress] sleeping for 10 seconds')
        time.sleep(10)
def authenticate():
    """Fetch a fresh OAuth token from Keycloak (password grant) and cache it
    in the module-global `bearer_token`."""
    global bearer_token
    print('[auth] Authentication in progress')
    token = requests.post(keycloak + '/auth/realms/event-bridge-fm/protocol/openid-connect/token',
                          data={'grant_type': 'password', 'username': username, 'password': password, 'client_id': 'event-bridge', 'client_secret': 'secret'},
                          headers={'Content-type': 'application/x-www-form-urlencoded'})
    bearer_token = json.loads(token.text)['access_token']
    print('[auth] Authenticated!')
def get_bearer_token():
    """Return the most recently cached OAuth bearer token."""
    # Read-only access; the token itself is refreshed by authenticate().
    return bearer_token
def get_cloud_event():
    """Build a sample Azure BlobCreated CloudEvent.

    With probability `match_filter_rate` the data.api field is set to a
    value that does NOT match the demo processor filter ('PutBlockList').
    """
    api_value = 'OtherOperation' if random.random() < match_filter_rate else 'PutBlockList'
    storage_data = {
        'api': api_value,
        'clientRequestId': '4c5dd7fb-2c48-4a27-bb30-5361b5de920a',
        'requestId': '9aeb0fdf-c01e-0131-0922-9eb549000000',
        'eTag': '0x8D76C39E4407333',
        'contentType': 'image/png',
        'contentLength': 30699,
        'blobType': 'BlockBlob',
        'url': 'https://gridtesting.blob.core.windows.net/testcontainer/{new-file}',
        'sequencer': '000000000000000000000000000099240000000000c41c18',
        'storageDiagnostics': {
            'batchId': '681fe319-3006-00a8-0022-9e7cde000000'
        }
    }
    return {
        'specversion': '1.0',
        'type': 'Microsoft.Storage.BlobCreated',
        'source': 'mySource',
        'id': '9aeb0fdf-c01e-0131-0922-9eb54906e209',
        'time': '2019-11-18T15:13:39.4589254Z',
        'subject': 'blobServices/default/containers/{storage-container}/blobs/{new-file}',
        'dataschema': '#',
        'data': storage_data
    }
if __name__ == '__main__':
    # Parse required CLI arguments that configure the traffic generators.
    parser = argparse.ArgumentParser(description='Generate bridge traffic')
    parser.add_argument('--manager', help='The url of the manager', required=True)
    parser.add_argument('--keycloak', help='The keycloak url', required=True)
    parser.add_argument('--username', help='The username', required=True)
    parser.add_argument('--password', help='The password', required=True)
    parser.add_argument('--bad_request_rate', help='The bad request rate', type=float, required=True)
    parser.add_argument('--match_filter_rate', help='How many cloud events should match the filter (rate).', type=float, required=True)
    # Initialize global vars
    args = vars(parser.parse_args())
    url = args['manager']
    keycloak = args['keycloak']
    username = args['username']
    password = args['password']
    bad_request_rate = args['bad_request_rate']
    match_filter_rate = args['match_filter_rate']
    bearer_token = ''
    # Run the three traffic generators concurrently; each loops forever.
    bridge_thread = threading.Thread(target=create_bridge, args=())
    bridge_thread.start()
    processor_thread = threading.Thread(target=create_processor, args=())
    processor_thread.start()
    ingress_thread = threading.Thread(target=ingress, args=())
    ingress_thread.start()
|
test_itertools.py | import unittest
from test import support
from itertools import *
import weakref
from decimal import Decimal
from fractions import Fraction
import operator
import random
import copy
import pickle
from functools import reduce
import sys
import struct
import threading
import gc
# Platform Py_ssize_t bounds, used as boundary values by the tests below.
maxsize = support.MAX_Py_ssize_t
minsize = -maxsize-1
def lzip(*args):
    """Like zip(), but eagerly materialized into a list of tuples."""
    return [combo for combo in zip(*args)]
def onearg(x):
    """Single-argument test helper: doubles its input."""
    return 2 * x
def errfunc(*args):
    """Test helper that always raises ValueError, ignoring its arguments."""
    raise ValueError()
def gen3():
    """Non-restartable source sequence yielding 0, 1, 2."""
    yield 0
    yield 1
    yield 2
def isEven(x):
    """Test predicate: true for even numbers."""
    remainder = x % 2
    return remainder == 0
def isOdd(x):
    """Test predicate: true for odd numbers."""
    remainder = x % 2
    return remainder == 1
def tupleize(*args):
    """Collect all positional arguments into a tuple."""
    return tuple(args)
def irange(n):
    """Lazily yield the integers 0 .. n-1."""
    yield from range(n)
class StopNow:
    """An iterator over the empty sequence: next() raises immediately."""

    def __iter__(self):
        return self

    def __next__(self):
        raise StopIteration()
def take(n, seq):
    """Return the first n items of seq as a list (seq may be infinite)."""
    return [item for item in islice(seq, n)]
def prod(iterable):
    """Multiply all items together left-to-right; an empty input gives 1."""
    result = 1
    for value in iterable:
        result = result * value
    return result
def fact(n):
    """Return n factorial (n!)."""
    return prod(range(1, n + 1))
# root level methods for pickling ability
def testR(r):
return r[0]
def testR2(r):
return r[2]
def underten(x):
    """Module-level predicate (picklable): x strictly below ten."""
    return x < 10
# One pickle round-trip copier per supported protocol; binding `proto` as a
# default argument freezes its value per lambda (avoids late-binding closures).
picklecopiers = [lambda s, proto=proto: pickle.loads(pickle.dumps(s, proto))
                 for proto in range(pickle.HIGHEST_PROTOCOL + 1)]
class TestBasicOps(unittest.TestCase):
    def pickletest(self, protocol, it, stop=4, take=1, compare=None):
        """Test that an iterator is the same after pickling, also when
        part-consumed. Optionally compare the expanded result to *compare*."""
        def expand(it, i=0):
            # Recursively expand iterables, within sensible bounds, so the
            # results can be compared with ==.
            if i > 10:
                raise RuntimeError("infinite recursion encountered")
            if isinstance(it, str):
                return it
            try:
                l = list(islice(it, stop))
            except TypeError:
                return it # can't expand it
            return [expand(e, i+1) for e in l]
        # Test the initial copy against the original
        dump = pickle.dumps(it, protocol)
        i2 = pickle.loads(dump)
        self.assertEqual(type(it), type(i2))
        a, b = expand(it), expand(i2)
        self.assertEqual(a, b)
        if compare:
            c = expand(compare)
            self.assertEqual(a, c)
        # Take from the copy, and create another copy and compare them.
        i3 = pickle.loads(dump)
        took = 0
        try:
            for i in range(take):
                next(i3)
                took += 1
        except StopIteration:
            pass #in case there is less data than 'take'
        dump = pickle.dumps(i3, protocol)
        i4 = pickle.loads(dump)
        a, b = expand(i3), expand(i4)
        self.assertEqual(a, b)
        if compare:
            # The copy should resume exactly where the consumed one stopped.
            c = expand(compare[took:])
            self.assertEqual(a, c);
    def test_accumulate(self):
        """accumulate(): default running sum, binary-function argument,
        the `initial` keyword, pickling, and error cases."""
        self.assertEqual(list(accumulate(range(10))),               # one positional arg
                          [0, 1, 3, 6, 10, 15, 21, 28, 36, 45])
        self.assertEqual(list(accumulate(iterable=range(10))),      # kw arg
                          [0, 1, 3, 6, 10, 15, 21, 28, 36, 45])
        for typ in int, complex, Decimal, Fraction:                 # multiple types
            self.assertEqual(
                list(accumulate(map(typ, range(10)))),
                list(map(typ, [0, 1, 3, 6, 10, 15, 21, 28, 36, 45])))
        self.assertEqual(list(accumulate('abc')), ['a', 'ab', 'abc']) # works with non-numeric
        self.assertEqual(list(accumulate([])), [])                  # empty iterable
        self.assertEqual(list(accumulate([7])), [7])                # iterable of length one
        self.assertRaises(TypeError, accumulate, range(10), 5, 6)   # too many args
        self.assertRaises(TypeError, accumulate)                    # too few args
        self.assertRaises(TypeError, accumulate, x=range(10))       # unexpected kwd arg
        self.assertRaises(TypeError, list, accumulate([1, []]))     # args that don't add
        # Binary-function variants: running min / max / product.
        s = [2, 8, 9, 5, 7, 0, 3, 4, 1, 6]
        self.assertEqual(list(accumulate(s, min)),
                         [2, 2, 2, 2, 2, 0, 0, 0, 0, 0])
        self.assertEqual(list(accumulate(s, max)),
                         [2, 8, 9, 9, 9, 9, 9, 9, 9, 9])
        self.assertEqual(list(accumulate(s, operator.mul)),
                         [2, 16, 144, 720, 5040, 0, 0, 0, 0, 0])
        with self.assertRaises(TypeError):
            list(accumulate(s, chr))                                # unary-operation
        # Pickle round-trips at every protocol, with and without `initial`.
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            self.pickletest(proto, accumulate(range(10)))           # test pickling
            self.pickletest(proto, accumulate(range(10), initial=7))
        self.assertEqual(list(accumulate([10, 5, 1], initial=None)), [10, 15, 16])
        self.assertEqual(list(accumulate([10, 5, 1], initial=100)), [100, 110, 115, 116])
        self.assertEqual(list(accumulate([], initial=100)), [100])
        with self.assertRaises(TypeError):
            # `initial` is keyword-only.
            list(accumulate([10, 20], 100))
    def test_chain(self):
        """chain(): C implementation against the documented pure-python
        equivalent."""
        def chain2(*iterables):
            'Pure python version in the docs'
            for it in iterables:
                for element in it:
                    yield element
        for c in (chain, chain2):
            self.assertEqual(list(c('abc', 'def')), list('abcdef'))
            self.assertEqual(list(c('abc')), list('abc'))
            self.assertEqual(list(c('')), [])
            self.assertEqual(take(4, c('abc', 'def')), list('abcd'))
            # Non-iterable arguments only fail once iteration reaches them.
            self.assertRaises(TypeError, list,c(2, 3))
    def test_chain_from_iterable(self):
        """chain.from_iterable(): lazy flattening of an iterable of iterables."""
        self.assertEqual(list(chain.from_iterable(['abc', 'def'])), list('abcdef'))
        self.assertEqual(list(chain.from_iterable(['abc'])), list('abc'))
        self.assertEqual(list(chain.from_iterable([''])), [])
        self.assertEqual(take(4, chain.from_iterable(['abc', 'def'])), list('abcd'))
        self.assertRaises(TypeError, list, chain.from_iterable([2, 3]))
    def test_chain_reducible(self):
        """chain(): copy.deepcopy and pickle round-trips preserve the
        iterator's current position."""
        for oper in [copy.deepcopy] + picklecopiers:
            it = chain('abc', 'def')
            self.assertEqual(list(oper(it)), list('abcdef'))
            self.assertEqual(next(it), 'a')
            # A copy of a partly-consumed chain resumes mid-stream.
            self.assertEqual(list(oper(it)), list('bcdef'))
            self.assertEqual(list(oper(chain(''))), [])
            self.assertEqual(take(4, oper(chain('abc', 'def'))), list('abcd'))
            self.assertRaises(TypeError, list, oper(chain(2, 3)))
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            self.pickletest(proto, chain('abc', 'def'), compare=list('abcdef'))
    def test_chain_setstate(self):
        """chain.__setstate__(): argument validation and state restoration."""
        self.assertRaises(TypeError, chain().__setstate__, ())
        self.assertRaises(TypeError, chain().__setstate__, [])
        self.assertRaises(TypeError, chain().__setstate__, 0)
        self.assertRaises(TypeError, chain().__setstate__, ([],))
        self.assertRaises(TypeError, chain().__setstate__, (iter([]), []))
        it = chain()
        it.__setstate__((iter(['abc', 'def']),))
        self.assertEqual(list(it), ['a', 'b', 'c', 'd', 'e', 'f'])
        it = chain()
        # A two-tuple state is (outer source iterator, active inner iterator).
        it.__setstate__((iter(['abc', 'def']), iter(['ghi'])))
        self.assertEqual(list(it), ['ghi', 'a', 'b', 'c', 'd', 'e', 'f'])
def test_combinations(self):
    """combinations() argument checks, copy/pickle support, and cross-checks
    against three pure-python reference implementations."""
    self.assertRaises(TypeError, combinations, 'abc')       # missing r argument
    self.assertRaises(TypeError, combinations, 'abc', 2, 1) # too many arguments
    self.assertRaises(TypeError, combinations, None)        # pool is not iterable
    self.assertRaises(ValueError, combinations, 'abc', -2)  # r is negative

    # identity plus every pickle-roundtrip copier must preserve behavior
    for op in [lambda a:a] + picklecopiers:
        self.assertEqual(list(op(combinations('abc', 32))), [])     # r > n
        self.assertEqual(list(op(combinations('ABCD', 2))),
                         [('A','B'), ('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')])
        testIntermediate = combinations('ABCD', 2)
        next(testIntermediate)
        # copying a partially consumed iterator resumes mid-stream
        self.assertEqual(list(op(testIntermediate)),
                         [('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')])

        self.assertEqual(list(op(combinations(range(4), 3))),
                         [(0,1,2), (0,1,3), (0,2,3), (1,2,3)])
        testIntermediate = combinations(range(4), 3)
        next(testIntermediate)
        self.assertEqual(list(op(testIntermediate)),
                         [(0,1,3), (0,2,3), (1,2,3)])

    def combinations1(iterable, r):
        'Pure python version shown in the docs'
        pool = tuple(iterable)
        n = len(pool)
        if r > n:
            return
        indices = list(range(r))
        yield tuple(pool[i] for i in indices)
        while 1:
            for i in reversed(range(r)):
                if indices[i] != i + n - r:
                    break
            else:
                return
            indices[i] += 1
            for j in range(i+1, r):
                indices[j] = indices[j-1] + 1
            yield tuple(pool[i] for i in indices)

    def combinations2(iterable, r):
        'Pure python version shown in the docs'
        pool = tuple(iterable)
        n = len(pool)
        for indices in permutations(range(n), r):
            if sorted(indices) == list(indices):
                yield tuple(pool[i] for i in indices)

    def combinations3(iterable, r):
        'Pure python version from cwr()'
        pool = tuple(iterable)
        n = len(pool)
        for indices in combinations_with_replacement(range(n), r):
            if len(set(indices)) == r:
                yield tuple(pool[i] for i in indices)

    # exhaustive cross-check over small pools
    for n in range(7):
        values = [5*x-12 for x in range(n)]
        for r in range(n+2):
            result = list(combinations(values, r))
            self.assertEqual(len(result), 0 if r>n else fact(n) / fact(r) / fact(n-r)) # right number of combs
            self.assertEqual(len(result), len(set(result)))         # no repeats
            self.assertEqual(result, sorted(result))                # lexicographic order
            for c in result:
                self.assertEqual(len(c), r)                         # r-length combinations
                self.assertEqual(len(set(c)), r)                    # no duplicate elements
                self.assertEqual(list(c), sorted(c))                # keep original ordering
                self.assertTrue(all(e in values for e in c))        # elements taken from input iterable
                self.assertEqual(list(c),
                                 [e for e in values if e in c])     # comb is a subsequence of the input iterable
            self.assertEqual(result, list(combinations1(values, r))) # matches first pure python version
            self.assertEqual(result, list(combinations2(values, r))) # matches second pure python version
            self.assertEqual(result, list(combinations3(values, r))) # matches third pure python version
            for proto in range(pickle.HIGHEST_PROTOCOL + 1):
                self.pickletest(proto, combinations(values, r))      # test pickling
@support.bigaddrspacetest
def test_combinations_overflow(self):
    """A huge r must raise cleanly rather than crash the interpreter."""
    self.assertRaises((OverflowError, MemoryError), combinations, "AA", 2**29)
# Test implementation detail:  tuple re-use
@support.impl_detail("tuple reuse is specific to CPython")
def test_combinations_tuple_reuse(self):
    """CPython recycles the result tuple while iterating lazily, but not
    when every result is kept alive in a list."""
    lazy_ids = {id(t) for t in combinations('abcde', 3)}
    self.assertEqual(len(lazy_ids), 1)
    kept_ids = {id(t) for t in list(combinations('abcde', 3))}
    self.assertNotEqual(len(kept_ids), 1)
def test_combinations_with_replacement(self):
    """combinations_with_replacement() argument checks, copy/pickle support,
    and cross-checks against two pure-python reference implementations."""
    cwr = combinations_with_replacement
    self.assertRaises(TypeError, cwr, 'abc')       # missing r argument
    self.assertRaises(TypeError, cwr, 'abc', 2, 1) # too many arguments
    self.assertRaises(TypeError, cwr, None)        # pool is not iterable
    self.assertRaises(ValueError, cwr, 'abc', -2)  # r is negative

    # identity plus every pickle-roundtrip copier must preserve behavior
    for op in [lambda a:a] + picklecopiers:
        self.assertEqual(list(op(cwr('ABC', 2))),
                         [('A','A'), ('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')])
        testIntermediate = cwr('ABC', 2)
        next(testIntermediate)
        # copying a partially consumed iterator resumes mid-stream
        self.assertEqual(list(op(testIntermediate)),
                         [('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')])

    def cwr1(iterable, r):
        'Pure python version shown in the docs'
        # number items returned:  (n+r-1)! / r! / (n-1)! when n>0
        pool = tuple(iterable)
        n = len(pool)
        if not n and r:
            return
        indices = [0] * r
        yield tuple(pool[i] for i in indices)
        while 1:
            for i in reversed(range(r)):
                if indices[i] != n - 1:
                    break
            else:
                return
            indices[i:] = [indices[i] + 1] * (r - i)
            yield tuple(pool[i] for i in indices)

    def cwr2(iterable, r):
        'Pure python version shown in the docs'
        pool = tuple(iterable)
        n = len(pool)
        for indices in product(range(n), repeat=r):
            if sorted(indices) == list(indices):
                yield tuple(pool[i] for i in indices)

    def numcombs(n, r):
        # expected result count: (n+r-1)! / r! / (n-1)! when n>0
        if not n:
            return 0 if r else 1
        return fact(n+r-1) / fact(r)/ fact(n-1)

    # exhaustive cross-check over small pools
    for n in range(7):
        values = [5*x-12 for x in range(n)]
        for r in range(n+2):
            result = list(cwr(values, r))
            self.assertEqual(len(result), numcombs(n, r))           # right number of combs
            self.assertEqual(len(result), len(set(result)))         # no repeats
            self.assertEqual(result, sorted(result))                # lexicographic order
            regular_combs = list(combinations(values, r))           # compare to combs without replacement
            if n == 0 or r <= 1:
                self.assertEqual(result, regular_combs)             # cases that should be identical
            else:
                self.assertTrue(set(result) >= set(regular_combs))  # rest should be supersets of regular combs
            for c in result:
                self.assertEqual(len(c), r)                         # r-length combinations
                noruns = [k for k,v in groupby(c)]                  # combo without consecutive repeats
                self.assertEqual(len(noruns), len(set(noruns)))     # no repeats other than consecutive
                self.assertEqual(list(c), sorted(c))                # keep original ordering
                self.assertTrue(all(e in values for e in c))        # elements taken from input iterable
                self.assertEqual(noruns,
                                 [e for e in values if e in c])     # comb is a subsequence of the input iterable
            self.assertEqual(result, list(cwr1(values, r)))         # matches first pure python version
            self.assertEqual(result, list(cwr2(values, r)))         # matches second pure python version
            for proto in range(pickle.HIGHEST_PROTOCOL + 1):
                self.pickletest(proto, cwr(values,r))               # test pickling
@support.bigaddrspacetest
def test_combinations_with_replacement_overflow(self):
    """A huge r must raise cleanly rather than crash the interpreter."""
    self.assertRaises((OverflowError, MemoryError),
                      combinations_with_replacement, "AA", 2**30)
# Test implementation detail:  tuple re-use
@support.impl_detail("tuple reuse is specific to CPython")
def test_combinations_with_replacement_tuple_reuse(self):
    """One result tuple is recycled during lazy iteration, but not when
    every result is kept alive in a list."""
    cwr = combinations_with_replacement
    lazy_ids = {id(t) for t in cwr('abcde', 3)}
    self.assertEqual(len(lazy_ids), 1)
    kept_ids = {id(t) for t in list(cwr('abcde', 3))}
    self.assertNotEqual(len(kept_ids), 1)
def test_permutations(self):
    """permutations() argument checks and cross-checks against two
    pure-python reference implementations; r=None defaults to len(pool)."""
    self.assertRaises(TypeError, permutations)              # too few arguments
    self.assertRaises(TypeError, permutations, 'abc', 2, 1) # too many arguments
    self.assertRaises(TypeError, permutations, None)        # pool is not iterable
    self.assertRaises(ValueError, permutations, 'abc', -2)  # r is negative
    self.assertEqual(list(permutations('abc', 32)), [])     # r > n
    self.assertRaises(TypeError, permutations, 'abc', 's')  # r is not an int or None
    self.assertEqual(list(permutations(range(3), 2)),
                     [(0,1), (0,2), (1,0), (1,2), (2,0), (2,1)])

    def permutations1(iterable, r=None):
        'Pure python version shown in the docs'
        pool = tuple(iterable)
        n = len(pool)
        r = n if r is None else r
        if r > n:
            return
        indices = list(range(n))
        cycles = list(range(n-r+1, n+1))[::-1]
        yield tuple(pool[i] for i in indices[:r])
        while n:
            for i in reversed(range(r)):
                cycles[i] -= 1
                if cycles[i] == 0:
                    indices[i:] = indices[i+1:] + indices[i:i+1]
                    cycles[i] = n - i
                else:
                    j = cycles[i]
                    indices[i], indices[-j] = indices[-j], indices[i]
                    yield tuple(pool[i] for i in indices[:r])
                    break
            else:
                return

    def permutations2(iterable, r=None):
        'Pure python version shown in the docs'
        pool = tuple(iterable)
        n = len(pool)
        r = n if r is None else r
        for indices in product(range(n), repeat=r):
            if len(set(indices)) == r:
                yield tuple(pool[i] for i in indices)

    # exhaustive cross-check over small pools
    for n in range(7):
        values = [5*x-12 for x in range(n)]
        for r in range(n+2):
            result = list(permutations(values, r))
            self.assertEqual(len(result), 0 if r>n else fact(n) / fact(n-r)) # right number of perms
            self.assertEqual(len(result), len(set(result)))         # no repeats
            self.assertEqual(result, sorted(result))                # lexicographic order
            for p in result:
                self.assertEqual(len(p), r)                         # r-length permutations
                self.assertEqual(len(set(p)), r)                    # no duplicate elements
                self.assertTrue(all(e in values for e in p))        # elements taken from input iterable
            self.assertEqual(result, list(permutations1(values, r))) # matches first pure python version
            self.assertEqual(result, list(permutations2(values, r))) # matches second pure python version
            if r == n:
                self.assertEqual(result, list(permutations(values, None))) # test r as None
                self.assertEqual(result, list(permutations(values)))       # test default r
            for proto in range(pickle.HIGHEST_PROTOCOL + 1):
                self.pickletest(proto, permutations(values, r))     # test pickling
@support.bigaddrspacetest
def test_permutations_overflow(self):
    """A huge r must raise cleanly rather than crash the interpreter."""
    self.assertRaises((OverflowError, MemoryError), permutations, "A", 2**30)
@support.impl_detail("tuple reuse is specific to CPython")
def test_permutations_tuple_reuse(self):
    """One result tuple is recycled during lazy iteration, but not when
    every result is kept alive in a list."""
    lazy_ids = {id(t) for t in permutations('abcde', 3)}
    self.assertEqual(len(lazy_ids), 1)
    kept_ids = {id(t) for t in list(permutations('abcde', 3))}
    self.assertNotEqual(len(kept_ids), 1)
def test_combinatorics(self):
    """Cross-check the four combinatoric generators against one another."""
    # Test relationships between product(), permutations(),
    # combinations() and combinations_with_replacement().
    for n in range(6):
        s = 'ABCDEFG'[:n]
        for r in range(8):
            prod = list(product(s, repeat=r))
            cwr = list(combinations_with_replacement(s, r))
            perm = list(permutations(s, r))
            comb = list(combinations(s, r))

            # Check size
            self.assertEqual(len(prod), n**r)
            self.assertEqual(len(cwr), (fact(n+r-1) / fact(r)/ fact(n-1)) if n else (not r))
            self.assertEqual(len(perm), 0 if r>n else fact(n) / fact(n-r))
            self.assertEqual(len(comb), 0 if r>n else fact(n) / fact(r) / fact(n-r))

            # Check lexicographic order without repeated tuples
            self.assertEqual(prod, sorted(set(prod)))
            self.assertEqual(cwr, sorted(set(cwr)))
            self.assertEqual(perm, sorted(set(perm)))
            self.assertEqual(comb, sorted(set(comb)))

            # Check interrelationships
            self.assertEqual(cwr, [t for t in prod if sorted(t)==list(t)])      # cwr: prods which are sorted
            self.assertEqual(perm, [t for t in prod if len(set(t))==r])         # perm: prods with no dups
            self.assertEqual(comb, [t for t in perm if sorted(t)==list(t)])     # comb: perms that are sorted
            self.assertEqual(comb, [t for t in cwr if len(set(t))==r])          # comb: cwrs without dups
            self.assertEqual(comb, list(filter(set(cwr).__contains__, perm)))   # comb: perm that is a cwr
            self.assertEqual(comb, list(filter(set(perm).__contains__, cwr)))   # comb: cwr that is a perm
            self.assertEqual(comb, sorted(set(cwr) & set(perm)))                # comb: both a cwr and a perm
def test_compress(self):
    """compress() keeps data items whose matching selector is true.

    Fix: the former "too many args" check passed only two arguments,
    duplicating the "2nd arg not iterable" line above it and never
    exercising the too-many-arguments path; it now passes three.
    """
    self.assertEqual(list(compress(data='ABCDEF', selectors=[1,0,1,0,1,1])), list('ACEF'))
    self.assertEqual(list(compress('ABCDEF', [1,0,1,0,1,1])), list('ACEF'))
    self.assertEqual(list(compress('ABCDEF', [0,0,0,0,0,0])), list(''))
    self.assertEqual(list(compress('ABCDEF', [1,1,1,1,1,1])), list('ABCDEF'))
    self.assertEqual(list(compress('ABCDEF', [1,0,1])), list('AC'))       # stops at shortest input
    self.assertEqual(list(compress('ABC', [0,1,1,1,1,1])), list('BC'))
    n = 10000
    data = chain.from_iterable(repeat(range(6), n))
    selectors = chain.from_iterable(repeat((0, 1)))
    self.assertEqual(list(compress(data, selectors)), [1,3,5] * n)
    self.assertRaises(TypeError, compress, None, range(6))          # 1st arg not iterable
    self.assertRaises(TypeError, compress, range(6), None)          # 2nd arg not iterable
    self.assertRaises(TypeError, compress, range(6))                # too few args
    self.assertRaises(TypeError, compress, range(6), [1]*6, 7)      # too many args

    # check copy, deepcopy, pickle
    for op in [lambda a:copy.copy(a), lambda a:copy.deepcopy(a)] + picklecopiers:
        for data, selectors, result1, result2 in [
            ('ABCDEF', [1,0,1,0,1,1], 'ACEF', 'CEF'),
            ('ABCDEF', [0,0,0,0,0,0], '', ''),
            ('ABCDEF', [1,1,1,1,1,1], 'ABCDEF', 'BCDEF'),
            ('ABCDEF', [1,0,1], 'AC', 'C'),
            ('ABC', [0,1,1,1,1,1], 'BC', 'C'),
            ]:
            self.assertEqual(list(op(compress(data=data, selectors=selectors))), list(result1))
            self.assertEqual(list(op(compress(data, selectors))), list(result1))
            testIntermediate = compress(data, selectors)
            if result1:
                next(testIntermediate)
                # result2 is what remains after consuming one item
                self.assertEqual(list(op(testIntermediate)), list(result2))
def test_count(self):
    """count() with the default step: values, argument checks, repr,
    and copy/deepcopy/pickle support."""
    self.assertEqual(lzip('abc',count()), [('a', 0), ('b', 1), ('c', 2)])
    self.assertEqual(lzip('abc',count(3)), [('a', 3), ('b', 4), ('c', 5)])
    self.assertEqual(take(2, lzip('abc',count(3))), [('a', 3), ('b', 4)])
    self.assertEqual(take(2, zip('abc',count(-1))), [('a', -1), ('b', 0)])
    self.assertEqual(take(2, zip('abc',count(-3))), [('a', -3), ('b', -2)])
    self.assertRaises(TypeError, count, 2, 3, 4)
    self.assertRaises(TypeError, count, 'a')
    # counting crosses the native word-size boundary without trouble
    self.assertEqual(take(10, count(maxsize-5)),
                     list(range(maxsize-5, maxsize+5)))
    self.assertEqual(take(10, count(-maxsize-5)),
                     list(range(-maxsize-5, -maxsize+5)))
    # any numeric type that supports addition works as the start value
    self.assertEqual(take(3, count(3.25)), [3.25, 4.25, 5.25])
    self.assertEqual(take(3, count(3.25-4j)), [3.25-4j, 4.25-4j, 5.25-4j])
    self.assertEqual(take(3, count(Decimal('1.1'))),
                     [Decimal('1.1'), Decimal('2.1'), Decimal('3.1')])
    self.assertEqual(take(3, count(Fraction(2, 3))),
                     [Fraction(2, 3), Fraction(5, 3), Fraction(8, 3)])
    BIGINT = 1<<1000
    self.assertEqual(take(3, count(BIGINT)), [BIGINT, BIGINT+1, BIGINT+2])
    # repr tracks the current value as the iterator advances
    c = count(3)
    self.assertEqual(repr(c), 'count(3)')
    next(c)
    self.assertEqual(repr(c), 'count(4)')
    c = count(-9)
    self.assertEqual(repr(c), 'count(-9)')
    next(c)
    self.assertEqual(next(c), -8)
    self.assertEqual(repr(count(10.25)), 'count(10.25)')
    self.assertEqual(repr(count(10.0)), 'count(10.0)')
    self.assertEqual(type(next(count(10.0))), float)
    for i in (-sys.maxsize-5, -sys.maxsize+5 ,-10, -1, 0, 10, sys.maxsize-5, sys.maxsize+5):
        # Test repr
        r1 = repr(count(i))
        r2 = 'count(%r)'.__mod__(i)
        self.assertEqual(r1, r2)

    # check copy, deepcopy, pickle
    for value in -3, 3, maxsize-5, maxsize+5:
        c = count(value)
        self.assertEqual(next(copy.copy(c)), value)
        self.assertEqual(next(copy.deepcopy(c)), value)
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            self.pickletest(proto, count(value))

    # check proper internal error handling for large "step" sizes
    count(1, maxsize+5); sys.exc_info()
def test_count_with_stride(self):
    """count() with an explicit step: values, repr formatting, and pickling."""
    self.assertEqual(lzip('abc',count(2,3)), [('a', 2), ('b', 5), ('c', 8)])
    self.assertEqual(lzip('abc',count(start=2,step=3)),
                     [('a', 2), ('b', 5), ('c', 8)])
    self.assertEqual(lzip('abc',count(step=-1)),
                     [('a', 0), ('b', -1), ('c', -2)])
    self.assertRaises(TypeError, count, 'a', 'b')
    self.assertEqual(lzip('abc',count(2,0)), [('a', 2), ('b', 2), ('c', 2)])
    self.assertEqual(lzip('abc',count(2,1)), [('a', 2), ('b', 3), ('c', 4)])
    self.assertEqual(lzip('abc',count(2,3)), [('a', 2), ('b', 5), ('c', 8)])
    # striding across the native word-size boundary works
    self.assertEqual(take(20, count(maxsize-15, 3)), take(20, range(maxsize-15, maxsize+100, 3)))
    self.assertEqual(take(20, count(-maxsize-15, 3)), take(20, range(-maxsize-15,-maxsize+100, 3)))
    self.assertEqual(take(3, count(10, maxsize+5)),
                     list(range(10, 10+3*(maxsize+5), maxsize+5)))
    # any numeric type that supports addition works as the step
    self.assertEqual(take(3, count(2, 1.25)), [2, 3.25, 4.5])
    self.assertEqual(take(3, count(2, 3.25-4j)), [2, 5.25-4j, 8.5-8j])
    self.assertEqual(take(3, count(Decimal('1.1'), Decimal('.1'))),
                     [Decimal('1.1'), Decimal('1.2'), Decimal('1.3')])
    self.assertEqual(take(3, count(Fraction(2,3), Fraction(1,7))),
                     [Fraction(2,3), Fraction(17,21), Fraction(20,21)])
    BIGINT = 1<<1000
    self.assertEqual(take(3, count(step=BIGINT)), [0, BIGINT, 2*BIGINT])
    self.assertEqual(repr(take(3, count(10, 2.5))), repr([10, 12.5, 15.0]))
    # repr tracks the current value as the iterator advances
    c = count(3, 5)
    self.assertEqual(repr(c), 'count(3, 5)')
    next(c)
    self.assertEqual(repr(c), 'count(8, 5)')
    c = count(-9, 0)
    self.assertEqual(repr(c), 'count(-9, 0)')
    next(c)
    self.assertEqual(repr(c), 'count(-9, 0)')
    c = count(-9, -3)
    self.assertEqual(repr(c), 'count(-9, -3)')
    next(c)
    self.assertEqual(repr(c), 'count(-12, -3)')
    self.assertEqual(repr(c), 'count(-12, -3)')
    self.assertEqual(repr(count(10.5, 1.25)), 'count(10.5, 1.25)')
    self.assertEqual(repr(count(10.5, 1)), 'count(10.5)')           # suppress step=1 when it's an int
    self.assertEqual(repr(count(10.5, 1.00)), 'count(10.5, 1.0)')   # do show float values like 1.0
    self.assertEqual(repr(count(10, 1.00)), 'count(10, 1.0)')
    # int start with float step: first value keeps its int type
    c = count(10, 1.0)
    self.assertEqual(type(next(c)), int)
    self.assertEqual(type(next(c)), float)
    for i in (-sys.maxsize-5, -sys.maxsize+5 ,-10, -1, 0, 10, sys.maxsize-5, sys.maxsize+5):
        for j in (-sys.maxsize-5, -sys.maxsize+5 ,-10, -1, 0, 1, 10, sys.maxsize-5, sys.maxsize+5):
            # Test repr
            r1 = repr(count(i, j))
            if j == 1:
                r2 = ('count(%r)' % i)
            else:
                r2 = ('count(%r, %r)' % (i, j))
            self.assertEqual(r1, r2)
            for proto in range(pickle.HIGHEST_PROTOCOL + 1):
                self.pickletest(proto, count(i, j))
def test_cycle(self):
    """cycle() repeats its input forever; also deepcopy/pickle semantics,
    including pickling with a partially or fully consumed source iterator."""
    self.assertEqual(take(10, cycle('abc')), list('abcabcabca'))
    self.assertEqual(list(cycle('')), [])
    self.assertRaises(TypeError, cycle)
    self.assertRaises(TypeError, cycle, 5)
    self.assertEqual(list(islice(cycle(gen3()),10)), [0,1,2,0,1,2,0,1,2,0])

    # check copy, deepcopy, pickle
    c = cycle('abc')
    self.assertEqual(next(c), 'a')
    #simple copy currently not supported, because __reduce__ returns
    #an internal iterator
    #self.assertEqual(take(10, copy.copy(c)), list('bcabcabcab'))
    self.assertEqual(take(10, copy.deepcopy(c)), list('bcabcabcab'))
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        # each roundtrip must resume from c's current position
        self.assertEqual(take(10, pickle.loads(pickle.dumps(c, proto))),
                         list('bcabcabcab'))
        next(c)
        self.assertEqual(take(10, pickle.loads(pickle.dumps(c, proto))),
                         list('cabcabcabc'))
        next(c)
        next(c)
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        self.pickletest(proto, cycle('abc'))

    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        # test with partial consumed input iterable
        it = iter('abcde')
        c = cycle(it)
        _ = [next(c) for i in range(2)]      # consume 2 of 5 inputs
        p = pickle.dumps(c, proto)
        d = pickle.loads(p)                  # rebuild the cycle object
        self.assertEqual(take(20, d), list('cdeabcdeabcdeabcdeab'))

        # test with completely consumed input iterable
        it = iter('abcde')
        c = cycle(it)
        _ = [next(c) for i in range(7)]      # consume 7 of 5 inputs
        p = pickle.dumps(c, proto)
        d = pickle.loads(p)                  # rebuild the cycle object
        self.assertEqual(take(20, d), list('cdeabcdeabcdeabcdeab'))
def test_cycle_setstate(self):
    """cycle.__setstate__ restores state in either of its two modes and
    rejects malformed state tuples."""
    # Verify both modes for restoring state

    # Mode 0 is efficient.  It uses an incompletely consumed input
    # iterator to build a cycle object and then passes in state with
    # a list of previously consumed values.  There is no data
    # overlap between the two.
    c = cycle('defg')
    c.__setstate__((list('abc'), 0))
    self.assertEqual(take(20, c), list('defgabcdefgabcdefgab'))

    # Mode 1 is inefficient.  It starts with a cycle object built
    # from an iterator over the remaining elements in a partial
    # cycle and then passes in state with all of the previously
    # seen values (this overlaps values included in the iterator).
    c = cycle('defg')
    c.__setstate__((list('abcdefg'), 1))
    self.assertEqual(take(20, c), list('defgabcdefgabcdefgab'))

    # The first argument to setstate needs to be a tuple
    with self.assertRaises(TypeError):
        cycle('defg').__setstate__([list('abcdefg'), 0])

    # The first argument in the setstate tuple must be a list
    with self.assertRaises(TypeError):
        c = cycle('defg')
        c.__setstate__((tuple('defg'), 0))
    take(20, c)

    # The second argument in the setstate tuple must be an int
    with self.assertRaises(TypeError):
        cycle('defg').__setstate__((list('abcdefg'), 'x'))

    self.assertRaises(TypeError, cycle('').__setstate__, ())
    self.assertRaises(TypeError, cycle('').__setstate__, ([],))
def test_groupby(self):
    """groupby() clusters consecutive items that share a key: argument
    checks, pickling, nesting, group invalidation, and failure paths."""
    # Check whether it accepts arguments correctly
    self.assertEqual([], list(groupby([])))
    self.assertEqual([], list(groupby([], key=id)))
    self.assertRaises(TypeError, list, groupby('abc', []))
    self.assertRaises(TypeError, groupby, None)
    self.assertRaises(TypeError, groupby, 'abc', lambda x:x, 10)

    # Check normal input
    s = [(0, 10, 20), (0, 11,21), (0,12,21), (1,13,21), (1,14,22),
         (2,15,22), (3,16,23), (3,17,23)]
    dup = []
    for k, g in groupby(s, lambda r:r[0]):
        for elem in g:
            self.assertEqual(k, elem[0])
            dup.append(elem)
    self.assertEqual(s, dup)

    # Check normal pickled
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        dup = []
        for k, g in pickle.loads(pickle.dumps(groupby(s, testR), proto)):
            for elem in g:
                self.assertEqual(k, elem[0])
                dup.append(elem)
        self.assertEqual(s, dup)

    # Check nested case
    dup = []
    for k, g in groupby(s, testR):
        for ik, ig in groupby(g, testR2):
            for elem in ig:
                self.assertEqual(k, elem[0])
                self.assertEqual(ik, elem[2])
                dup.append(elem)
    self.assertEqual(s, dup)

    # Check nested and pickled
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        dup = []
        for k, g in pickle.loads(pickle.dumps(groupby(s, testR), proto)):
            for ik, ig in pickle.loads(pickle.dumps(groupby(g, testR2), proto)):
                for elem in ig:
                    self.assertEqual(k, elem[0])
                    self.assertEqual(ik, elem[2])
                    dup.append(elem)
        self.assertEqual(s, dup)

    # Check case where inner iterator is not used
    keys = [k for k, g in groupby(s, testR)]
    expectedkeys = set([r[0] for r in s])
    self.assertEqual(set(keys), expectedkeys)
    self.assertEqual(len(keys), len(expectedkeys))

    # Check case where inner iterator is used after advancing the groupby
    # iterator
    s = list(zip('AABBBAAAA', range(9)))
    it = groupby(s, testR)
    _, g1 = next(it)
    _, g2 = next(it)
    _, g3 = next(it)
    # advancing the outer iterator invalidates earlier group iterators
    self.assertEqual(list(g1), [])
    self.assertEqual(list(g2), [])
    self.assertEqual(next(g3), ('A', 5))
    list(it)  # exhaust the groupby iterator
    self.assertEqual(list(g3), [])

    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        it = groupby(s, testR)
        _, g = next(it)
        next(it)
        next(it)
        # a pickled invalidated group stays empty after the roundtrip
        self.assertEqual(list(pickle.loads(pickle.dumps(g, proto))), [])

    # Exercise pipes and filters style
    s = 'abracadabra'
    # sort s | uniq
    r = [k for k, g in groupby(sorted(s))]
    self.assertEqual(r, ['a', 'b', 'c', 'd', 'r'])
    # sort s | uniq -d
    r = [k for k, g in groupby(sorted(s)) if list(islice(g,1,2))]
    self.assertEqual(r, ['a', 'b', 'r'])
    # sort s | uniq -c
    r = [(len(list(g)), k) for k, g in groupby(sorted(s))]
    self.assertEqual(r, [(5, 'a'), (2, 'b'), (1, 'c'), (1, 'd'), (2, 'r')])
    # sort s | uniq -c | sort -rn | head -3
    r = sorted([(len(list(g)) , k) for k, g in groupby(sorted(s))], reverse=True)[:3]
    self.assertEqual(r, [(5, 'a'), (2, 'r'), (2, 'b')])

    # iter.__next__ failure
    class ExpectedError(Exception):
        pass
    def delayed_raise(n=0):
        # yields n items, then raises
        for i in range(n):
            yield 'yo'
        raise ExpectedError
    def gulp(iterable, keyp=None, func=list):
        # fully consume every group
        return [func(g) for k, g in groupby(iterable, keyp)]

    # iter.__next__ failure on outer object
    self.assertRaises(ExpectedError, gulp, delayed_raise(0))
    # iter.__next__ failure on inner object
    self.assertRaises(ExpectedError, gulp, delayed_raise(1))

    # __eq__ failure
    class DummyCmp:
        def __eq__(self, dst):
            raise ExpectedError
    s = [DummyCmp(), DummyCmp(), None]

    # __eq__ failure on outer object
    self.assertRaises(ExpectedError, gulp, s, func=id)
    # __eq__ failure on inner object
    self.assertRaises(ExpectedError, gulp, s)

    # keyfunc failure
    def keyfunc(obj):
        # succeeds keyfunc.skip times, then raises
        if keyfunc.skip > 0:
            keyfunc.skip -= 1
            return obj
        else:
            raise ExpectedError

    # keyfunc failure on outer object
    keyfunc.skip = 0
    self.assertRaises(ExpectedError, gulp, [None], keyfunc)
    keyfunc.skip = 1
    self.assertRaises(ExpectedError, gulp, [None, None], keyfunc)
def test_filter(self):
    """filter() keeps items for which the predicate (or bool) is true."""
    self.assertEqual(list(filter(isEven, range(6))), [0,2,4])
    self.assertEqual(list(filter(None, [0,1,0,2,0])), [1,2])
    self.assertEqual(list(filter(bool, [0,1,0,2,0])), [1,2])
    self.assertEqual(take(4, filter(isEven, count())), [0,2,4,6])
    # bad call signatures and non-iterable / non-callable arguments
    for bad_args in [(), (lambda x:x,), (lambda x:x, range(6), 7), (isEven, 3)]:
        self.assertRaises(TypeError, filter, *bad_args)
    self.assertRaises(TypeError, next, filter(range(6), range(6)))

    # check copy, deepcopy, pickle
    expected = [0,2,4]
    for copier in (copy.copy, copy.deepcopy):
        self.assertEqual(list(copier(filter(isEven, range(6)))), expected)
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        f = filter(isEven, range(6))
        self.assertEqual(list(pickle.loads(pickle.dumps(f, proto))), expected)
        next(f)  # pickle again after partially consuming the iterator
        self.assertEqual(list(pickle.loads(pickle.dumps(f, proto))), expected[1:])
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        self.pickletest(proto, filter(isEven, range(6)))
def test_filterfalse(self):
    """filterfalse() keeps items for which the predicate (or bool) is false."""
    self.assertEqual(list(filterfalse(isEven, range(6))), [1,3,5])
    self.assertEqual(list(filterfalse(None, [0,1,0,2,0])), [0,0,0])
    self.assertEqual(list(filterfalse(bool, [0,1,0,2,0])), [0,0,0])
    self.assertEqual(take(4, filterfalse(isEven, count())), [1,3,5,7])
    # bad call signatures and non-iterable / non-callable arguments
    for bad_args in [(), (lambda x:x,), (lambda x:x, range(6), 7), (isEven, 3)]:
        self.assertRaises(TypeError, filterfalse, *bad_args)
    self.assertRaises(TypeError, next, filterfalse(range(6), range(6)))
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        self.pickletest(proto, filterfalse(isEven, range(6)))
def test_zip(self):
    """Basic zip() behavior: truncation to the shortest input, laziness,
    and rejection of non-iterable arguments."""
    # XXX This is rather silly now that builtin zip() calls zip()...
    self.assertEqual(list(zip('abc', count())), [('a', 0), ('b', 1), ('c', 2)])
    self.assertEqual(list(zip('abc', range(6))), lzip('abc', range(6)))
    self.assertEqual(list(zip('abcdef', range(3))), lzip('abcdef', range(3)))
    self.assertEqual(take(3, zip('abcdef', count())), lzip('abcdef', range(3)))
    self.assertEqual(list(zip('abcdef')), lzip('abcdef'))
    self.assertEqual(list(zip()), lzip())
    self.assertRaises(TypeError, zip, 3)
    self.assertRaises(TypeError, zip, range(3), 3)
    pairs = lzip('abc', 'def')
    self.assertEqual([tuple(list(pair)) for pair in zip('abc', 'def')], pairs)
    self.assertEqual([pair for pair in zip('abc', 'def')], pairs)
@support.impl_detail("tuple reuse is specific to CPython")
def test_zip_tuple_reuse(self):
    """CPython recycles zip()'s result tuple when the consumer drops it;
    also exercise copy/deepcopy/pickle of zip iterators."""
    # one tuple object is reused during lazy iteration...
    ids = list(map(id, zip('abc', 'def')))
    self.assertEqual(min(ids), max(ids))
    # ...but not when every result is kept alive in a list
    ids = list(map(id, list(zip('abc', 'def'))))
    self.assertEqual(len(dict.fromkeys(ids)), len(ids))

    # check copy, deepcopy, pickle
    ans = [(x,y) for x, y in copy.copy(zip('abc',count()))]
    self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])

    ans = [(x,y) for x, y in copy.deepcopy(zip('abc',count()))]
    self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])

    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        ans = [(x,y) for x, y in pickle.loads(pickle.dumps(zip('abc',count()), proto))]
        self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])

    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        # pickling a partially consumed zip resumes mid-stream
        testIntermediate = zip('abc',count())
        next(testIntermediate)
        ans = [(x,y) for x, y in pickle.loads(pickle.dumps(testIntermediate, proto))]
        self.assertEqual(ans, [('b', 1), ('c', 2)])

    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        self.pickletest(proto, zip('abc', count()))
def test_ziplongest(self):
    """zip_longest() pads shorter inputs with fillvalue (default None).

    Fix: the failure message previously read 'Did not raise Type in: ...';
    it now names the expected exception, TypeError.
    """
    for args in [
            ['abc', range(6)],
            [range(6), 'abc'],
            [range(1000), range(2000,2100), range(3000,3050)],
            [range(1000), range(0), range(3000,3050), range(1200), range(1500)],
            [range(1000), range(0), range(3000,3050), range(1200), range(1500), range(0)],
        ]:
        # expected output: index into each arg, padding exhausted args with None
        target = [tuple([arg[i] if i < len(arg) else None for arg in args])
                  for i in range(max(map(len, args)))]
        self.assertEqual(list(zip_longest(*args)), target)
        self.assertEqual(list(zip_longest(*args, **{})), target)
        target = [tuple((e is None and 'X' or e) for e in t) for t in target]   # Replace None fills with 'X'
        self.assertEqual(list(zip_longest(*args, **dict(fillvalue='X'))), target)

    self.assertEqual(take(3,zip_longest('abcdef', count())), list(zip('abcdef', range(3)))) # take 3 from infinite input

    self.assertEqual(list(zip_longest()), list(zip()))
    self.assertEqual(list(zip_longest([])), list(zip([])))
    self.assertEqual(list(zip_longest('abcdef')), list(zip('abcdef')))

    self.assertEqual(list(zip_longest('abc', 'defg', **{})),
                     list(zip(list('abc')+[None], 'defg'))) # empty keyword dict
    self.assertRaises(TypeError, zip_longest, 3)
    self.assertRaises(TypeError, zip_longest, range(3), 3)

    # bogus keyword arguments must raise TypeError
    for stmt in [
        "zip_longest('abc', fv=1)",
        "zip_longest('abc', fillvalue=1, bogus_keyword=None)",
    ]:
        try:
            eval(stmt, globals(), locals())
        except TypeError:
            pass
        else:
            self.fail('Did not raise TypeError in: ' + stmt)

    self.assertEqual([tuple(list(pair)) for pair in zip_longest('abc', 'def')],
                     list(zip('abc', 'def')))
    self.assertEqual([pair for pair in zip_longest('abc', 'def')],
                     list(zip('abc', 'def')))
@support.impl_detail("tuple reuse is specific to CPython")
def test_zip_longest_tuple_reuse(self):
    """zip_longest recycles its result tuple when the consumer drops it,
    but not when every result is kept alive in a list."""
    lazy_ids = list(map(id, zip_longest('abc', 'def')))
    self.assertEqual(min(lazy_ids), max(lazy_ids))
    kept_ids = list(map(id, list(zip_longest('abc', 'def'))))
    self.assertEqual(len(set(kept_ids)), len(kept_ids))
def test_zip_longest_pickling(self):
    """zip_longest round-trips through every pickle protocol."""
    cases = [
        ("abc", "def", {}),
        ("abc", "defgh", {}),
        ("abc", "defgh", {"fillvalue": 1}),
        ("", "defgh", {}),
    ]
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        for first, second, kwargs in cases:
            self.pickletest(proto, zip_longest(first, second, **kwargs))
def test_zip_longest_bad_iterable(self):
    """An exception raised by an argument's __iter__ propagates unchanged."""
    boom = TypeError()

    class ExplodingIterable:
        def __iter__(self):
            raise boom

    with self.assertRaises(TypeError) as caught:
        zip_longest(ExplodingIterable())
    # the very same exception object, not a copy or a wrapper
    self.assertIs(caught.exception, boom)
def test_bug_7244(self):
    """Regression test for issue 7244: exceptions raised by zip_longest's
    input iterators must not be lost or misreported."""
    class Repeater:
        # this class is similar to itertools.repeat
        def __init__(self, o, t, e):
            self.o = o          # object to yield
            self.t = int(t)     # number of times to yield it
            self.e = e          # exception type raised when exhausted
        def __iter__(self): # its iterator is itself
            return self
        def __next__(self):
            if self.t > 0:
                self.t -= 1
                return self.o
            else:
                raise self.e

    # Formerly this code in would fail in debug mode
    # with Undetected Error and Stop Iteration
    r1 = Repeater(1, 3, StopIteration)
    r2 = Repeater(2, 4, StopIteration)
    def run(r1, r2):
        result = []
        for i, j in zip_longest(r1, r2, fillvalue=0):
            with support.captured_output('stdout'):
                print((i, j))
            result.append((i, j))
        return result
    self.assertEqual(run(r1, r2), [(1,2), (1,2), (1,2), (0,2)])

    # Formerly, the RuntimeError would be lost
    # and StopIteration would stop as expected
    r1 = Repeater(1, 3, RuntimeError)
    r2 = Repeater(2, 4, StopIteration)
    it = zip_longest(r1, r2, fillvalue=0)
    self.assertEqual(next(it), (1, 2))
    self.assertEqual(next(it), (1, 2))
    self.assertEqual(next(it), (1, 2))
    self.assertRaises(RuntimeError, next, it)
def test_pairwise(self):
    """pairwise() yields successive overlapping pairs; fewer than two
    items yields nothing.

    Fix: removed a stray trailing comma after the 'ab' assertion, which
    wrapped the statement's None result in a pointless throwaway tuple.
    """
    self.assertEqual(list(pairwise('')), [])
    self.assertEqual(list(pairwise('a')), [])
    self.assertEqual(list(pairwise('ab')),
                     [('a', 'b')])
    self.assertEqual(list(pairwise('abcde')),
                     [('a', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'e')])
    self.assertEqual(list(pairwise(range(10_000))),
                     list(zip(range(10_000), range(1, 10_000))))

    with self.assertRaises(TypeError):
        pairwise()                                      # too few arguments
    with self.assertRaises(TypeError):
        pairwise('abc', 10)                             # too many arguments
    with self.assertRaises(TypeError):
        pairwise(iterable='abc')                        # keyword arguments
    with self.assertRaises(TypeError):
        pairwise(None)                                  # non-iterable argument
def test_product(self):
    """product() computes the cartesian product; checked against two
    pure-python reference implementations and randomized inputs."""
    for args, result in [
        ([], [()]),                     # zero iterables
        (['ab'], [('a',), ('b',)]),     # one iterable
        ([range(2), range(3)], [(0,0), (0,1), (0,2), (1,0), (1,1), (1,2)]),     # two iterables
        ([range(0), range(2), range(3)], []),   # first iterable with zero length
        ([range(2), range(0), range(3)], []),   # middle iterable with zero length
        ([range(2), range(3), range(0)], []),   # last iterable with zero length
        ]:
        self.assertEqual(list(product(*args)), result)
        for r in range(4):
            # repeat=r must equal repeating the argument list r times
            self.assertEqual(list(product(*(args*r))),
                             list(product(*args, **dict(repeat=r))))
    self.assertEqual(len(list(product(*[range(7)]*6))), 7**6)
    self.assertRaises(TypeError, product, range(6), None)

    def product1(*args, **kwds):
        # odometer-style reference implementation
        pools = list(map(tuple, args)) * kwds.get('repeat', 1)
        n = len(pools)
        if n == 0:
            yield ()
            return
        if any(len(pool) == 0 for pool in pools):
            return
        indices = [0] * n
        yield tuple(pool[i] for pool, i in zip(pools, indices))
        while 1:
            for i in reversed(range(n)):  # right to left
                if indices[i] == len(pools[i]) - 1:
                    continue
                indices[i] += 1
                for j in range(i+1, n):
                    indices[j] = 0
                yield tuple(pool[i] for pool, i in zip(pools, indices))
                break
            else:
                return

    def product2(*args, **kwds):
        'Pure python version used in docs'
        pools = list(map(tuple, args)) * kwds.get('repeat', 1)
        result = [[]]
        for pool in pools:
            result = [x+[y] for x in result for y in pool]
        for prod in result:
            yield tuple(prod)

    # randomized cross-check over mixed argument types
    argtypes = ['', 'abc', '', range(0), range(4), dict(a=1, b=2, c=3),
                set('abcdefg'), range(11), tuple(range(13))]
    for i in range(100):
        args = [random.choice(argtypes) for j in range(random.randrange(5))]
        expected_len = prod(map(len, args))
        self.assertEqual(len(list(product(*args))), expected_len)
        self.assertEqual(list(product(*args)), list(product1(*args)))
        self.assertEqual(list(product(*args)), list(product2(*args)))
        # also accept plain iterators as arguments
        args = map(iter, args)
        self.assertEqual(len(list(product(*args))), expected_len)
    @support.bigaddrspacetest
    def test_product_overflow(self):
        """Astronomically large products must fail cleanly, not crash."""
        with self.assertRaises((OverflowError, MemoryError)):
            product(*(['ab']*2**5), repeat=2**25)
    @support.impl_detail("tuple reuse is specific to CPython")
    def test_product_tuple_reuse(self):
        """CPython recycles the result tuple between next() calls."""
        # Lazy iteration reuses one tuple object (all ids equal) ...
        self.assertEqual(len(set(map(id, product('abc', 'def')))), 1)
        # ... but materializing the stream first forces distinct tuples.
        self.assertNotEqual(len(set(map(id, list(product('abc', 'def'))))), 1)
    def test_product_pickling(self):
        """product() must survive copy, deepcopy and pickling mid-stream."""
        # check copy, deepcopy, pickle
        for args, result in [
            ([], [()]),                     # zero iterables
            (['ab'], [('a',), ('b',)]),     # one iterable
            ([range(2), range(3)], [(0,0), (0,1), (0,2), (1,0), (1,1), (1,2)]),     # two iterables
            ([range(0), range(2), range(3)], []),           # first iterable with zero length
            ([range(2), range(0), range(3)], []),           # middle iterable with zero length
            ([range(2), range(3), range(0)], []),           # last iterable with zero length
            ]:
            self.assertEqual(list(copy.copy(product(*args))), result)
            self.assertEqual(list(copy.deepcopy(product(*args))), result)
            for proto in range(pickle.HIGHEST_PROTOCOL + 1):
                self.pickletest(proto, product(*args))
    def test_product_issue_25021(self):
        """__setstate__ indices beyond a pool's length must be clamped."""
        # test that indices are properly clamped to the length of the tuples
        p = product((1, 2),(3,))
        p.__setstate__((0, 0x1000))  # will access tuple element 1 if not clamped
        self.assertEqual(next(p), (2, 3))
        # test that empty tuple in the list will result in an immediate StopIteration
        p = product((1, 2), (), (3,))
        p.__setstate__((0, 0, 0x1000))  # will access tuple element 1 if not clamped
        self.assertRaises(StopIteration, next, p)
    def test_repeat(self):
        """repeat(): values, counts, argument errors, repr() countdown, and
        copy/deepcopy/pickle support."""
        self.assertEqual(list(repeat(object='a', times=3)), ['a', 'a', 'a'])
        self.assertEqual(lzip(range(3),repeat('a')),
                         [(0, 'a'), (1, 'a'), (2, 'a')])
        self.assertEqual(list(repeat('a', 3)), ['a', 'a', 'a'])
        self.assertEqual(take(3, repeat('a')), ['a', 'a', 'a'])
        self.assertEqual(list(repeat('a', 0)), [])
        self.assertEqual(list(repeat('a', -3)), [])
        self.assertRaises(TypeError, repeat)
        self.assertRaises(TypeError, repeat, None, 3, 4)
        self.assertRaises(TypeError, repeat, None, 'a')
        r = repeat(1+0j)
        self.assertEqual(repr(r), 'repeat((1+0j))')
        r = repeat(1+0j, 5)
        self.assertEqual(repr(r), 'repeat((1+0j), 5)')
        # repr() shows the *remaining* count, which reaches 0 on exhaustion.
        list(r)
        self.assertEqual(repr(r), 'repeat((1+0j), 0)')
        # check copy, deepcopy, pickle
        c = repeat(object='a', times=10)
        self.assertEqual(next(c), 'a')
        self.assertEqual(take(2, copy.copy(c)), list('a' * 2))
        self.assertEqual(take(2, copy.deepcopy(c)), list('a' * 2))
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            self.pickletest(proto, repeat(object='a', times=10))
def test_repeat_with_negative_times(self):
self.assertEqual(repr(repeat('a', -1)), "repeat('a', 0)")
self.assertEqual(repr(repeat('a', -2)), "repeat('a', 0)")
self.assertEqual(repr(repeat('a', times=-1)), "repeat('a', 0)")
self.assertEqual(repr(repeat('a', times=-2)), "repeat('a', 0)")
    def test_map(self):
        """Builtin map(): multi-iterable lockstep behaviour, error cases, and
        copy/deepcopy/pickle support."""
        self.assertEqual(list(map(operator.pow, range(3), range(1,7))),
                         [0**1, 1**2, 2**3])
        self.assertEqual(list(map(tupleize, 'abc', range(5))),
                         [('a',0),('b',1),('c',2)])
        # An infinite second iterable stops with the shortest input.
        self.assertEqual(list(map(tupleize, 'abc', count())),
                         [('a',0),('b',1),('c',2)])
        self.assertEqual(take(2,map(tupleize, 'abc', count())),
                         [('a',0),('b',1)])
        self.assertEqual(list(map(operator.pow, [])), [])
        self.assertRaises(TypeError, map)
        self.assertRaises(TypeError, list, map(None, range(3), range(3)))
        self.assertRaises(TypeError, map, operator.neg)
        self.assertRaises(TypeError, next, map(10, range(5)))
        self.assertRaises(ValueError, next, map(errfunc, [4], [5]))
        self.assertRaises(TypeError, next, map(onearg, [4], [5]))
        # check copy, deepcopy, pickle
        ans = [('a',0),('b',1),('c',2)]
        c = map(tupleize, 'abc', count())
        self.assertEqual(list(copy.copy(c)), ans)
        c = map(tupleize, 'abc', count())
        self.assertEqual(list(copy.deepcopy(c)), ans)
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            c = map(tupleize, 'abc', count())
            self.pickletest(proto, c)
    def test_starmap(self):
        """starmap(): argument-tuple unpacking, error cases, and
        copy/deepcopy/pickle support."""
        self.assertEqual(list(starmap(operator.pow, zip(range(3), range(1,7)))),
                         [0**1, 1**2, 2**3])
        self.assertEqual(take(3, starmap(operator.pow, zip(count(), count(1)))),
                         [0**1, 1**2, 2**3])
        self.assertEqual(list(starmap(operator.pow, [])), [])
        # Any iterable of arguments is accepted, not just tuples.
        self.assertEqual(list(starmap(operator.pow, [iter([4,5])])), [4**5])
        self.assertRaises(TypeError, list, starmap(operator.pow, [None]))
        self.assertRaises(TypeError, starmap)
        self.assertRaises(TypeError, starmap, operator.pow, [(4,5)], 'extra')
        self.assertRaises(TypeError, next, starmap(10, [(4,5)]))
        self.assertRaises(ValueError, next, starmap(errfunc, [(4,5)]))
        self.assertRaises(TypeError, next, starmap(onearg, [(4,5)]))
        # check copy, deepcopy, pickle
        ans = [0**1, 1**2, 2**3]
        c = starmap(operator.pow, zip(range(3), range(1,7)))
        self.assertEqual(list(copy.copy(c)), ans)
        c = starmap(operator.pow, zip(range(3), range(1,7)))
        self.assertEqual(list(copy.deepcopy(c)), ans)
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            c = starmap(operator.pow, zip(range(3), range(1,7)))
            self.pickletest(proto, c)
    def test_islice(self):
        """islice(): agreement with range(), item consumption, invalid
        arguments, reducibility, and integer-like index arguments."""
        for args in [          # islice(args) should agree with range(args)
                (10, 20, 3),
                (10, 3, 20),
                (10, 20),
                (10, 10),
                (10, 3),
                (20,)
                ]:
            self.assertEqual(list(islice(range(100), *args)),
                             list(range(*args)))

        for args, tgtargs in [  # Stop when seqn is exhausted
                ((10, 110, 3), ((10, 100, 3))),
                ((10, 110), ((10, 100))),
                ((110,), (100,))
                ]:
            self.assertEqual(list(islice(range(100), *args)),
                             list(range(*tgtargs)))

        # Test stop=None
        self.assertEqual(list(islice(range(10), None)), list(range(10)))
        self.assertEqual(list(islice(range(10), None, None)), list(range(10)))
        self.assertEqual(list(islice(range(10), None, None, None)), list(range(10)))
        self.assertEqual(list(islice(range(10), 2, None)), list(range(2, 10)))
        self.assertEqual(list(islice(range(10), 1, None, 2)), list(range(1, 10, 2)))

        # Test number of items consumed     SF #1171417
        it = iter(range(10))
        self.assertEqual(list(islice(it, 3)), list(range(3)))
        self.assertEqual(list(it), list(range(3, 10)))

        it = iter(range(10))
        self.assertEqual(list(islice(it, 3, 3)), [])
        self.assertEqual(list(it), list(range(3, 10)))

        # Test invalid arguments
        ra = range(10)
        self.assertRaises(TypeError, islice, ra)
        self.assertRaises(TypeError, islice, ra, 1, 2, 3, 4)
        self.assertRaises(ValueError, islice, ra, -5, 10, 1)
        self.assertRaises(ValueError, islice, ra, 1, -5, -1)
        self.assertRaises(ValueError, islice, ra, 1, 10, -1)
        self.assertRaises(ValueError, islice, ra, 1, 10, 0)
        self.assertRaises(ValueError, islice, ra, 'a')
        self.assertRaises(ValueError, islice, ra, 'a', 1)
        self.assertRaises(ValueError, islice, ra, 1, 'a')
        self.assertRaises(ValueError, islice, ra, 'a', 1, 1)
        self.assertRaises(ValueError, islice, ra, 1, 'a', 1)
        self.assertEqual(len(list(islice(count(), 1, 10, maxsize))), 1)

        # Issue #10323: Less islice in a predictable state
        c = count()
        self.assertEqual(list(islice(c, 1, 3, 50)), [1])
        self.assertEqual(next(c), 3)

        # check copy, deepcopy, pickle
        for args in [          # islice(args) should agree with range(args)
                (10, 20, 3),
                (10, 3, 20),
                (10, 20),
                (10, 3),
                (20,)
                ]:
            self.assertEqual(list(copy.copy(islice(range(100), *args))),
                             list(range(*args)))
            self.assertEqual(list(copy.deepcopy(islice(range(100), *args))),
                             list(range(*args)))
            for proto in range(pickle.HIGHEST_PROTOCOL + 1):
                self.pickletest(proto, islice(range(100), *args))

        # Issue #21321: check source iterator is not referenced
        # from islice() after the latter has been exhausted
        it = (x for x in (1, 2))
        wr = weakref.ref(it)
        it = islice(it, 1)
        self.assertIsNotNone(wr())
        list(it)  # exhaust the iterator
        support.gc_collect()
        self.assertIsNone(wr())

        # Issue #30537: islice can accept integer-like objects as
        # arguments
        class IntLike(object):
            def __init__(self, val):
                self.val = val
            def __index__(self):
                return self.val
        self.assertEqual(list(islice(range(100), IntLike(10))), list(range(10)))
        self.assertEqual(list(islice(range(100), IntLike(10), IntLike(50))),
                         list(range(10, 50)))
        self.assertEqual(list(islice(range(100), IntLike(10), IntLike(50), IntLike(5))),
                         list(range(10,50,5)))
    def test_takewhile(self):
        """takewhile(): prefix selection, error cases, permanent exhaustion,
        and copy/deepcopy/pickle support."""
        data = [1, 3, 5, 20, 2, 4, 6, 8]
        self.assertEqual(list(takewhile(underten, data)), [1, 3, 5])
        self.assertEqual(list(takewhile(underten, [])), [])
        self.assertRaises(TypeError, takewhile)
        self.assertRaises(TypeError, takewhile, operator.pow)
        self.assertRaises(TypeError, takewhile, operator.pow, [(4,5)], 'extra')
        self.assertRaises(TypeError, next, takewhile(10, [(4,5)]))
        self.assertRaises(ValueError, next, takewhile(errfunc, [(4,5)]))
        # Once the predicate fails, the iterator stays exhausted.
        t = takewhile(bool, [1, 1, 1, 0, 0, 0])
        self.assertEqual(list(t), [1, 1, 1])
        self.assertRaises(StopIteration, next, t)
        # check copy, deepcopy, pickle
        self.assertEqual(list(copy.copy(takewhile(underten, data))), [1, 3, 5])
        self.assertEqual(list(copy.deepcopy(takewhile(underten, data))),
                         [1, 3, 5])
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            self.pickletest(proto, takewhile(underten, data))
    def test_dropwhile(self):
        """dropwhile(): suffix selection, error cases, and
        copy/deepcopy/pickle support."""
        data = [1, 3, 5, 20, 2, 4, 6, 8]
        self.assertEqual(list(dropwhile(underten, data)), [20, 2, 4, 6, 8])
        self.assertEqual(list(dropwhile(underten, [])), [])
        self.assertRaises(TypeError, dropwhile)
        self.assertRaises(TypeError, dropwhile, operator.pow)
        self.assertRaises(TypeError, dropwhile, operator.pow, [(4,5)], 'extra')
        self.assertRaises(TypeError, next, dropwhile(10, [(4,5)]))
        self.assertRaises(ValueError, next, dropwhile(errfunc, [(4,5)]))
        # check copy, deepcopy, pickle
        self.assertEqual(list(copy.copy(dropwhile(underten, data))), [20, 2, 4, 6, 8])
        self.assertEqual(list(copy.deepcopy(dropwhile(underten, data))),
                         [20, 2, 4, 6, 8])
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            self.pickletest(proto, dropwhile(underten, data))
    def test_tee(self):
        """tee(): interleaving patterns, dealloc of either branch, n-way
        splits, pass-through of copyable iterators, weak references, and
        copy/deepcopy/pickle support."""
        n = 200

        a, b = tee([])        # test empty iterator
        self.assertEqual(list(a), [])
        self.assertEqual(list(b), [])

        a, b = tee(irange(n)) # test 100% interleaved
        self.assertEqual(lzip(a,b), lzip(range(n), range(n)))

        a, b = tee(irange(n)) # test 0% interleaved
        self.assertEqual(list(a), list(range(n)))
        self.assertEqual(list(b), list(range(n)))

        a, b = tee(irange(n)) # test dealloc of leading iterator
        for i in range(100):
            self.assertEqual(next(a), i)
        del a
        self.assertEqual(list(b), list(range(n)))

        a, b = tee(irange(n)) # test dealloc of trailing iterator
        for i in range(100):
            self.assertEqual(next(a), i)
        del b
        self.assertEqual(list(a), list(range(100, n)))

        for j in range(5):   # test randomly interleaved
            order = [0]*n + [1]*n
            random.shuffle(order)
            lists = ([], [])
            its = tee(irange(n))
            for i in order:
                value = next(its[i])
                lists[i].append(value)
            self.assertEqual(lists[0], list(range(n)))
            self.assertEqual(lists[1], list(range(n)))

        # test argument format checking
        self.assertRaises(TypeError, tee)
        self.assertRaises(TypeError, tee, 3)
        self.assertRaises(TypeError, tee, [1,2], 'x')
        self.assertRaises(TypeError, tee, [1,2], 3, 'x')

        # tee object should be instantiable
        a, b = tee('abc')
        c = type(a)('def')
        self.assertEqual(list(c), list('def'))

        # test long-lagged and multi-way split
        a, b, c = tee(range(2000), 3)
        for i in range(100):
            self.assertEqual(next(a), i)
        self.assertEqual(list(b), list(range(2000)))
        self.assertEqual([next(c), next(c)], list(range(2)))
        self.assertEqual(list(a), list(range(100,2000)))
        self.assertEqual(list(c), list(range(2,2000)))

        # test values of n
        self.assertRaises(TypeError, tee, 'abc', 'invalid')
        self.assertRaises(ValueError, tee, [], -1)
        for n in range(5):
            result = tee('abc', n)
            self.assertEqual(type(result), tuple)
            self.assertEqual(len(result), n)
            self.assertEqual([list(x) for x in result], [list('abc')]*n)

        # tee pass-through to copyable iterator
        a, b = tee('abc')
        c, d = tee(a)
        self.assertTrue(a is c)

        # test tee_new
        t1, t2 = tee('abc')
        tnew = type(t1)
        self.assertRaises(TypeError, tnew)
        self.assertRaises(TypeError, tnew, 10)
        t3 = tnew(t1)
        self.assertTrue(list(t1) == list(t2) == list(t3) == list('abc'))

        # test that tee objects are weak referencable
        a, b = tee(range(10))
        p = weakref.proxy(a)
        self.assertEqual(getattr(p, '__class__'), type(b))
        del a
        support.gc_collect()  # For PyPy or other GCs.
        self.assertRaises(ReferenceError, getattr, p, '__class__')

        ans = list('abc')
        long_ans = list(range(10000))

        # check copy
        a, b = tee('abc')
        self.assertEqual(list(copy.copy(a)), ans)
        self.assertEqual(list(copy.copy(b)), ans)
        a, b = tee(list(range(10000)))
        self.assertEqual(list(copy.copy(a)), long_ans)
        self.assertEqual(list(copy.copy(b)), long_ans)

        # check partially consumed copy
        a, b = tee('abc')
        take(2, a)
        take(1, b)
        self.assertEqual(list(copy.copy(a)), ans[2:])
        self.assertEqual(list(copy.copy(b)), ans[1:])
        self.assertEqual(list(a), ans[2:])
        self.assertEqual(list(b), ans[1:])
        a, b = tee(range(10000))
        take(100, a)
        take(60, b)
        self.assertEqual(list(copy.copy(a)), long_ans[100:])
        self.assertEqual(list(copy.copy(b)), long_ans[60:])
        self.assertEqual(list(a), long_ans[100:])
        self.assertEqual(list(b), long_ans[60:])

        # check deepcopy
        a, b = tee('abc')
        self.assertEqual(list(copy.deepcopy(a)), ans)
        self.assertEqual(list(copy.deepcopy(b)), ans)
        self.assertEqual(list(a), ans)
        self.assertEqual(list(b), ans)
        a, b = tee(range(10000))
        self.assertEqual(list(copy.deepcopy(a)), long_ans)
        self.assertEqual(list(copy.deepcopy(b)), long_ans)
        self.assertEqual(list(a), long_ans)
        self.assertEqual(list(b), long_ans)

        # check partially consumed deepcopy
        a, b = tee('abc')
        take(2, a)
        take(1, b)
        self.assertEqual(list(copy.deepcopy(a)), ans[2:])
        self.assertEqual(list(copy.deepcopy(b)), ans[1:])
        self.assertEqual(list(a), ans[2:])
        self.assertEqual(list(b), ans[1:])
        a, b = tee(range(10000))
        take(100, a)
        take(60, b)
        self.assertEqual(list(copy.deepcopy(a)), long_ans[100:])
        self.assertEqual(list(copy.deepcopy(b)), long_ans[60:])
        self.assertEqual(list(a), long_ans[100:])
        self.assertEqual(list(b), long_ans[60:])

        # check pickle
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            self.pickletest(proto, iter(tee('abc')))
            a, b = tee('abc')
            self.pickletest(proto, a, compare=ans)
            self.pickletest(proto, b, compare=ans)
    # Issue 13454: Crash when deleting backward iterator from tee()
    def test_tee_del_backward(self):
        """Deleting the far-lagging tee branch after a long run must not
        crash the interpreter (regression test for issue 13454)."""
        forward, backward = tee(repeat(None, 20000000))
        try:
            any(forward)  # exhaust the iterator
            del backward
        except:
            # On failure (e.g. MemoryError), drop both branches before
            # re-raising so the huge internal buffer is released.
            del forward, backward
            raise
    def test_tee_reenter(self):
        """Re-entrant next() on a sibling tee iterator raises RuntimeError."""
        class I:
            first = True
            def __iter__(self):
                return self
            def __next__(self):
                # The first next() on *a* re-enters the shared tee via *b*.
                first = self.first
                self.first = False
                if first:
                    return next(b)
        a, b = tee(I())
        with self.assertRaisesRegex(RuntimeError, "tee"):
            next(a)
    def test_tee_concurrent(self):
        """Concurrent next() on sibling tee iterators raises RuntimeError."""
        start = threading.Event()
        finish = threading.Event()
        class I:
            def __iter__(self):
                return self
            def __next__(self):
                # Park the worker thread inside next(a) so the main
                # thread's next(b) is guaranteed to overlap with it.
                start.set()
                finish.wait()
        a, b = tee(I())
        thread = threading.Thread(target=next, args=[a])
        thread.start()
        try:
            start.wait()
            with self.assertRaisesRegex(RuntimeError, "tee"):
                next(b)
        finally:
            # Always release the worker so the test cannot hang.
            finish.set()
            thread.join()
    def test_StopIteration(self):
        """Every tool must raise StopIteration immediately on empty input,
        whether the input is an empty list or an already-stopped iterator."""
        self.assertRaises(StopIteration, next, zip())

        for f in (chain, cycle, zip, groupby):
            self.assertRaises(StopIteration, next, f([]))
            self.assertRaises(StopIteration, next, f(StopNow()))

        self.assertRaises(StopIteration, next, islice([], None))
        self.assertRaises(StopIteration, next, islice(StopNow(), None))

        p, q = tee([])
        self.assertRaises(StopIteration, next, p)
        self.assertRaises(StopIteration, next, q)
        p, q = tee(StopNow())
        self.assertRaises(StopIteration, next, p)
        self.assertRaises(StopIteration, next, q)

        self.assertRaises(StopIteration, next, repeat(None, 0))

        for f in (filter, filterfalse, map, takewhile, dropwhile, starmap):
            self.assertRaises(StopIteration, next, f(lambda x:x, []))
            self.assertRaises(StopIteration, next, f(lambda x:x, StopNow()))
    @support.cpython_only
    def test_combinations_result_gc(self):
        """Recycled result tuples must be re-tracked by the GC (bpo-42536)."""
        # bpo-42536: combinations's tuple-reuse speed trick breaks the GC's
        # assumptions about what can be untracked. Make sure we re-track result
        # tuples whenever we reuse them.
        it = combinations([None, []], 1)
        next(it)
        gc.collect()
        # That GC collection probably untracked the recycled internal result
        # tuple, which has the value (None,). Make sure it's re-tracked when
        # it's mutated and returned from __next__:
        self.assertTrue(gc.is_tracked(next(it)))
    @support.cpython_only
    def test_combinations_with_replacement_result_gc(self):
        """Same re-tracking requirement as bpo-42536, for
        combinations_with_replacement."""
        # Ditto for combinations_with_replacement.
        it = combinations_with_replacement([None, []], 1)
        next(it)
        gc.collect()
        self.assertTrue(gc.is_tracked(next(it)))
    @support.cpython_only
    def test_permutations_result_gc(self):
        """Same re-tracking requirement as bpo-42536, for permutations."""
        # Ditto for permutations.
        it = permutations([None, []], 1)
        next(it)
        gc.collect()
        self.assertTrue(gc.is_tracked(next(it)))
    @support.cpython_only
    def test_product_result_gc(self):
        """Same re-tracking requirement as bpo-42536, for product."""
        # Ditto for product.
        it = product([None, []])
        next(it)
        gc.collect()
        self.assertTrue(gc.is_tracked(next(it)))
    @support.cpython_only
    def test_zip_longest_result_gc(self):
        """Same re-tracking requirement as bpo-42536, for zip_longest."""
        # Ditto for zip_longest.
        it = zip_longest([[]])
        gc.collect()
        self.assertTrue(gc.is_tracked(next(it)))
class TestExamples(unittest.TestCase):
    """Smoke tests: each itertools tool reproduces its documented example."""

    def test_accumulate(self):
        self.assertEqual(list(accumulate([1, 2, 3, 4, 5])), [1, 3, 6, 10, 15])

    def test_accumulate_reducible(self):
        # accumulate() must survive copy, deepcopy and pickling mid-stream.
        data = [1, 2, 3, 4, 5]
        accumulated = [1, 3, 6, 10, 15]
        for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
            stream = accumulate(data)
            self.assertEqual(
                list(pickle.loads(pickle.dumps(stream, protocol))),
                accumulated[:])
            self.assertEqual(next(stream), 1)
            self.assertEqual(
                list(pickle.loads(pickle.dumps(stream, protocol))),
                accumulated[1:])
        stream = accumulate(data)
        self.assertEqual(next(stream), 1)
        self.assertEqual(list(copy.deepcopy(stream)), accumulated[1:])
        self.assertEqual(list(copy.copy(stream)), accumulated[1:])

    def test_accumulate_reducible_none(self):
        # Issue #25718: a running total of None must still reduce/copy.
        stream = accumulate([None, None, None], operator.is_)
        self.assertEqual(next(stream), None)
        for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
            revived = pickle.loads(pickle.dumps(stream, protocol))
            self.assertEqual(list(revived), [True, False])
        self.assertEqual(list(copy.deepcopy(stream)), [True, False])
        self.assertEqual(list(copy.copy(stream)), [True, False])

    def test_chain(self):
        self.assertEqual(''.join(chain('ABC', 'DEF')), 'ABCDEF')

    def test_chain_from_iterable(self):
        joined = ''.join(chain.from_iterable(['ABC', 'DEF']))
        self.assertEqual(joined, 'ABCDEF')

    def test_combinations(self):
        expected = [('A','B'), ('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')]
        self.assertEqual(list(combinations('ABCD', 2)), expected)
        self.assertEqual(list(combinations(range(4), 3)),
                         [(0,1,2), (0,1,3), (0,2,3), (1,2,3)])

    def test_combinations_with_replacement(self):
        expected = [('A','A'), ('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')]
        self.assertEqual(list(combinations_with_replacement('ABC', 2)), expected)

    def test_compress(self):
        self.assertEqual(list(compress('ABCDEF', [1, 0, 1, 0, 1, 1])), list('ACEF'))

    def test_count(self):
        self.assertEqual(list(islice(count(10), 5)), [10, 11, 12, 13, 14])

    def test_cycle(self):
        self.assertEqual(list(islice(cycle('ABCD'), 12)), list('ABCDABCDABCD'))

    def test_dropwhile(self):
        self.assertEqual(list(dropwhile(lambda x: x < 5, [1, 4, 6, 4, 1])),
                         [6, 4, 1])

    def test_groupby(self):
        keys = [k for k, g in groupby('AAAABBBCCDAABBB')]
        self.assertEqual(keys, list('ABCDAB'))
        runs = [list(g) for k, g in groupby('AAAABBBCCD')]
        self.assertEqual(runs, [list('AAAA'), list('BBB'), list('CC'), list('D')])

    def test_filter(self):
        self.assertEqual(list(filter(lambda x: x % 2, range(10))), [1, 3, 5, 7, 9])

    def test_filterfalse(self):
        self.assertEqual(list(filterfalse(lambda x: x % 2, range(10))),
                         [0, 2, 4, 6, 8])

    def test_map(self):
        self.assertEqual(list(map(pow, (2, 3, 10), (5, 2, 3))), [32, 9, 1000])

    def test_islice(self):
        self.assertEqual(list(islice('ABCDEFG', 2)), list('AB'))
        self.assertEqual(list(islice('ABCDEFG', 2, 4)), list('CD'))
        self.assertEqual(list(islice('ABCDEFG', 2, None)), list('CDEFG'))
        self.assertEqual(list(islice('ABCDEFG', 0, None, 2)), list('ACEG'))

    def test_zip(self):
        self.assertEqual(list(zip('ABCD', 'xy')), [('A', 'x'), ('B', 'y')])

    def test_zip_longest(self):
        padded = list(zip_longest('ABCD', 'xy', fillvalue='-'))
        self.assertEqual(padded, [('A', 'x'), ('B', 'y'), ('C', '-'), ('D', '-')])

    def test_permutations(self):
        expected = list(map(tuple, 'AB AC AD BA BC BD CA CB CD DA DB DC'.split()))
        self.assertEqual(list(permutations('ABCD', 2)), expected)
        self.assertEqual(list(permutations(range(3))),
                         [(0,1,2), (0,2,1), (1,0,2), (1,2,0), (2,0,1), (2,1,0)])

    def test_product(self):
        expected = list(map(tuple, 'Ax Ay Bx By Cx Cy Dx Dy'.split()))
        self.assertEqual(list(product('ABCD', 'xy')), expected)
        self.assertEqual(list(product(range(2), repeat=3)),
                         [(0,0,0), (0,0,1), (0,1,0), (0,1,1),
                          (1,0,0), (1,0,1), (1,1,0), (1,1,1)])

    def test_repeat(self):
        self.assertEqual(list(repeat(10, 3)), [10, 10, 10])

    # NOTE(review): method name carries an upstream typo ("stapmap"); kept
    # unchanged so the externally visible test name is stable.
    def test_stapmap(self):
        self.assertEqual(list(starmap(pow, [(2, 5), (3, 2), (10, 3)])),
                         [32, 9, 1000])

    def test_takewhile(self):
        self.assertEqual(list(takewhile(lambda x: x < 5, [1, 4, 6, 4, 1])),
                         [1, 4])
class TestPurePythonRoughEquivalents(unittest.TestCase):
    """Validate the pure-python "roughly equivalent" recipes from the docs."""

    @staticmethod
    def islice(iterable, *args):
        # Doc recipe for islice(): consume up to *start*, emit every
        # *step*-th element until *stop*, and leave the source iterator in
        # the same state the C implementation would.
        s = slice(*args)
        start, stop, step = s.start or 0, s.stop or sys.maxsize, s.step or 1
        it = iter(range(start, stop, step))
        try:
            nexti = next(it)
        except StopIteration:
            # Consume *iterable* up to the *start* position.
            for i, element in zip(range(start), iterable):
                pass
            return
        try:
            for i, element in enumerate(iterable):
                if i == nexti:
                    yield element
                    nexti = next(it)
        except StopIteration:
            # Consume to *stop*.
            for i, element in zip(range(i + 1, stop), iterable):
                pass

    def test_islice_recipe(self):
        """The recipe must match the C islice() on outputs AND consumption."""
        self.assertEqual(list(self.islice('ABCDEFG', 2)), list('AB'))
        self.assertEqual(list(self.islice('ABCDEFG', 2, 4)), list('CD'))
        self.assertEqual(list(self.islice('ABCDEFG', 2, None)), list('CDEFG'))
        self.assertEqual(list(self.islice('ABCDEFG', 0, None, 2)), list('ACEG'))
        # Test items consumed.
        it = iter(range(10))
        self.assertEqual(list(self.islice(it, 3)), list(range(3)))
        self.assertEqual(list(it), list(range(3, 10)))
        it = iter(range(10))
        self.assertEqual(list(self.islice(it, 3, 3)), [])
        self.assertEqual(list(it), list(range(3, 10)))
        # Test that slice finishes in predictable state.
        c = count()
        self.assertEqual(list(self.islice(c, 1, 3, 50)), [1])
        self.assertEqual(next(c), 3)
class TestGC(unittest.TestCase):
    """Each itertools object must participate in garbage collection so that
    a reference cycle routed through it is collectable rather than leaked."""

    def makecycle(self, iterator, container):
        """Build the cycle container -> iterator -> container, then drop
        both local references; collection is left to the GC."""
        container.append(iterator)
        next(iterator)
        del container, iterator

    def test_accumulate(self):
        a = []
        self.makecycle(accumulate([1,2,a,3]), a)

    def test_chain(self):
        a = []
        self.makecycle(chain(a), a)

    def test_chain_from_iterable(self):
        a = []
        self.makecycle(chain.from_iterable([a]), a)

    def test_combinations(self):
        a = []
        self.makecycle(combinations([1,2,a,3], 3), a)

    def test_combinations_with_replacement(self):
        a = []
        self.makecycle(combinations_with_replacement([1,2,a,3], 3), a)

    def test_compress(self):
        a = []
        self.makecycle(compress('ABCDEF', [1,0,1,0,1,0]), a)

    def test_count(self):
        a = []
        # Int subclass carries the container so count() holds the cycle.
        Int = type('Int', (int,), dict(x=a))
        self.makecycle(count(Int(0), Int(1)), a)

    def test_cycle(self):
        a = []
        self.makecycle(cycle([a]*2), a)

    def test_dropwhile(self):
        a = []
        self.makecycle(dropwhile(bool, [0, a, a]), a)

    def test_groupby(self):
        a = []
        self.makecycle(groupby([a]*2, lambda x:x), a)

    def test_issue2246(self):
        # Issue 2246 -- the _grouper iterator was not included in GC
        n = 10
        keyfunc = lambda x: x
        for i, j in groupby(range(n), key=keyfunc):
            keyfunc.__dict__.setdefault('x',[]).append(j)

    def test_filter(self):
        a = []
        self.makecycle(filter(lambda x:True, [a]*2), a)

    def test_filterfalse(self):
        a = []
        self.makecycle(filterfalse(lambda x:False, a), a)

    def test_zip(self):
        a = []
        self.makecycle(zip([a]*2, [a]*3), a)

    def test_zip_longest(self):
        a = []
        self.makecycle(zip_longest([a]*2, [a]*3), a)
        # The fillvalue is another path by which the cycle can be held.
        b = [a, None]
        self.makecycle(zip_longest([a]*2, [a]*3, fillvalue=b), a)

    def test_map(self):
        a = []
        self.makecycle(map(lambda x:x, [a]*2), a)

    def test_islice(self):
        a = []
        self.makecycle(islice([a]*2, None), a)

    def test_pairwise(self):
        a = []
        self.makecycle(pairwise([a]*5), a)

    def test_permutations(self):
        a = []
        self.makecycle(permutations([1,2,a,3], 3), a)

    def test_product(self):
        a = []
        self.makecycle(product([1,2,a,3], repeat=3), a)

    def test_repeat(self):
        a = []
        self.makecycle(repeat(a), a)

    def test_starmap(self):
        a = []
        self.makecycle(starmap(lambda *t: t, [(a,a)]*2), a)

    def test_takewhile(self):
        a = []
        self.makecycle(takewhile(bool, [1, 0, a, a]), a)
def R(seqn):
    """Plain generator flavour: yields each element of *seqn* unchanged."""
    yield from seqn
class G:
    """Old-style sequence flavour: iterable only via __getitem__ indexing."""

    def __init__(self, seqn):
        self.seqn = seqn

    def __getitem__(self, index):
        # IndexError from the underlying sequence ends iteration.
        return self.seqn[index]
class I:
    """Iterator-protocol flavour: tracks its own cursor explicitly."""

    def __init__(self, seqn):
        self.seqn = seqn
        self.i = 0

    def __iter__(self):
        return self

    def __next__(self):
        # Exhausted once the cursor walks past the end of the sequence.
        if self.i < len(self.seqn):
            value = self.seqn[self.i]
            self.i += 1
            return value
        raise StopIteration
class Ig:
    """Flavour whose __iter__ is a generator over the stored sequence."""

    def __init__(self, seqn):
        self.seqn = seqn
        # Kept only for signature parity with the sibling helper classes.
        self.i = 0

    def __iter__(self):
        yield from self.seqn
class X:
    """Broken flavour: has __next__ but neither __getitem__ nor __iter__,
    so it is not iterable at all."""

    def __init__(self, seqn):
        self.seqn = seqn
        self.i = 0

    def __next__(self):
        if self.i < len(self.seqn):
            item = self.seqn[self.i]
            self.i += 1
            return item
        raise StopIteration
class N:
'Iterator missing __next__()'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
class E:
    """Exception-propagation flavour: every __next__ raises
    ZeroDivisionError instead of producing a value."""

    def __init__(self, seqn):
        self.seqn = seqn
        self.i = 0

    def __iter__(self):
        return self

    def __next__(self):
        # Deliberate blow-up so callers can verify exceptions propagate.
        3 // 0
class S:
    """Immediately-empty flavour: __next__ stops on the very first call."""

    def __init__(self, seqn):
        # The sequence argument is accepted for interface parity and ignored.
        pass

    def __iter__(self):
        return self

    def __next__(self):
        raise StopIteration
def L(seqn):
    """Layered flavour: stacks getitem, generator, identity-map and chain
    iterators on top of *seqn* to exercise multiple tiers at once."""
    layered = R(Ig(G(seqn)))
    return chain(map(lambda item: item, layered))
class TestVariousIteratorArgs(unittest.TestCase):
def test_accumulate(self):
s = [1,2,3,4,5]
r = [1,3,6,10,15]
n = len(s)
for g in (G, I, Ig, L, R):
self.assertEqual(list(accumulate(g(s))), r)
self.assertEqual(list(accumulate(S(s))), [])
self.assertRaises(TypeError, accumulate, X(s))
self.assertRaises(TypeError, accumulate, N(s))
self.assertRaises(ZeroDivisionError, list, accumulate(E(s)))
def test_chain(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(chain(g(s))), list(g(s)))
self.assertEqual(list(chain(g(s), g(s))), list(g(s))+list(g(s)))
self.assertRaises(TypeError, list, chain(X(s)))
self.assertRaises(TypeError, list, chain(N(s)))
self.assertRaises(ZeroDivisionError, list, chain(E(s)))
def test_compress(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
n = len(s)
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(compress(g(s), repeat(1))), list(g(s)))
self.assertRaises(TypeError, compress, X(s), repeat(1))
self.assertRaises(TypeError, compress, N(s), repeat(1))
self.assertRaises(ZeroDivisionError, list, compress(E(s), repeat(1)))
def test_product(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
self.assertRaises(TypeError, product, X(s))
self.assertRaises(TypeError, product, N(s))
self.assertRaises(ZeroDivisionError, product, E(s))
def test_cycle(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgtlen = len(s) * 3
expected = list(g(s))*3
actual = list(islice(cycle(g(s)), tgtlen))
self.assertEqual(actual, expected)
self.assertRaises(TypeError, cycle, X(s))
self.assertRaises(TypeError, cycle, N(s))
self.assertRaises(ZeroDivisionError, list, cycle(E(s)))
def test_groupby(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual([k for k, sb in groupby(g(s))], list(g(s)))
self.assertRaises(TypeError, groupby, X(s))
self.assertRaises(TypeError, groupby, N(s))
self.assertRaises(ZeroDivisionError, list, groupby(E(s)))
def test_filter(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(filter(isEven, g(s))),
[x for x in g(s) if isEven(x)])
self.assertRaises(TypeError, filter, isEven, X(s))
self.assertRaises(TypeError, filter, isEven, N(s))
self.assertRaises(ZeroDivisionError, list, filter(isEven, E(s)))
def test_filterfalse(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(filterfalse(isEven, g(s))),
[x for x in g(s) if isOdd(x)])
self.assertRaises(TypeError, filterfalse, isEven, X(s))
self.assertRaises(TypeError, filterfalse, isEven, N(s))
self.assertRaises(ZeroDivisionError, list, filterfalse(isEven, E(s)))
def test_zip(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(zip(g(s))), lzip(g(s)))
self.assertEqual(list(zip(g(s), g(s))), lzip(g(s), g(s)))
self.assertRaises(TypeError, zip, X(s))
self.assertRaises(TypeError, zip, N(s))
self.assertRaises(ZeroDivisionError, list, zip(E(s)))
def test_ziplongest(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(zip_longest(g(s))), list(zip(g(s))))
self.assertEqual(list(zip_longest(g(s), g(s))), list(zip(g(s), g(s))))
self.assertRaises(TypeError, zip_longest, X(s))
self.assertRaises(TypeError, zip_longest, N(s))
self.assertRaises(ZeroDivisionError, list, zip_longest(E(s)))
def test_map(self):
for s in (range(10), range(0), range(100), (7,11), range(20,50,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(map(onearg, g(s))),
[onearg(x) for x in g(s)])
self.assertEqual(list(map(operator.pow, g(s), g(s))),
[x**x for x in g(s)])
self.assertRaises(TypeError, map, onearg, X(s))
self.assertRaises(TypeError, map, onearg, N(s))
self.assertRaises(ZeroDivisionError, list, map(onearg, E(s)))
def test_islice(self):
for s in ("12345", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(islice(g(s),1,None,2)), list(g(s))[1::2])
self.assertRaises(TypeError, islice, X(s), 10)
self.assertRaises(TypeError, islice, N(s), 10)
self.assertRaises(ZeroDivisionError, list, islice(E(s), 10))
def test_pairwise(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
seq = list(g(s))
expected = list(zip(seq, seq[1:]))
actual = list(pairwise(g(s)))
self.assertEqual(actual, expected)
self.assertRaises(TypeError, pairwise, X(s))
self.assertRaises(TypeError, pairwise, N(s))
self.assertRaises(ZeroDivisionError, list, pairwise(E(s)))
def test_starmap(self):
for s in (range(10), range(0), range(100), (7,11), range(20,50,5)):
for g in (G, I, Ig, S, L, R):
ss = lzip(s, s)
self.assertEqual(list(starmap(operator.pow, g(ss))),
[x**x for x in g(s)])
self.assertRaises(TypeError, starmap, operator.pow, X(ss))
self.assertRaises(TypeError, starmap, operator.pow, N(ss))
self.assertRaises(ZeroDivisionError, list, starmap(operator.pow, E(ss)))
def test_takewhile(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgt = []
for elem in g(s):
if not isEven(elem): break
tgt.append(elem)
self.assertEqual(list(takewhile(isEven, g(s))), tgt)
self.assertRaises(TypeError, takewhile, isEven, X(s))
self.assertRaises(TypeError, takewhile, isEven, N(s))
self.assertRaises(ZeroDivisionError, list, takewhile(isEven, E(s)))
def test_dropwhile(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgt = []
for elem in g(s):
if not tgt and isOdd(elem): continue
tgt.append(elem)
self.assertEqual(list(dropwhile(isOdd, g(s))), tgt)
self.assertRaises(TypeError, dropwhile, isOdd, X(s))
self.assertRaises(TypeError, dropwhile, isOdd, N(s))
self.assertRaises(ZeroDivisionError, list, dropwhile(isOdd, E(s)))
def test_tee(self):
    """Both tee() branches must replay the full source sequence."""
    for data in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
        for maker in (G, I, Ig, S, L, R):
            first, second = tee(maker(data))
            self.assertEqual(list(first), list(maker(data)))
            self.assertEqual(list(second), list(maker(data)))
        self.assertRaises(TypeError, tee, X(data))
        self.assertRaises(TypeError, tee, N(data))
        self.assertRaises(ZeroDivisionError, list, tee(E(data))[0])
class LengthTransparency(unittest.TestCase):
    """Verify that repeat() exposes an accurate __length_hint__."""

    def test_repeat(self):
        self.assertEqual(operator.length_hint(repeat(None, 50)), 50)
        self.assertEqual(operator.length_hint(repeat(None, 0)), 0)
        # An unbounded repeat falls back to the supplied default hint.
        self.assertEqual(operator.length_hint(repeat(None), 12), 12)

    def test_repeat_with_negative_times(self):
        # Negative repetition counts clamp the hint to zero, whether the
        # count is passed positionally or by keyword.
        for times in (-1, -2):
            self.assertEqual(operator.length_hint(repeat(None, times)), 0)
            self.assertEqual(operator.length_hint(repeat(None, times=times)), 0)
class RegressionTests(unittest.TestCase):
    """Regressions for historical itertools bugs.

    These tests depend on exact evaluation order and deliberate shared
    mutable state; do not reorder statements.
    """

    def test_sf_793826(self):
        # Fix Armin Rigo's successful efforts to wreak havoc

        def mutatingtuple(tuple1, f, tuple2):
            # this builds a tuple t which is a copy of tuple1,
            # then calls f(t), then mutates t to be equal to tuple2
            # (needs len(tuple1) == len(tuple2)).
            def g(value, first=[1]):
                # 'first' is a deliberate mutable default: the callback
                # fires exactly once, on the first mapped element.
                if first:
                    del first[:]
                    f(next(z))
                return value
            items = list(tuple2)
            items[1:1] = list(tuple1)
            gen = map(g, items)
            z = zip(*[gen]*len(tuple1))
            next(z)

        def f(t):
            global T
            T = t
            first[:] = list(T)

        first = []
        mutatingtuple((1,2,3), f, (4,5,6))
        second = list(T)
        self.assertEqual(first, second)

    def test_sf_950057(self):
        # Make sure that chain() and cycle() catch exceptions immediately
        # rather than when shifting between input sources
        def gen1():
            hist.append(0)
            yield 1
            hist.append(1)
            raise AssertionError
            hist.append(2)

        def gen2(x):
            hist.append(3)
            yield 2
            hist.append(4)

        hist = []
        self.assertRaises(AssertionError, list, chain(gen1(), gen2(False)))
        self.assertEqual(hist, [0,1])

        hist = []
        self.assertRaises(AssertionError, list, chain(gen1(), gen2(True)))
        self.assertEqual(hist, [0,1])

        hist = []
        self.assertRaises(AssertionError, list, cycle(gen1()))
        self.assertEqual(hist, [0,1])

    @support.skip_if_pgo_task
    def test_long_chain_of_empty_iterables(self):
        # Make sure itertools.chain doesn't run into recursion limits when
        # dealing with long chains of empty iterables. Even with a high
        # number this would probably only fail in Py_DEBUG mode.
        it = chain.from_iterable(() for unused in range(10000000))
        with self.assertRaises(StopIteration):
            next(it)

    def test_issue30347_1(self):
        # Exhausting the current group from inside the key function must
        # not crash groupby.
        def f(n):
            if n == 5:
                list(b)
            return n != 6
        for (k, b) in groupby(range(10), f):
            list(b)  # shouldn't crash

    def test_issue30347_2(self):
        # Advancing the group iterator from inside __eq__ (called during
        # grouping) must not crash groupby.
        class K:
            def __init__(self, v):
                pass
            def __eq__(self, other):
                nonlocal i
                i += 1
                if i == 1:
                    next(g, None)
                return True
        i = 0
        g = next(groupby(range(10), K))[1]
        for j in range(2):
            next(g, None)  # shouldn't crash
class SubclassWithKwargsTest(unittest.TestCase):
    """Subclasses of the itertools types must tolerate keyword arguments."""

    def test_keywords_in_subclass(self):
        # count is not subclassable...
        iterator_types = (repeat, zip, filter, filterfalse, chain, map,
                          starmap, islice, takewhile, dropwhile, cycle, compress)
        for cls in iterator_types:
            class Subclass(cls):
                def __init__(self, newarg=None, *args):
                    cls.__init__(self, *args)
            try:
                Subclass(newarg=1)
            except TypeError as err:
                # A wrong-argument-count TypeError is fine; rejecting
                # keyword arguments outright is the bug being guarded.
                self.assertNotIn("keyword arguments", err.args[0])
@support.cpython_only
class SizeofTest(unittest.TestCase):
    """Check sys.getsizeof() results for the combinatoric iterators."""

    def setUp(self):
        # Width of a Py_ssize_t on this build.
        self.ssize_t = struct.calcsize('n')

    check_sizeof = support.check_sizeof

    def test_product_sizeof(self):
        base = support.calcobjsize('3Pi')
        self.check_sizeof(product('ab', '12'), base + 2 * self.ssize_t)
        self.check_sizeof(product(*(('abc',) * 10)), base + 10 * self.ssize_t)

    def test_combinations_sizeof(self):
        base = support.calcobjsize('3Pni')
        self.check_sizeof(combinations('abcd', 3), base + 3 * self.ssize_t)
        self.check_sizeof(combinations(range(10), 4), base + 4 * self.ssize_t)

    def test_combinations_with_replacement_sizeof(self):
        cwr = combinations_with_replacement
        base = support.calcobjsize('3Pni')
        self.check_sizeof(cwr('abcd', 3), base + 3 * self.ssize_t)
        self.check_sizeof(cwr(range(10), 4), base + 4 * self.ssize_t)

    def test_permutations_sizeof(self):
        # Size is pool length plus result length, in ssize_t slots.
        base = support.calcobjsize('4Pni')
        self.check_sizeof(permutations('abcd'),
                          base + 4 * self.ssize_t + 4 * self.ssize_t)
        self.check_sizeof(permutations('abcd', 3),
                          base + 4 * self.ssize_t + 3 * self.ssize_t)
        self.check_sizeof(permutations('abcde', 3),
                          base + 5 * self.ssize_t + 3 * self.ssize_t)
        self.check_sizeof(permutations(range(10), 4),
                          base + 10 * self.ssize_t + 4 * self.ssize_t)
libreftest = """ Doctest for examples in the library reference: libitertools.tex
>>> amounts = [120.15, 764.05, 823.14]
>>> for checknum, amount in zip(count(1200), amounts):
... print('Check %d is for $%.2f' % (checknum, amount))
...
Check 1200 is for $120.15
Check 1201 is for $764.05
Check 1202 is for $823.14
>>> import operator
>>> for cube in map(operator.pow, range(1,4), repeat(3)):
... print(cube)
...
1
8
27
>>> reportlines = ['EuroPython', 'Roster', '', 'alex', '', 'laura', '', 'martin', '', 'walter', '', 'samuele']
>>> for name in islice(reportlines, 3, None, 2):
... print(name.title())
...
Alex
Laura
Martin
Walter
Samuele
>>> from operator import itemgetter
>>> d = dict(a=1, b=2, c=1, d=2, e=1, f=2, g=3)
>>> di = sorted(sorted(d.items()), key=itemgetter(1))
>>> for k, g in groupby(di, itemgetter(1)):
... print(k, list(map(itemgetter(0), g)))
...
1 ['a', 'c', 'e']
2 ['b', 'd', 'f']
3 ['g']
# Find runs of consecutive numbers using groupby. The key to the solution
# is differencing with a range so that consecutive numbers all appear in
# same group.
>>> data = [ 1, 4,5,6, 10, 15,16,17,18, 22, 25,26,27,28]
>>> for k, g in groupby(enumerate(data), lambda t:t[0]-t[1]):
... print(list(map(operator.itemgetter(1), g)))
...
[1]
[4, 5, 6]
[10]
[15, 16, 17, 18]
[22]
[25, 26, 27, 28]
>>> def take(n, iterable):
... "Return first n items of the iterable as a list"
... return list(islice(iterable, n))
>>> def prepend(value, iterator):
... "Prepend a single value in front of an iterator"
... # prepend(1, [2, 3, 4]) -> 1 2 3 4
... return chain([value], iterator)
>>> def enumerate(iterable, start=0):
... return zip(count(start), iterable)
>>> def tabulate(function, start=0):
... "Return function(0), function(1), ..."
... return map(function, count(start))
>>> import collections
>>> def consume(iterator, n=None):
... "Advance the iterator n-steps ahead. If n is None, consume entirely."
... # Use functions that consume iterators at C speed.
... if n is None:
... # feed the entire iterator into a zero-length deque
... collections.deque(iterator, maxlen=0)
... else:
... # advance to the empty slice starting at position n
... next(islice(iterator, n, n), None)
>>> def nth(iterable, n, default=None):
... "Returns the nth item or a default value"
... return next(islice(iterable, n, None), default)
>>> def all_equal(iterable):
... "Returns True if all the elements are equal to each other"
... g = groupby(iterable)
... return next(g, True) and not next(g, False)
>>> def quantify(iterable, pred=bool):
... "Count how many times the predicate is true"
... return sum(map(pred, iterable))
>>> def pad_none(iterable):
... "Returns the sequence elements and then returns None indefinitely"
... return chain(iterable, repeat(None))
>>> def ncycles(iterable, n):
... "Returns the sequence elements n times"
... return chain(*repeat(iterable, n))
>>> def dotproduct(vec1, vec2):
... return sum(map(operator.mul, vec1, vec2))
>>> def flatten(listOfLists):
... return list(chain.from_iterable(listOfLists))
>>> def repeatfunc(func, times=None, *args):
... "Repeat calls to func with specified arguments."
... " Example: repeatfunc(random.random)"
... if times is None:
... return starmap(func, repeat(args))
... else:
... return starmap(func, repeat(args, times))
>>> def triplewise(iterable):
... "Return overlapping triplets from an iterable"
... # pairwise('ABCDEFG') -> ABC BCD CDE DEF EFG
... for (a, _), (b, c) in pairwise(pairwise(iterable)):
... yield a, b, c
>>> import collections
>>> def sliding_window(iterable, n):
... # sliding_window('ABCDEFG', 4) -> ABCD BCDE CDEF DEFG
... it = iter(iterable)
... window = collections.deque(islice(it, n), maxlen=n)
... if len(window) == n:
... yield tuple(window)
... for x in it:
... window.append(x)
... yield tuple(window)
>>> def grouper(n, iterable, fillvalue=None):
... "grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
... args = [iter(iterable)] * n
... return zip_longest(*args, fillvalue=fillvalue)
>>> def roundrobin(*iterables):
... "roundrobin('ABC', 'D', 'EF') --> A D E B F C"
... # Recipe credited to George Sakkis
... pending = len(iterables)
... nexts = cycle(iter(it).__next__ for it in iterables)
... while pending:
... try:
... for next in nexts:
... yield next()
... except StopIteration:
... pending -= 1
... nexts = cycle(islice(nexts, pending))
>>> def partition(pred, iterable):
... "Use a predicate to partition entries into false entries and true entries"
... # partition(is_odd, range(10)) --> 0 2 4 6 8 and 1 3 5 7 9
... t1, t2 = tee(iterable)
... return filterfalse(pred, t1), filter(pred, t2)
>>> def before_and_after(predicate, it):
... ''' Variant of takewhile() that allows complete
... access to the remainder of the iterator.
...
... >>> all_upper, remainder = before_and_after(str.isupper, 'ABCdEfGhI')
... >>> str.join('', all_upper)
... 'ABC'
... >>> str.join('', remainder)
... 'dEfGhI'
...
... Note that the first iterator must be fully
... consumed before the second iterator can
... generate valid results.
... '''
... it = iter(it)
... transition = []
... def true_iterator():
... for elem in it:
... if predicate(elem):
... yield elem
... else:
... transition.append(elem)
... return
... def remainder_iterator():
... yield from transition
... yield from it
... return true_iterator(), remainder_iterator()
>>> def powerset(iterable):
... "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
... s = list(iterable)
... return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
>>> def unique_everseen(iterable, key=None):
... "List unique elements, preserving order. Remember all elements ever seen."
... # unique_everseen('AAAABBBCCDAABBB') --> A B C D
... # unique_everseen('ABBCcAD', str.lower) --> A B C D
... seen = set()
... seen_add = seen.add
... if key is None:
... for element in iterable:
... if element not in seen:
... seen_add(element)
... yield element
... else:
... for element in iterable:
... k = key(element)
... if k not in seen:
... seen_add(k)
... yield element
>>> def unique_justseen(iterable, key=None):
... "List unique elements, preserving order. Remember only the element just seen."
... # unique_justseen('AAAABBBCCDAABBB') --> A B C D A B
... # unique_justseen('ABBCcAD', str.lower) --> A B C A D
... return map(next, map(itemgetter(1), groupby(iterable, key)))
>>> def first_true(iterable, default=False, pred=None):
... '''Returns the first true value in the iterable.
...
... If no true value is found, returns *default*
...
... If *pred* is not None, returns the first item
... for which pred(item) is true.
...
... '''
... # first_true([a,b,c], x) --> a or b or c or x
... # first_true([a,b], x, f) --> a if f(a) else b if f(b) else x
... return next(filter(pred, iterable), default)
>>> def nth_combination(iterable, r, index):
... 'Equivalent to list(combinations(iterable, r))[index]'
... pool = tuple(iterable)
... n = len(pool)
... if r < 0 or r > n:
... raise ValueError
... c = 1
... k = min(r, n-r)
... for i in range(1, k+1):
... c = c * (n - k + i) // i
... if index < 0:
... index += c
... if index < 0 or index >= c:
... raise IndexError
... result = []
... while r:
... c, n, r = c*r//n, n-1, r-1
... while index >= c:
... index -= c
... c, n = c*(n-r)//n, n-1
... result.append(pool[-1-n])
... return tuple(result)
This is not part of the examples but it tests to make sure the definitions
perform as purported.
>>> take(10, count())
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> list(prepend(1, [2, 3, 4]))
[1, 2, 3, 4]
>>> list(enumerate('abc'))
[(0, 'a'), (1, 'b'), (2, 'c')]
>>> list(islice(tabulate(lambda x: 2*x), 4))
[0, 2, 4, 6]
>>> it = iter(range(10))
>>> consume(it, 3)
>>> next(it)
3
>>> consume(it)
>>> next(it, 'Done')
'Done'
>>> nth('abcde', 3)
'd'
>>> nth('abcde', 9) is None
True
>>> [all_equal(s) for s in ('', 'A', 'AAAA', 'AAAB', 'AAABA')]
[True, True, True, False, False]
>>> quantify(range(99), lambda x: x%2==0)
50
>>> a = [[1, 2, 3], [4, 5, 6]]
>>> flatten(a)
[1, 2, 3, 4, 5, 6]
>>> list(repeatfunc(pow, 5, 2, 3))
[8, 8, 8, 8, 8]
>>> import random
>>> take(5, map(int, repeatfunc(random.random)))
[0, 0, 0, 0, 0]
>>> list(islice(pad_none('abc'), 0, 6))
['a', 'b', 'c', None, None, None]
>>> list(ncycles('abc', 3))
['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c']
>>> dotproduct([1,2,3], [4,5,6])
32
>>> list(grouper(3, 'abcdefg', 'x'))
[('a', 'b', 'c'), ('d', 'e', 'f'), ('g', 'x', 'x')]
>>> list(triplewise('ABCDEFG'))
[('A', 'B', 'C'), ('B', 'C', 'D'), ('C', 'D', 'E'), ('D', 'E', 'F'), ('E', 'F', 'G')]
>>> list(sliding_window('ABCDEFG', 4))
[('A', 'B', 'C', 'D'), ('B', 'C', 'D', 'E'), ('C', 'D', 'E', 'F'), ('D', 'E', 'F', 'G')]
>>> list(roundrobin('abc', 'd', 'ef'))
['a', 'd', 'e', 'b', 'f', 'c']
>>> def is_odd(x):
... return x % 2 == 1
>>> evens, odds = partition(is_odd, range(10))
>>> list(evens)
[0, 2, 4, 6, 8]
>>> list(odds)
[1, 3, 5, 7, 9]
>>> it = iter('ABCdEfGhI')
>>> all_upper, remainder = before_and_after(str.isupper, it)
>>> ''.join(all_upper)
'ABC'
>>> ''.join(remainder)
'dEfGhI'
>>> list(powerset([1,2,3]))
[(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
>>> all(len(list(powerset(range(n)))) == 2**n for n in range(18))
True
>>> list(powerset('abcde')) == sorted(sorted(set(powerset('abcde'))), key=len)
True
>>> list(unique_everseen('AAAABBBCCDAABBB'))
['A', 'B', 'C', 'D']
>>> list(unique_everseen('ABBCcAD', str.lower))
['A', 'B', 'C', 'D']
>>> list(unique_justseen('AAAABBBCCDAABBB'))
['A', 'B', 'C', 'D', 'A', 'B']
>>> list(unique_justseen('ABBCcAD', str.lower))
['A', 'B', 'C', 'A', 'D']
>>> first_true('ABC0DEF1', '9', str.isdigit)
'0'
>>> population = 'ABCDEFGH'
>>> for r in range(len(population) + 1):
... seq = list(combinations(population, r))
... for i in range(len(seq)):
... assert nth_combination(population, r, i) == seq[i]
... for i in range(-len(seq), 0):
... assert nth_combination(population, r, i) == seq[i]
"""
# Expose the doctest corpus above to support.run_doctest via the standard
# __test__ protocol.
__test__ = {'libreftest' : libreftest}
def test_main(verbose=None):
    """Run the itertools test suite, optionally re-checking total refcounts."""
    suite = (TestBasicOps, TestVariousIteratorArgs, TestGC,
             RegressionTests, LengthTransparency,
             SubclassWithKwargsTest, TestExamples,
             TestPurePythonRoughEquivalents,
             SizeofTest)
    support.run_unittest(*suite)

    # On debug builds (which expose sys.gettotalrefcount), rerun the suite
    # several times and print the totals so leaks are visible.
    if verbose and hasattr(sys, "gettotalrefcount"):
        import gc
        counts = []
        for _ in range(5):
            support.run_unittest(*suite)
            gc.collect()
            counts.append(sys.gettotalrefcount())
        print(counts)

    # doctest the examples in the library reference
    support.run_doctest(sys.modules[__name__], verbose)
# Running the module directly executes the full suite verbosely.
if __name__ == "__main__":
    test_main(verbose=True)
|
pyminer.py | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
# Seconds to back off after a failed or malformed RPC response.
ERR_SLEEP = 15
# Initial nonce-scan ceiling; Miner.iterate() retunes it from the measured
# hashrate each pass (Python 2 long literal).
MAX_NONCE = 1000000L

# Config key/value pairs parsed from the file given on the command line.
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for a local bitcoind node (Python 2)."""

    # Starting request id; each instance bumps its own copy per call.
    OBJID = 1

    def __init__(self, host, port, username, password):
        # HTTP Basic auth header built from the RPC credentials.
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # False is httplib's legacy 'strict' flag; 30 is the socket timeout.
        self.conn = httplib.HTTPConnection(host, port, False, 30)

    def rpc(self, method, params=None):
        """POST one JSON-RPC call; return the result, or None on failure.

        NOTE(review): on an RPC-level error this returns the error object
        itself, which callers cannot distinguish from a successful result —
        confirm this is intended.
        """
        self.OBJID += 1
        obj = { 'version' : '1.1',
                'method' : method,
                'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
                          { 'Authorization' : self.authhdr,
                            'Content-type' : 'application/json' })

        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None

        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None

        return resp_obj['result']

    def getblockcount(self):
        # Height of the node's best chain.
        return self.rpc('getblockcount')

    def getwork(self, data=None):
        # Fetch new work, or submit a solved block when 'data' is given.
        return self.rpc('getwork', data)
def uint32(x):
    """Truncate *x* to an unsigned 32-bit value.

    Fix: the original used the Python-2-only long literal ``0xffffffffL``.
    The plain literal is value-equivalent under Python 2 (the result still
    compares equal) and keeps this helper valid under Python 3.
    """
    return x & 0xffffffff
def bytereverse(x):
    """Swap the byte order of a 32-bit word (endianness flip)."""
    b0 = x << 24
    b1 = (x << 8) & 0x00ff0000
    b2 = (x >> 8) & 0x0000ff00
    b3 = x >> 24
    return uint32(b0 | b1 | b2 | b3)
def bufreverse(in_buf):
    """Byte-swap each 32-bit word of the buffer, keeping word order."""
    swapped = [struct.pack('@I',
                           bytereverse(struct.unpack('@I', in_buf[off:off + 4])[0]))
               for off in range(0, len(in_buf), 4)]
    return ''.join(swapped)
def wordreverse(in_buf):
    """Reverse the order of the buffer's 4-byte words."""
    words = [in_buf[off:off + 4] for off in range(0, len(in_buf), 4)]
    return ''.join(reversed(words))
class Miner:
    """Single-process CPU miner: scans nonces over getwork jobs (Python 2)."""

    def __init__(self, id):
        self.id = id
        # Nonce-scan ceiling; retuned every iteration from the hashrate.
        self.max_nonce = MAX_NONCE

    def work(self, datastr, targetstr):
        """Scan nonces for one getwork job.

        Returns (hashes_done, nonce_bin) where nonce_bin is the packed
        winning nonce, or None when the scan exhausted max_nonce.
        """
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)

        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]

        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]  # byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)

        # pre-hash first 76b of block header
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)

        for nonce in xrange(self.max_nonce):

            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)

            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()

            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()

            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue

            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)

            hash_str = hash.encode('hex')
            l = long(hash_str, 16)

            # proof-of-work test:  hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
                # return (nonce + 1, nonce_bin)

        return (nonce + 1, None)

    def submit_work(self, rpc, original_data, nonce_bin):
        """Splice the winning nonce into the work data and submit it."""
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        # The nonce occupies hex characters 152:160 of the work data.
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result

    def iterate(self, rpc):
        """Fetch one unit of work, scan it, and submit any solution."""
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return

        time_start = time.time()

        (hashes_done, nonce_bin) = self.work(work['data'],
                                             work['target'])

        time_end = time.time()
        time_diff = time_end - time_start

        # Retune max_nonce so one scan takes about settings['scantime']
        # seconds, capped below 2**32.
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL

        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                self.id, hashes_done,
                (hashes_done / 1000.0) / time_diff)

        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)

    def loop(self):
        """Mine forever against the configured RPC endpoint."""
        rpc = BitcoinRPC(settings['host'], settings['port'],
                         settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return

        while True:
            self.iterate(rpc)
def miner_thread(id):
    """Process entry point: run a dedicated Miner forever."""
    Miner(id).loop()
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)

    # Parse a simple key=value config file; lines starting with '#' are
    # comments.
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue

        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    # Defaults for the optional settings.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 7555
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)

    # All values arrive as strings; normalize the numeric ones.
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])

    # One OS process per mining thread (sidesteps the GIL).
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)  # stagger threads

    print settings['threads'], "mining threads started"

    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
pingbot.py | import os
import logging
import settings
import time
import multiprocessing
from pingbot_lib import Operations as op
from pingbot_lib import Prometheus as prom
# Module-level singletons shared by pinghost() and main(): the Prometheus
# exporter and the operations helper.
pr = prom()
o = op()
def pinghost(input_list):
    """Ping every host dict in input_list and export each result.

    Each entry is expected to carry 'hostname' and 'ip' keys; the ping
    outcome is pushed to the Prometheus exporter as a boolean status.
    """
    for host in input_list:
        reachable = o.ping(host['ip'])
        if reachable:
            print('abbadoo %s'%host['ip'])
        else:
            print('boob %s'%host['ip'])
        pr.current_ping({'hostname': host['hostname'],
                         'ip': host['ip'],
                         'status': True if reachable else False})
def main():
    """Run the ping loop forever: refresh the host list, ping, export metrics."""
    pr.start_server()
    # Make sure the git repo with the hosts file is present; clone if asked.
    if not os.path.exists(settings.CONFIG['GITROOT']):
        # Fixed: logging.warn is a deprecated alias of logging.warning.
        logging.warning("No gitroot directory present")
        if settings.CONFIG['CLONEREPO']:
            try:
                logging.info('Cloning the git repo %s' % (settings.CONFIG['GITURL']))
                o.git_clone()
            except Exception as e:
                logging.error('Could not clone the repo %s.' % (settings.CONFIG['GITURL']))
                logging.error(e)
    while True:
        # Read in the hosts file.
        try:
            hosts_file = settings.CONFIG['GITROOT'] + settings.CONFIG['HOSTS']
            out = o.read_hosts_file(hosts_file)
        except Exception as e:
            logging.error("Could not read the hosts file.")
            logging.error(e)
            # Bug fix: previously execution fell through with 'out' unbound,
            # raising NameError below and masking the real error. Wait for
            # the next interval and retry instead.
            time.sleep(settings.CONFIG['INTERVAL'])
            continue
        try:
            split_list = o.split_up_list(out)
        except Exception as e:
            logging.error("Could not split up the ip list.")
            logging.error(e)
            # Same fix as above: 'split_list' would be unbound here.
            time.sleep(settings.CONFIG['INTERVAL'])
            continue
        for chunk in split_list:
            pinghost(chunk)
        time.sleep(settings.CONFIG['INTERVAL'])
# Script entry point.
if __name__ == '__main__':
    main()
monitored_session_test.py | # pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for monitored_session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import Counter
import glob
import os
import threading
import time
import tensorflow as tf
from tensorflow.contrib import testing
from tensorflow.python.training import monitored_session
class ScaffoldTest(tf.test.TestCase):
  """Scaffold tests."""

  def test_nothing_created_before_finalize(self):
    # A fresh Scaffold must leave every slot unset until finalize().
    with tf.Graph().as_default():
      scaffold = tf.train.Scaffold()
      self.assertEqual(None, scaffold.init_op)
      self.assertEqual(None, scaffold.init_feed_dict)
      self.assertEqual(None, scaffold.init_fn)
      self.assertEqual(None, scaffold.ready_op)
      self.assertEqual(None, scaffold.local_init_op)
      self.assertEqual(None, scaffold.saver)

  def test_defaults_empty_graph(self):
    # finalize() fills in defaults when the graph has a variable.
    with tf.Graph().as_default():
      scaffold = tf.train.Scaffold()
      tf.Variable(1, name='my_var')
      scaffold.finalize()
      self.assertTrue(isinstance(scaffold.init_op, tf.Operation))
      self.assertEqual(None, scaffold.init_feed_dict)
      self.assertEqual(None, scaffold.init_fn)
      self.assertTrue(isinstance(scaffold.ready_op, tf.Tensor))
      self.assertTrue(isinstance(scaffold.local_init_op, tf.Operation))
      self.assertTrue(isinstance(scaffold.saver, tf.train.Saver))
      with self.test_session() as sess:
        self.assertTrue(b'my_var' in sess.run(scaffold.ready_op))
        sess.run([scaffold.init_op, scaffold.local_init_op])
        # Fixed: assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(0, len(sess.run(scaffold.ready_op)))

  def test_defaults_no_variables(self):
    # Defaults are still created when the graph has no variables at all.
    with tf.Graph().as_default():
      scaffold = tf.train.Scaffold()
      tf.constant(1, name='my_const')
      scaffold.finalize()
      self.assertTrue(isinstance(scaffold.init_op, tf.Operation))
      self.assertEqual(None, scaffold.init_feed_dict)
      self.assertEqual(None, scaffold.init_fn)
      self.assertTrue(isinstance(scaffold.ready_op, tf.Tensor))
      self.assertTrue(isinstance(scaffold.local_init_op, tf.Operation))
      self.assertTrue(isinstance(scaffold.saver, tf.train.Saver))

  def test_caches_values(self):
    # Two scaffolds finalized on the same graph share the cached ops.
    with tf.Graph().as_default():
      tf.Variable([1])
      scaffold1 = tf.train.Scaffold()
      scaffold1.finalize()
      scaffold2 = tf.train.Scaffold()
      scaffold2.finalize()
      self.assertEqual(scaffold1.init_op, scaffold2.init_op)
      self.assertEqual(scaffold1.ready_op, scaffold2.ready_op)
      self.assertEqual(scaffold1.local_init_op, scaffold2.local_init_op)
      self.assertEqual(scaffold1.saver, scaffold2.saver)

  def test_raise_error_if_more_than_one_cached_item(self):
    with tf.Graph().as_default():
      tf.Variable([1])
      tf.add_to_collection(tf.GraphKeys.SAVERS, tf.train.Saver())
      tf.add_to_collection(tf.GraphKeys.SAVERS, tf.train.Saver())
      with self.assertRaisesRegexp(RuntimeError, 'More than one item'):
        tf.train.Scaffold().finalize()

  def test_uses_passed_values(self):
    # Explicitly supplied values must survive finalize() untouched.
    with tf.Graph().as_default():
      tf.Variable([1])
      saver = tf.train.Saver()
      scaffold = tf.train.Scaffold(
          init_op=2,
          init_feed_dict=3,
          init_fn=lambda scaffold, sess: 4,
          ready_op=5,
          local_init_op=6,
          saver=saver)
      scaffold.finalize()
      self.assertEqual(2, scaffold.init_op)
      self.assertEqual(3, scaffold.init_feed_dict)
      self.assertTrue(callable(scaffold.init_fn))
      self.assertEqual(5, scaffold.ready_op)
      self.assertEqual(6, scaffold.local_init_op)
      self.assertEqual(saver, scaffold.saver)

  def test_graph_is_finalized(self):
    with tf.Graph().as_default():
      tf.Variable([1])
      tf.train.Scaffold().finalize()
      with self.assertRaisesRegexp(RuntimeError,
                                   'Graph is finalized and cannot be modified'):
        tf.constant([0])
def _test_dir(temp_dir, test_name):
  """Create an empty dir to use for tests.

  Args:
    temp_dir: Tmp directory path.
    test_name: Name of the test.

  Returns:
    Absolute path to the test directory.
  """
  test_dir = os.path.join(temp_dir, test_name)
  if not os.path.isdir(test_dir):
    os.makedirs(test_dir)
  else:
    # Reuse the directory but clear any files left by a previous run.
    for leftover in glob.glob('%s/*' % test_dir):
      os.remove(leftover)
  return test_dir
class MonitoredTrainingSessionTest(tf.test.TestCase):
  """Tests MonitoredTrainingSession."""

  def test_saving_restoring_checkpoint(self):
    logdir = _test_dir(self.get_temp_dir(), 'test_saving_restoring_checkpoint')
    with tf.Graph().as_default():
      global_step = tf.contrib.framework.get_or_create_global_step()
      increment = tf.assign_add(global_step, 1)
      with tf.train.MonitoredTrainingSession(
          is_chief=True, checkpoint_dir=logdir) as session:
        self.assertEqual(0, session.run(global_step))
        self.assertEqual(1, session.run(increment))
        self.assertEqual(2, session.run(increment))
      # A restart will find the checkpoint and recover automatically.
      with tf.train.MonitoredTrainingSession(
          is_chief=True, checkpoint_dir=logdir) as session:
        self.assertEqual(2, session.run(global_step))

  def test_summaries(self):
    logdir = _test_dir(self.get_temp_dir(), 'test_summaries')
    with tf.Graph().as_default():
      global_step = tf.contrib.framework.get_or_create_global_step()
      increment = tf.assign_add(global_step, 1)
      tf.scalar_summary('my_summary_tag', global_step * 2)
      with tf.train.MonitoredTrainingSession(
          is_chief=True, checkpoint_dir=logdir) as session:
        # 100 is the default number of steps between summary writes, so
        # 101 steps guarantees at least one write.
        for _ in range(101):
          session.run(increment)
      summaries = testing.latest_summaries(logdir)
      tags = [s.summary.value[0].tag for s in summaries]
      self.assertIn('my_summary_tag', tags)
      self.assertIn('global_step/sec', tags)
class StopAtNSession(monitored_session._WrappedSession):
  """A wrapped session that stops at the N-th call to _check_stop."""

  def __init__(self, sess, n):
    super(StopAtNSession, self).__init__(sess)
    # Number of _check_stop calls still to answer with False.
    self._count = n

  def _check_stop(self):
    should_stop = self._count == 0
    if not should_stop:
      self._count -= 1
    return should_stop
class WrappedSessionTest(tf.test.TestCase):
  """_WrappedSession tests."""

  def test_properties(self):
    # The wrapper must forward graph and sess_str to the wrapped session.
    with self.test_session() as sess:
      tf.constant(0.0)
      wrapped_sess = monitored_session._WrappedSession(sess)
      # Fixed: assertEquals is a deprecated alias of assertEqual.
      self.assertEqual(sess.graph, wrapped_sess.graph)
      self.assertEqual(sess.sess_str, wrapped_sess.sess_str)

  def test_should_stop_on_close(self):
    with self.test_session() as sess:
      wrapped_sess = monitored_session._WrappedSession(sess)
      self.assertFalse(wrapped_sess.should_stop())
      wrapped_sess.close()
      self.assertTrue(wrapped_sess.should_stop())

  def test_should_stop_uses_check_stop(self):
    with self.test_session() as sess:
      wrapped_sess = StopAtNSession(sess, 3)
      self.assertFalse(wrapped_sess.should_stop())
      self.assertFalse(wrapped_sess.should_stop())
      self.assertFalse(wrapped_sess.should_stop())
      self.assertTrue(wrapped_sess.should_stop())

  def test_should_stop_delegates_to_wrapped_session(self):
    # Stacked wrappers must consult the innermost session's stop check.
    with self.test_session() as sess:
      wrapped_sess0 = StopAtNSession(sess, 4)
      wrapped_sess1 = monitored_session._WrappedSession(wrapped_sess0)
      self.assertFalse(wrapped_sess1.should_stop())
      self.assertFalse(wrapped_sess1.should_stop())
      self.assertFalse(wrapped_sess1.should_stop())
      self.assertFalse(wrapped_sess1.should_stop())
      self.assertTrue(wrapped_sess1.should_stop())

  def test_close_twice(self):
    # Closing an already-closed wrapper must be a harmless no-op.
    with self.test_session() as sess:
      wrapped_sess = monitored_session._WrappedSession(sess)
      wrapped_sess.close()
      self.assertTrue(wrapped_sess.should_stop())
      wrapped_sess.close()
      self.assertTrue(wrapped_sess.should_stop())

  def test_run(self):
    with self.test_session() as sess:
      c = tf.constant(0)
      v = tf.identity(c)
      self.assertEqual(42, sess.run(v, feed_dict={c: 42}))
      wrapped_sess = monitored_session._WrappedSession(sess)
      self.assertEqual(51, wrapped_sess.run(v, feed_dict={c: 51}))
def busy_wait_for_coord_stop(coord):
  """Spin in short sleeps until the coordinator requests a stop."""
  while True:
    if coord.should_stop():
      break
    time.sleep(0.001)
class CoordinatedSessionTest(tf.test.TestCase):
  """_CoordinatedSession tests."""

  def test_properties(self):
    with self.test_session() as sess:
      tf.constant(0.0)
      coord = tf.train.Coordinator()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      # Fixed: assertEquals is a deprecated alias of assertEqual.
      self.assertEqual(sess.graph, coord_sess.graph)
      self.assertEqual(sess.sess_str, coord_sess.sess_str)

  def test_run(self):
    with self.test_session() as sess:
      c = tf.constant(0)
      v = tf.identity(c)
      coord = tf.train.Coordinator()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertEqual(42, coord_sess.run(v, feed_dict={c: 42}))

  def test_should_stop_on_close(self):
    with self.test_session() as sess:
      coord = tf.train.Coordinator()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertFalse(coord_sess.should_stop())
      coord_sess.close()
      self.assertTrue(coord_sess.should_stop())

  def test_should_stop_on_coord_stop(self):
    with self.test_session() as sess:
      coord = tf.train.Coordinator()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertFalse(coord_sess.should_stop())
      coord.request_stop()
      self.assertTrue(coord_sess.should_stop())

  def test_dont_request_stop_on_exception_in_main_thread(self):
    # An exception raised by run() in the main thread must not trip the
    # coordinator's stop flag.
    with self.test_session() as sess:
      c = tf.constant(0)
      v = tf.identity(c)
      coord = tf.train.Coordinator()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertFalse(coord_sess.should_stop())
      self.assertEqual(0, coord_sess.run(c))
      self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))
      with self.assertRaisesRegexp(TypeError, 'None has invalid type'):
        coord_sess.run([None], feed_dict={c: 2})
      self.assertFalse(coord.should_stop())
      self.assertFalse(coord_sess.should_stop())

  def test_stop_threads_on_close_after_exception(self):
    # Registered threads keep running through an exception, but close()
    # must stop them all.
    with self.test_session() as sess:
      c = tf.constant(0)
      v = tf.identity(c)
      coord = tf.train.Coordinator()
      threads = [threading.Thread(
          target=busy_wait_for_coord_stop, args=(coord,)) for _ in range(3)]
      for t in threads:
        coord.register_thread(t)
        t.start()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertFalse(coord_sess.should_stop())
      for t in threads:
        self.assertTrue(t.is_alive())
      self.assertEqual(0, coord_sess.run(c))
      for t in threads:
        self.assertTrue(t.is_alive())
      self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))
      for t in threads:
        self.assertTrue(t.is_alive())
      with self.assertRaisesRegexp(TypeError, 'None has invalid type'):
        coord_sess.run([None], feed_dict={c: 2})
      coord_sess.close()
      for t in threads:
        self.assertFalse(t.is_alive())
      self.assertTrue(coord.should_stop())
      self.assertTrue(coord_sess.should_stop())

  def test_stop_threads_on_close(self):
    with self.test_session() as sess:
      coord = tf.train.Coordinator()
      threads = [threading.Thread(
          target=busy_wait_for_coord_stop, args=(coord,)) for _ in range(3)]
      for t in threads:
        coord.register_thread(t)
        t.start()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      coord_sess.close()
      for t in threads:
        self.assertFalse(t.is_alive())
      self.assertTrue(coord.should_stop())
      self.assertTrue(coord_sess.should_stop())
class AbortAtNSession(object):
  """A mock session that aborts at the N-th run call."""

  def __init__(self, sess, n):
    # sess: real session that run() calls are forwarded to.
    # n: number of successful run() calls allowed before aborting.
    self._sess = sess
    self._count = n

  def close(self):
    # Nothing to release for the mock.
    pass

  def run(self, *args, **kwargs):
    # Once the budget of successful runs is used up, simulate a lost
    # session by raising AbortedError on every subsequent call.
    if self._count == 0:
      raise tf.errors.AbortedError('Aborted at N', None, None)
    self._count -= 1
    return self._sess.run(*args, **kwargs)
class RecoverableSessionTest(tf.test.TestCase):
  """_RecoverableSession tests."""

  class _SessionReturner(object):
    """Session factory that always hands back the same session."""

    def __init__(self, sess):
      self._sess = sess

    def create_session(self):
      return self._sess

  def test_properties(self):
    """The wrapper exposes the created session's graph and target."""
    with self.test_session() as sess:
      tf.constant(0.0)
      recoverable_sess = monitored_session._RecoverableSession(
          self._SessionReturner(sess))
      # Use assertEqual: assertEquals is a deprecated alias in unittest.
      self.assertEqual(sess.graph, recoverable_sess.graph)
      self.assertEqual(sess.sess_str, recoverable_sess.sess_str)

  def test_run(self):
    """run() delegates to the session produced by the factory."""
    with self.test_session() as sess:
      c = tf.constant(0)
      v = tf.identity(c)
      recoverable_sess = monitored_session._RecoverableSession(
          self._SessionReturner(sess))
      self.assertEqual(51, recoverable_sess.run(v, feed_dict={c: 51}))

  def test_recovery(self):
    """On AbortedError the wrapper discards the session and creates a new one."""
    with self.test_session() as sess:

      class StackSessionCreator(object):
        """Factory that pops pre-built abort-prone sessions off a stack."""

        def __init__(self, sess):
          self.sessions_to_use = [
              AbortAtNSession(sess, x + 1) for x in range(3)
          ]

        def create_session(self):
          return self.sessions_to_use.pop(0)

      c = tf.constant(0)
      v = tf.identity(c)
      session_creator = StackSessionCreator(sess)
      # List of 3 sessions to use for recovery. The first one aborts
      # after 1 run() call, the second after 2 run calls, the third
      # after 3 run calls.
      self.assertEqual(3, len(session_creator.sessions_to_use))
      # Make the recoverable session uses these 3 sessions in sequence by
      # passing a factory that pops from the session_to_use list.
      recoverable_sess = monitored_session._RecoverableSession(session_creator)
      self.assertEqual(
          2, len(session_creator.sessions_to_use))  # One session popped.
      # Using first session.
      self.assertEqual(51, recoverable_sess.run(v, feed_dict={c: 51}))
      self.assertEqual(
          2, len(session_creator.sessions_to_use))  # Still 2 sessions available
      # This will fail and recover by picking up the second session.
      self.assertEqual(42, recoverable_sess.run(v, feed_dict={c: 42}))
      self.assertEqual(
          1, len(session_creator.sessions_to_use))  # Still 1 session available
      self.assertEqual(33, recoverable_sess.run(v, feed_dict={c: 33}))
      self.assertEqual(
          1, len(session_creator.sessions_to_use))  # Still 1 session available
      # This will fail and recover by picking up the last session.
      self.assertEqual(24, recoverable_sess.run(v, feed_dict={c: 24}))
      self.assertEqual(
          0, len(session_creator.sessions_to_use))  # All sessions used.
      self.assertEqual(11, recoverable_sess.run(v, feed_dict={c: 11}))
      self.assertEqual(0, recoverable_sess.run(v, feed_dict={c: 0}))
      # This will fail and throw a real error as the pop() will fail.
      with self.assertRaisesRegexp(IndexError, 'pop from empty list'):
        recoverable_sess.run(v, feed_dict={c: -12})
class FakeSession(monitored_session._WrappedSession):
  """Wrapped session that records the keyword arguments of each run() call."""

  def __init__(self, sess):
    monitored_session._WrappedSession.__init__(self, sess)
    # Keyword arguments captured from the most recent run() call.
    self.args_called = {}

  def run(self, fetches, **kwargs):
    self.args_called = dict(kwargs)
    # Call run only with fetches since we directly pass other arguments.
    return monitored_session._WrappedSession.run(self, fetches)
class FakeHook(tf.train.SessionRunHook):
  """Hook that records its callbacks and can optionally request a stop."""

  def __init__(self):
    # When True, after_run() asks the run context to stop the session.
    self.should_stop = False
    # Optional SessionRunArgs returned from before_run().
    self.request = None
    # Per-callback invocation counts, keyed by callback name.
    self.call_counter = Counter()
    # Arguments captured from the most recent callbacks, for assertions.
    self.last_run_context = None
    self.last_run_values = None

  def before_run(self, run_context):
    self.call_counter['before_run'] += 1
    self.last_run_context = run_context
    return self.request

  def after_run(self, run_context, run_values):
    self.call_counter['after_run'] += 1
    self.last_run_values = run_values
    if self.should_stop:
      run_context.request_stop()
class HookedSessionTest(tf.test.TestCase):
  """Tests for monitored_session._HookedSession hook dispatch and feeds."""

  def testRunPassesAllArguments(self):
    # run() must forward feed_dict/options/run_metadata to the inner session.
    with tf.Graph().as_default(), tf.Session() as sess:
      mock_run = FakeSession(sess)
      mon_sess = monitored_session._HookedSession(sess=mock_run, hooks=[])
      a_tensor = tf.constant([0], name='a_tensor')
      sess.run(tf.initialize_all_variables())
      output = mon_sess.run(fetches=a_tensor,
                            feed_dict='a_feed',
                            options='an_option',
                            run_metadata='a_metadata')
      self.assertEqual(output, [0])
      self.assertEqual(mock_run.args_called, {
          'feed_dict': 'a_feed',
          'options': 'an_option',
          'run_metadata': 'a_metadata'
      })

  def testCallsHooksBeginEnd(self):
    # Every hook gets exactly one before_run and one after_run per run().
    with tf.Graph().as_default(), tf.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = tf.constant([0], name='a_tensor')
      sess.run(tf.initialize_all_variables())
      mon_sess.run(a_tensor)
      for hook in [mock_hook, mock_hook2]:
        self.assertEqual(
            hook.last_run_values, tf.train.SessionRunValues(results=None))
        self.assertEqual(hook.last_run_context.original_args,
                         tf.train.SessionRunArgs(a_tensor))
        self.assertEqual(hook.last_run_context.session, sess)
        self.assertEqual(hook.call_counter['before_run'], 1)
        self.assertEqual(hook.call_counter['after_run'], 1)

  def testShouldStop(self):
    # A hook requesting a stop flips the session's should_stop().
    with tf.Graph().as_default(), tf.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      tf.constant([0], name='a_tensor')
      sess.run(tf.initialize_all_variables())
      mon_sess.run(fetches='a_tensor')
      self.assertFalse(mon_sess.should_stop())
      mock_hook.should_stop = True
      mon_sess.run(fetches='a_tensor')
      self.assertTrue(mon_sess.should_stop())

  def testFetchesHookRequests(self):
    # Extra fetches requested by hooks are evaluated in the same run().
    with tf.Graph().as_default(), tf.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = tf.constant([0], name='a_tensor')
      another_tensor = tf.constant([5], name='another_tensor')
      third_tensor = tf.constant([10], name='third_tensor')
      mock_hook.request = tf.train.SessionRunArgs([another_tensor])
      mock_hook2.request = tf.train.SessionRunArgs([third_tensor])
      sess.run(tf.initialize_all_variables())
      output = mon_sess.run(fetches=a_tensor)
      self.assertEqual(output, [0])
      self.assertEqual(mock_hook.last_run_values.results, [5])
      self.assertEqual(mock_hook2.last_run_values.results, [10])

  def testOnlyHooksHaveFeeds(self):
    # Feeds contributed solely by hooks are merged into the run() call.
    with tf.Graph().as_default(), tf.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = tf.constant([0], name='a_tensor')
      b_tensor = tf.constant([0], name='b_tensor')
      add_tensor = a_tensor + b_tensor
      mock_hook.request = tf.train.SessionRunArgs(
          None, feed_dict={
              a_tensor: [5]
          })
      mock_hook2.request = tf.train.SessionRunArgs(
          None, feed_dict={
              b_tensor: [10]
          })
      sess.run(tf.initialize_all_variables())
      self.assertEqual(mon_sess.run(fetches=add_tensor), [15])

  def testBothHooksAndUserHaveFeeds(self):
    # Hook feeds and the caller's feed_dict are combined without
    # mutating the caller's dictionary.
    with tf.Graph().as_default(), tf.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = tf.constant([0], name='a_tensor')
      b_tensor = tf.constant([0], name='b_tensor')
      c_tensor = tf.constant([0], name='c_tensor')
      add_tensor = a_tensor + b_tensor + c_tensor
      mock_hook.request = tf.train.SessionRunArgs(
          None, feed_dict={
              a_tensor: [5]
          })
      mock_hook2.request = tf.train.SessionRunArgs(
          None, feed_dict={
              b_tensor: [10]
          })
      sess.run(tf.initialize_all_variables())
      feed_dict = {c_tensor: [20]}
      self.assertEqual(
          mon_sess.run(fetches=add_tensor, feed_dict=feed_dict), [35])
      # User feed_dict should not be changed
      self.assertEqual(len(feed_dict), 1)

  def testHooksFeedConflicts(self):
    # Two hooks feeding the same tensor is an error.
    with tf.Graph().as_default(), tf.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = tf.constant([0], name='a_tensor')
      b_tensor = tf.constant([0], name='b_tensor')
      add_tensor = a_tensor + b_tensor
      mock_hook.request = tf.train.SessionRunArgs(
          None, feed_dict={
              a_tensor: [5]
          })
      mock_hook2.request = tf.train.SessionRunArgs(
          None, feed_dict={
              a_tensor: [10]
          })
      sess.run(tf.initialize_all_variables())
      with self.assertRaisesRegexp(RuntimeError, 'Same tensor is fed'):
        mon_sess.run(fetches=add_tensor)

  def testHooksAndUserFeedConflicts(self):
    # A hook and the caller feeding the same tensor is also an error.
    with tf.Graph().as_default(), tf.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = tf.constant([0], name='a_tensor')
      b_tensor = tf.constant([0], name='b_tensor')
      add_tensor = a_tensor + b_tensor
      mock_hook.request = tf.train.SessionRunArgs(
          None, feed_dict={
              a_tensor: [5]
          })
      mock_hook2.request = tf.train.SessionRunArgs(
          None, feed_dict={
              b_tensor: [10]
          })
      sess.run(tf.initialize_all_variables())
      with self.assertRaisesRegexp(RuntimeError, 'Same tensor is fed'):
        mon_sess.run(fetches=add_tensor, feed_dict={b_tensor: [10]})
class RaiseOnceAtCountN(tf.train.SessionRunHook):
  """Hook that raises an Exception at step N."""

  def __init__(self, n, ex):
    # Countdown of before_run() calls until the exception fires.
    self.n = n
    # The exception instance (or class) to raise.
    self.ex = ex
    # Flipped to True once the exception has been raised.
    self.raised = False

  def before_run(self, run_context):
    # Raise exactly once, the first time the countdown reaches zero.
    self.n -= 1
    if self.n == 0 and not self.raised:
      self.raised = True
      raise self.ex
    return None
class MonitoredSessionTest(tf.test.TestCase):
  """MonitoredSession tests."""

  def test_defaults(self):
    """A default MonitoredSession initializes variables before use."""
    with tf.Graph().as_default():
      a_var = tf.Variable(0)
      with tf.train.MonitoredSession() as session:
        self.assertEqual(0, session.run(a_var))

  def test_last_step(self):
    """StopAtStepHook(last_step=N) stops exactly at global step N."""
    logdir = _test_dir(self.get_temp_dir(), 'test_last_step')
    with tf.Graph().as_default():
      gstep = tf.contrib.framework.get_or_create_global_step()
      do_step = tf.assign_add(gstep, 1)
      # Run till step 3 and save.
      hooks = [tf.train.StopAtStepHook(last_step=3)]
      scaffold = tf.train.Scaffold().finalize()
      with tf.train.MonitoredSession(hooks=hooks) as session:
        self.assertEqual(0, session.run(gstep))
        self.assertFalse(session.should_stop())
        self.assertEqual(1, session.run(do_step))
        self.assertFalse(session.should_stop())
        self.assertEqual(2, session.run(do_step))
        self.assertFalse(session.should_stop())
        self.assertEqual(3, session.run(do_step))
        self.assertTrue(session.should_stop())
        save_path = scaffold.saver.save(session._coordinated_creator.tf_sess,
                                        os.path.join(logdir, 'step-3'))
      # Run till step 5 and save.
      def load_ckpt(scaffold, sess):
        scaffold.saver.restore(sess, save_path)

      session_creator = tf.train.ChiefSessionCreator(
          tf.train.Scaffold(init_fn=load_ckpt))
      hooks = [tf.train.StopAtStepHook(last_step=5)]
      with tf.train.MonitoredSession(
          hooks=hooks, session_creator=session_creator) as session:
        self.assertEqual(3, session.run(gstep))
        self.assertFalse(session.should_stop())
        self.assertEqual(4, session.run(do_step))
        self.assertFalse(session.should_stop())
        self.assertEqual(5, session.run(do_step))
        self.assertTrue(session.should_stop())

  def test_num_steps(self):
    """StopAtStepHook(num_steps=N) stops after N additional steps."""
    logdir = _test_dir(self.get_temp_dir(), 'test_num_steps')
    with tf.Graph().as_default():
      gstep = tf.contrib.framework.get_or_create_global_step()
      do_step = tf.assign_add(gstep, 1)
      # Do 3 steps and save.
      hooks = [tf.train.StopAtStepHook(num_steps=3)]
      scaffold = tf.train.Scaffold().finalize()
      with tf.train.MonitoredSession(hooks=hooks) as session:
        session.run(do_step)
        self.assertFalse(session.should_stop())
        session.run(do_step)
        self.assertFalse(session.should_stop())
        session.run(do_step)
        self.assertTrue(session.should_stop())
        save_path = scaffold.saver.save(session._coordinated_creator.tf_sess,
                                        os.path.join(logdir, 'step-3'))
      # Restore and do 4 steps.
      def load_ckpt(scaffold, sess):
        scaffold.saver.restore(sess, save_path)

      session_creator = tf.train.ChiefSessionCreator(
          scaffold=tf.train.Scaffold(init_fn=load_ckpt))
      hooks = [tf.train.StopAtStepHook(num_steps=4)]
      with tf.train.MonitoredSession(
          hooks=hooks, session_creator=session_creator) as session:
        self.assertEqual(4, session.run(do_step))
        self.assertFalse(session.should_stop())
        session.run(do_step)
        self.assertFalse(session.should_stop())
        session.run(do_step)
        self.assertFalse(session.should_stop())
        session.run(do_step)
        self.assertTrue(session.should_stop())

  # This set of tests, verifies the supervised session behavior when exceptions
  # are raised next to the innermost session run() call.

  def test_recovery(self):
    """A restarted MonitoredSession recovers state from the checkpoint dir."""
    logdir = _test_dir(self.get_temp_dir(), 'test_recovery')
    with tf.Graph().as_default():
      gstep = tf.contrib.framework.get_or_create_global_step()
      do_step = tf.assign_add(gstep, 1)
      scaffold = tf.train.Scaffold()
      # Use a hook to save the model every 100 steps. It also saves it at
      # the end.
      hooks = [tf.train.CheckpointSaverHook(
          logdir, save_steps=1, scaffold=scaffold)]
      with tf.train.MonitoredSession(
          session_creator=tf.train.ChiefSessionCreator(
              scaffold, checkpoint_dir=logdir),
          hooks=hooks) as session:
        self.assertEqual(0, session.run(gstep))
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
      # A restart will find the checkpoint and recover automatically.
      with tf.train.MonitoredSession(
          session_creator=tf.train.ChiefSessionCreator(
              scaffold, checkpoint_dir=logdir)) as session:
        self.assertEqual(2, session.run(gstep))

  def test_retry_on_aborted_error(self):
    # Tests that we silently retry on abort. Note that this does not test
    # recovery as we do not use a CheckpointSaver in this test.
    with tf.Graph().as_default():
      gstep = tf.contrib.framework.get_or_create_global_step()
      do_step = tf.assign_add(gstep, 1)
      hook = RaiseOnceAtCountN(4, tf.errors.AbortedError(None, None, 'Abort'))
      with tf.train.MonitoredSession(hooks=[hook]) as session:
        self.assertEqual(0, session.run(gstep))
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
        self.assertFalse(session.should_stop())
        # Here at step 3, the hook triggers and raises AbortedError. The
        # MonitoredSession automatically retries and restart from a freshly
        # initialized session, so the step is back to 0 and running do_step
        # moves it to 1.
        self.assertEqual(1, session.run(do_step))
        self.assertFalse(session.should_stop())
        self.assertTrue(hook.raised)
        self.assertEqual(2, session.run(do_step))
        self.assertFalse(session.should_stop())

  def test_recover_and_retry_on_aborted_error(self):
    # Tests that we silently retry and recover on abort. This test uses
    # a CheckpointSaver to have something to recover from.
    logdir = _test_dir(self.get_temp_dir(),
                       'test_recover_and_retry_on_aborted_error')
    with tf.Graph().as_default():
      gstep = tf.contrib.framework.get_or_create_global_step()
      do_step = tf.assign_add(gstep, 1)
      scaffold = tf.train.Scaffold()
      abort_hook = RaiseOnceAtCountN(
          4, tf.errors.AbortedError(None, None, 'Abort'))
      # Save after each step.
      ckpt_hook = tf.train.CheckpointSaverHook(
          logdir, save_steps=1, scaffold=scaffold)
      hooks = [abort_hook, ckpt_hook]
      with tf.train.MonitoredSession(
          session_creator=tf.train.ChiefSessionCreator(
              scaffold, checkpoint_dir=logdir),
          hooks=hooks) as session:
        self.assertEqual(0, session.run(gstep))
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
        self.assertFalse(session.should_stop())
        # Here at step 3, the hook triggers and raises AbortedError. The
        # MonitoredSession automatically restores and retries.
        self.assertEqual(3, session.run(do_step))
        self.assertTrue(abort_hook.raised)
        self.assertFalse(session.should_stop())
        self.assertEqual(4, session.run(do_step))
        self.assertFalse(session.should_stop())

  def test_exit_cleanly_on_out_of_range_exception(self):
    # Tests that we stop cleanly when OutOfRange is raised.
    with tf.Graph().as_default():
      gstep = tf.contrib.framework.get_or_create_global_step()
      do_step = tf.assign_add(gstep, 1)
      hook = RaiseOnceAtCountN(2, tf.errors.OutOfRangeError(None, None, 'EOI'))
      session = tf.train.MonitoredSession(hooks=[hook])
      # session should cleanly exit from the context.
      with session:
        self.assertEqual(0, session.run(gstep))
        self.assertFalse(session.should_stop())
        # Here at step 1, the hook triggers and raises OutOfRange. The
        # session should go into should_stop() mode. It should raise the
        # exception. So next step should not be executed.
        session.run(do_step)
        # self.fail() is the idiomatic guard for an unreachable line
        # (clearer than asserting a constant-False condition).
        self.fail('OutOfRangeError should have exited the with-block.')
      self.assertTrue(session.should_stop())

  def test_exit_cleanly_on_stop_iteration_exception(self):
    # Tests that we stop cleanly when StopIteration is raised.
    with tf.Graph().as_default():
      gstep = tf.contrib.framework.get_or_create_global_step()
      do_step = tf.assign_add(gstep, 1)
      hook = RaiseOnceAtCountN(2, StopIteration)
      session = tf.train.MonitoredSession(hooks=[hook])
      # session should cleanly exit from the context.
      with session:
        self.assertEqual(0, session.run(gstep))
        self.assertFalse(session.should_stop())
        # Here at step 1, the hook triggers and raises StopIteration. The
        # session should go into should_stop() mode. It should raise the
        # exception. So next step should not be executed.
        session.run(do_step)
        self.fail('StopIteration should have exited the with-block.')
      self.assertTrue(session.should_stop())

  def test_regular_exception_pass_through_run(self):
    # Tests that regular exceptions just pass through a "with
    # MonitoredSession" block and set the session in stop mode.
    with tf.Graph().as_default():
      gstep = tf.contrib.framework.get_or_create_global_step()
      do_step = tf.assign_add(gstep, 1)
      hook = RaiseOnceAtCountN(4, RuntimeError('regular exception'))
      session = tf.train.MonitoredSession(hooks=[hook])
      with self.assertRaisesRegexp(RuntimeError, 'regular exception'):
        with session:
          self.assertEqual(0, session.run(gstep))
          self.assertEqual(1, session.run(do_step))
          self.assertEqual(2, session.run(do_step))
          self.assertFalse(session.should_stop())
          # This triggers the hook and raises the exception
          session.run(do_step)
          # We should not hit this
          self.fail('RuntimeError should have propagated out of run().')
      self.assertTrue(hook.raised)
      self.assertTrue(session.should_stop())

  def test_regular_exception_reported_to_coord_pass_through_run(self):
    # Tests that regular exceptions reported to the coordinator from a thread
    # passes through a "run()" call within a "with MonitoredSession" block and
    # set the session in stop mode.
    with tf.Graph().as_default():
      gstep = tf.contrib.framework.get_or_create_global_step()
      session = tf.train.MonitoredSession()
      run_performed_without_error = False
      with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):
        with session:
          self.assertEqual(0, session.run(gstep))
          # Report an exception through the coordinator.
          try:
            raise RuntimeError('a thread wants to stop')
          except RuntimeError as e:
            session._coordinated_creator.coord.request_stop(e)
          # Call run() which should perform normally.
          self.assertEqual(0, session.run(gstep))
          run_performed_without_error = True
      self.assertTrue(run_performed_without_error)

  def test_regular_exception_reported_to_coord_pass_through_return(self):
    # Tests that regular exceptions reported to the coordinator from a thread
    # passes through returning from a "with MonitoredSession" block and
    # set the session in stop mode.
    with tf.Graph().as_default():
      gstep = tf.contrib.framework.get_or_create_global_step()
      session = tf.train.MonitoredSession()
      with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):
        with session:
          self.assertEqual(0, session.run(gstep))
          # Report an exception through the coordinator.
          try:
            raise RuntimeError('a thread wants to stop')
          except RuntimeError as e:
            session._coordinated_creator.coord.request_stop(e)
          self.assertTrue(session.should_stop())

  # This set of tests, verifies the session behavior when exceptions are raised
  # from code inside a "with MonitoredSession:" context.

  def test_stop_cleanly_when_no_exception_in_with_body(self):
    # Tests that the session is closed on a normal exit of the with-body.
    with tf.Graph().as_default():
      gstep = tf.contrib.framework.get_or_create_global_step()
      do_step = tf.assign_add(gstep, 1)
      session = tf.train.MonitoredSession()
      with session:
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
        self.assertFalse(session.should_stop())
      # Should have closed.
      self.assertTrue(session.should_stop())
      self.assertTrue(session._is_closed())

  def test_raises_regular_exceptions_in_with_body(self):
    # Tests that regular exceptions in "with body" are seen outside.
    with tf.Graph().as_default():
      gstep = tf.contrib.framework.get_or_create_global_step()
      do_step = tf.assign_add(gstep, 1)
      session = tf.train.MonitoredSession()
      # We should see that exception.
      with self.assertRaisesRegexp(RuntimeError, 'regular exception'):
        with session:
          self.assertEqual(1, session.run(do_step))
          self.assertEqual(2, session.run(do_step))
          self.assertFalse(session.should_stop())
          # Will be visible outside the "with body".
          raise RuntimeError('regular exception')
      # Should have closed.
      self.assertTrue(session.should_stop())
      self.assertTrue(session._is_closed())

  def test_graph(self):
    """The session exposes the graph it was created under."""
    g = tf.Graph()
    with g.as_default():
      session = tf.train.MonitoredSession()
    self.assertEqual(g, session.graph)
if __name__ == '__main__':
  # Run every test case in this module through the TensorFlow test runner.
  tf.test.main()
|
download_task_runner.py | import os
import sys
import importlib
import argparse
import Queue
from threading import Thread
from time import time, localtime, strftime, sleep
from ConfigParser import SafeConfigParser
import random
from prettytable import PrettyTable
import runner_util
from benchcloud.file_utils import file_util
from benchcloud.traffic import capturer
class DownloadTaskRunner(object):
    """Benchmark runner that downloads a remote directory with worker threads.

    Reads an INI-style configuration file, lists files through a pluggable
    driver, downloads them concurrently, and records per-operation timings.
    NOTE: this module is written for Python 2 (print statements, Queue,
    ConfigParser, dict.iteritems).
    """

    def __init__(self, conf_filename):
        # conf_filename: path to the INI configuration file.
        self.load_conf(conf_filename)
        # init task queue, log queue, operation_times map
        # element in task queue: (seq, remote_filename, local_filename)
        self.task_queue = Queue.Queue()
        self.log_queue = Queue.Queue()
        # {operation sequence -> operation time in milliseconds, ...}
        self.operation_times = {}
        self.init_logging()
        self.load_testers()

    def load_conf(self, filename):
        """Load configuration file."""
        if os.path.exists(filename):
            try:
                self.parser = SafeConfigParser()
                self.parser.read(filename)
            except IOError:
                print "ERROR opening config file: {}".format(filename)
                sys.exit(1)
        else:
            print 'The configure file does not exist: {}'.format(filename)
            sys.exit(-1)
        # driver conf
        self.driver_conf = dict(self.parser.items('driver'))
        # basic test conf
        self.test_conf = dict(self.parser.items('test'))
        self.description = self.test_conf['description']
        self.sleep_enabled = self.parser.getboolean('test', 'sleep')
        if self.sleep_enabled:
            self.sleep_seconds = self.parser.getint('test', 'sleep_seconds')
        # Optional random start delay bounds (seconds); both must be set
        # for the delay to take effect (see download_worker).
        self.random_start_sleep_min = self.test_conf.get('random_start_sleep_min', None)
        self.random_start_sleep_max = self.test_conf.get('random_start_sleep_max', None)
        if self.random_start_sleep_min:
            self.random_start_sleep_min = int(self.random_start_sleep_min)
        if self.random_start_sleep_max:
            self.random_start_sleep_max = int(self.random_start_sleep_max)
        # concurrent conf
        if self.parser.has_section('concurrent') and self.parser.has_option('concurrent', 'threads'):
            self.task_thread_num = max(1, self.parser.getint('concurrent', 'threads'))
        else:
            self.task_thread_num = 1

    def init_logging(self):
        """Open the (unbuffered) log file if logging is enabled in the config."""
        self.logging_enabled = self.parser.getboolean('logging', 'enabled')
        # NOTE(review): when logging is disabled, self.logfile_obj is never
        # created, yet log()/log_raw() are still called from run() — verify
        # whether configs with logging disabled are actually supported.
        if self.logging_enabled:
            log_filename = self.parser.get('logging', 'log_file')
            self.logfile_obj = open(log_filename, mode='w', buffering=0)

    def load_testers(self):
        """Instantiate the storage driver named in the [driver] section."""
        # driver
        driver_module_name, driver_class_name = runner_util.parse_class(self.driver_conf['class'])
        driver_module = importlib.import_module(driver_module_name)
        driver_class = getattr(driver_module, driver_class_name)
        self.driver = driver_class()
        self.driver.connect()

    def log(self, message):
        """Write `message` to the log file with a millisecond timestamp prefix."""
        millis = int(round(time() * 1000))
        timestamp = "[{}] {} |".format(millis, strftime("%d %b %Y %H:%M:%S", localtime()))
        whole_message = u"{} {}\n".format(timestamp, message)
        self.log_raw(whole_message)

    def log_raw(self, raw_message):
        """Write `raw_message` to the log file verbatim, UTF-8 encoded."""
        self.logfile_obj.write(raw_message.encode('utf8'))

    def make_statistics(self):
        """Make statistics for operation time
        Return:
            A pretty message showing the statistics
        """
        # NOTE(review): min()/len() below fail if no operation completed;
        # assumes at least one download finished successfully.
        table_op_time = PrettyTable(['Operation', 'Time'])
        table_op_time.padding_width = 1
        for i, t in self.operation_times.iteritems():
            table_op_time.add_row(['#{}'.format(i), '{}ms'.format(t)])
        table_stat = PrettyTable(['Min', 'Max', 'Average'])
        table_stat.padding_width = 1
        t_min = min(self.operation_times.itervalues())
        t_max = max(self.operation_times.itervalues())
        t_avg = sum(self.operation_times.itervalues()) / len(self.operation_times)
        table_stat.add_row(['{}ms'.format(t) for t in (t_min, t_max, t_avg)])
        return '{}\n{}'.format(str(table_op_time), str(table_stat))

    def download_worker(self, worker_seq):
        """A thread worker for downloading files one by one from task queue.
        Should be a daemon thread.
        """
        # random sleep before start of operation
        if self.random_start_sleep_min is not None and self.random_start_sleep_max is not None:
            random_start_sleep = random.randint(
                self.random_start_sleep_min, self.random_start_sleep_max)
            self.log_queue.put('Worker #{}: Sleep before start: {}s'.format(
                worker_seq, random_start_sleep))
            sleep(random_start_sleep)
        while True:
            # get task
            operation_seq, remote_filename, local_filename = self.task_queue.get()
            self.log_queue.put(u'Start downloading file #{}: {}'.format(operation_seq, remote_filename))
            # download the file
            millis_start = int(round(time() * 1000))
            self.driver.download(remote_filename=remote_filename, local_filename=local_filename)
            millis_end = int(round(time() * 1000))
            self.log_queue.put("Operation #{} finished. ({}ms)".format(operation_seq, millis_end-millis_start))
            # Per-key dict assignment; each operation_seq is written by one
            # worker only, so no lock is taken here.
            self.operation_times[operation_seq] = millis_end-millis_start
            # notify that the task is handled
            self.task_queue.task_done()
            # Sleep
            if self.sleep_enabled:
                self.log_queue.put("Operation #{}: About to sleep for {} second(s)...".format(
                    operation_seq, self.sleep_seconds))
                sleep(self.sleep_seconds)
                self.log_queue.put("Operation #{}: Sleep finished, now wake up.".format(operation_seq))

    def log_worker(self):
        """A thread worker for reading logging msg from msg queue and writing to log file.
        Note that the log thread should be a daemon thread.
        """
        while True:
            log_msg = self.log_queue.get()
            self.log(log_msg)

    def run(self):
        """Enqueue one download task per remote regular file, run the worker
        threads to completion, then log and print timing statistics."""
        # start log thread
        log_thread = Thread(target=self.log_worker)
        log_thread.setDaemon(True)
        log_thread.start()
        self.log("Start testing: {}".format(self.description))
        # bookkeeping start time for all operations
        millis_start = int(round(time() * 1000))
        # sending all tasks into task queue
        # a task is just a number indication its operation sequence
        local_dir = self.test_conf['local_dir']
        remote_dir = self.test_conf['remote_dir']
        files_metadata = self.driver.list_files(remote_dir=remote_dir)
        num_regular_file = 0
        for i, file_md in enumerate(files_metadata):
            # Directories are listed but not downloaded.
            if file_md['is_dir']:
                continue
            remote_filename = file_md['path']
            local_filename = os.path.join(local_dir, file_util.path_leaf(remote_filename))
            self.task_queue.put((num_regular_file, remote_filename, local_filename))
            num_regular_file += 1
        num_operation = num_regular_file
        print "Number of operation: {}".format(num_operation)
        # starting task worker threads
        for i in range(self.task_thread_num):
            t = Thread(target=self.download_worker, kwargs={'worker_seq': i})
            t.setDaemon(True)
            t.start()
        print "invoking task_queue.join()..."
        # wait for all tasks to be handled
        self.task_queue.join()
        # get total amount of time spent for all operations
        millis_end = int(round(time() * 1000))
        millis_total = millis_end - millis_start
        print "task_queue.join() returned"
        # all operations should have finished now,
        # do not need to put to log queue first
        self.log('\nAll {} operations finished! :)'.format(num_operation))
        # log statistics for operation time
        self.log_raw('\nStatistics of all operations:\n')
        statistics = self.make_statistics()
        self.log_raw(statistics)
        self.log_raw('\n')
        self.log('Time spent for all operations: {}ms'.format(millis_total))
        # print statistics
        print 'All operations finished!'
        print ''
        print statistics
        print '\n'
        print 'Time spent for all operations: {}ms'.format(millis_total)

    def auth_driver(self):
        """Acquire authentication info needed to use driver"""
        self.driver.acquire_access_token()
def main(prog=None, args=None):
    """Parse command-line options and launch the download benchmark.

    prog: program name shown in argparse help output.
    args: argument list to parse; None means use sys.argv[1:].
    """
    arg_parser = argparse.ArgumentParser(
        prog=prog,
        description='Downloader: Execute benchmarking predefined in a configuration file.')
    arg_parser.add_argument('-f', action='store', dest='conf_filename', help='Configuration file', required=True)
    arg_parser.add_argument('-a', action='store_true', default=False, dest='auth', help='Make authentication')
    arg_parser.add_argument('-c', action='store', dest='capturer_conf_filename', default='', help='Capturer configuration file')
    parsed = arg_parser.parse_args(args=args)

    runner = DownloadTaskRunner(parsed.conf_filename)
    # Optionally authenticate the driver before running.
    if parsed.auth:
        runner.auth_driver()
    # Optionally start a traffic capturer alongside the benchmark.
    if parsed.capturer_conf_filename:
        the_capturer = capturer.from_conf(parsed.capturer_conf_filename)
        the_capturer.start()
    runner.run()
if __name__ == '__main__':
    # Pass the CLI arguments via the ``args`` keyword: the previous positional
    # call main(sys.argv[1:]) bound the argument list to the ``prog``
    # parameter (the program name) and left ``args`` as None.
    main(args=sys.argv[1:])
|
api_test.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import multiprocessing
import os
import unittest
from unittest.mock import patch
import torch.distributed as dist
import torch.distributed.rpc as rpc
import torchelastic.distributed.app as app
from test_utils import find_free_port, is_tsan
from torch.distributed.rpc.backend_registry import BackendType
def echo(msg):
    """Identity function used as a trivial RPC target in the tests below."""
    return msg
class TestStore:
    """Minimal stand-in for a distributed key/value store used by the tests.

    It performs no real storage: get() just returns a deterministic marker
    string derived from the key.
    """

    def __init__(self, _name="", _source_store=None):
        self.name = _name
        self.source_store = _source_store

    def get(self, key: str):
        """Return the marker value ``retrieved:<key>`` for *key*."""
        return "retrieved:" + key
class TestRpc(unittest.TestCase):
    """Tests for torchelastic's RPC app helpers (init_app / init_rpc)."""

    def test_init_app(self):
        # Smoke test: init_app with a role and no backend options must not raise.
        app.init_app(
            role="trainer", backend=BackendType.PROCESS_GROUP, backend_options=None
        )

    # Innermost decorator's mock is the first mock argument (rpc_backend_mock).
    @patch("torch.distributed.autograd._init")
    @patch("torch.distributed.rpc.api._init_rpc_backend")
    def test_init_rpc(self, rpc_backend_mock, autograd_mock):
        # RANK/WORLD_SIZE env vars stand in for a real rendezvous.
        os.environ["RANK"] = "0"
        os.environ["WORLD_SIZE"] = "1"
        store = TestStore()
        app.init_rpc(
            name="trainer_worker",
            backend=BackendType.PROCESS_GROUP,
            backend_options=None,
            store=store,
        )
        # init_rpc must initialize both distributed autograd and the backend.
        autograd_mock.assert_called_once()
        rpc_backend_mock.assert_called_once()

    @unittest.skipIf(is_tsan(), "test incompatible with tsan")
    def test_custom_init_rpc(self):
        # End-to-end: master and worker processes rendezvous over env:// and
        # exchange one message via rpc_sync.
        def init_rpc(rank, world_size, port, name):
            os.environ["RANK"] = str(rank)
            os.environ["WORLD_SIZE"] = str(world_size)
            os.environ["MASTER_ADDR"] = "localhost"
            os.environ["MASTER_PORT"] = str(port)
            rendezvous_iterator = dist.rendezvous(
                "env://", rank=rank, world_size=world_size
            )
            store, _, _ = next(rendezvous_iterator)
            app.init_rpc(
                name=name,
                backend=BackendType.PROCESS_GROUP,
                backend_options=None,
                store=store,
            )

        def master(msg, port):
            init_rpc(rank=0, world_size=2, port=port, name="master")
            ret = rpc.rpc_sync(to="worker", func=echo, args=(msg,))
            rpc.shutdown()
            return ret

        def worker(port):
            init_rpc(rank=1, world_size=2, port=port, name="worker")
            rpc.shutdown()

        # Reserve a free port, then release it for the rendezvous to bind.
        # NOTE(review): small race between close() and the bind — acceptable
        # for a test, but can flake on busy hosts.
        sock = find_free_port()
        port = sock.getsockname()[1]
        sock.close()
        worker_proc = multiprocessing.Process(target=worker, args=(port,))
        worker_proc.start()
        expected_msg = "test_message_on_worker"
        actual_msg = master(expected_msg, port)
        worker_proc.join()
        self.assertEqual(expected_msg, actual_msg)

    # The tests below are placeholders: not implemented yet.
    def test_get_worker_names(self):
        pass

    def test_get_role_info(self):
        pass

    def test_get_all_roles(self):
        pass

    def test_wait_all(self):
        pass

    def test_rpc_sync_on_role(self):
        pass

    def test_rpc_async_on_role(self):
        pass

    def test_rpc_remote_on_role(self):
        pass

    def test_init_process_group(self):
        pass
|
deadline_test.py | import os
import sys
import time
from absl import app
from absl import flags
from multiprocessing import Process
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import erdos.graph
from erdos.data_stream import DataStream
from erdos.message import Message
from erdos.op import Op
from erdos.timestamp import Timestamp
from erdos.utils import deadline
try:
from std_msgs.msg import String
except ModuleNotFoundError:
# ROS not installed
String = str
FLAGS = flags.FLAGS
flags.DEFINE_string('framework', 'ros',
'Execution framework to use: ros | ray.')
MAX_MSG_COUNT = 50
class PublisherOp(Op):
    """Operator that publishes MAX_MSG_COUNT messages, deliberately missing
    its deadline on every even-numbered message to exercise the deadline
    callback machinery."""

    def __init__(self, name):
        super(PublisherOp, self).__init__(name)
        # Sequence number of the next message to publish.
        self.idx = 0

    @staticmethod
    def setup_streams(input_streams):
        # Publisher has no inputs; it only exposes the 'pub_out' stream.
        return [DataStream(data_type=String, name='pub_out')]

    # Deadline of 10 (presumably milliseconds — confirm against
    # erdos.utils.deadline); on a miss, on_next_deadline_miss is invoked.
    @deadline(10, "on_next_deadline_miss")
    def publish_msg(self):
        # Sleep 20 ms on even indices so those invocations exceed the deadline.
        if self.idx % 2 == 0:
            time.sleep(0.02)
        data = 'data %d' % self.idx
        output_msg = Message(data, Timestamp(coordinates=[0]))
        self.get_output_stream('pub_out').send(output_msg)
        self.idx += 1

    def execute(self):
        # Publish a fixed number of messages, then return.
        for _ in range(0, MAX_MSG_COUNT):
            self.publish_msg()

    def on_next_deadline_miss(self):
        # Only even-indexed messages sleep, so only they may miss the deadline.
        assert self.idx % 2 == 0
        print('%s missed deadline on data %d' % (self.name, self.idx))
class SubscriberOp(Op):
    """Operator that consumes messages and deliberately misses its deadline
    on every even-numbered message, mirroring PublisherOp."""

    def __init__(self, name, spin=True):
        super(SubscriberOp, self).__init__(name)
        # Whether execute() should busy-wait until all messages arrive
        # (needed for frameworks where returning would stop the operator).
        self.is_spin = spin
        # Count of messages received so far.
        self.idx = 0

    @staticmethod
    def setup_streams(input_streams):
        # Register the message callback and expose an (unused) output stream.
        input_streams.add_callback(SubscriberOp.on_msg)
        return [DataStream(data_type=String, name='sub_out')]

    # Deadline of 10 (presumably milliseconds — confirm against
    # erdos.utils.deadline); the 20 ms sleep on even indices forces misses.
    @deadline(10, "on_next_deadline_miss")
    def on_msg(self, msg):
        if self.idx % 2 == 0:
            time.sleep(0.02)
        self.idx += 1

    def execute(self):
        # Keep the operator alive until all expected messages were handled.
        if self.is_spin:
            while self.idx < MAX_MSG_COUNT:
                time.sleep(0.1)

    def on_next_deadline_miss(self):
        # Only even-indexed callbacks sleep, so only they may miss.
        assert self.idx % 2 == 0
        print('%s missed deadline on data %d' % (self.name, self.idx))
def run_graph(spin):
    """Build a two-operator publisher/subscriber graph and execute it.

    Args:
        spin: forwarded to SubscriberOp; whether its execute() busy-waits.
    """
    current_graph = erdos.graph.get_current_graph()
    publisher = current_graph.add(PublisherOp, name='publisher')
    subscriber = current_graph.add(
        SubscriberOp, name='subscriber', init_args={'spin': spin})
    current_graph.connect([publisher], [subscriber])
    current_graph.execute(FLAGS.framework)
def main(argv):
    """Entry point: run the graph in a subprocess for 5 s, then kill it.

    The subscriber spins only when not running under Ray.
    """
    use_spin = FLAGS.framework != 'ray'
    graph_proc = Process(target=run_graph, args=(use_spin,))
    graph_proc.start()
    time.sleep(5)
    graph_proc.terminate()
# Script entry point: absl parses flags and then invokes main().
if __name__ == '__main__':
    app.run(main)
|
io_wrap.py | #!/usr/bin/env python
from __future__ import print_function
"""Utilities for capturing output from the current process and processes it
starts.
This file is also a test harness for I/O wrapping: run it as a script with a
shell command in the commandline arguments to see how PTY redirection behaves
for that command.
Watch out for bugs in this module. Uncaught exceptions here may prevent their
own tracebacks from being written to the terminal. Disable STDERR wrapping by
setting WANDB_DEBUG to 'true'.
== Resources
The TTY demystified. Great article on Linux terminals, sessions and process groups.
http://www.linusakesson.net/programming/tty/
Pymux, a Python implementation of tmux:
https://github.com/jonathanslenders/pymux
PTY module source code:
https://github.com/python/cpython/blob/master/Lib/pty.py
PTYProcess from Pexpect, a Python implementation of expect (good *nix support):
https://github.com/pexpect/ptyprocess/blob/master/ptyprocess/ptyprocess.py
https://stackoverflow.com/questions/4675728/redirect-stdout-to-a-file-in-python/22434262#22434262
https://eli.thegreenplace.net/2015/redirecting-all-kinds-of-stdout-in-python/
https://stackoverflow.com/questions/34186035/can-you-fool-isatty-and-log-stdout-and-stderr-separately?rq=1
"""
import atexit
import functools
import io
import logging
import os
try:
import pty
import tty
import termios
except ImportError: # windows
pass
import subprocess
import sys
import tempfile
import threading
import six
from six.moves import queue, shlex_quote
logger = logging.getLogger(__name__)
class SimpleTee(object):
    """Monkey-patch ``source_io.write`` so every write also lands on
    ``destination_io`` (as UTF-8 bytes)."""

    def __init__(self, source_io, destination_io):
        # Capture the original write before replacing it with ours.
        self.source_write = source_io.write
        self.destination = destination_io
        source_io.write = self.write

    def write(self, data):
        """Write *data* to the original stream, then mirror it as bytes."""
        self.source_write(data)
        try:
            # Strings get encoded; bytes fall through unchanged.
            encoded = data.encode('utf-8')
        except AttributeError:
            encoded = data
        self.destination.write(encoded)
class Tee(object):
    """Reads raw data from a file and writes it to other files.
    Writes synchronously to one file and asynchronously to any number of others.
    """

    @classmethod
    def pty(cls, sync_dst_file, *async_dst_files):
        """Build a Tee whose writable end (``tee_file``) is a PTY slave.

        Using a PTY makes programs writing to ``tee_file`` believe they are
        attached to a terminal (isatty() is true).
        """
        master_fd, slave_fd = pty.openpty()
        # raw mode so carriage returns etc. don't get added by the terminal driver,
        # bash for windows blows up on this so we catch the error and do nothing
        try:
            tty.setraw(master_fd)
        except termios.error:
            pass
        master = os.fdopen(master_fd, 'rb')
        tee = cls(master, sync_dst_file, *async_dst_files)
        tee.tee_file = os.fdopen(slave_fd, 'wb')
        return tee

    @classmethod
    def pipe(cls, sync_dst_file, *async_dst_files):
        """Build a Tee whose writable end (``tee_file``) is a plain OS pipe."""
        read_fd, write_fd = os.pipe()
        read_file = os.fdopen(read_fd, 'rb')
        tee = cls(read_file, sync_dst_file, *async_dst_files)
        tee.tee_file = os.fdopen(write_fd, 'wb')
        return tee

    def __init__(self, src_file, sync_dst_file, *async_dst_files):
        """Constructor.
        Args:
            src_file: file to read from.
            sync_dst_file: file to write to synchronously when `self.write()` is
                called.
            async_dst_files: files to write to asynchronously
        """
        self.tee_file = None  # convenience for users that want a writable file to put things into the tee
        self._src_file = src_file
        self._sync_dst_file = sync_dst_file
        self._async_dst_files = list(async_dst_files)
        self._write_queues = []
        self._write_threads = []
        # One queue + daemon writer thread per async destination.
        for f in async_dst_files:
            q = queue.Queue()
            t = spawn_reader_writer(q.get, functools.partial(self._write, f))
            self._write_queues.append(q)
            self._write_threads.append(t)
        src_fd = self._src_file.fileno()

        def read():
            # We use `os.read()` instead of `file.read()` because `os.read()` will return
            # any non-empty amount of data, blocking only until there is data available to
            # be read. On the other hand, `file.read()` waits until its buffer is full.
            # Since we use this code for console output, `file.read()`'s stuttering output
            # is undesirable.
            try:
                return os.read(src_fd, 1024)
            except OSError:
                # errno 5 on linux; happens with PTYs if the slave is closed. mac os just
                # returns b'' from os.read().
                return six.b('')

        self._read_thread = spawn_reader_writer(read, self._write_to_all)

    def _write_to_all(self, data):
        """Fan *data* out: synchronously to the sync dest, queued to the rest."""
        # print('writing', repr(data))
        self._write(self._sync_dst_file, data)
        for q in self._write_queues:
            q.put(data)

    @classmethod
    def _write(_, f, data):
        # NOTE: classmethod whose cls parameter is deliberately named `_`
        # (unused); it behaves like a static helper.
        if not data:
            # windows explodes if you try to write an empty string to a terminal:
            # OSError: [WinError 87] The parameter is incorrect
            # https://github.com/pytest-dev/py/issues/103
            return
        i = f.write(data)
        if i is not None:  # python 3 w/ unbuffered i/o: we need to keep writing
            while i < len(data):
                i += f.write(data[i:])

    def close_join(self):
        """Wait for the reader and all writer threads, then close the source.

        NOTE(review): the threads only exit after reading a falsey value
        (EOF), so callers must close the write end first — confirm callers do.
        """
        self._read_thread.join()
        for t in self._write_threads:
            t.join()
        self._src_file.close()
def spawn_reader_writer(get_data_fn, put_data_fn):
    """Spawn a daemon thread that pumps data from a source into a sink.

    The thread forwards every value it reads — including the final falsey
    one, so downstream consumers also see the EOF marker — and then exits.

    Args:
        get_data_fn: Data-reading function. Called repeatedly until it returns
            a falsey value, which signals the thread to terminate.
        put_data_fn: Data-writing function.
    Returns: threading.Thread
    """
    def _pump():
        data = get_data_fn()
        put_data_fn(data)
        while data:
            data = get_data_fn()
            put_data_fn(data)

    worker = threading.Thread(target=_pump)
    worker.daemon = True
    worker.start()
    return worker
class FileRedirector(object):
    """Redirects a file object to a different file descriptor.
    Properties:
        redir_file: The file object that gets redirected.
        orig_file: A unbuffered new file object that points where `redir_file` originally pointed.
    Adapted from
    https://stackoverflow.com/questions/4675728/redirect-stdout-to-a-file-in-python/22434262#22434262
    """

    def __init__(self, redir_file, to_file):
        """Constructor
        Args:
            redir_file: (file) The file object to redirect
            to_file: (file) The file object `redir_file` should be redirected to.
        """
        self.redir_file = redir_file
        self._from_fd = redir_file.fileno()
        self._to_fd = to_file.fileno()
        # copy from_fd before it is overwritten
        # NOTE: `self._from_fd` is inheritable on Windows when duplicating a standard stream
        # we make this unbuffered because we want to rely on buffers earlier in the I/O chain
        self.orig_file = os.fdopen(os.dup(self._from_fd), 'wb', 0)

    def redirect(self):
        """Point the redirected fd at the target fd (shell: `exec >&to`)."""
        self.redir_file.flush()  # flush library buffers that dup2 knows nothing about
        os.dup2(self._to_fd, self._from_fd)  # $ exec >&to

    # This isn't tested properly:
    def restore(self):
        """Restore `self.redir_file` to its original state.
        """
        # NOTE: dup2 makes `self._from_fd` inheritable unconditionally
        self.redir_file.flush()
        os.dup2(self.orig_file.fileno(), self._from_fd)  # $ exec >&copied
        # self.orig_file.close()
        # self.orig_file = None
        # self.redir_file = None
|
TimerCommand.py | #!/usr/bin/env python
# Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Wen Guan, <wguan@cern.ch>, 2014-2018
import os
import signal
import time
import traceback
from Queue import Empty, Full
import subprocess, threading
import multiprocessing
class TimerCommand(object):
    """Run a shell command or a Python function with a hard timeout.

    NOTE(review): this is Python-2-era code (the module imports `Queue`);
    under Python 3, `communicate()` returns bytes, which would break the
    string concatenations in `run()` — confirm the target interpreter.
    """

    def __init__(self, cmd=None):
        # Shell command string to execute (used by run()).
        self.cmd = cmd
        self.process = None      # subprocess.Popen handle once started
        self.stdout = None       # combined stdout+stderr (stderr is merged)
        self.stderr = None       # always None in practice: stderr=STDOUT
        self.is_timeout = False  # set when the command exceeded `timeout`

    def run(self, timeout=3600):
        """Execute self.cmd, killing its whole process group on timeout.

        Returns:
            (returncode, stdout) — returncode is forced to 1 on timeout
            and normalized to 0 if the process had not set one.
        """
        def target():
            # print 'Thread started'
            # os.setsid puts the child in its own process group so the whole
            # group (including grandchildren) can be killed with killpg.
            self.process = subprocess.Popen(self.cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, preexec_fn=os.setsid)
            self.stdout, self.stderr = self.process.communicate()
            # print 'Thread finished'

        thread = threading.Thread(target=target)
        thread.start()
        thread.join(timeout)
        if thread.is_alive():
            # Timed out: escalate terminate -> SIGKILL on the process group.
            self.is_timeout = True
            try:
                # print 'TimeOut. Terminating process'
                self.process.terminate()
                thread.join(2)
                if thread.is_alive():
                    os.killpg(os.getpgid(self.process.pid), signal.SIGKILL)
                    thread.join(2)
            except:
                # Best-effort cleanup if terminate/killpg itself failed.
                if thread.is_alive():
                    try:
                        os.killpg(os.getpgid(self.process.pid), signal.SIGKILL)
                    except:
                        pass
                    thread.join(2)
            if not self.stdout:
                self.stdout = ''
            self.stdout += "Command time-out: %s s" % timeout
            if not self.stderr:
                self.stderr = ''
        if not self.stdout:
            self.stdout = ''
        if self.stderr and self.stderr != '':
            self.stdout += " Error: " + self.stderr
        if self.process:
            returncode = self.process.returncode
        else:
            returncode = 1
        # A timed-out command must never report success.
        if returncode != 1 and 'Command time-out' in self.stdout:
            returncode = 1
        # NOTE(review): `== None` (instead of `is None`) kept as-is; a None
        # returncode (process still running) is treated as success here.
        if returncode == None:
            returncode = 0
        return returncode, self.stdout

    def runFunction(self, func, args, timeout=3600):
        """Run func(*args) in a child process, with `timeout` seconds to
        deliver a result through a queue.

        Returns the function's result, or (-1, <error text>) on failure or
        timeout.
        """
        def target(func, args, retQ):
            error = ''
            try:
                # Restore default SIGTERM handling inside the child.
                signal.signal(signal.SIGTERM, signal.SIG_DFL)
            except:
                error = error + '%s\n' % traceback.format_exc()
            try:
                ret= func(*args)
                retQ.put(ret)
            except:
                retQ.put((-1, error + '%s\n' % traceback.format_exc()))

        retQ = multiprocessing.Queue()
        process = multiprocessing.Process(target=target, args=(func, args, retQ))
        try:
            process.start()
        except:
            # Could not even start: shrink the wait so we fail fast below.
            timeout = 1
        ret = None
        try:
            ret = retQ.get(block=True, timeout=timeout)
        except Empty:
            ret = (-1, "function timeout, killed")
        try:
            # Repeatedly terminate until the child is really gone.
            if process.is_alive():
                process.terminate()
                process.join(2)
                if process.is_alive():
                    # os.kill(int(process.pid), signal.SIGKILL)
                    process.terminate()
                    process.join(2)
        except:
            if process.is_alive():
                try:
                    # os.kill(int(process.pid), signal.SIGKILL)
                    process.terminate()
                except:
                    pass
                process.join(2)
        finally:
            while process.is_alive():
                process.terminate()
                process.join(2)
                # Reap any finished children to avoid zombies.
                multiprocessing.active_children()
        if ret is None:
            ret = (-1, "function failed with unknow error")
        return ret
def getstatusoutput(cmd, timeout=1800):
    """Convenience wrapper: run *cmd* under a TimerCommand.

    Returns:
        (status, output) tuple from TimerCommand.run().
    """
    return TimerCommand(cmd).run(timeout=timeout)
|
database_variot.py | import base64
import os
import numpy
import sys
import traceback
from database import Database
import queue
import threading
import os
from time import sleep
import pycurl
import json
import io
import requests
# Set up MAC address on server when ready, and use mac address as device ID; possibly token in the future
# Set up MAC address on server when ready, and use mac address as device ID; possibly token in the future
class VarIOTDatabase(Database):
    """Database backend that forwards records to a VarIOT hub over HTTP.

    Inserts are queued and a background dispatcher thread batches them into
    POST requests so callers never block on the network.
    """

    def __init__(self, crypto, db_path='https://variot.ece.drexel.edu', token='', device='0000000000', dispatchsleep=0):
        Database.__init__(self, crypto, db_path=db_path)
        # API token (placeholder until VarIOT supports API keys).
        self.token = token
        self.insertion_queue = queue.Queue()
        # NOTE(review): dispatchsleep is assigned AFTER the dispatcher thread
        # starts; the thread reads it, so there is a brief startup race —
        # confirm this is acceptable or move the assignment earlier.
        self.dispatcher_thread = threading.Thread(
            target=self.dispatcher, args=())
        self.dispatcher_thread.start()
        self.dispatchsleep = dispatchsleep
        # Device ID on VarIOT.
        self.dev = device

    def variot_dispatch(self, recordsdictlist):
        """POST a batch of queued records to the VarIOT hub.

        NOTE(review): this mutates the queued dicts in place (del db_pw) and
        double-encodes the payload (json.dumps of an already-dumped string) —
        confirm the server expects a JSON string value under 'data'.
        """
        # Remove password since there is no application level encryption to VarIOT
        for record in recordsdictlist:
            del record['db_pw']
        data = json.dumps(recordsdictlist)
        # TODO POST TO VARIOT HERE
        # data is a json array of json records
        # token constructor parameter will be the API key when VarIOT is ready for that
        # self.dev is the device ID on VarIOT
        # Viewable at http://10.248.101.200:5000/messages/5e4af7041c9d440000f0cd38
        # Post to https?
        # Which side encrypts? Right now, eliminating this side encryption for VarIOT (hence removal of password from the body and use of crypto from this module
        payload = {'data': json.dumps(data)}
        URL = self.db_path + '/api/v2/hubs/message/xarray?address=' + self.dev
        r = requests.post(url = URL, json = payload)
        # print(r.status_code)
        # print(r.text)

    # dispatch insertions from the queue so that the webserver can continue receiving requests
    # log each request to the Audit
    def dispatcher(self):
        """Background loop: block for one record, drain any backlog, send."""
        while 1:
            queuelist = []
            input_dict = self.insertion_queue.get(block=True)
            queuelist.append(input_dict)
            # http://stackoverflow.com/questions/156360/get-all-items-from-thread-queue
            # while we're here, try to pick up any more items that were inserted into the queue
            while 1:
                try:
                    input_dict = self.insertion_queue.get_nowait()
                    queuelist.append(input_dict)
                except queue.Empty:
                    break
            self.variot_dispatch(queuelist)
            if self.dispatchsleep > 0:
                # if desired, sleep the dispatcher for a short time to queue up some inserts and give the producer some CPU time
                sleep(self.dispatchsleep)

    # just insert into a queue for the dispatcher to insert in the background
    def insert_row(self, relativetime, interrogatortime, freeform, db_pw=''):
        """Queue one record for asynchronous delivery (non-blocking)."""
        input_dict = dict()  # read by the consumer dispatcher
        input_dict['relativetime'] = relativetime
        input_dict['interrogatortime'] = interrogatortime
        input_dict['freeform'] = freeform
        input_dict['db_pw'] = db_pw
        self.insertion_queue.put(input_dict)
|
DAQProvider.py |
import multiprocessing as mult
import logging
import re
import Queue
try:
import zmq
except ImportError:
print "no zmq installed..."
from SimDaqConnection import SimDaqConnection,SimDaqServer
from DaqConnection import DaqConnection,DaqServer
class DAQIOError(IOError):
    """Raised when communication with the DAQ fails (e.g. an empty queue)."""
class DAQProvider:
    """
    Launch the main part of the GUI and the worker threads. periodicCall and
    endApplication could reside in the GUI part, but putting them here
    means that you have all the thread controls in a single place.
    """

    def __init__(self,logger=None,sim=False):
        # Queues bridging this process and the DAQ reader/writer processes.
        self.outqueue = mult.Queue()
        self.inqueue = mult.Queue()
        self.running = 1
        # Whitelist of characters a sane DAQ line may contain.
        self.good_pattern = re.compile("^[a-zA-Z0-9+-.,:()=$/#?!%_@*|~' ]*[\n\r]*$")
        # get option parser options
        if logger is None:
            logger = logging.getLogger()
        self.logger = logger
        self.sim = sim
        # Use the simulated connection when no hardware is attached.
        if self.sim:
            self.daq = SimDaqConnection(self.inqueue, self.outqueue, self.logger)
        else:
            self.daq = DaqConnection(self.inqueue, self.outqueue, self.logger)
        # Set up the thread to do asynchronous I/O
        # More can be made if necessary
        # Set daemon flag so that the threads finish when the main app finishes
        self.readthread = mult.Process(target=self.daq.read,name="pREADER")
        self.readthread.daemon = True
        self.readthread.start()
        # The simulator needs no writer; real hardware gets a writer process.
        if not self.sim:
            self.writethread = mult.Process(target=self.daq.write,name="pWRITER")
            self.writethread.daemon = True
            self.writethread.start()

    def get(self,*args):
        """
        Get something from the daq

        Raises DAQIOError when the queue is empty; returns None when the
        line fails the sanity pattern.
        """
        try:
            line = self.outqueue.get(*args)
        except Queue.Empty:
            raise DAQIOError("Queue is empty")
        if self.good_pattern.match(line) is None:
            # Do something more sensible here, like stopping the DAQ
            # then wait until service is restar ted?
            self.logger.warning("Got garbage from the DAQ: %s"%line.rstrip('\r\n'))
            return None
        return line

    def put(self,*args):
        """
        Send information to the daq
        """
        self.inqueue.put(*args)

    def data_available(self):
        """
        is new data from daq available

        Returns the queue size where supported; on platforms where qsize()
        is unimplemented (macOS), falls back to a boolean emptiness check.
        """
        size = None
        try:
            size = self.outqueue.qsize()
        except NotImplementedError:
            self.logger.debug("Running Mac version of muonic.")
            size = not self.outqueue.empty()
        return size
class DAQClient(DAQProvider):
    """DAQProvider variant that talks to the DAQ over a ZMQ PAIR socket
    instead of local multiprocessing queues.

    NOTE: deliberately does NOT call DAQProvider.__init__, so no reader or
    writer processes are spawned.
    """

    def __init__(self,port,logger=None,root=None):
        self.running = 1
        self.root = root
        # Same sanity whitelist as the parent class.
        self.good_pattern = re.compile("^[a-zA-Z0-9+-.,:()=$/#?!%_@*|~' ]*[\n\r]*$")
        # get option parser options
        if logger is None:
            logger = logging.getLogger()
        self.logger = logger
        self.setup_socket(port)

    def setup_socket(self,port):
        """Connect a ZMQ PAIR socket to the DAQ server on localhost:port."""
        # port = "5556"
        context = zmq.Context()
        self.socket = context.socket(zmq.PAIR)
        self.socket.connect("tcp://127.0.0.1:%s" % port)
        self.socket_port = port

    def get(self,*args):
        """
        Get something from the daq

        NOTE(review): zmq's recv_string does not raise Queue.Empty, so this
        except clause looks unreachable — confirm against the parent's
        queue-based API it mirrors.
        """
        try:
            line = self.socket.recv_string()
        except Queue.Empty:
            raise DAQIOError("Queue is empty")
        if self.good_pattern.match(line) is None:
            # Do something more sensible here, like stopping the DAQ
            # then wait until service is restar ted?
            self.logger.warning("Got garbage from the DAQ: %s"%line.rstrip('\r\n'))
            return None
            # raise DAQIOError("Queue contains garbage!")
        return line

    def put(self,*args):
        """
        Send information to the daq
        """
        self.socket.send_string(*args)

    def data_available(self):
        """
        is new data from daq available

        Polls the socket with a 200 ms timeout.
        """
        return self.socket.poll(200)
|
manager.py | #!/usr/bin/env python3
import datetime
import os
import signal
import subprocess
import sys
import traceback
from multiprocessing import Process
import cereal.messaging as messaging
import selfdrive.crash as crash
from common.basedir import BASEDIR
from common.params import Params, ParamKeyType
from common.text_window import TextWindow
from selfdrive.boardd.set_time import set_time
from selfdrive.hardware import HARDWARE, PC, EON
from selfdrive.manager.helpers import unblock_stdout
from selfdrive.manager.process import ensure_running, launcher
from selfdrive.manager.process_config import managed_processes
from selfdrive.athena.registration import register, UNREGISTERED_DONGLE_ID
from selfdrive.swaglog import cloudlog, add_file_handler
from selfdrive.version import dirty, get_git_commit, version, origin, branch, commit, \
terms_version, training_version, comma_remote, \
get_git_branch, get_git_remote
from selfdrive.hardware.eon.apk import system
sys.path.append(os.path.join(BASEDIR, "pyextra"))
def manager_init():
    """One-time process-manager initialization: sync clock, seed params,
    record version info, register the device, and set up crash reporting."""
    # update system time from panda
    set_time(cloudlog)
    params = Params()
    params.clear_all(ParamKeyType.CLEAR_ON_MANAGER_START)
    # Defaults applied only for params that are currently unset (see below).
    default_params = [
        ("OpenpilotEnabledToggle", "1"),
        ("CommunityFeaturesToggle", "1"),
        ("IsMetric", "1"),
        ("LanelessMode", "0"),
        # HKG
        ("UseClusterSpeed", "1"),
        ("LongControlEnabled", "0"),
        ("MadModeEnabled", "1"),
        ("IsLdwsCar", "0"),
        ("LaneChangeEnabled", "0"),
        ("AutoLaneChangeEnabled", "0"),
        ("LateralControlSelect", "0"),
        ("SccSmootherSlowOnCurves", "0"),
        ("SccSmootherSyncGasPressed", "0"),
        ("StockNaviDecelEnabled", "0"),
        ("ShowDebugUI", "0"),
        ("DisableOpFcw", "0"),
        ("CustomLeadMark", "0"),
        ("IsOpenpilotViewEnabled", "0"),
        ("NewRadarInterface", "0"),
        ("PutPrebuiltOn", "0"),
        ("AutoScreenOff", "0"),
        ("NDACamera", "1"),
        ("ShowCgearUI", "1"),
    ]
    if not PC:
        default_params.append(("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')))
    if params.get_bool("RecordFrontLock"):
        params.put_bool("RecordFront", True)
    if not params.get_bool("DisableRadar_Allow"):
        params.delete("DisableRadar")
    # set unset params
    for k, v in default_params:
        if params.get(k) is None:
            params.put(k, v)
    # is this dashcam?
    if os.getenv("PASSIVE") is not None:
        params.put_bool("Passive", bool(int(os.getenv("PASSIVE"))))
    if params.get("Passive") is None:
        raise Exception("Passive must be set to continue")
    # Create folders needed for msgq
    try:
        os.mkdir("/dev/shm")
    except FileExistsError:
        pass
    except PermissionError:
        print("WARNING: failed to make /dev/shm")
    # set version params
    params.put("Version", version)
    params.put("TermsVersion", terms_version)
    params.put("TrainingVersion", training_version)
    params.put("GitCommit", get_git_commit(default=""))
    params.put("GitBranch", get_git_branch(default=""))
    params.put("GitRemote", get_git_remote(default=""))
    # set dongle id
    reg_res = register(show_spinner=True)
    if reg_res:
        dongle_id = reg_res
    else:
        serial = params.get("HardwareSerial")
        raise Exception(f"Registration failed for device {serial}")
    os.environ['DONGLE_ID'] = dongle_id  # Needed for swaglog
    if not dirty:
        os.environ['CLEAN'] = '1'
    cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty,
                         device=HARDWARE.get_device_type())
    # Crash reporting only for comma-hosted branches, and never on PC or
    # when logging/crash reporting is explicitly disabled.
    if comma_remote and not (os.getenv("NOLOG") or os.getenv("NOCRASH") or PC):
        crash.init()
        crash.bind_user(id=dongle_id)
        crash.bind_extra(dirty=dirty, origin=origin, branch=branch, commit=commit,
                         device=HARDWARE.get_device_type())
    os.system("/data/openpilot/gitcommit.sh")  # git-related
def manager_prepare():
    """Run every managed process's prepare step before startup."""
    for proc in managed_processes.values():
        proc.prepare()
def manager_cleanup():
    """Stop every managed process, then log that shutdown completed."""
    for proc in managed_processes.values():
        proc.stop()
    cloudlog.info("everything is dead")
def manager_thread():
    """Main supervision loop: keep managed processes in the desired state
    until an uninstall is requested."""
    if EON:
        # EON-only helpers launched as plain processes outside the manager.
        Process(name="shutdownd", target=launcher, args=("selfdrive.shutdownd",)).start()
        system("am startservice com.neokii.optool/.MainService")
    Process(name="road_speed_limiter", target=launcher, args=("selfdrive.road_speed_limiter",)).start()
    cloudlog.info("manager start")
    cloudlog.info({"environ": os.environ})
    # save boot log
    # subprocess.call("./bootlog", cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
    params = Params()
    # Processes to never run in this session.
    ignore = []
    if params.get("DongleId", encoding='utf8') == UNREGISTERED_DONGLE_ID:
        ignore += ["manage_athenad", "uploader"]
    if os.getenv("NOBOARD") is not None:
        ignore.append("pandad")
    if os.getenv("BLOCK") is not None:
        ignore += os.getenv("BLOCK").split(",")
    ensure_running(managed_processes.values(), started=False, not_run=ignore)
    started_prev = False
    sm = messaging.SubMaster(['deviceState'])
    pm = messaging.PubMaster(['managerState'])
    while True:
        sm.update()
        not_run = ignore[:]
        # Don't log when disk space is nearly exhausted.
        if sm['deviceState'].freeSpacePercent < 5:
            not_run.append("loggerd")
        started = sm['deviceState'].started
        driverview = params.get_bool("IsDriverViewEnabled")
        ensure_running(managed_processes.values(), started, driverview, not_run)
        # trigger an update after going offroad
        if started_prev and not started and 'updated' in managed_processes:
            os.sync()
            managed_processes['updated'].signal(signal.SIGHUP)
        started_prev = started
        # Green for alive processes, red for dead ones (ANSI colors).
        running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if p.proc.is_alive() else "\u001b[31m", p.name)
                        for p in managed_processes.values() if p.proc]
        cloudlog.debug(' '.join(running_list))
        # send managerState
        msg = messaging.new_message('managerState')
        msg.managerState.processes = [p.get_process_state_msg() for p in managed_processes.values()]
        pm.send('managerState', msg)
        # TODO: let UI handle this
        # Exit main loop when uninstall is needed
        if params.get_bool("DoUninstall"):
            break
def main():
    """Top-level entry: init, prepare, run the supervision loop, clean up."""
    prepare_only = os.getenv("PREPAREONLY") is not None
    manager_init()
    # Start UI early so prepare can happen in the background
    if not prepare_only:
        managed_processes['ui'].start()
    manager_prepare()
    if prepare_only:
        return
    # SystemExit on sigterm
    signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
    try:
        manager_thread()
    except Exception:
        traceback.print_exc()
        crash.capture_exception()
    finally:
        # Always stop children, even if the manager loop crashed.
        manager_cleanup()
    # manager_thread only returns normally when DoUninstall was set.
    if Params().get_bool("DoUninstall"):
        cloudlog.warning("uninstalling")
        HARDWARE.uninstall()
if __name__ == "__main__":
    unblock_stdout()
    try:
        main()
    except Exception:
        # Log the failure, show it on-screen, then re-raise for crash capture.
        add_file_handler(cloudlog)
        cloudlog.exception("Manager failed to start")
        # Show last 3 lines of traceback
        error = traceback.format_exc(-3)
        error = "Manager failed to start\n\n" + error
        with TextWindow(error) as t:
            t.wait_for_exit()
        raise
    # manual exit because we are forked
    sys.exit(0)
|
server.py | import socket
import time
import threading
from text_formatting import RED, END
from constants import HEADER_SIZE, DATA_LEFT_SIZE, PORT, MAX_SIZE, FORMAT, DISCONNECT, NOTHING, LENGTH, RESPOND,\
END, DATA
# Fixed-width header layout: [length][respond flag][end flag][data-left].
# The two single-character flags account for the "+ 2" below.
LENGTH_SIZE = HEADER_SIZE - (DATA_LEFT_SIZE + 2)
RESPOND_FLAG = LENGTH_SIZE      # index of the respond flag within the header
END_FLAG = LENGTH_SIZE + 1      # index of the end (newline) flag
class Server:
    """TCP chat-style server speaking a fixed-width-header framing protocol.

    Each message is prefixed with a header carrying the payload length, a
    respond flag, an end (newline) flag, and a data-left field used for
    resend negotiation.
    """

    def __init__(self):
        # Local IP used only for the log line below; the bind is on all ifaces.
        self.IP = socket.gethostbyname(socket.gethostname())
        self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.server_socket.bind(("", PORT))
        self.server_socket.listen()
        self.socket_list = [self.server_socket]
        # Maps client socket -> username.
        self.clients = {}
        print(f"[LISTENING] Listening for connections on {self.IP}:{PORT}")

    def handle_client(self, client_socket, client_address):
        """Per-client thread body: read the username, then register the client."""
        print(f"[NEW CONNECTION] {client_address} connected")
        username = ""
        while True:
            user = self.receive_msg(client_socket)
            if user is False:
                print("User is False")
                continue
            username = user[DATA]
            self.socket_list.append(client_socket)
            self.clients[client_socket] = username
            print(f"{client_address} {username}")
            break
        # connected = True
        # while connected:
        #     message = self.receive_msg(client_socket)
        #     if not message or message[DATA] == DISCONNECT or not self.clients[client_socket]:
        #         connected = False
        #     else:
        #         self.message_list[client_socket] = message[DATA]
        #
        # print(f"[LOST CONNECTION] {client_address} {username}")
        # self.socket_list.remove(client_socket)
        # del self.clients[client_socket]
        # client_socket.close()

    def close_client_connection(self, client_socket):
        """Unregister *client_socket* and close it."""
        print(f"[LOST CONNECTION] {client_socket.getpeername()}")
        self.socket_list.remove(client_socket)
        del self.clients[client_socket]
        client_socket.close()

    def receive_msg(self, client_socket):
        """Receive a message from client_socket"""
        try:
            msg_len, respond, end, data = self.receive_full_msg(client_socket)
            end = "\n" if end else ""
            data = data.decode(FORMAT)
            # NOTHING is a sentinel for an intentionally empty payload.
            if data == NOTHING:
                data = ""
            # print(f"[RECEIVE] {client_socket.getpeername()} {msg_len}:{data}")
            return {LENGTH: msg_len,
                    RESPOND: respond,
                    END: end,
                    DATA: data}
        except Exception as e:
            # Any framing/decoding error is reported as False to the caller.
            print(f"{RED}{e}{END}")
            return False

    def receive_full_msg(self, client_socket):
        """Read a header, then keep requesting resends until the declared
        payload length has fully arrived."""
        msg_len, respond, end, data_left = self.receive_header(client_socket)
        data = client_socket.recv(msg_len)
        msg_len_part = len(data)
        while msg_len_part < msg_len:
            resend_len = msg_len - msg_len_part
            self.request_resend(client_socket, data_left=resend_len)
            _msg_len, _, _, _ = self.receive_header(client_socket)
            new_data = client_socket.recv(_msg_len)
            msg_len_part += len(new_data)
            data += new_data
        # Tell the client nothing more is owed.
        self.request_resend(client_socket, 0)
        return msg_len, respond, end, data

    @staticmethod
    def receive_header(client_socket):
        """Parse one fixed-width header into (msg_len, respond, end, data_left).

        NOTE(review): on a closed connection this returns a bare False, which
        callers unpack as a 4-tuple — that raises and is swallowed by
        receive_msg's broad except; confirm this is the intended disconnect
        path.
        """
        msg_header = client_socket.recv(HEADER_SIZE).decode(FORMAT)
        if not len(msg_header):
            return False
        msg_len = int(msg_header[:LENGTH_SIZE].strip())
        respond = int(msg_header[RESPOND_FLAG])
        end = int(msg_header[END_FLAG])
        data_left = int(msg_header[-DATA_LEFT_SIZE:].strip())
        return msg_len, respond, end, data_left

    def request_resend(self, client, data_left):
        """Send a zero-length header asking the client to resend *data_left* bytes."""
        msg_header = self.encode_header(0, 0, 1, data_left)
        client.send(msg_header)

    def send_msg(self, msg, client=None, respond=0, end=1):
        """Sends msg to client socket specified in client (all clients if not specified).
        respond (int) [0, 1]: Whether to require response from clients.
        end (int) [0, 1]: Whether to insert newline after end of line."""
        if not client:
            for c in self.clients:
                self.send_full_message(c, msg, respond, end)
        elif type(client) is socket.socket:
            self.send_full_message(client, msg, respond, end)
        elif type(client) is list:
            for c in client:
                self.send_full_message(c, msg, respond, end)
        else:
            return False
        # A response can only be collected from a single concrete socket.
        if respond and type(client) is socket.socket:
            response = self.receive_msg(client)[DATA]
            if response is False or response == DISCONNECT:
                self.close_client_connection(client)
                return False
            return response
        return True

    def send_full_message(self, client, msg, respond, end):
        """Send msg to client in chunks of size MAX_SIZE.
        client (socket.socket): client socket to send message to.
        msg (str): message payload to send to client.
        respond (int) [0, 1]: Whether to require response from clients.
        end (int) [0, 1]: Whether to insert newline after end of line."""
        full_msg = self.encode_msg_body(msg)
        full_msg_len = len(full_msg)
        _respond = respond
        _end = end
        # For multi-chunk messages, only the LAST chunk carries the real
        # respond/end flags; intermediate chunks clear them.
        if full_msg_len > MAX_SIZE:
            _respond = 0
            _end = 0
        chunks = [full_msg[i:i + MAX_SIZE] for i in range(0, len(full_msg), MAX_SIZE)]
        for n, chunk in enumerate(chunks):
            data_left = len(chunk)
            if n == len(chunks) - 1:
                _respond = respond
                _end = end
            # Keep resending the unacknowledged tail until the peer reports 0.
            while data_left > 0:
                msg_header = self.encode_header(data_left, _respond, _end, -1)
                msg = msg_header + chunk[len(chunk) - data_left:]
                client.send(msg)
                # print(f"[SEND] {msg}")
                _, _, _, data_left = self.receive_header(client)

    @staticmethod
    def encode_header(msg_len, respond, end, data_left):
        """Build the fixed-width header bytes for one chunk."""
        msg_header = f"{msg_len:<{LENGTH_SIZE}}{respond}{end}{data_left:<{DATA_LEFT_SIZE}}"
        return msg_header.encode(FORMAT)

    @staticmethod
    def encode_msg_body(msg):
        """Encode *msg* to bytes, substituting the NOTHING sentinel for ''."""
        if msg == "":
            msg = NOTHING
        try:
            msg = msg.encode(FORMAT)
        except AttributeError as e:
            # Non-string payloads are stringified before encoding.
            print(f"{RED}{e}{END}")
            msg = msg.__str__().encode(FORMAT)
        return msg

    def accept_incoming_connections(self, num=1):
        """Sets up handling for incoming clients.
        Num specifies number of clients to accept."""
        try:
            accepted = 0
            while accepted < num:
                client_socket, client_address = self.server_socket.accept()
                print(f"[ACCEPTED] Accepted new connection from {client_address[0]}:{client_address[1]}")
                threading.Thread(target=self.handle_client, args=(client_socket, client_address)).start()
                print(f"[ACTIVE CONNECTIONS] {threading.activeCount() - 1}")
                accepted += 1
            # Block until every accepted client has finished its handshake.
            while len(self.clients) < num:
                time.sleep(1)
        except:
            print("[ACCEPT ERROR]")
|
lf_several_queues.py | #!/usr/bin/env python
# encoding: utf-8
from azure.storage.queue import QueueService
from datetime import datetime
from common.common import *
import threading
import json
import random
def connect_queue():
    """Create a QueueService client for the configured storage account."""
    return QueueService(ACCOUNT_NAME, ACCOUNT_KEY)
def create_queue(queue_service, queue_num):
    """Create (or re-create) the numbered queue and drop any stale messages."""
    target = queue_name(queue_num)
    print('Creating queue %s' % target)
    queue_service.create_queue(target)
    queue_service.clear_messages(target)
def queue_name(queue_num):
    """Return the full queue name for index *queue_num*."""
    return '%s-%d' % (QUEUE_NAME, queue_num)
def random_queue_name(num_queues):
    """Pick one of the *num_queues* queue names uniformly at random."""
    chosen = random.randint(0, num_queues - 1)
    return queue_name(chosen)
def producer(producer_num, num_to_produce, num_queues):
    """Publish ``num_to_produce`` JSON messages to randomly chosen queues.

    Relies on the module-level ``queue_service`` connection.
    """
    for i in range(num_to_produce):
        content = {
            'data': producer_num * 10 + i,
            'message_num': i,
            # key was misspelled 'timestamp:' (stray trailing colon)
            'timestamp': "%s" % datetime.now(),
        }
        name = random_queue_name(num_queues)
        queue_service.put_message(name, json.dumps(content))
def consumer(consumer_num, queues_to_check):
    """Drain every queue in ``queues_to_check`` on behalf of one consumer."""
    for q in queues_to_check:
        check_queue(consumer_num, q)
def check_queue(consumer_num, queue):
    """Repeatedly fetch, print and delete batches of up to 32 messages
    until ``queue`` is empty."""
    while True:
        messages = queue_service.get_messages(queue, 32)
        if not messages:
            break
        for message in messages:
            parsed = json.loads(message.message_text)
            num = int(parsed['data'])
            # 'Comsumer' typo fixed; stray trailing semicolon removed
            print('Consumer %d reading from %s: %02d' % (consumer_num, queue, num))
            queue_service.delete_message(queue, message.message_id, message.pop_receipt)
# --- demo driver: create the queues, then fan out producer and consumer threads ---
num_consumers = 5
num_producers = 3
num_to_produce = 10
num_queues = 3

queue_service = connect_queue()

all_queues = []
for q in range(num_queues):
    create_queue(queue_service, q)
    all_queues.append(queue_name(q))

for p in range(num_producers):
    threading.Thread(target=producer, args=(p, num_to_produce, num_queues)).start()

# each consumer visits every queue, in its own random order
for c in range(num_consumers):
    threading.Thread(target=consumer, args=(c, random.sample(all_queues, len(all_queues)))).start()
|
skywatch.py | import json, os, requests, datetime
# import adafruit_dht
import board, time, glob
from adafruit_bme280 import basic as adafruit_bme280
import RPi.GPIO as GPIO # Import Raspberry Pi GPIO library
import threading
import socket, subprocess, uuid, shutil
class systemInfo:
    """One-shot snapshot of host facts (IP, SSID, MAC, uptime, disk usage).

    All values are collected at construction time into the ``systemInfo``
    dict attribute. Linux-specific (/proc/uptime, iwgetid).
    """

    def __init__(self):
        self.systemInfo = {}
        # Local IP: connect a UDP socket towards a public address and read
        # the chosen source address. The socket is now closed afterwards
        # (it was leaked in the original).
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            s.connect(("8.8.8.8", 80))
            ipAddress = s.getsockname()[0]
        finally:
            s.close()
        self.systemInfo['localip'] = ipAddress
        # WIFI SSID via iwgetid; fall back to "none" when not associated
        # or when the tool is missing.
        try:
            output = subprocess.check_output(['sudo', 'iwgetid']).decode('UTF-8')
            ssid = output.split('"')[1]
        except Exception:
            # narrowed from a bare except: that also caught KeyboardInterrupt
            ssid = "none"
        self.systemInfo['SSID'] = ssid
        # MAC address from uuid.getnode(), rendered as aa:bb:cc:dd:ee:ff
        macAddress = ':'.join(['{:02x}'.format((uuid.getnode() >> ele) & 0xff) for ele in range(0, 8 * 6, 8)][::-1])
        self.systemInfo['macaddress'] = macAddress
        # Uptime in seconds, first field of /proc/uptime
        with open('/proc/uptime', 'r') as f:
            uptime_seconds = float(f.readline().split()[0])
        self.systemInfo['uptime'] = uptime_seconds
        # Disk usage of the root filesystem, in GiB
        total, used, free = shutil.disk_usage("/")
        self.systemInfo['disktotal'] = total // (2**30)
        self.systemInfo['diskused'] = used // (2**30)
        self.systemInfo['diskfree'] = free // (2**30)
class meteoUploader:
    """Periodically collects readings from attached sensors and POSTs them
    as one JSON payload to the skywatching.eu meteo endpoint."""

    def __init__(self, baseURL="http://rashley.local", timezone="utc", config=None):
        # config was a mutable default argument ({}); use the None sentinel.
        if config is None:
            config = {}
        self.statusURL = "http://rashley.local/piStatus"
        self.baseURL = baseURL
        self.uploadURL = "https://skywatching.eu/meteo"
        self.identity = socket.gethostname()
        self.status = {self.identity: {}}
        self.sensors = []
        self.exit = False
        # upload interval in seconds
        self.monitorCadence = config.get('cadence', 180)
        self.name = "meteo uploader"
        self.timezone = timezone

    def attachSensor(self, sensor):
        """Register a sensor whose .name/.logData will be uploaded."""
        self.sensors.append(sensor)

    def send(self):
        """Assemble one payload from every attached sensor and upload it."""
        data = {'hostname': self.identity}
        timeStamp = datetime.datetime.now()
        timeStampStr = timeStamp.strftime("%Y-%m-%d %H:%M:%S")
        data['timestamp'] = timeStampStr
        data['timezone'] = self.timezone
        for sensor in self.sensors:
            data[sensor.name] = sensor.logData
        print(str(self.name), json.dumps(data, indent=4), flush=True)
        self.sendData(self.uploadURL, data)

    def monitor(self):
        """Loop: upload, then sleep, until killMonitor() is called."""
        while not self.exit:
            self.send()
            time.sleep(self.monitorCadence)

    def startMonitor(self):
        """Run monitor() on a background thread."""
        self.monitorThread = threading.Thread(name='non-block', target=self.monitor)
        self.monitorThread.start()

    def killMonitor(self):
        """Ask the monitor loop to stop after its current sleep."""
        print("stopping %s monitor." % self.name)
        self.exit = True

    def sendData(self, URL, jsonData):
        """POST jsonData to URL; return True when the server reports success."""
        success = False
        print("Sending to:", URL)
        try:
            response = requests.post(URL, json=jsonData)
            responseJSON = json.loads(response.text)
            print(json.dumps(responseJSON, indent=4))
            if responseJSON['status'] == 'success':
                success = True
            response.close()
        except Exception as e:
            success = False
            print(e, flush=True)
        print(success, flush=True)
        return success
class statusController:
    """Periodically POSTs a host status snapshot (from systemInfo) to a
    configured URL on a background thread.

    config must supply 'URL' and 'cadence'; missing keys raise KeyError,
    exactly as before.
    """

    def __init__(self, config=None):
        # config was a mutable default argument ({}); use the None sentinel.
        if config is None:
            config = {}
        print("config", config)
        self.URL = config['URL']
        self.identity = socket.gethostname()
        self.status = {self.identity: {}}
        self.exit = False
        self.monitorCadence = config['cadence']
        self.name = "web uploader"
        self.timezone = "UTC"

    def sendSystem(self):
        """Collect a fresh systemInfo snapshot and upload it."""
        self.status = {"hostname": self.identity}
        print("generating system info", flush=True)
        # (unused local systemJSON removed)
        timeStamp = datetime.datetime.now()
        timeStampStr = timeStamp.strftime("%Y-%m-%d %H:%M:%S")
        self.status['system'] = systemInfo().systemInfo
        self.status['date'] = timeStampStr
        self.status['timezone'] = self.timezone
        print("Sending..." + json.dumps(self.status, indent=4), flush=True)
        self.sendData(self.URL, self.status)

    def sendData(self, URL, jsonData):
        """POST jsonData to URL; return True when the server reports success."""
        success = False
        try:
            response = requests.post(URL, json=jsonData)
            responseJSON = json.loads(response.text)
            print(json.dumps(responseJSON, indent=4))
            if responseJSON['status'] == 'success':
                success = True
            response.close()
        except Exception as e:
            success = False
            print(e)
        print(success, flush=True)
        return success

    def monitor(self):
        """Loop: upload status, then sleep, until killMonitor() is called."""
        while not self.exit:
            print("status monitor", flush=True)
            self.sendSystem()
            time.sleep(self.monitorCadence)

    def startMonitor(self):
        """Run monitor() on a background thread."""
        self.systemThread = threading.Thread(name='non-block', target=self.monitor)
        print("starting status monitor", flush=True)
        self.systemThread.start()

    def killMonitor(self):
        """Request the monitor loop stop."""
        print("stopping %s monitor." % self.name)
        self.exit = True
class logger():
    """Append-only timestamped log writer that can also snapshot attached
    sensors periodically from a background thread."""

    def __init__(self, filename='/var/log/skywatch.log'):
        self.logfile = filename
        self.handle = open(self.logfile, 'at')
        self.sensors = []
        self.logCadence = 120
        self.exit = False
        self.name = "log"

    def writeEntry(self, message):
        """Append one timestamped line and flush it to disk."""
        stamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        self.handle.write(stamp + ": " + message)
        self.handle.write("\n")
        self.handle.flush()

    def attachSensor(self, sensor):
        """Track a sensor for future log entries."""
        self.sensors.append(sensor)

    def createEntry(self):
        """Write one plain-text line per attached sensor."""
        for sensor in self.sensors:
            self.writeEntry(str(sensor.name) + ": " + str(sensor.logData))

    def createJSONlog(self):
        """Write a single JSON object holding every sensor's data."""
        snapshot = {}
        for sensor in self.sensors:
            snapshot[sensor.name] = sensor.logData
        self.writeEntry(json.dumps(snapshot))

    def monitor(self):
        """Loop: log all sensors as JSON, sleep, until told to exit."""
        while not self.exit:
            self.createJSONlog()
            time.sleep(self.logCadence)

    def startMonitor(self):
        """Run monitor() on a background thread."""
        self.monitorThread = threading.Thread(name='non-block', target=self.monitor)
        self.monitorThread.start()

    def killMonitor(self):
        """Request the monitor loop stop."""
        print("stopping %s monitor." % self.name)
        self.exit = True

    def reset(self):
        """Erase the log file contents."""
        self.handle.truncate(0)

    def close(self):
        self.handle.close()
class exteriorSensor():
    """DS18B20 1-wire temperature sensor with a background polling loop.

    Reads /sys/bus/w1/devices/28*/w1_slave; ``temperature`` stays -999
    until a successful read.
    """

    def __init__(self, name="exterior"):
        self.temperature = -999
        self.base_dir = '/sys/bus/w1/devices/'
        self.name = name
        self.monitorCadence = 20
        self.fan = False
        self.attachedFan = None
        self.exit = False
        self.logData = {}
        # Locate the first DS18B20 device folder. Keep device_file = None
        # when the probe is absent so readTemp() can fail gracefully —
        # previously the attribute was never set in that case, and
        # readTemp() raised an uncaught AttributeError.
        self.device_file = None
        try:
            self.device_folder = glob.glob(self.base_dir + '28*')[0]
            self.device_file = self.device_folder + '/w1_slave'
        except IndexError:
            print("Cannot initiliase the DS18B20")

    def monitor(self):
        """Poll the sensor every monitorCadence seconds until exit is set."""
        while not self.exit:
            self.readTemp()
            print(self.name + "monitor: ", self.temperature, flush=True)
            if self.fan: self.attachedFan.checkFan(self.temperature)
            self.logData['temperature'] = self.temperature
            time.sleep(self.monitorCadence)

    def startMonitor(self):
        """Run monitor() on a background thread."""
        self.exit = False
        self.monitorThread = threading.Thread(name='non-block', target=self.monitor)
        self.monitorThread.start()

    def killMonitor(self):
        """Request the monitor loop stop."""
        print("stopping %s monitor." % self.name)
        self.exit = True

    def readTemp(self):
        """Read the current temperature in degrees C; return -999 on failure."""
        if self.device_file is None:
            # probe was never found during __init__
            return -999
        try:
            # context manager guarantees the file is closed on every path
            with open(self.device_file, 'r') as f:
                lines = f.readlines()
            equals_pos = lines[1].find('t=')
            if equals_pos != -1:
                temp_string = lines[1][equals_pos + 2:]
                self.temperature = float(temp_string) / 1000.0
        except (IndexError, OSError):
            # IndexError: malformed reading; OSError: device vanished
            print("Cannot read the DS18B20")
            return -999
        return self.temperature
class cpuSensor():
    """CPU temperature sensor backed by the Linux thermal sysfs file
    (value stored in millidegrees C)."""

    def __init__(self, name="cpu", config=None):
        # config was a mutable default argument ({}); use the None sentinel.
        if config is None:
            config = {}
        self.cpuTempPath = "/sys/class/thermal/thermal_zone0/temp"
        self.temperature = -999
        self.name = name
        self.attachedFan = None
        self.fan = False
        self.exit = False
        # polling interval in seconds
        self.monitorCadence = config.get('cadence', 20)
        self.logData = {}

    def attachFan(self, fan):
        """Attach a fan controller driven by this sensor's temperature."""
        self.fan = True
        self.attachedFan = fan

    def killMonitor(self):
        """Request the monitor loop stop."""
        print("stopping %s monitor." % self.name, flush=True)
        self.exit = True

    def readTemp(self):
        """Read the CPU temperature in degrees C; on failure the previous
        value (initially -999) is returned."""
        try:
            # `with` guarantees the file is closed even if parsing raises
            # (the original leaked the handle on a parse error)
            with open(self.cpuTempPath, "rt") as CPUtempFile:
                for line in CPUtempFile:
                    self.temperature = float(line.strip()) / 1000
                    self.logData['temperature'] = self.temperature
        except Exception as e:
            print(str(e))
        return self.temperature

    def monitor(self):
        """Poll the sensor every monitorCadence seconds until exit is set."""
        while not self.exit:
            self.readTemp()
            print(self.name + "monitor: ", self.temperature, flush=True)
            if self.fan: self.attachedFan.checkFan(self.temperature)
            time.sleep(self.monitorCadence)

    def startMonitor(self):
        """Run monitor() on a background thread."""
        self.monitorThread = threading.Thread(name='non-block', target=self.monitor)
        self.monitorThread.start()
class domeSensor2():
    """BME280 temperature/humidity/pressure sensor on the I2C bus.

    config must provide 'address' as a hex string (e.g. "0x76");
    'cadence' optionally sets the polling period in seconds.
    """

    def __init__(self, name="dome", config=None):
        # config was a mutable default argument ({}); use the None sentinel.
        if config is None:
            config = {}
        # Initialise the bme280
        i2c = board.I2C()  # uses board.SCL and board.SDA
        self.active = False
        decAddress = int(config['address'], 16)
        try:
            self.bme280 = adafruit_bme280.Adafruit_BME280_I2C(i2c, address=decAddress)
            # mark the sensor usable — previously `active` was set to False
            # on both paths and could never become True
            self.active = True
        except ValueError:
            print("Sensor BME280 failed!", flush=True)
            self.active = False
        self.fan = False
        self.attachedFans = []
        self.temperature = -999
        self.humidity = -999
        self.pressure = -999
        self.name = name
        print(config, flush=True)
        self.monitorCadence = config.get('cadence', 20)
        self.exit = False
        self.logData = {}

    def attachFan(self, fan):
        """Attach a fan controller driven by this sensor's temperature."""
        self.attachedFans.append(fan)
        self.fan = True

    def readTemp(self):
        """Return temperature in degrees C rounded to 0.1, or -999 on failure."""
        try:
            self.temperature = round(self.bme280.temperature, 1)
        except Exception:
            # narrowed from a bare except:
            self.temperature = -999
        self.logData['temperature'] = self.temperature
        return self.temperature

    def readHumidity(self):
        """Return relative humidity in %, rounded to 0.1, or -999 on failure."""
        try:
            self.humidity = round(self.bme280.humidity, 1)
        except Exception:
            self.humidity = -999
        self.logData['humidity'] = self.humidity
        return self.humidity

    def readPressure(self):
        """Return pressure (hPa per sensor library) rounded to 0.1, or -999 on failure."""
        try:
            self.pressure = round(self.bme280.pressure, 1)
        except Exception:
            self.pressure = -999
        self.logData['pressure'] = self.pressure
        return self.pressure

    def monitor(self):
        """Poll all three readings every monitorCadence seconds until exit."""
        while not self.exit:
            self.readTemp()
            self.readHumidity()
            self.readPressure()
            print(self.name + "monitor: ", self.temperature, self.humidity, self.pressure, flush=True)
            if self.fan:
                for fan in self.attachedFans:
                    fan.checkFan(self.temperature)
            time.sleep(self.monitorCadence)

    def startMonitor(self):
        """Run monitor() on a background thread."""
        self.monitorThread = threading.Thread(name='non-block', target=self.monitor)
        self.monitorThread.start()

    def killMonitor(self):
        """Request the monitor loop stop."""
        print("stopping %s monitor." % self.name, flush=True)
        self.exit = True
class IRSensor():
    """Infrared sky/ambient temperature sensor, read by invoking the
    external readTsky / readTamb helper binaries."""

    def __init__(self, name="IR", config=None):
        # config was a mutable default argument ({}); use the None sentinel.
        if config is None:
            config = {}
        self.logData = {}
        # the earlier hard-coded cadence of 10 was immediately overwritten;
        # one config lookup with a default of 20 replaces both assignments.
        self.monitorCadence = config.get('cadence', 20)
        self.skytemperature = -999
        self.ambienttemperature = -999
        self.exit = False
        self.name = name

    def readSky(self):
        """Return sky temperature in degrees C (rounded to 0.1); -999 on failure."""
        try:
            output = subprocess.check_output(['/home/pi/code/meteopi/readTsky']).decode('UTF-8')
            self.skytemperature = round(float(output.split('\n')[0]), 1)
        except Exception as e:
            print(e, flush=True)
            self.skytemperature = -999
        self.logData['sky'] = self.skytemperature
        return self.skytemperature

    def readAmb(self):
        """Return ambient temperature in degrees C (rounded to 0.1); -999 on failure."""
        try:
            output = subprocess.check_output(['/home/pi/code/meteopi/readTamb']).decode('UTF-8')
            self.ambienttemperature = round(float(output.split('\n')[0]), 1)
        except Exception as e:
            print(e)
            self.ambienttemperature = -999
        self.logData['ambient'] = self.ambienttemperature
        return self.ambienttemperature

    def monitor(self):
        """Poll sky and ambient every monitorCadence seconds until exit."""
        while not self.exit:
            self.readSky()
            self.readAmb()
            print(self.name + "monitor: ", self.skytemperature, self.ambienttemperature, flush=True)
            time.sleep(self.monitorCadence)

    def startMonitor(self):
        """Run monitor() on a background thread."""
        self.monitorThread = threading.Thread(name='non-block', target=self.monitor)
        self.monitorThread.start()

    def killMonitor(self):
        """Request the monitor loop stop."""
        print("stopping %s monitor." % self.name, flush=True)
        self.exit = True
class domeSensor():
    """DHT22 temperature/humidity sensor (legacy; superseded by domeSensor2).

    NOTE(review): the adafruit_dht import is commented out at the top of
    this file, so instantiating this class currently raises NameError —
    confirm whether this class is still used.
    """

    def __init__(self, name="dome"):
        # Initialise the dht device, with data pin connected to:
        self.pin = board.D17
        self.dhtDevice = adafruit_dht.DHT22(board.D17)
        self.temperature = -999
        self.humidity = -999
        self.monitorCadence = 20
        # was hard-coded to "dome", silently ignoring the name argument
        self.name = name
        self.attachedFan = None
        self.attachedFans = []
        self.fan = False
        self.exit = False
        self.logData = {}

    def killMonitor(self):
        """Request the monitor loop stop."""
        print("stopping %s monitor." % self.name, flush=True)
        self.exit = True

    def setFan(self, fan):
        """Attach a single fan controller (legacy single-fan API)."""
        self.fan = True
        self.attachedFan = fan

    def attachFan(self, fan):
        """Attach a fan controller to the multi-fan list."""
        self.attachedFans.append(fan)
        self.fan = True

    def readTemp(self):
        """Read temperature; keeps the previous value on transient errors."""
        try:
            self.temperature = self.dhtDevice.temperature
        except RuntimeError:
            # Errors happen fairly often, DHT's are hard to read, just keep going
            time.sleep(2.0)
        except Exception:
            # was `dhtDevice.exit()` (NameError: missing self.) and
            # `adafruit_dht.DHT(...)` (no such constructor — DHT22 is the
            # correct class); recreate the device instance.
            self.dhtDevice.exit()
            print("Re-initiliasing dome sensor", flush=True)
            time.sleep(5)
            self.dhtDevice = adafruit_dht.DHT22(board.D17)
        self.logData['temperature'] = self.temperature
        return self.temperature

    def readHumidity(self):
        """Read humidity; keeps the previous value on transient errors."""
        try:
            self.humidity = self.dhtDevice.humidity
        except RuntimeError:
            # Errors happen fairly often, DHT's are hard to read, just keep going
            time.sleep(2.0)
        except Exception:
            # same NameError / wrong-constructor fixes as readTemp
            self.dhtDevice.exit()
            print("Re-initiliasing dome sensor")
            time.sleep(5)
            self.dhtDevice = adafruit_dht.DHT22(board.D17)
        self.logData['humidity'] = self.humidity
        return self.humidity

    def monitor(self):
        """Poll both readings every monitorCadence seconds until exit."""
        while not self.exit:
            self.readTemp()
            self.readHumidity()
            print(self.name + "monitor: ", self.temperature, self.humidity, flush=True)
            if self.fan:
                for fan in self.attachedFans:
                    fan.checkFan(self.temperature)
            time.sleep(self.monitorCadence)

    def startMonitor(self):
        """Run monitor() on a background thread."""
        self.monitorThread = threading.Thread(name='non-block', target=self.monitor)
        self.monitorThread.start()
class fanController():
    """Thermostatic GPIO fan switch with hysteresis.

    config requires 'GPIO' (BCM pin number), 'name', 'temperatureUpper'
    (turn-on threshold) and 'temperatureLower' (turn-off threshold).
    """

    def __init__(self, config):
        print("config:", config)
        self.GPIO = config['GPIO']
        self.name = config['name']
        self.triggerTemperature = config['temperatureUpper']
        # hysteresis band keeps the fan from chattering near the threshold
        self.hysterisis = config['temperatureUpper'] - config['temperatureLower']
        GPIO.setmode(GPIO.BCM)  # Use BCM pin numbering
        GPIO.setup(self.GPIO, GPIO.OUT, initial=GPIO.LOW)
        self.fanOn = False

    def checkFan(self, temp):
        """Switch the fan based on temp, honouring the hysteresis band."""
        if temp > self.triggerTemperature:
            if not self.fanOn:
                print("Input temperature is above %d... Turning on %s fan." % (self.triggerTemperature, self.name), flush=True)
                self.on()
        if temp < self.triggerTemperature - self.hysterisis:
            if self.fanOn:
                print("Input temperature is below %d... Turning off %s fan." % (self.triggerTemperature - self.hysterisis, self.name), flush=True)
                self.off()

    def on(self):
        """Drive the pin high. GPIO.output is the correct call for an
        already-configured pin; re-running GPIO.setup(..., initial=...)
        (as the original did) misuses the API and triggers warnings."""
        GPIO.output(self.GPIO, GPIO.HIGH)
        self.fanOn = True

    def off(self):
        """Drive the pin low."""
        GPIO.output(self.GPIO, GPIO.LOW)
        self.fanOn = False

    def flip(self):
        """Toggle the current fan state."""
        if self.fanOn:
            self.off()
        else:
            self.on()
|
run_mxnet.py | """
This is a sample stub of loadgen with multiple processes support.
Each process sets its affinity by a proc list.
Loadgen is a producer, which calls issue_queries(). issue_queries() gets query
from loadgen and puts query id/sample indices into an input queue.
Each Consumer(process)'s run() reads input queue, calls model_predict() to get
inference result, and put result into output queue.
A standalone thread's response_loadgen() reads output queue, and responds
inference result to loadgen.
Server and Offline scenario PerformanceOnly mode are verified.
Each Model needs to implement below
model_predict()
load_query_samples()
unload_query_samples()
For model_predict(), how to return data to loadgen is model specific, the
loadgen CPP API requires a data pointer and length, then it saves the data to
mlperf_log_accuracy.json, which is used to generate accuracy number offline.
"""
import multiprocessing
import threading
import subprocess
import time
import os
import sys
import argparse
import array
import logging
import numpy as np
import mlperf_loadgen as lg
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("MXNet-BERT")
num_cpus = 28
num_ins = 2
NANO_SEC = 1e9
MILLI_SEC = 1000
in_queue_cnt = 0
out_queue_cnt = 0
bs_step = 8
def get_args():
    """Parse and return the benchmark's command-line arguments."""
    p = argparse.ArgumentParser()
    p.add_argument("--scenario", choices=["Offline", "Server"], default="Offline", help="Scenario")
    p.add_argument("--batching", choices=["Fixed", "Dynamic", "Adaptive"], default="Adaptive", help="Batching method")
    p.add_argument("--batch-size", default=1, type=int, help="batch_size")
    p.add_argument("--num-instance", default=2, type=int, help="number of instance")
    p.add_argument("--num-phy-cpus", default=28, type=int, help="number of physical cpus")
    p.add_argument("--vocab", default='converted_from_tf_to_mxnet/tf.vocab',
                   type=str, help="vocab file path")
    p.add_argument("--params", default='converted_from_tf_to_mxnet/tf_fp32.params',
                   type=str, help="FP32 params path")
    p.add_argument("--quantized_model_prefix",
                   default='converted_from_tf_to_mxnet/offline_model/model_bert_squad_quantized_customize',
                   type=str, help="quantized model prefix")
    p.add_argument("--accuracy", action="store_true", help="enable accuracy pass")
    p.add_argument("--quantized", action="store_true", help="use quantized model")
    p.add_argument("--mlperf-conf", default="mlperf.conf", help="mlperf rules config")
    p.add_argument("--user-conf", default="user.conf", help="user rules config")
    p.add_argument("--perf-count", default=None, help="perf count")
    p.add_argument("--profile", action="store_true", help="whether enable profiler")
    p.add_argument("--warmup", action="store_true", help="whether do warmup")
    p.add_argument("--perf_calibrate", action="store_true", help="whether do performance calibration")
    return p.parse_args()
# Maps the --scenario CLI choice onto the loadgen TestScenario enum.
scenario_map = {
    "Offline": lg.TestScenario.Offline,
    "Server": lg.TestScenario.Server,
}
def load_query_samples(sample_list):
    """Loadgen callback: bring the listed samples into memory.

    Model-specific; intentionally a no-op for this harness (the Consumer
    processes load their own data).
    """
    # This is model specific place holder
    pass
def unload_query_samples(sample_list):
    """Loadgen callback: release the listed samples.

    Model-specific; intentionally a no-op for this harness.
    """
    # This is model specific place holder
    pass
def block_until(counter, num_ins, t=1):
    """Busy-wait (sleeping ``t`` seconds per poll) until the shared
    counter's value reaches ``num_ins``."""
    while True:
        if counter.value >= num_ins:
            return
        time.sleep(t)
# Lazily-filled profile tables; see load_perf_prof() / get_best_bs().
batches = None
def load_perf_prof():
    """Populate the global batches/throughputs tables from prof.py.

    After loading, batches[seq_len] holds the most efficient batch size
    for that padded sequence length and throughputs[seq_len] the matching
    seq/s. When prof.py is absent, the tables are left unset (batches
    stays None is avoided only by the early return path's empty map).
    """
    global batches
    global throughputs
    # load performance profile map for offline scenario
    if os.path.exists("prof.py"):
        from prof import prof_map
        from prof import prof_bs_step
    else:
        prof_map = {}
        prof_bs_step = 1
        return
    # size the tables to the longest profiled sequence length
    longest_seq = 0
    for k, v in sorted(prof_map.items()):
        if k > longest_seq:
            longest_seq = k
    batches = [0.0] * (longest_seq+1)
    throughputs = [0.0] * (longest_seq+1)
    for k, v in sorted(prof_map.items()):
        max_throughput = 0.0
        max_bs = 0
        # v[i] is the measured latency at batch size i * prof_bs_step;
        # pick the batch size with the best throughput
        for i in range(1, len(v)):
            current_bs = i * prof_bs_step
            if current_bs/v[i] > max_throughput:
                max_throughput = current_bs/v[i]
                max_bs = current_bs
        batches[k] = max_bs
        throughputs[k] = max_throughput
def get_best_bs(seq_len):
    """Return (best_seq_len, best_bs, best_throughput) for a sample of
    length ``seq_len``.

    Scans the profiled tables for the most efficient padded length at or
    above seq_len (up to 384); lazily loads the profile on first use.
    """
    global batches
    if batches is None:  # was `== None`; identity comparison is the idiom
        load_perf_prof()
    global throughputs
    # skip lengths that have no profiled batch size
    while batches[seq_len] == 0:
        seq_len += 1
    best_seq_len = seq_len
    best_bs = batches[seq_len]
    best_throughput = throughputs[seq_len]
    seq_len += 1
    # padding further out (up to 384 tokens) may still be faster overall
    while seq_len < 385:
        if throughputs[seq_len] > best_throughput:
            best_seq_len = seq_len
            best_bs = batches[seq_len]
            best_throughput = throughputs[seq_len]
        seq_len += 1
    return best_seq_len, best_bs, best_throughput
class Consumer(multiprocessing.Process):
    """Inference worker process.

    Pins itself to a contiguous core range, loads the (quantized) BERT
    model and dataset, optionally calibrates/warms up, then serves Input
    batches from task_queue and pushes Output results to result_queue
    until it receives the None shutdown sentinel.
    """

    def __init__(self, task_queue, result_queue, lock, init_counter, calibrate_counter, proc_idx, world_size, args):
        # NOTE(review): init_counter/calibrate_counter look like shared
        # multiprocessing.Value ints guarded by `lock` — confirm at the
        # call site (outside this chunk).
        multiprocessing.Process.__init__(self)
        global num_ins
        self.task_queue = task_queue
        self.result_queue = result_queue
        self.lock = lock
        self.init_counter = init_counter
        self.calibrate_counter = calibrate_counter
        self.proc_idx = proc_idx
        self.world_size = world_size
        self.args = args
        # core range owned by this instance: num_cpus split across num_ins
        self.affinity = range(round(proc_idx * num_cpus / num_ins),
                              round((proc_idx + 1) * num_cpus / num_ins))
        self.start_core_idx = proc_idx * num_cpus // num_ins
        self.end_core_idx = (proc_idx + 1) * num_cpus // num_ins - 1
        # seq lengths already warmed up / calibrated (used as a set)
        self.length_list = {}
        # seq length -> list of measured per-batch durations
        self.length_time_list = {}

    def warmup(self, model, data_set, context, scenario):
        """Run the model once per distinct sequence length (and, for the
        Offline scenario, at the profiled best batch size) so later
        timings are steady-state."""
        if self.proc_idx == 0:
            print ('Start warmup...')
        data_size = len(data_set.eval_features)
        count = 0
        import mxnet as mx
        for start in range(0, data_size):
            inputs_list = []
            token_types_list = []
            valid_length_list = []
            eval_feature = data_set.eval_features[start]
            _, inputs, token_types, valid_length, _, _ = eval_feature
            # each distinct length only needs warming once
            if len(inputs) in self.length_list:
                continue
            self.length_list[len(inputs)] = True
            max_throughput = 0.0
            best_bs = 0
            if scenario == 'Offline':
                # only support warmup of adaptive batching
                best_len, best_bs, _ = get_best_bs(len(inputs))
                if best_len in self.length_list:
                    continue
                self.length_list[best_len] = True
                # pad to the profiled length, replicate to the best batch size
                inputs += [0] * (best_len - len(inputs))
                token_types += [0] * (best_len - len(token_types))
                for i in range(best_bs):
                    inputs_list.append(inputs)
                    token_types_list.append(token_types)
                    valid_length_list.append(valid_length)
                if self.proc_idx == 0:
                    print ("warmup seqlen {} batchsize {}".format(best_len, best_bs))
            else:
                inputs_list.append(inputs)
                token_types_list.append(token_types)
                valid_length_list.append(valid_length)
            inputs_nd = mx.nd.array(inputs_list).as_in_context(context)
            token_types_nd = mx.nd.array(token_types_list).as_in_context(context)
            valid_length_nd = mx.nd.array(valid_length_list).as_in_context(context).astype('float32')
            # warm up primitive once
            out = model.net(inputs_nd, token_types_nd, valid_length_nd)
            out_np = out.asnumpy()
            count += 1
            if count % 10 == 0 and self.proc_idx == 0:
                print ('Warmup {} samples'.format(count))
        if self.proc_idx == 0:
            print ('Warmup done')

    def calibrate(self, model, data_set, context):
        """Sweep batch sizes per distinct sequence length, timing each,
        and append the measurements to prof_new.py. Work is round-robined
        across instances by proc_idx."""
        if self.proc_idx == 0:
            print ('Start calibration...')
        data_size = len(data_set.eval_features)
        count = 0
        global bs_step
        import mxnet as mx
        for start in range(0, data_size):
            inputs_list = []
            token_types_list = []
            valid_length_list = []
            eval_feature = data_set.eval_features[start]
            _, inputs, token_types, valid_length, _, _ = eval_feature
            cur_len = len(inputs)
            if cur_len in self.length_list:
                continue
            self.length_list[cur_len] = True
            # round-robin distinct lengths across the worker instances
            if count % self.world_size != self.proc_idx:
                count += 1
                continue
            count += 1
            length_time_list = []
            length_time_list.append(0)
            max_throughput = 0.0
            best_bs = 0
            max_len = len(inputs)
            while True:
                # grow the batch by bs_step replicas of the same sample
                for i in range(bs_step):
                    inputs_list.append(inputs)
                    token_types_list.append(token_types)
                    valid_length_list.append(valid_length)
                inputs_nd = mx.nd.array(inputs_list).as_in_context(context)
                token_types_nd = mx.nd.array(token_types_list).as_in_context(context)
                valid_length_nd = mx.nd.array(valid_length_list).as_in_context(context).astype('float32')
                # warm up primitive once
                out = model.net(inputs_nd, token_types_nd, valid_length_nd)
                out_np = out.asnumpy()
                # measure time for the batch
                t0 = time.time()
                for i in range(8):
                    out = model.net(inputs_nd, token_types_nd, valid_length_nd)
                    out_np = out.asnumpy()
                t1 = time.time()
                duration = (t1 - t0)/8.0
                throughput = len(inputs_list)/duration
                if throughput > max_throughput:
                    max_throughput = throughput
                    best_bs = len(inputs_list)
                # stop growing once the batch reaches 256 samples
                if len(inputs_list) >= 256:
                    print ("{} - Best efficiency for seq len {} is BS {} with seq/s {:.5}".format(
                        self.proc_idx, max_len, best_bs, max_throughput))
                    break
                #print ("{} - Best efficiency for seq len {} is BS {} with seq/s {:.5}, current BS {} seq/s {:.5}\r".format(
                #    self.proc_idx, max_len, best_bs, max_throughput, len(inputs_list), throughput), end='')
                length_time_list.append(duration)
            self.length_time_list[cur_len] = length_time_list
        with open('prof_new.py', 'a') as f:
            for k, v in sorted(self.length_time_list.items()):
                print (' {} : {},'.format(k, v), file=f)
        # keep the processor hot until all instance done calibration
        print ('Calibrate almost done, keep instance hot')
        self.lock.acquire()
        self.calibrate_counter.value += 1
        self.lock.release()
        while self.calibrate_counter.value < 2 * self.world_size:
            out = model.net(inputs_nd, token_types_nd, valid_length_nd)
            out_np = out.asnumpy()
        print ('Calibrate done')

    def run(self):
        """Process entry point: set core affinity, load model/data,
        optionally calibrate and warm up, then serve the task queue until
        the None sentinel arrives."""
        global batching
        #os.sched_setaffinity(self.pid, self.affinity)
        # pin this process to its core range via taskset
        cmd = "taskset -p -c %d-%d %d" % (self.start_core_idx, self.end_core_idx, self.pid)
        print (cmd)
        os.system(cmd)
        import mxnet as mx
        ctx = mx.cpu()
        #from numexpr.utils import set_num_threads
        #set_num_threads(28)
        os.environ['OMP_NUM_THREADS'] = '{}'.format(self.end_core_idx-self.start_core_idx+1)
        model = BERTModel(mx.cpu(), self.args.vocab, self.args.params,
                          self.args.quantized, self.args.quantized_model_prefix)
        data_set = BERTDataSet(self.args.vocab, self.args.perf_count)
        # rendezvous: wait until every instance has loaded its model
        self.lock.acquire()
        self.calibrate_counter.value += 1
        self.lock.release()
        block_until(self.calibrate_counter, self.world_size)
        if self.args.perf_calibrate:
            self.calibrate(model, data_set, ctx)
            return
        self.lock.acquire()
        self.calibrate_counter.value += 1
        self.lock.release()
        if self.args.warmup:
            self.warmup(model, data_set, ctx, self.args.scenario)
        # signal readiness to the parent
        self.lock.acquire()
        self.init_counter.value += 1
        self.lock.release()
        #affinity = os.sched_getaffinity(self.pid)
        #print('Process', self.pid, 'affinity proc list:', affinity)
        cur_step = 0
        start_step = 384
        end_step = -1
        from utils import profile
        while True:
            next_task = self.task_queue.get(self.proc_idx)
            if next_task is None:
                # None means shutdown
                log.info('Exiting {}-pid:{}, cur_step={}'.format(self.name, self.pid, cur_step))
                self.task_queue.task_done()
                if self.args.profile and self.proc_idx==0:
                    if end_step == -1:
                        end_step = cur_step
                    profile(cur_step, start_step, end_step, profile_name='profile_{}.json'.format(self.pid), early_exit=False)
                break
            query_id_list = next_task.query_id_list
            sample_index_list = next_task.sample_index_list
            batch_size = len(sample_index_list)
            #print ('pid-{}, query_id_list: {}, sample_index_list: {}'.format(self.pid, query_id_list, sample_index_list))
            inputs_list = []
            token_types_list = []
            valid_length_list = []
            for sample_index in sample_index_list:
                eval_feature = data_set.eval_features[sample_index]
                _, inputs, token_types, valid_length, _, _ = eval_feature
                inputs_list.append(inputs)
                token_types_list.append(token_types)
                valid_length_list.append(valid_length)
            if len(inputs_list) > 1:
                # pad every sample in the batch to a common length (the
                # profiled best length when the batch size matches)
                max_len = max([len(inp) for inp in inputs_list])
                new_max_len, bs, best_throughput = get_best_bs(max_len)
                if bs == len(inputs_list):
                    max_len = new_max_len
                for i in range(len(inputs_list)):
                    inputs_list[i] += [0] * (max_len - len(inputs_list[i]))
                    token_types_list[i] += [0] * (max_len - len(token_types_list[i]))
            else:
                max_len = len(inputs_list)
            inputs = mx.nd.array(inputs_list).as_in_context(ctx)
            token_types = mx.nd.array(token_types_list).as_in_context(ctx)
            valid_length = mx.nd.array(valid_length_list).as_in_context(ctx).astype('float32')
            if self.args.profile and self.proc_idx==0:
                profile(cur_step, start_step, end_step, profile_name='profile_{}.json'.format(self.pid), early_exit=False)
                cur_step += 1
            #t0 = time.time()
            out = model.net(inputs, token_types, valid_length)
            out_np = out.asnumpy()
            #t1 = time.time()
            #if self.proc_idx == 0:
            #    cur_throughput = len(inputs_list)/(t1-t0)
            #    if best_throughput != 0:
            #        throughput_diff = (cur_throughput - best_throughput) / best_throughput
            #        print ('inference seq len = {} BS = {} throughput = {:.5f} ({:.3f}%)'.format(max_len, len(inputs_list), cur_throughput, throughput_diff*100))
            #    else:
            #        print ('inference seq len = {} BS = {} throughput = {:.5f})'.format(max_len, len(inputs_list), cur_throughput))
            result = Output(query_id_list, out_np)
            self.result_queue.put(result)
            #print('consumer-{}: output.shape={}, query_id={}'.format(self.pid, out_np.shape, query_id_list[0]))
            self.task_queue.task_done()
class Input(object):
    """One batch of work handed from loadgen to a Consumer process:
    parallel lists of query ids, sample indices and sample lengths."""

    def __init__(self, id_list, index_list, sample_length_list):
        for arg in (id_list, index_list, sample_length_list):
            assert isinstance(arg, list)
        assert len(id_list) == len(index_list)
        self.query_id_list = id_list
        self.sample_index_list = index_list
        self.sample_length_list = sample_length_list
class Output(object):
    """Inference result for one batch, keyed by its originating query ids."""

    def __init__(self, query_id_list, result):
        self.query_id_list = query_id_list
        self.result = result
class InQueue():
    """Buffers loadgen query samples and enqueues them as Input batches
    onto the shared in_queue, according to the global `batching` policy
    (Fixed / Dynamic / Adaptive). Updates the global in_queue_cnt."""

    def __init__(self, in_queue, batch_size, data_set):
        from preprocessing_utils import max_seq_length
        self.in_queue = in_queue
        self.batch_size = batch_size
        # accumulation buffers for the single-sample (Server) arrival path
        self.query_id_list = []
        self.sample_index_list = []
        self.sample_length_list = []
        self.index = 0
        self.data_set = data_set
        self.max_seq_len = max_seq_length

    def put(self, query_samples):
        """Batch the incoming query samples and push Input objects onto
        the queue; single-sample arrivals are accumulated, bulk arrivals
        are sorted by length and split per the batching policy."""
        global in_queue_cnt
        ##TODO, debug
        idx = [q.index for q in query_samples]
        query_id = [q.id for q in query_samples]
        query_len = len(query_samples)
        num_samples = len(query_samples)
        def idx_len(e):
            # token count of the dataset sample behind query element e
            idx = e.index
            feature = self.data_set.eval_features[idx]
            _, inputs, _, _, _, _ = feature
            return len(inputs)
        if num_samples == 1:
            # Server-style arrival: pass through (batch_size == 1) or
            # accumulate until a full batch is ready.
            if self.batch_size == 1:
                in_queue_cnt += 1
                self.in_queue.put(Input([query_samples[0].id],
                                        [query_samples[0].index],
                                        [idx_len(query_samples[0])]))
            else:
                self.index += 1
                if self.index < self.batch_size:
                    self.query_id_list.append(query_samples[0].id)
                    self.sample_index_list.append(query_samples[0].index)
                    self.sample_length_list.append(idx_len(query_samples[0]))
                else:
                    # batch complete: enqueue it and reset the buffers
                    self.query_id_list.append(query_samples[0].id)
                    self.sample_index_list.append(query_samples[0].index)
                    self.sample_length_list.append(idx_len(query_samples[0]))
                    self.in_queue.put(Input(self.query_id_list, self.sample_index_list, self.sample_length_list))
                    in_queue_cnt += self.batch_size
                    self.index = 0
                    self.query_id_list = []
                    self.sample_index_list = []
                    self.sample_length_list = []
        else:
            # Offline-style bulk arrival: longest samples first so each
            # batch is padded to a similar length.
            query_samples.sort(key=idx_len, reverse=True)
            def enqueue_batch(cur_batch_size, base_index=0):
                # enqueue query_samples[base_index : base_index+cur_batch_size]
                global in_queue_cnt
                id_list = []
                index_list = []
                length_list = []
                for i in range(cur_batch_size):
                    id_list.append(query_samples[base_index + i].id)
                    index_list.append(query_samples[base_index + i].index)
                    length_list.append(idx_len(query_samples[base_index + i]))
                self.in_queue.put(Input(id_list, index_list, length_list))
                in_queue_cnt += cur_batch_size
            global batching
            # true_total_len: unpadded token volume; total_len: padded
            true_total_len = 0
            total_len = 0
            for i in range(num_samples):
                true_total_len += idx_len(query_samples[i])
            if batching == 'Dynamic':
                # grow each batch until its padded volume approaches
                # batch_size * max_seq_len
                batch_seq_len = self.batch_size * self.max_seq_len
                base_index = 0
                num_batches = 0
                while base_index < num_samples:
                    base_len = idx_len(query_samples[base_index])
                    for i in range(base_index, num_samples):
                        current_len = base_len * (i-base_index+1)
                        if i+1 < num_samples:
                            next_len = base_len * (i+1-base_index+1)
                            if next_len > batch_seq_len:
                                # pick whichever side of the budget is closer
                                if next_len - batch_seq_len > batch_seq_len - current_len:
                                    next_index = i+1
                                else:
                                    next_index = i+2
                                break
                        else:
                            next_index = i+1
                            break
                    total_len += base_len * (next_index-base_index)
                    enqueue_batch(next_index-base_index, base_index)
                    num_batches += 1
                    #print('pid-{2}: enqueue bs={0} and input volume {1}...'
                    #    .format(next_index-base_index, current_len, os.getpid()))
                    base_index = next_index
                print('pid-{1}: enqueued {0} batches, pad ratio = {2}%'
                    .format(num_batches, os.getpid(), (total_len-true_total_len)*100/true_total_len))
            elif batching == 'Adaptive':
                # use the profiled best batch size for each base length
                batch_seq_len = self.batch_size * self.max_seq_len
                base_index = 0
                num_batches = 0
                while base_index < num_samples:
                    base_len = idx_len(query_samples[base_index])
                    best_len, best_bs, _ = get_best_bs(base_len)
                    next_index = base_index + best_bs
                    if next_index > num_samples:
                        next_index = num_samples
                    total_len += base_len * (next_index-base_index)
                    enqueue_batch(next_index-base_index, base_index)
                    num_batches += 1
                    #print('pid-{2}: enqueue bs={0} and input volume {1}...'
                    #    .format(next_index-base_index, current_len, os.getpid()))
                    base_index = next_index
                print('pid-{1}: enqueued {0} batches, pad ratio = {2}%'
                    .format(num_batches, os.getpid(), (total_len-true_total_len)*100/true_total_len))
            else:
                # Fixed batching: equal-size chunks plus a remainder batch
                num_batch = num_samples // self.batch_size
                remaining_batch = num_samples % self.batch_size
                ## TODO, remove
                print('pid-{3}: split the datasets into {0} batches with bs={1} and remaining {2}...'
                    .format(num_batch, self.batch_size, remaining_batch, os.getpid()))
                for b in range(num_batch):
                    base_index = b * self.batch_size
                    enqueue_batch(self.batch_size, base_index)
                if remaining_batch > 0:
                    base_index = num_batch * self.batch_size
                    enqueue_batch(remaining_batch, base_index)
        #print ('in_queue_cnt=', in_queue_cnt)
def flush_queries():
    """Loadgen callback invoked between test phases; nothing to flush here."""
    pass
def process_latencies(latencies_ns):
    """Loadgen callback: log mean, median and 90th-percentile query latency.

    :param latencies_ns: sequence of per-query latencies in nanoseconds
    """
    ns_per_ms = 1000000.0
    mean_ms = np.mean(latencies_ns) / ns_per_ms
    p50_ms = np.percentile(latencies_ns, 50) / ns_per_ms
    p90_ms = np.percentile(latencies_ns, 90) / ns_per_ms
    log.info("Average latency (ms) per query:")
    log.info(mean_ms)
    log.info("Median latency (ms): ")
    log.info(p50_ms)
    log.info("90 percentile latency (ms): ")
    log.info(p90_ms)
def response_loadgen(out_queue):
    """Consume finished batches from ``out_queue`` and report them to loadgen.

    Runs until a ``None`` sentinel is received.  Each task carries the query
    ids of one batch plus the raw result array; the result is split back into
    per-query responses and each is handed to ``lg.QuerySamplesComplete``.

    :param out_queue: queue of completed tasks (or None to shut down)
    """
    global out_queue_cnt
    while True:
        next_task = out_queue.get()
        if next_task is None:
            # None means shutdown
            log.info('Exiting response thread')
            break
        query_id_list = next_task.query_id_list
        result = next_task.result
        batch_size = len(query_id_list)
        # BUG FIX: ndarray.reshape returns a new array and the original code
        # discarded it, leaving `result` in its incoming shape.
        result = result.reshape(batch_size, -1, 2)
        out_list = np.split(result, batch_size, axis=0)
        for i, o in enumerate(out_list):
            # `o` is already an ndarray; the extra np.array() wrap was redundant.
            response_array = array.array("B", o.astype(np.float32).tobytes())
            bi = response_array.buffer_info()
            responses = [lg.QuerySampleResponse(query_id_list[i], bi[0], bi[1])]
            out_queue_cnt += 1
            lg.QuerySamplesComplete(responses)
class BERTModel():
    """BERT question-answering network wrapper.

    Builds ``self.net`` either from a quantized symbol/params pair or from the
    gluonnlp ``bert_24_1024_16`` architecture with float parameters, and
    hybridizes it for inference.
    """

    def __init__(self, ctx, mx_vocab, params, quantized, quantized_model_prefix):
        import gluonnlp as nlp
        from utils import BertForQA
        import mxnet as mx
        if not quantized:
            log.info('Loading MXNet model...')
            with open(mx_vocab, 'r') as vocab_file:
                vocab = nlp.vocab.BERTVocab.from_json(vocab_file.read())
            bert, vocab = nlp.model.get_model(
                name='bert_24_1024_16',
                dataset_name=None,
                vocab=vocab,
                pretrained=False,
                ctx=ctx,
                use_pooler=False,
                use_decoder=False,
                use_classifier=False)
            self.net = BertForQA(bert=bert)
            # Load the float checkpoint onto the target context, then hybridize.
            nlp.utils.load_parameters(self.net, params, ctx=ctx, cast_dtype=True)
            self.net.hybridize(static_alloc=True)
        else:
            log.info('Loading quantized MXNet model...')
            self.net = mx.gluon.SymbolBlock.imports(
                '{}-symbol.json'.format(quantized_model_prefix),
                ['data0', 'data1', 'data2'],
                '{}-0000.params'.format(quantized_model_prefix))
            self.net.hybridize(static_alloc=True, static_shape=True)
class BERTDataSet():
    """Preprocessed SQuAD v1.1 dev set backing the loadgen QSL.

    Attributes:
        eval_features: tokenized/feature-ized dev examples.
        count: total number of samples.
        perf_count: sample count used in performance mode (defaults to count).
    """

    def __init__(self, mx_vocab, perf_count):
        import gluonnlp as nlp
        from preprocessing_utils import preprocess_dataset, max_seq_length, max_query_length, doc_stride
        from gluonnlp.data import SQuAD
        with open(mx_vocab, 'r') as f:
            vocab = nlp.vocab.BERTVocab.from_json(f.read())
        log.info("Creating tokenizer...")
        tokenizer = nlp.data.BERTTokenizer(vocab=vocab, lower=True)
        log.info("Reading examples...")
        dev_path = os.path.join(os.getcwd(), 'build/data')
        dev_data = SQuAD('dev', version='1.1', root=dev_path)
        dev_data_transform = preprocess_dataset(tokenizer,
                                                dev_data,
                                                max_seq_length=max_seq_length,
                                                doc_stride=doc_stride,
                                                max_query_length=max_query_length,
                                                input_features=True)
        # (removed unused locals `eval_features = []` and `round_to = None`)
        self.eval_features = dev_data_transform
        self.count = len(self.eval_features)
        # Fall back to the full dataset size when no perf_count was given.
        self.perf_count = perf_count if perf_count is not None else self.count
class MultiprocessShapeBasedQueue(object):
    """Work queue shared by the consumer processes.

    Wraps a ``JoinableQueue`` together with per-instance queues and
    manager-backed dicts that a (currently disabled) shape-affinity dispatch
    scheme would use; today ``get`` simply pops from the shared joinable
    queue and ignores the caller's instance id.
    """

    def __init__(self):
        # num_ins is a module-level global set by main() before construction.
        global num_ins
        self._jq = multiprocessing.JoinableQueue()
        self._instances_queue = [multiprocessing.Queue() for _ in range(num_ins)]
        self._manager = multiprocessing.Manager()
        self.shape_in_instance = self._manager.dict()
        self.finish_status = self._manager.dict()

    def get(self, instance_id):
        # instance_id is accepted for interface compatibility but unused.
        return self._jq.get()

    def put(self, obj, block=True, timeout=None):
        return self._jq.put(obj, block, timeout)

    def task_done(self):
        return self._jq.task_done()

    def join(self):
        return self._jq.join()
def main():
    """Harness entry point.

    Parses arguments, launches one Consumer process per instance, registers
    the SUT and QSL with MLPerf loadgen, runs the test, then drains the
    queues and shuts all workers down in order.
    """
    global num_ins
    global num_cpus
    global in_queue_cnt
    global out_queue_cnt
    global batching
    global bs_step
    args = get_args()
    log.info(args)
    scenario = args.scenario
    accuracy_mode = args.accuracy
    perf_count = args.perf_count
    batch_size = args.batch_size
    num_ins = args.num_instance
    num_cpus = args.num_phy_cpus
    batching = args.batching
    ## TODO, remove
    log.info('Run with {} instance on {} cpus: '.format(num_ins, num_cpus))
    # Establish communication queues
    lock = multiprocessing.Lock()
    init_counter = multiprocessing.Value("i", 0)
    calibrate_counter = multiprocessing.Value("i", 0)
    out_queue = multiprocessing.Queue()
    in_queue = MultiprocessShapeBasedQueue()
    # Calibration mode: write the profile-file header here; the consumers
    # append measurements and the closing brace is written further below.
    if args.perf_calibrate:
        with open('prof_new.py', 'w') as f:
            print ('prof_bs_step = {}'.format(bs_step), file=f)
            print ('prof_map = {', file=f)
    # Start consumers
    consumers = [Consumer(in_queue, out_queue, lock, init_counter, calibrate_counter, i, num_ins, args)
                 for i in range(num_ins)]
    for c in consumers:
        c.start()
    # used by constructQSL
    data_set = BERTDataSet(args.vocab, args.perf_count)
    issue_queue = InQueue(in_queue, batch_size, data_set)
    # Wait until all sub-processors ready to do calibration
    block_until(calibrate_counter, num_ins)
    # Wait until all sub-processors done calibration
    block_until(calibrate_counter, 2*num_ins)
    if args.perf_calibrate:
        with open('prof_new.py', 'a') as f:
            print ('}', file=f)
        sys.exit(0)
    # Wait until all sub-processors are ready
    block_until(init_counter, num_ins)
    # Start response thread (daemon so it never blocks interpreter exit)
    response_worker = threading.Thread(
        target=response_loadgen, args=(out_queue,))
    response_worker.daemon = True
    response_worker.start()
    # Start loadgen
    settings = lg.TestSettings()
    settings.scenario = scenario_map[scenario]
    settings.FromConfig(args.mlperf_conf, "bert", scenario)
    settings.FromConfig(args.user_conf, "bert", scenario)
    settings.mode = lg.TestMode.AccuracyOnly if accuracy_mode else lg.TestMode.PerformanceOnly
    # TODO, for debug, remove
    #settings.server_target_qps = 40
    #settings.server_target_latency_ns = 100000000
    #settings.min_query_count = 100
    #settings.min_duration_ms = 10000
    def issue_queries(query_samples):
        # It's called by loadgen to send query to SUT
        issue_queue.put(query_samples)
    sut = lg.ConstructSUT(
        issue_queries, flush_queries, process_latencies)
    qsl = lg.ConstructQSL(
        data_set.count, data_set.perf_count, load_query_samples, unload_query_samples)
    log_path = "build/logs"
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    log_output_settings = lg.LogOutputSettings()
    log_output_settings.outdir = log_path
    log_output_settings.copy_summary_to_stdout = True
    log_settings = lg.LogSettings()
    log_settings.log_output = log_output_settings
    #lg.StartTest(sut, qsl, settings)
    lg.StartTestWithLogSettings(sut, qsl, settings, log_settings)
    # Wait until outQueue done: every enqueued batch must have been answered.
    while out_queue_cnt < in_queue_cnt:
        time.sleep(0.2)
    # Drain workers: join outstanding tasks, then send one shutdown sentinel
    # per consumer and wait for each process to exit.
    in_queue.join()
    for i in range(num_ins):
        in_queue.put(None)
    for c in consumers:
        c.join()
    # Stop the response thread via its None sentinel.
    out_queue.put(None)
    if accuracy_mode:
        cmd = "python accuracy-squad.py --log_file={}/mlperf_log_accuracy.json".format(log_path)
        subprocess.check_call(cmd, shell=True)
    lg.DestroyQSL(qsl)
    lg.DestroySUT(sut)
# Script entry point.
if __name__ == '__main__':
    main()
|
plugin.py | import queue
from threading import Thread, Barrier
class EtherSensePlugin:
    """Base class for frame processors that run synchronously or on a thread.

    Subclasses implement ``process(frame)``.  In async mode frames are fed to
    a worker thread via ``frame_queue`` and the most recent available result
    is returned (a ``frames_dropped`` counter tracks frames for which no new
    result was ready); in sync mode ``process`` is called inline.  Setting
    ``bypass`` makes calls return None without processing.
    """

    def __init__(self, process_async, barrier):
        self.process_async = process_async
        self.barrier = barrier
        self.__bypass = False
        if self.process_async:
            self.frame_queue = queue.Queue()
            self.result_queue = queue.Queue()
            self.last_frame = None
            self.frames_dropped = 0
            self.run = True
            # NOTE(review): this attribute shadows the processing_thread()
            # method on async instances (the bound method is captured as the
            # Thread target before the assignment, so it works); kept as-is
            # for interface compatibility.
            self.processing_thread = Thread(target=self.processing_thread)
            self.processing_thread.start()

    def __call__(self, frame):
        """Process one frame (sync) or return the latest async result."""
        if self.__bypass:
            return None
        if self.process_async:
            self.frame_queue.put_nowait(frame)
            try:
                res = self.result_queue.get_nowait()
                self.last_frame = res
            except queue.Empty:
                # BUG FIX: was a bare `except:` which also hid real errors.
                # No fresh result yet: reuse the last one and count the drop.
                self.frames_dropped += 1
                res = self.last_frame
            if res:
                res[1]['frames_dropped'] = self.frames_dropped
        else:
            res = self.process(frame)
        return res

    def stop(self):
        """Signal the worker loop to exit and wait for it (async mode only)."""
        if self.process_async:
            self.run = False
            self.processing_thread.join()

    @property
    def bypass(self):
        # When True, __call__ short-circuits and returns None.
        return self.__bypass

    @bypass.setter
    def bypass(self, b):
        self.__bypass = b

    def processing_thread(self):
        """Worker loop: process the newest queued frame, publish the result.

        NOTE(review): the queue.Empty branch makes this a busy-wait spin when
        no frames arrive; consider a blocking get with timeout if CPU usage
        matters — left unchanged to preserve timing behavior.
        """
        while self.run:
            try:
                frame = self.frame_queue.get_nowait()
                # Drop any backlog so we always work on the newest frame.
                self.frame_queue.queue.clear()
                res = self.process(frame)
                self.barrier.wait()
                self.result_queue.put_nowait(res)
            except queue.Empty:
                pass
|
app.py | # encoding: utf-8
'''
A REST API for Salt
===================
.. versionadded:: 2014.7.0
.. py:currentmodule:: salt.netapi.rest_cherrypy.app
:depends:
- CherryPy Python module. Version 3.2.3 is currently recommended when
SSL is enabled, since this version worked the best with SSL in
internal testing. Versions 3.2.3 - 4.x can be used if SSL is not enabled.
Be aware that there is a known
`SSL error <https://bitbucket.org/cherrypy/cherrypy/issue/1298/ssl-not-working>`_
introduced in version 3.2.5. The issue was reportedly resolved with
CherryPy milestone 3.3, but the patch was committed for version 3.6.1.
:optdepends: - ws4py Python module for websockets support.
:client_libraries:
- Java: https://github.com/SUSE/salt-netapi-client
- Python: https://github.com/saltstack/pepper
:setup:
All steps below are performed on the machine running the Salt Master
daemon. Configuration goes into the Master configuration file.
1. Install ``salt-api``. (This step varies between OS and Linux distros.
Some package systems have a split package, others include salt-api in
the main Salt package. Ensure the ``salt-api --version`` output matches
the ``salt --version`` output.)
2. Install CherryPy. (Read the version caveat in the section above.)
3. Optional: generate self-signed SSL certificates.
Using a secure HTTPS connection is strongly recommended since Salt
eauth authentication credentials will be sent over the wire.
1. Install the PyOpenSSL package.
2. Generate a self-signed certificate using the
:py:func:`~salt.modules.tls.create_self_signed_cert` execution
function.
.. code-block:: bash
salt-call --local tls.create_self_signed_cert
4. Edit the master config to create at least one external auth user or
group following the :ref:`full external auth instructions <acl-eauth>`.
5. Edit the master config with the following production-ready example to
enable the ``rest_cherrypy`` module. (Adjust cert paths as needed, or
disable SSL (not recommended!).)
.. code-block:: yaml
rest_cherrypy:
port: 8000
ssl_crt: /etc/pki/tls/certs/localhost.crt
ssl_key: /etc/pki/tls/certs/localhost.key
6. Restart the ``salt-master`` daemon.
7. Start the ``salt-api`` daemon.
:configuration:
All available configuration options are detailed below. These settings
configure the CherryPy HTTP server and do not apply when using an external
server such as Apache or Nginx.
port
**Required**
The port for the webserver to listen on.
host : ``0.0.0.0``
The socket interface for the HTTP server to listen on.
debug : ``False``
Starts the web server in development mode. It will reload itself when
the underlying code is changed and will output more debugging info.
ssl_crt
The path to a SSL certificate. (See below)
ssl_key
The path to the private key for your SSL certificate. (See below)
disable_ssl
A flag to disable SSL. Warning: your Salt authentication credentials
will be sent in the clear!
webhook_disable_auth : False
The :py:class:`Webhook` URL requires authentication by default but
external services cannot always be configured to send authentication.
See the Webhook documentation for suggestions on securing this
interface.
webhook_url : /hook
Configure the URL endpoint for the :py:class:`Webhook` entry point.
thread_pool : ``100``
The number of worker threads to start up in the pool.
socket_queue_size : ``30``
Specify the maximum number of HTTP connections to queue.
expire_responses : True
Whether to check for and kill HTTP responses that have exceeded the
default timeout.
max_request_body_size : ``1048576``
Maximum size for the HTTP request body.
collect_stats : False
Collect and report statistics about the CherryPy server
Reports are available via the :py:class:`Stats` URL.
static
A filesystem path to static HTML/JavaScript/CSS/image assets.
static_path : ``/static``
The URL prefix to use when serving static assets out of the directory
specified in the ``static`` setting.
app
A filesystem path to an HTML file that will be served as a static file.
This is useful for bootstrapping a single-page JavaScript app.
app_path : ``/app``
The URL prefix to use for serving the HTML file specified in the ``app``
setting. This should be a simple name containing no slashes.
Any path information after the specified path is ignored; this is
useful for apps that utilize the HTML5 history API.
root_prefix : ``/``
A URL path to the main entry point for the application. This is useful
for serving multiple applications from the same URL.
.. _rest_cherrypy-auth:
Authentication
--------------
Authentication is performed by passing a session token with each request.
Tokens are generated via the :py:class:`Login` URL.
The token may be sent in one of two ways:
* Include a custom header named :mailheader:`X-Auth-Token`.
For example, using curl:
.. code-block:: bash
curl -sSk https://localhost:8000/login \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=auto
Copy the ``token`` value from the output and include it in subsequent requests:
.. code-block:: bash
curl -sSk https://localhost:8000 \\
-H 'Accept: application/x-yaml' \\
-H 'X-Auth-Token: 697adbdc8fe971d09ae4c2a3add7248859c87079'\\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
* Sent via a cookie. This option is a convenience for HTTP clients that
automatically handle cookie support (such as browsers).
For example, using curl:
.. code-block:: bash
# Write the cookie file:
curl -sSk https://localhost:8000/login \\
-c ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=auto
# Read the cookie file:
curl -sSk https://localhost:8000 \\
-b ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
.. seealso:: You can bypass the session handling via the :py:class:`Run` URL.
Usage
-----
Commands are sent to a running Salt master via this module by sending HTTP
requests to the URLs detailed below.
.. admonition:: Content negotiation
This REST interface is flexible in what data formats it will accept as well
as what formats it will return (e.g., JSON, YAML, x-www-form-urlencoded).
* Specify the format of data in the request body by including the
:mailheader:`Content-Type` header.
* Specify the desired data format for the response body with the
:mailheader:`Accept` header.
Data sent in :http:method:`post` and :http:method:`put` requests must be in
the format of a list of lowstate dictionaries. This allows multiple commands to
be executed in a single HTTP request. The order of commands in the request
corresponds to the return for each command in the response.
Lowstate, broadly, is a dictionary of values that are mapped to a function
call. This pattern is used pervasively throughout Salt. The functions called
from netapi modules are described in :ref:`Client Interfaces <netapi-clients>`.
The following example (in JSON format) causes Salt to execute two commands, a
command sent to minions as well as a runner function on the master::
[{
"client": "local",
"tgt": "*",
"fun": "test.fib",
"arg": ["10"]
},
{
"client": "runner",
"fun": "jobs.lookup_jid",
"jid": "20130603122505459265"
}]
.. admonition:: x-www-form-urlencoded
Sending JSON or YAML in the request body is simple and most flexible,
however sending data in urlencoded format is also supported with the
caveats below. It is the default format for HTML forms, many JavaScript
libraries, and the :command:`curl` command.
For example, the equivalent to running ``salt '*' test.ping`` is sending
``fun=test.ping&arg&client=local&tgt=*`` in the HTTP request body.
Caveats:
* Only a single command may be sent per HTTP request.
* Repeating the ``arg`` parameter multiple times will cause those
parameters to be combined into a single list.
Note, some popular frameworks and languages (notably jQuery, PHP, and
Ruby on Rails) will automatically append empty brackets onto repeated
parameters. E.g., ``arg=one``, ``arg=two`` will be sent as ``arg[]=one``,
``arg[]=two``. This is not supported; send JSON or YAML instead.
.. |req_token| replace:: a session token from :py:class:`~Login`.
.. |req_accept| replace:: the desired response format.
.. |req_ct| replace:: the format of the request body.
.. |res_ct| replace:: the format of the response body; depends on the
:mailheader:`Accept` request header.
.. |200| replace:: success
.. |401| replace:: authentication required
.. |406| replace:: requested Content-Type not available
'''
# We need a custom pylintrc here...
# pylint: disable=W0212,E1101,C0103,R0201,W0221,W0613
# Import Python libs
from __future__ import absolute_import
import collections
import itertools
import functools
import logging
import json
import StringIO
import tarfile
import time
from multiprocessing import Process, Pipe
# Import third-party libs
# pylint: disable=import-error
import cherrypy
from cherrypy.lib import cpstats
import yaml
import salt.ext.six as six
# pylint: enable=import-error
# Import Salt libs
import salt
import salt.auth
import salt.utils.event
# Import salt-api libs
import salt.netapi
logger = logging.getLogger(__name__)
# Imports related to websocket
# Websocket support is optional: if the ws4py-based tooling is missing,
# install a stub `websockets` namespace so attribute access still works and
# flag the feature as unavailable via HAS_WEBSOCKETS.
try:
    from .tools import websockets
    from . import event_processor
    HAS_WEBSOCKETS = True
except ImportError:
    websockets = type('websockets', (object,), {
        'SynchronizingWebsocket': None,
    })
    HAS_WEBSOCKETS = False
def html_override_tool():
    '''
    Serve the configured single-page app HTML instead of the normal handler.

    Only fires when an ``app`` path is configured and the client explicitly
    prefers ``text/html``; requests for the app/static asset prefixes and
    wildcard Accept headers are left for the normal handlers.
    '''
    apiopts = cherrypy.config['apiopts']
    req = cherrypy.request
    skip_prefixes = (
        apiopts.get('app_path', '/app'),
        apiopts.get('static_path', '/static'),
    )
    if 'app' not in cherrypy.config['apiopts']:
        return
    if req.path_info.startswith(skip_prefixes):
        return
    if req.headers.get('Accept') == '*/*':
        return
    try:
        preferred = cherrypy.lib.cptools.accept('text/html')
    except cherrypy.HTTPError:
        return
    else:
        if preferred != 'text/html':
            return
    raise cherrypy.InternalRedirect(apiopts.get('app_path', '/app'))
def salt_token_tool():
    '''
    Promote an X-Auth-Token header, when present, into the session cookie so
    the cookie-based session machinery authenticates the request.
    '''
    token = cherrypy.request.headers.get('X-Auth-Token', None)
    if not token:
        return
    # The header always wins over any existing session cookie.
    cherrypy.request.cookie['session_id'] = token
def salt_api_acl_tool(username, request):
    '''
    ..versionadded:: 2016.3.0

    Verifies user requests against the API whitelist. (User/IP pair)
    in order to provide whitelisting for the API similar to the
    master, but over the API.

    ..code-block:: yaml

        rest_cherrypy:
            api_acl:
                users:
                    '*':
                        - 1.1.1.1
                        - 1.1.1.2
                    foo:
                        - 8.8.4.4
                    bar:
                        - '*'

    :param username: Username to check against the API.
    :type username: str
    :param request: Cherrypy request to check against the API.
    :type request: cherrypy.request
    :return: True when allowed or when no ACL is configured; False when
        denied.  NOTE(review): when an ACL exists but the username matches
        neither an entry nor a '*' wildcard, control falls off the end and
        None (falsy) is returned implicitly — confirm callers treat that as
        a denial.
    '''
    failure_str = ("[api_acl] Authentication failed for "
                   "user {0} from IP {1}")
    success_str = ("[api_acl] Authentication sucessful for "
                   "user {0} from IP {1}")
    pass_str = ("[api_acl] Authentication not checked for "
                "user {0} from IP {1}")
    acl = None
    # Salt Configuration
    salt_config = cherrypy.config.get('saltopts', None)
    if salt_config:
        # Cherrypy Config.
        cherrypy_conf = salt_config.get('rest_cherrypy', None)
        if cherrypy_conf:
            # ACL Config.
            acl = cherrypy_conf.get('api_acl', None)
    ip = request.remote.ip
    if acl:
        users = acl.get('users', {})
        if users:
            if username in users:
                # Exact user entry: allow only listed IPs or an IP wildcard.
                if ip in users[username] or '*' in users[username]:
                    logger.info(success_str.format(username, ip))
                    return True
                else:
                    logger.info(failure_str.format(username, ip))
                    return False
            elif username not in users and '*' in users:
                # Wildcard user entry: same IP check against the '*' list.
                if ip in users['*'] or '*' in users['*']:
                    logger.info(success_str.format(username, ip))
                    return True
                else:
                    logger.info(failure_str.format(username, ip))
                    return False
            # NOTE(review): no matching entry -> implicit None fall-through.
        else:
            # An ACL with an empty users mapping denies everyone.
            logger.info(failure_str.format(username, ip))
            return False
    else:
        logger.info(pass_str.format(username, ip))
        return True
def salt_ip_verify_tool():
    '''
    Enforce the optional ``authorized_ips`` whitelist: requests arriving from
    any other address get a 403 status and a "Bad IP" body.
    '''
    # This is overly cumbersome and crude,
    # But, it's also safe... ish...
    salt_config = cherrypy.config.get('saltopts', None)
    if not salt_config:
        return
    cherrypy_conf = salt_config.get('rest_cherrypy', None)
    if not cherrypy_conf:
        return
    auth_ip_list = cherrypy_conf.get('authorized_ips', None)
    if not auth_ip_list:
        return
    logger.debug("Found IP list: {0}".format(auth_ip_list))
    rem_ip = cherrypy.request.headers.get('Remote-Addr', None)
    logger.debug("Request from IP: {0}".format(rem_ip))
    if rem_ip not in auth_ip_list:
        logger.error("Blocked IP: {0}".format(rem_ip))
        cherrypy.response.status = 403
        return {
            'status': cherrypy.response.status,
            'return': "Bad IP",
        }
def salt_auth_tool():
    '''
    Reject unauthenticated requests with a 401 before the body is processed.
    '''
    # No token in the session means the client never logged in.
    if 'token' not in cherrypy.session:  # pylint: disable=W8601
        raise cherrypy.HTTPError(401)
    # Authenticated responses are per-user; keep shared caches out of them.
    cherrypy.response.headers['Cache-Control'] = 'private'
def cors_handler(*args, **kwargs):
    '''
    Answer a CORS preflight (OPTIONS) request.

    When the requested method is one we allow, advertise the permitted
    methods and headers; the response body is always an empty mapping.
    '''
    request_headers = cherrypy.request.headers
    response_headers = cherrypy.response.headers
    permitted_methods = ['GET', 'POST']
    permitted_headers = ['X-Auth-Token', 'Content-Type']
    requested_method = request_headers.get('Access-Control-Request-Method', None)
    if requested_method and requested_method in permitted_methods:
        response_headers['Access-Control-Allow-Methods'] = ', '.join(permitted_methods)
        response_headers['Access-Control-Allow-Headers'] = ', '.join(permitted_headers)
        response_headers['Connection'] = 'keep-alive'
        response_headers['Access-Control-Max-Age'] = '1400'
    return {}
def cors_tool():
    '''
    Attach the 'simple' CORS response headers to every request and divert
    non-simple preflight (OPTIONS) requests to :py:func:`cors_handler`.
    '''
    origin = cherrypy.request.headers.get('Origin', '*')
    response_headers = cherrypy.response.headers
    # Always set response headers necessary for 'simple' CORS.
    response_headers['Access-Control-Allow-Origin'] = origin
    response_headers['Access-Control-Expose-Headers'] = 'GET, POST'
    response_headers['Access-Control-Allow-Credentials'] = 'true'
    # Preflight requests get the dedicated single-purpose handler instead.
    if cherrypy.request.method == 'OPTIONS':
        cherrypy.serving.request.handler = cors_handler
# Be conservative in what you send
# Maps Content-Type to serialization functions; this is a tuple of tuples to
# preserve order of preference.
# The first entry (JSON) wins when the Accept header expresses no preference
# among the supported types; hypermedia_handler looks responses up here.
ct_out_map = (
    ('application/json', json.dumps),
    ('application/x-yaml', functools.partial(
        yaml.safe_dump, default_flow_style=False)),
)
def hypermedia_handler(*args, **kwargs):
    '''
    Determine the best output format based on the Accept header, execute the
    regular handler, and transform the output to the request content type
    (even if it's an error).

    :param args: Pass args through to the main handler
    :param kwargs: Pass kwargs through to the main handler
    :raises HTTPError: 401 on auth failure, 503 when the daemon is
        unavailable, 406 when no supported Content-Type is acceptable
    '''
    # Execute the real handler. Handle or pass-through any errors we know how
    # to handle (auth & HTTP errors). Reformat any errors we don't know how to
    # handle as a data structure.
    try:
        cherrypy.response.processors = dict(ct_out_map)
        ret = cherrypy.serving.request._hypermedia_inner_handler(*args, **kwargs)
    except (salt.exceptions.EauthAuthenticationError,
            salt.exceptions.TokenAuthenticationError):
        raise cherrypy.HTTPError(401)
    except (salt.exceptions.SaltDaemonNotRunning,
            salt.exceptions.SaltReqTimeoutError) as exc:
        raise cherrypy.HTTPError(503, exc.strerror)
    except cherrypy.CherryPyException:
        raise
    except Exception:
        import traceback
        logger.debug("Error while processing request for: %s",
                     cherrypy.request.path_info,
                     exc_info=True)
        cherrypy.response.status = 500
        ret = {
            'status': cherrypy.response.status,
            # BUG FIX: traceback.format_exc() takes an optional `limit`
            # argument, not the exception instance; passing `exc` abused the
            # limit parameter.  format_exc() already returns a str, so the
            # extra '{0}'.format() wrapper was dropped too.
            'return': traceback.format_exc()
            if cherrypy.config['debug']
            else "An unexpected error occurred"}
    # Raises 406 if requested content-type is not supported
    best = cherrypy.lib.cptools.accept([i for (i, _) in ct_out_map])
    # Transform the output from the handler into the requested output format
    cherrypy.response.headers['Content-Type'] = best
    out = cherrypy.response.processors[best]
    return out(ret)
def hypermedia_out():
    '''
    Install :py:func:`hypermedia_handler` in place of the normal page handler,
    stashing the original so the wrapper can invoke it and transform its
    output into the requested content type.
    '''
    serving_request = cherrypy.serving.request
    serving_request._hypermedia_inner_handler = serving_request.handler
    serving_request.handler = hypermedia_handler
def process_request_body(fn):
    '''
    Decorator: invoke the body-processor *fn* only when CherryPy has been
    told to process the request body (``process_request_body`` is not False).

    BUG FIX: the original applied ``@functools.wraps`` directly to this
    decorator, turning it into a metadata-copying partial, so the skip logic
    never actually wrapped anything; ``wraps(fn)`` must decorate the inner
    wrapper instead.
    '''
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):  # pylint: disable=C0111
        if cherrypy.request.process_request_body is not False:
            fn(*args, **kwargs)
    return wrapped
def urlencoded_processor(entity):
    '''
    Reformat x-www-form-urlencoded POST data into the request's lowstate slot.

    CherryPy's stock urlencoded parser runs first and its parsed params
    become the unserialized data.  Complicated structures (e.g. compound
    commands) cannot be expressed as key-value pairs — use JSON or YAML.

    For example::

    .. code-block:: bash

        curl -si localhost:8000 -d client=local -d tgt='*' \\
                -d fun='test.kwarg' -d arg='one=1' -d arg='two=2'

    :param entity: raw POST data
    '''
    # Let CherryPy populate entity.params before we read them.
    cherrypy._cpreqbody.process_urlencoded(entity)
    serving_request = cherrypy.serving.request
    serving_request.unserialized_data = entity.params
    serving_request.raw_body = ''
@process_request_body
def json_processor(entity):
    '''
    Unserialize raw POST data in JSON format to a Python data structure.

    :param entity: raw POST data
    :raises HTTPError: 400 when the body is not valid JSON
    '''
    body = entity.fp.read()
    try:
        parsed = json.loads(body)
    except ValueError:
        raise cherrypy.HTTPError(400, 'Invalid JSON document')
    cherrypy.serving.request.unserialized_data = parsed
    cherrypy.serving.request.raw_body = body
@process_request_body
def yaml_processor(entity):
    '''
    Unserialize raw POST data in YAML format to a Python data structure.

    :param entity: raw POST data
    :raises HTTPError: 400 when the body is not valid YAML
    '''
    body = entity.fp.read()
    try:
        parsed = yaml.safe_load(body)
    except ValueError:
        raise cherrypy.HTTPError(400, 'Invalid YAML document')
    cherrypy.serving.request.unserialized_data = parsed
    cherrypy.serving.request.raw_body = body
@process_request_body
def text_processor(entity):
    '''
    Attempt to unserialize plain text as JSON

    Some large services still send JSON with a text/plain Content-Type. Those
    services are bad and should feel bad.

    :param entity: raw POST data
    '''
    body = entity.fp.read()
    serving_request = cherrypy.serving.request
    try:
        serving_request.unserialized_data = json.loads(body)
    except ValueError:
        # Not JSON after all; hand the raw text through unchanged.
        serving_request.unserialized_data = body
    serving_request.raw_body = body
def hypermedia_in():
    '''
    Unserialize POST/PUT data of a specified Content-Type.

    Each registered processor formats Low State data and places the result
    on the request object.

    :raises HTTPError: 406 if the request's Content-Type has no processor
    '''
    # Be liberal in what you accept
    content_processors = {
        'application/x-www-form-urlencoded': urlencoded_processor,
        'application/json': json_processor,
        'application/x-yaml': yaml_processor,
        'text/yaml': yaml_processor,
        'text/plain': text_processor,
    }
    # POST requests with an empty or unspecified Content-Length get no body
    # processing at all.
    if (cherrypy.request.method.upper() == 'POST'
            and cherrypy.request.headers.get('Content-Length', '0') == '0'):
        cherrypy.request.process_request_body = False
    cherrypy.request.unserialized_data = None
    cherrypy.request.body.processors.clear()
    cherrypy.request.body.default_proc = cherrypy.HTTPError(
        406, 'Content type not supported')
    cherrypy.request.body.processors = content_processors
def lowdata_fmt():
    '''
    Validate and format lowdata from incoming unserialized request data

    This tool requires that the hypermedia_in tool has already run and
    populated ``unserialized_data`` on the request.
    '''
    if cherrypy.request.method.upper() != 'POST':
        return
    data = cherrypy.request.unserialized_data
    if not data or isinstance(data, list):
        # Already a list of lowstate chunks (JSON/YAML) or empty.
        cherrypy.serving.request.lowstate = data
        return
    # urlencoded bodies arrive as a single dict (different clients vary the
    # headers, so this path is forgiving): normalize 'arg' to a list and wrap
    # the dict so downstream code always sees a list of chunks.
    if 'arg' in data and not isinstance(data['arg'], list):
        data['arg'] = [data['arg']]
    cherrypy.request.lowstate = [data]
# Register the functions above as CherryPy tools.  The hook point determines
# when each runs; priority orders tools on the same hook (lower runs first),
# e.g. X-Auth-Token promotion (55) precedes session auth (60), and CORS (30)
# precedes lowdata formatting (40).
cherrypy.tools.html_override = cherrypy.Tool('on_start_resource',
                                             html_override_tool, priority=53)
cherrypy.tools.salt_token = cherrypy.Tool('on_start_resource',
                                          salt_token_tool, priority=55)
cherrypy.tools.salt_auth = cherrypy.Tool('before_request_body',
                                         salt_auth_tool, priority=60)
cherrypy.tools.hypermedia_in = cherrypy.Tool('before_request_body',
                                             hypermedia_in)
cherrypy.tools.cors_tool = cherrypy.Tool('before_request_body',
                                         cors_tool, priority=30)
cherrypy.tools.lowdata_fmt = cherrypy.Tool('before_handler',
                                           lowdata_fmt, priority=40)
cherrypy.tools.hypermedia_out = cherrypy.Tool('before_handler',
                                              hypermedia_out)
cherrypy.tools.salt_ip_verify = cherrypy.Tool('before_handler',
                                              salt_ip_verify_tool)
###############################################################################
class LowDataAdapter(object):
'''
The primary entry point to Salt's REST API
'''
exposed = True
_cp_config = {
'tools.sessions.on': True,
'tools.sessions.timeout': 60 * 10, # 10 hours
# 'tools.autovary.on': True,
'tools.hypermedia_out.on': True,
'tools.hypermedia_in.on': True,
'tools.lowdata_fmt.on': True,
'tools.salt_ip_verify.on': True,
}
    def __init__(self):
        # Salt master opts are injected into cherrypy.config by the app
        # bootstrap; NetapiClient routes lowstate chunks to the matching
        # client interface (local/runner/wheel/...).
        self.opts = cherrypy.config['saltopts']
        self.api = salt.netapi.NetapiClient(self.opts)
def exec_lowstate(self, client=None, token=None):
'''
Pull a Low State data structure from request and execute the low-data
chunks through Salt. The low-data chunks will be updated to include the
authorization token for the current session.
'''
lowstate = cherrypy.request.lowstate
# Release the session lock before executing any potentially
# long-running Salt commands. This allows different threads to execute
# Salt commands concurrently without blocking.
if cherrypy.request.config.get('tools.sessions.on', False):
cherrypy.session.release_lock()
# if the lowstate loaded isn't a list, lets notify the client
if not isinstance(lowstate, list):
raise cherrypy.HTTPError(400, 'Lowstates must be a list')
# Make any requested additions or modifications to each lowstate, then
# execute each one and yield the result.
for chunk in lowstate:
if token:
chunk['token'] = token
if client:
chunk['client'] = client
# Make any 'arg' params a list if not already.
# This is largely to fix a deficiency in the urlencoded format.
if 'arg' in chunk and not isinstance(chunk['arg'], list):
chunk['arg'] = [chunk['arg']]
ret = self.api.run(chunk)
# Sometimes Salt gives us a return and sometimes an iterator
if isinstance(ret, collections.Iterator):
for i in ret:
yield i
else:
yield ret
def GET(self):
'''
An explanation of the API with links of where to go next
.. http:get:: /
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000
.. code-block:: http
GET / HTTP/1.1
Host: localhost:8000
Accept: application/json
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: application/json
'''
import inspect
# Grab all available client interfaces
clients = [name for name, _ in inspect.getmembers(salt.netapi.NetapiClient,
predicate=inspect.ismethod) if not name.startswith('__')]
clients.remove('run') # run method calls client interfaces
return {
'return': "Welcome",
'clients': clients,
}
@cherrypy.tools.salt_token()
@cherrypy.tools.salt_auth()
def POST(self, **kwargs):
'''
Send one or more Salt commands in the request body
.. http:post:: /
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 401: |401|
:status 406: |406|
:term:`lowstate` data describing Salt commands must be sent in the
request body.
**Example request:**
.. code-block:: bash
curl -sSik https://localhost:8000 \\
-H "Accept: application/x-yaml" \\
-H "X-Auth-Token: d40d1e1e<...snip...>" \\
-d client=local \\
-d tgt='*' \\
-d fun='test.ping' \\
.. code-block:: http
POST / HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
X-Auth-Token: d40d1e1e
Content-Length: 36
Content-Type: application/x-www-form-urlencoded
fun=test.ping&client=local&tgt=*
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 200
Allow: GET, HEAD, POST
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
**Other examples**:
.. code-block:: bash
# Sending multiple positional args with urlencoded:
curl -sSik https://localhost:8000 \\
-d client=local \\
-d tgt='*' \\
-d fun='cmd.run' \\
-d arg='du -sh .' \\
-d arg='/path/to/dir'
# Sending positional args and Keyword args with JSON:
echo '[
{
"client": "local",
"tgt": "*",
"fun": "cmd.run",
"arg": [
"du -sh .",
"/path/to/dir"
],
"kwarg": {
"shell": "/bin/sh",
"template": "jinja"
}
}
]' | curl -sSik https://localhost:8000 \\
-H 'Content-type: application/json' \\
-d@-
# Calling runner functions:
curl -sSik https://localhost:8000 \\
-d client=runner \\
-d fun='jobs.lookup_jid' \\
-d jid='20150129182456704682' \\
-d outputter=highstate
# Calling wheel functions:
curl -sSik https://localhost:8000 \\
-d client=wheel \\
-d fun='key.gen_accept' \\
-d id_=dave \\
-d keysize=4096
'''
return {
'return': list(self.exec_lowstate(
token=cherrypy.session.get('token')))
}
class Minions(LowDataAdapter):
    '''
    Convenience URLs for working with minions
    '''
    # Inherit the base config but require a session token and auth check.
    _cp_config = dict(LowDataAdapter._cp_config, **{
        'tools.salt_token.on': True,
        'tools.salt_auth.on': True,
    })
    def GET(self, mid=None):
        '''
        A convenience URL for getting lists of minions or getting minion
        details
        .. http:get:: /minions/(mid)
            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|
            :status 200: |200|
            :status 401: |401|
            :status 406: |406|
        **Example request:**
        .. code-block:: bash
            curl -i localhost:8000/minions/ms-3
        .. code-block:: http
            GET /minions/ms-3 HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml
        **Example response:**
        .. code-block:: http
            HTTP/1.1 200 OK
            Content-Length: 129005
            Content-Type: application/x-yaml
            return:
            - ms-3:
                grains.items:
                    ...
        '''
        # Synthesize a lowstate targeting one minion (or all, when mid is
        # None) and fetch its grains via the local client.
        cherrypy.request.lowstate = [{
            'client': 'local', 'tgt': mid or '*', 'fun': 'grains.items',
        }]
        return {
            'return': list(self.exec_lowstate(
                token=cherrypy.session.get('token'))),
        }
    def POST(self, **kwargs):
        '''
        Start an execution command and immediately return the job id
        .. http:post:: /minions
            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|
            :reqheader Content-Type: |req_ct|
            :resheader Content-Type: |res_ct|
            :status 200: |200|
            :status 401: |401|
            :status 406: |406|
            :term:`lowstate` data describing Salt commands must be sent in the
            request body. The ``client`` option will be set to
            :py:meth:`~salt.client.LocalClient.local_async`.
        **Example request:**
        .. code-block:: bash
            curl -sSi localhost:8000/minions \\
                -H "Accept: application/x-yaml" \\
                -d tgt='*' \\
                -d fun='status.diskusage'
        .. code-block:: http
            POST /minions HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml
            Content-Length: 26
            Content-Type: application/x-www-form-urlencoded
            tgt=*&fun=status.diskusage
        **Example response:**
        .. code-block:: http
            HTTP/1.1 202 Accepted
            Content-Length: 86
            Content-Type: application/x-yaml
            return:
            - jid: '20130603122505459265'
                minions: [ms-4, ms-3, ms-2, ms-1, ms-0]
            _links:
                jobs:
                - href: /jobs/20130603122505459265
        '''
        # Force the async local client so the call returns a jid immediately
        # instead of blocking until minions respond.
        job_data = list(self.exec_lowstate(client='local_async',
            token=cherrypy.session.get('token')))
        # 202 Accepted: the job was queued; results arrive later via /jobs.
        cherrypy.response.status = 202
        return {
            'return': job_data,
            '_links': {
                # Hypermedia link to poll each started job by jid.
                # NOTE(review): assumes every truthy item has a 'jid' key --
                # confirm against local_async return shape.
                'jobs': [{'href': '/jobs/{0}'.format(i['jid'])}
                    for i in job_data if i],
            },
        }
class Jobs(LowDataAdapter):
    '''
    Convenience URLs for viewing previously run jobs via the job cache
    '''
    # Inherit the base config but require a session token and auth check.
    _cp_config = dict(LowDataAdapter._cp_config, **{
        'tools.salt_token.on': True,
        'tools.salt_auth.on': True,
    })
    def GET(self, jid=None, timeout=''):
        '''
        A convenience URL for getting lists of previously run jobs or getting
        the return from a single job
        .. http:get:: /jobs/(jid)
            List jobs or show a single job from the job cache.
            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|
            :status 200: |200|
            :status 401: |401|
            :status 406: |406|
        **Example request:**
        .. code-block:: bash
            curl -i localhost:8000/jobs
        .. code-block:: http
            GET /jobs HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml
        **Example response:**
        .. code-block:: http
            HTTP/1.1 200 OK
            Content-Length: 165
            Content-Type: application/x-yaml
            return:
            - '20121130104633606931':
                Arguments:
                - '3'
                Function: test.fib
                Start Time: 2012, Nov 30 10:46:33.606931
                Target: jerry
                Target-type: glob
        **Example request:**
        .. code-block:: bash
            curl -i localhost:8000/jobs/20121130104633606931
        .. code-block:: http
            GET /jobs/20121130104633606931 HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml
        **Example response:**
        .. code-block:: http
            HTTP/1.1 200 OK
            Content-Length: 73
            Content-Type: application/x-yaml
            info:
            - Arguments:
                - '3'
                Function: test.fib
                Minions:
                - jerry
                Start Time: 2012, Nov 30 10:46:33.606931
                Target: '*'
                Target-type: glob
                User: saltdev
                jid: '20121130104633606931'
            return:
            - jerry:
                - - 0
                    - 1
                    - 1
                    - 2
                    - 6.9141387939453125e-06
        '''
        # One chunk: either look up a specific jid or list all cached jobs.
        # 'jid' is harmlessly None for the list_jobs case.
        lowstate = [{
            'client': 'runner',
            'fun': 'jobs.lookup_jid' if jid else 'jobs.list_jobs',
            'jid': jid,
        }]
        if jid:
            # Second chunk fetches the job's metadata (target, user, args).
            lowstate.append({
                'client': 'runner',
                'fun': 'jobs.list_job',
                'jid': jid,
            })
        cherrypy.request.lowstate = lowstate
        job_ret_info = list(self.exec_lowstate(
            token=cherrypy.session.get('token')))
        ret = {}
        if jid:
            # exec_lowstate yields one result per chunk, in order: the
            # return data first, then the job info from the second chunk.
            job_ret, job_info = job_ret_info
            ret['info'] = [job_info]
        else:
            job_ret = job_ret_info[0]
        ret['return'] = [job_ret]
        return ret
class Keys(LowDataAdapter):
    '''
    Convenience URLs for working with minion keys
    .. versionadded:: 2014.7.0
    These URLs wrap the functionality provided by the :py:mod:`key wheel
    module <salt.wheel.key>` functions.
    '''
    @cherrypy.config(**{'tools.salt_token.on': True})
    def GET(self, mid=None):
        '''
        Show the list of minion keys or detail on a specific key
        .. versionadded:: 2014.7.0
        .. http:get:: /keys/(mid)
            List all keys or show a specific key
            :status 200: |200|
            :status 401: |401|
            :status 406: |406|
        **Example request:**
        .. code-block:: bash
            curl -i localhost:8000/keys
        .. code-block:: http
            GET /keys HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml
        **Example response:**
        .. code-block:: http
            HTTP/1.1 200 OK
            Content-Length: 165
            Content-Type: application/x-yaml
            return:
                local:
                - master.pem
                - master.pub
                minions:
                - jerry
                minions_pre: []
                minions_rejected: []
        **Example request:**
        .. code-block:: bash
            curl -i localhost:8000/keys/jerry
        .. code-block:: http
            GET /keys/jerry HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml
        **Example response:**
        .. code-block:: http
            HTTP/1.1 200 OK
            Content-Length: 73
            Content-Type: application/x-yaml
            return:
                minions:
                    jerry: 51:93:b3:d0:9f:3a:6d:e5:28:67:c2:4b:27:d6:cd:2b
        '''
        # Fetch either a single key fingerprint or the full key listing via
        # the wheel client.
        if mid:
            lowstate = [{
                'client': 'wheel',
                'fun': 'key.finger',
                'match': mid,
            }]
        else:
            lowstate = [{
                'client': 'wheel',
                'fun': 'key.list_all',
            }]
        cherrypy.request.lowstate = lowstate
        result = self.exec_lowstate(token=cherrypy.session.get('token'))
        # Wheel results are wrapped: {'data': {'return': ...}}; default to an
        # empty dict at each level if the call produced nothing.
        return {'return': next(result, {}).get('data', {}).get('return', {})}
    @cherrypy.config(**{'tools.hypermedia_out.on': False, 'tools.sessions.on': False})
    def POST(self, **kwargs):
        r'''
        Easily generate keys for a minion and auto-accept the new key
        Accepts all the same parameters as the :py:func:`key.gen_accept
        <salt.wheel.key.gen_accept>`.
        Example partial kickstart script to bootstrap a new minion:
        .. code-block:: text
            %post
            mkdir -p /etc/salt/pki/minion
            curl -sSk https://localhost:8000/keys \
                    -d mid=jerry \
                    -d username=kickstart \
                    -d password=kickstart \
                    -d eauth=pam \
                | tar -C /etc/salt/pki/minion -xf -
            mkdir -p /etc/salt/minion.d
            printf 'master: 10.0.0.5\nid: jerry' > /etc/salt/minion.d/id.conf
            %end
        .. http:post:: /keys
            Generate a public and private key and return both as a tarball
            Authentication credentials must be passed in the request.
            :status 200: |200|
            :status 401: |401|
            :status 406: |406|
        **Example request:**
        .. code-block:: bash
            curl -sSk https://localhost:8000/keys \
                    -d mid=jerry \
                    -d username=kickstart \
                    -d password=kickstart \
                    -d eauth=pam \
                    -o jerry-salt-keys.tar
        .. code-block:: http
            POST /keys HTTP/1.1
            Host: localhost:8000
        **Example response:**
        .. code-block:: http
            HTTP/1.1 200 OK
            Content-Length: 10240
            Content-Disposition: attachment; filename="saltkeys-jerry.tar"
            Content-Type: application/x-tar
            jerry.pub0000644000000000000000000000070300000000000010730 0ustar  00000000000000
        '''
        lowstate = cherrypy.request.lowstate
        lowstate[0].update({
            'client': 'wheel',
            'fun': 'key.gen_accept',
        })
        # The wheel function takes 'id_' rather than 'mid'.
        if 'mid' in lowstate[0]:
            lowstate[0]['id_'] = lowstate[0].pop('mid')
        result = self.exec_lowstate()
        ret = next(result, {}).get('data', {}).get('return', {})

        def _to_bytes(val):
            # PEM key material is ASCII text; normalize to bytes so the
            # tarfile buffer works on both Py2 and Py3 (the old code used the
            # Py2-only StringIO module and its 'len' attribute).
            return val if isinstance(val, bytes) else val.encode('utf-8')

        pub_key = _to_bytes(ret.get('pub', ''))
        pub_key_file = tarfile.TarInfo('minion.pub')
        pub_key_file.size = len(pub_key)
        priv_key = _to_bytes(ret.get('priv', ''))
        priv_key_file = tarfile.TarInfo('minion.pem')
        priv_key_file.size = len(priv_key)
        # Build the tarball entirely in memory and stream it back.
        fileobj = six.BytesIO()
        tarball = tarfile.open(fileobj=fileobj, mode='w')
        tarball.addfile(pub_key_file, six.BytesIO(pub_key))
        tarball.addfile(priv_key_file, six.BytesIO(priv_key))
        tarball.close()
        headers = cherrypy.response.headers
        headers['Content-Disposition'] = 'attachment; filename="saltkeys-{0}.tar"'.format(lowstate[0]['id_'])
        headers['Content-Type'] = 'application/x-tar'
        headers['Content-Length'] = len(fileobj.getvalue())
        headers['Cache-Control'] = 'no-cache'
        fileobj.seek(0)
        return fileobj
class Login(LowDataAdapter):
    '''
    Log in to receive a session token
    :ref:`Authentication information <rest_cherrypy-auth>`.
    '''
    def __init__(self, *args, **kwargs):
        super(Login, self).__init__(*args, **kwargs)
        # Resolver performs eauth credential checks against the master config.
        self.auth = salt.auth.Resolver(self.opts)
    def GET(self):
        '''
        Present the login interface
        .. http:get:: /login
            An explanation of how to log in.
            :status 200: |200|
            :status 401: |401|
            :status 406: |406|
        **Example request:**
        .. code-block:: bash
            curl -i localhost:8000/login
        .. code-block:: http
            GET /login HTTP/1.1
            Host: localhost:8000
            Accept: text/html
        **Example response:**
        .. code-block:: http
            HTTP/1.1 200 OK
            Content-Type: text/html
        '''
        cherrypy.response.headers['WWW-Authenticate'] = 'Session'
        return {
            'status': cherrypy.response.status,
            'return': "Please log in",
        }
    def POST(self, **kwargs):
        '''
        :ref:`Authenticate <rest_cherrypy-auth>` against Salt's eauth system
        .. http:post:: /login
            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|
            :reqheader Content-Type: |req_ct|
            :form eauth: the eauth backend configured for the user
            :form username: username
            :form password: password
            :status 200: |200|
            :status 401: |401|
            :status 406: |406|
        **Example request:**
        .. code-block:: bash
            curl -si localhost:8000/login \\
                -H "Accept: application/json" \\
                -d username='saltuser' \\
                -d password='saltpass' \\
                -d eauth='pam'
        .. code-block:: http
            POST / HTTP/1.1
            Host: localhost:8000
            Content-Length: 42
            Content-Type: application/x-www-form-urlencoded
            Accept: application/json
            username=saltuser&password=saltpass&eauth=pam
        **Example response:**
        .. code-block:: http
            HTTP/1.1 200 OK
            Content-Type: application/json
            Content-Length: 206
            X-Auth-Token: 6d1b722e
            Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/
            {"return": {
                "token": "6d1b722e",
                "start": 1363805943.776223,
                "expire": 1363849143.776224,
                "user": "saltuser",
                "eauth": "pam",
                "perms": [
                    "grains.*",
                    "status.*",
                    "sys.*",
                    "test.*"
                ]
            }}
        '''
        if not self.api._is_master_running():
            raise salt.exceptions.SaltDaemonNotRunning(
                'Salt Master is not available.')
        # the urlencoded_processor will wrap this in a list
        if isinstance(cherrypy.serving.request.lowstate, list):
            creds = cherrypy.serving.request.lowstate[0]
        else:
            creds = cherrypy.serving.request.lowstate
        username = creds.get('username', None)
        # Validate against the whitelist.
        if not salt_api_acl_tool(username, cherrypy.request):
            raise cherrypy.HTTPError(401)
        # Mint token.
        token = self.auth.mk_token(creds)
        if 'token' not in token:
            raise cherrypy.HTTPError(401,
                'Could not authenticate using provided credentials')
        cherrypy.response.headers['X-Auth-Token'] = cherrypy.session.id
        cherrypy.session['token'] = token['token']
        cherrypy.session['timeout'] = (token['expire'] - token['start']) / 60
        # Grab eauth config for the current backend for the current user
        try:
            eauth = self.opts.get('external_auth', {}).get(token['eauth'], {})
            # Get sum of '*' perms, user-specific perms, and group-specific
            # perms. Copy the user's list first: eauth.get() returns the list
            # stored in the shared external_auth config, and extending it
            # in-place would permanently grow the config on every login.
            perms = list(eauth.get(token['name'], []))
            perms.extend(eauth.get('*', []))
            if 'groups' in token and token['groups'] is not False:
                user_groups = set(token['groups'])
                # Group entries in the eauth config end with '%'.
                eauth_groups = set([i.rstrip('%') for i in eauth.keys() if i.endswith('%')])
                for group in user_groups & eauth_groups:
                    perms.extend(eauth['{0}%'.format(group)])
            if not perms:
                raise ValueError("Eauth permission list not found.")
        except (AttributeError, IndexError, KeyError, ValueError):
            logger.debug("Configuration for external_auth malformed for "
                         "eauth '{0}', and user '{1}'."
                         .format(token.get('eauth'), token.get('name')), exc_info=True)
            raise cherrypy.HTTPError(500,
                'Configuration for external_auth could not be read.')
        return {'return': [{
            'token': cherrypy.session.id,
            'expire': token['expire'],
            'start': token['start'],
            'user': token['name'],
            'eauth': token['eauth'],
            'perms': perms,
        }]}
class Logout(LowDataAdapter):
    '''
    Class to remove or invalidate sessions
    '''
    # Copy the base handler config, then require auth but skip lowstate
    # parsing -- a logout request carries no command body.
    _cp_config = dict(LowDataAdapter._cp_config)
    _cp_config.update({
        'tools.salt_token.on': True,
        'tools.salt_auth.on': True,
        'tools.lowdata_fmt.on': False,
    })
    def POST(self):
        '''
        Destroy the currently active session and expire the session cookie
        '''
        # Expire the cookie on the client, then swap in a brand-new
        # server-side session so the old token can no longer be presented.
        cherrypy.lib.sessions.expire()
        cherrypy.session.regenerate()
        return {'return': "Your token has been cleared"}
class Run(LowDataAdapter):
    '''
    Class to run commands without normal session handling
    '''
    # Sessions are disabled: each request must carry its own credentials.
    _cp_config = dict(LowDataAdapter._cp_config, **{
        'tools.sessions.on': False,
    })
    def POST(self, **kwargs):
        '''
        Run commands bypassing the :ref:`normal session handling
        <rest_cherrypy-auth>`
        .. http:post:: /run
            This entry point is primarily for "one-off" commands. Each request
            must pass full Salt authentication credentials. Otherwise this URL
            is identical to the :py:meth:`root URL (/) <LowDataAdapter.POST>`.
            :term:`lowstate` data describing Salt commands must be sent in the
            request body.
            :status 200: |200|
            :status 401: |401|
            :status 406: |406|
        **Example request:**
        .. code-block:: bash
            curl -sS localhost:8000/run \\
                -H 'Accept: application/x-yaml' \\
                -d client='local' \\
                -d tgt='*' \\
                -d fun='test.ping' \\
                -d username='saltdev' \\
                -d password='saltdev' \\
                -d eauth='pam'
        .. code-block:: http
            POST /run HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml
            Content-Length: 75
            Content-Type: application/x-www-form-urlencoded
            client=local&tgt=*&fun=test.ping&username=saltdev&password=saltdev&eauth=pam
        **Example response:**
        .. code-block:: http
            HTTP/1.1 200 OK
            Content-Length: 73
            Content-Type: application/x-yaml
            return:
            - ms-0: true
                ms-1: true
                ms-2: true
                ms-3: true
                ms-4: true
        The /run endpoint can also be used to issue commands using the salt-ssh subsystem.
        When using salt-ssh, eauth credentials should not be supplied. Instead, authentication
        should be handled by the SSH layer itself. The use of the salt-ssh client does not
        require a salt master to be running. Instead, only a roster file must be present
        in the salt configuration directory.
        All SSH client requests are synchronous.
        **Example SSH client request:**
        .. code-block:: bash
            curl -sS localhost:8000/run \\
                -H 'Accept: application/x-yaml' \\
                -d client='ssh' \\
                -d tgt='*' \\
                -d fun='test.ping'
        .. code-block:: http
            POST /run HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml
            Content-Length: 75
            Content-Type: application/x-www-form-urlencoded
            client=ssh&tgt=*&fun=test.ping
        **Example SSH response:**
        .. code-block:: http
            return:
            - silver:
                fun: test.ping
                fun_args: []
                id: silver
                jid: '20141203103525666185'
                retcode: 0
                return: true
                success: true
        '''
        # No session token is attached; the credentials embedded in each
        # lowstate chunk authenticate the request.
        return {
            'return': list(self.exec_lowstate()),
        }
class Events(object):
    '''
    Expose the Salt event bus
    The event bus on the Salt master exposes a large variety of things, notably
    when executions are started on the master and also when minions ultimately
    return their results. This URL provides a real-time window into a running
    Salt infrastructure.
    .. seealso:: :ref:`events`
    '''
    exposed = True
    _cp_config = dict(LowDataAdapter._cp_config, **{
        # Stream the response instead of buffering it; the event feed is
        # open-ended.
        'response.stream': True,
        'tools.encode.encoding': 'utf-8',
        # Auth handled manually below
        'tools.salt_token.on': True,
        'tools.salt_auth.on': False,
        'tools.hypermedia_in.on': False,
        'tools.hypermedia_out.on': False,
    })
    def __init__(self):
        # Master config plus an eauth resolver for validating raw Salt tokens.
        self.opts = cherrypy.config['saltopts']
        self.resolver = salt.auth.Resolver(self.opts)
    def _is_valid_token(self, auth_token):
        '''
        Check if this is a valid salt-api token or valid Salt token
        salt-api tokens are regular session tokens that tie back to a real Salt
        token. Salt tokens are tokens generated by Salt's eauth system.
        :return bool: True if valid, False if not valid.
        '''
        if auth_token is None:
            return False
        # First check if the given token is in our session table; if so it's a
        # salt-api token and we need to get the Salt token from there.
        orig_session, _ = cherrypy.session.cache.get(auth_token, ({}, None))
        # If it's not in the session table, assume it's a regular Salt token.
        salt_token = orig_session.get('token', auth_token)
        # The eauth system does not currently support perms for the event
        # stream, so we're just checking if the token exists not if the token
        # allows access.
        if salt_token and self.resolver.get_token(salt_token):
            return True
        return False
    def GET(self, token=None, salt_token=None):
        r'''
        An HTTP stream of the Salt master event bus
        This stream is formatted per the Server Sent Events (SSE) spec. Each
        event is formatted as JSON.
        .. http:get:: /events
            :status 200: |200|
            :status 401: |401|
            :status 406: |406|
            :query token: **optional** parameter containing the token
                ordinarily supplied via the X-Auth-Token header in order to
                allow cross-domain requests in browsers that do not include
                CORS support in the EventSource API. E.g.,
                ``curl -NsS localhost:8000/events?token=308650d``
            :query salt_token: **optional** parameter containing a raw Salt
                *eauth token* (not to be confused with the token returned from
                the /login URL). E.g.,
                ``curl -NsS localhost:8000/events?salt_token=30742765``
        **Example request:**
        .. code-block:: bash
            curl -NsS localhost:8000/events
        .. code-block:: http
            GET /events HTTP/1.1
            Host: localhost:8000
        **Example response:**
        Note, the ``tag`` field is not part of the spec. SSE compliant clients
        should ignore unknown fields. This addition allows non-compliant
        clients to only watch for certain tags without having to deserialize
        the JSON object each time.
        .. code-block:: http
            HTTP/1.1 200 OK
            Connection: keep-alive
            Cache-Control: no-cache
            Content-Type: text/event-stream;charset=utf-8
            retry: 400
            tag: salt/job/20130802115730568475/new
            data: {'tag': 'salt/job/20130802115730568475/new', 'data': {'minions': ['ms-4', 'ms-3', 'ms-2', 'ms-1', 'ms-0']}}
            tag: salt/job/20130802115730568475/ret/jerry
            data: {'tag': 'salt/job/20130802115730568475/ret/jerry', 'data': {'jid': '20130802115730568475', 'return': True, 'retcode': 0, 'success': True, 'cmd': '_return', 'fun': 'test.ping', 'id': 'ms-1'}}
        The event stream can be easily consumed via JavaScript:
        .. code-block:: javascript
            var source = new EventSource('/events');
            source.onopen = function() { console.info('Listening ...') };
            source.onerror = function(err) { console.error(err) };
            source.onmessage = function(message) {
                var saltEvent = JSON.parse(message.data);
                console.info(saltEvent.tag)
                console.debug(saltEvent.data)
            };
        Or using CORS:
        .. code-block:: javascript
            var source = new EventSource('/events?token=ecd589e4e01912cf3c4035afad73426dbb8dba75', {withCredentials: true});
        It is also possible to consume the stream via the shell.
        Records are separated by blank lines; the ``data:`` and ``tag:``
        prefixes will need to be removed manually before attempting to
        unserialize the JSON.
        curl's ``-N`` flag turns off input buffering which is required to
        process the stream incrementally.
        Here is a basic example of printing each event as it comes in:
        .. code-block:: bash
            curl -NsS localhost:8000/events |\
            while IFS= read -r line ; do
                echo $line
            done
        Here is an example of using awk to filter events based on tag:
        .. code-block:: bash
            curl -NsS localhost:8000/events |\
            awk '
                BEGIN { RS=""; FS="\\n" }
                $1 ~ /^tag: salt\/job\/[0-9]+\/new$/ { print $0 }
            '
            tag: salt/job/20140112010149808995/new
            data: {"tag": "salt/job/20140112010149808995/new", "data": {"tgt_type": "glob", "jid": "20140112010149808995", "tgt": "jerry", "_stamp": "2014-01-12_01:01:49.809617", "user": "shouse", "arg": [], "fun": "test.ping", "minions": ["jerry"]}}
            tag: 20140112010149808995
            data: {"tag": "20140112010149808995", "data": {"fun_args": [], "jid": "20140112010149808995", "return": true, "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2014-01-12_01:01:49.819316", "fun": "test.ping", "id": "jerry"}}
        '''
        cookies = cherrypy.request.cookie
        # Token precedence: URL params first, then the session cookie.
        auth_token = token or salt_token or (
            cookies['session_id'].value if 'session_id' in cookies else None)
        if not self._is_valid_token(auth_token):
            raise cherrypy.HTTPError(401)
        # Release the session lock before starting the long-running response
        cherrypy.session.release_lock()
        cherrypy.response.headers['Content-Type'] = 'text/event-stream'
        cherrypy.response.headers['Cache-Control'] = 'no-cache'
        cherrypy.response.headers['Connection'] = 'keep-alive'
        def listen():
            '''
            An iterator to yield Salt events
            '''
            event = salt.utils.event.get_event(
                    'master',
                    sock_dir=self.opts['sock_dir'],
                    transport=self.opts['transport'],
                    opts=self.opts,
                    listen=True)
            stream = event.iter_events(full=True)
            # SSE 'retry' field: ask clients to wait 400 ms before reconnects.
            yield u'retry: {0}\n'.format(400)
            while True:
                data = next(stream)
                yield u'tag: {0}\n'.format(data.get('tag', ''))
                yield u'data: {0}\n\n'.format(json.dumps(data))
        return listen()
class WebsocketEndpoint(object):
    '''
    Open a WebSocket connection to Salt's event bus
    The event bus on the Salt master exposes a large variety of things, notably
    when executions are started on the master and also when minions ultimately
    return their results. This URL provides a real-time window into a running
    Salt infrastructure. Uses websocket as the transport mechanism.
    .. seealso:: :ref:`events`
    '''
    exposed = True
    _cp_config = dict(LowDataAdapter._cp_config, **{
        'response.stream': True,
        'tools.encode.encoding': 'utf-8',
        # Auth handled manually below
        'tools.salt_token.on': True,
        'tools.salt_auth.on': False,
        'tools.hypermedia_in.on': False,
        'tools.hypermedia_out.on': False,
        # Upgrade the connection via ws4py's CherryPy tool.
        'tools.websocket.on': True,
        'tools.websocket.handler_cls': websockets.SynchronizingWebsocket,
    })
    def __init__(self):
        self.opts = cherrypy.config['saltopts']
        self.auth = salt.auth.LoadAuth(self.opts)
    def GET(self, token=None, **kwargs):
        '''
        Return a websocket connection of Salt's event stream
        .. http:get:: /ws/(token)
            :query format_events: The event stream will undergo server-side
                formatting if the ``format_events`` URL parameter is included
                in the request. This can be useful to avoid formatting on the
                client-side:
                .. code-block:: bash
                    curl -NsS <...snip...> localhost:8000/ws?format_events
            :reqheader X-Auth-Token: an authentication token from
                :py:class:`~Login`.
            :status 101: switching to the websockets protocol
            :status 401: |401|
            :status 406: |406|
        **Example request:** ::
            curl -NsSk \\
                -H 'X-Auth-Token: ffedf49d' \\
                -H 'Host: localhost:8000' \\
                -H 'Connection: Upgrade' \\
                -H 'Upgrade: websocket' \\
                -H 'Origin: https://localhost:8000' \\
                -H 'Sec-WebSocket-Version: 13' \\
                -H 'Sec-WebSocket-Key: '"$(echo -n $RANDOM | base64)" \\
                localhost:8000/ws
        .. code-block:: http
            GET /ws HTTP/1.1
            Connection: Upgrade
            Upgrade: websocket
            Host: localhost:8000
            Origin: https://localhost:8000
            Sec-WebSocket-Version: 13
            Sec-WebSocket-Key: s65VsgHigh7v/Jcf4nXHnA==
            X-Auth-Token: ffedf49d
        **Example response**:
        .. code-block:: http
            HTTP/1.1 101 Switching Protocols
            Upgrade: websocket
            Connection: Upgrade
            Sec-WebSocket-Accept: mWZjBV9FCglzn1rIKJAxrTFlnJE=
            Sec-WebSocket-Version: 13
        An authentication token **may optionally** be passed as part of the URL
        for browsers that cannot be configured to send the authentication
        header or cookie:
        .. code-block:: bash
            curl -NsS <...snip...> localhost:8000/ws/ffedf49d
        The event stream can be easily consumed via JavaScript:
        .. code-block:: javascript
            // Note, you must be authenticated!
            var source = new Websocket('ws://localhost:8000/ws/d0ce6c1a');
            source.onerror = function(e) { console.debug('error!', e); };
            source.onmessage = function(e) { console.debug(e.data); };
            source.send('websocket client ready')
            source.close();
        Or via Python, using the Python module `websocket-client
        <https://pypi.python.org/pypi/websocket-client/>`_ for example.
        .. code-block:: python
            # Note, you must be authenticated!
            from websocket import create_connection
            ws = create_connection('ws://localhost:8000/ws/d0ce6c1a')
            ws.send('websocket client ready')
            # Look at https://pypi.python.org/pypi/websocket-client/ for more
            # examples.
            while listening_to_events:
                print ws.recv()
            ws.close()
        Above examples show how to establish a websocket connection to Salt and
        activating real time updates from Salt's event stream by signaling
        ``websocket client ready``.
        '''
        # Pulling the session token from an URL param is a workaround for
        # browsers not supporting CORS in the EventSource API.
        if token:
            orig_session, _ = cherrypy.session.cache.get(token, ({}, None))
            salt_token = orig_session.get('token')
        else:
            salt_token = cherrypy.session.get('token')
        # Manually verify the token
        if not salt_token or not self.auth.get_tok(salt_token):
            raise cherrypy.HTTPError(401)
        # Release the session lock before starting the long-running response
        cherrypy.session.release_lock()
        # A handler is the server side end of the websocket connection. Each
        # request spawns a new instance of this handler
        handler = cherrypy.request.ws_handler
        def event_stream(handler, pipe):
            '''
            An iterator to return Salt events (and optionally format them)
            '''
            # blocks until send is called on the parent end of this pipe.
            pipe.recv()
            event = salt.utils.event.get_event(
                    'master',
                    sock_dir=self.opts['sock_dir'],
                    transport=self.opts['transport'],
                    opts=self.opts,
                    listen=True)
            stream = event.iter_events(full=True)
            SaltInfo = event_processor.SaltInfo(handler)
            while True:
                data = next(stream)
                if data:
                    try:  # work around try to decode catch unicode errors
                        if 'format_events' in kwargs:
                            # Server-side formatting via the event processor.
                            SaltInfo.process(data, salt_token, self.opts)
                        else:
                            # Raw pass-through in SSE-like framing.
                            handler.send('data: {0}\n\n'.format(
                                json.dumps(data)), False)
                    except UnicodeDecodeError:
                        logger.error(
                                "Error: Salt event has non UTF-8 data:\n{0}"
                                .format(data))
                time.sleep(0.1)
        parent_pipe, child_pipe = Pipe()
        handler.pipe = parent_pipe
        handler.opts = self.opts
        # Process to handle async push to a client.
        # Each GET request causes a process to be kicked off.
        proc = Process(target=event_stream, args=(handler, child_pipe))
        proc.start()
class Webhook(object):
    '''
    A generic web hook entry point that fires an event on Salt's event bus
    External services can POST data to this URL to trigger an event in Salt.
    For example, Amazon SNS, Jenkins-CI or Travis-CI, or GitHub web hooks.
    .. note:: Be mindful of security
        Salt's Reactor can run any code. A Reactor SLS that responds to a hook
        event is responsible for validating that the event came from a trusted
        source and contains valid data.
        **This is a generic interface and securing it is up to you!**
        This URL requires authentication however not all external services can
        be configured to authenticate. For this reason authentication can be
        selectively disabled for this URL. Follow best practices -- always use
        SSL, pass a secret key, configure the firewall to only allow traffic
        from a known source, etc.
    The event data is taken from the request body. The
    :mailheader:`Content-Type` header is respected for the payload.
    The event tag is prefixed with ``salt/netapi/hook`` and the URL path is
    appended to the end. For example, a ``POST`` request sent to
    ``/hook/mycompany/myapp/mydata`` will produce a Salt event with the tag
    ``salt/netapi/hook/mycompany/myapp/mydata``.
    The following is an example ``.travis.yml`` file to send notifications to
    Salt of successful test runs:
    .. code-block:: yaml
        language: python
        script: python -m unittest tests
        after_success:
            - |
                curl -sSk https://saltapi-url.example.com:8000/hook/travis/build/success \
                        -d branch="${TRAVIS_BRANCH}" \
                        -d commit="${TRAVIS_COMMIT}"
    .. seealso:: :ref:`events`, :ref:`reactor`
    '''
    exposed = True
    # Segments joined (with the URL path) into the fired event's tag.
    tag_base = ['salt', 'netapi', 'hook']
    _cp_config = dict(LowDataAdapter._cp_config, **{
        # NOTE(review): lowdata_fmt processing is enabled here although an
        # earlier comment claimed it was skipped -- confirm intent.
        'tools.lowdata_fmt.on': True,
        # Auth can be overridden in __init__().
        'tools.salt_token.on': True,
        'tools.salt_auth.on': True,
    })
    def __init__(self):
        self.opts = cherrypy.config['saltopts']
        # Fire-only event interface to the master's event bus.
        self.event = salt.utils.event.get_event(
                'master',
                sock_dir=self.opts['sock_dir'],
                transport=self.opts['transport'],
                opts=self.opts,
                listen=False)
        # Note: mutating self._cp_config here updates the class-level dict,
        # disabling auth for every instance of this handler.
        if cherrypy.config['apiopts'].get('webhook_disable_auth'):
            self._cp_config['tools.salt_token.on'] = False
            self._cp_config['tools.salt_auth.on'] = False
    def POST(self, *args, **kwargs):
        '''
        Fire an event in Salt with a custom event tag and data
        .. http:post:: /hook
            :status 200: |200|
            :status 401: |401|
            :status 406: |406|
            :status 413: request body is too large
        **Example request:**
        .. code-block:: bash
            curl -sS localhost:8000/hook -d foo='Foo!' -d bar='Bar!'
        .. code-block:: http
            POST /hook HTTP/1.1
            Host: localhost:8000
            Content-Length: 16
            Content-Type: application/x-www-form-urlencoded
            foo=Foo&bar=Bar!
        **Example response**:
        .. code-block:: http
            HTTP/1.1 200 OK
            Content-Length: 14
            Content-Type: application/json
            {"success": true}
        As a practical example, an internal continuous-integration build
        server could send an HTTP POST request to the URL
        ``https://localhost:8000/hook/mycompany/build/success`` which contains
        the result of a build and the SHA of the version that was built as
        JSON. That would then produce the following event in Salt that could be
        used to kick off a deployment via Salt's Reactor::
            Event fired at Fri Feb 14 17:40:11 2014
            *************************
            Tag: salt/netapi/hook/mycompany/build/success
            Data:
            {'_stamp': '2014-02-14_17:40:11.440996',
                'headers': {
                    'X-My-Secret-Key': 'F0fAgoQjIT@W',
                    'Content-Length': '37',
                    'Content-Type': 'application/json',
                    'Host': 'localhost:8000',
                    'Remote-Addr': '127.0.0.1'},
                'post': {'revision': 'aa22a3c4b2e7', 'result': True}}
        Salt's Reactor could listen for the event:
        .. code-block:: yaml
            reactor:
                - 'salt/netapi/hook/mycompany/build/*':
                    - /srv/reactor/react_ci_builds.sls
        And finally deploy the new build:
        .. code-block:: yaml
            {% set secret_key = data.get('headers', {}).get('X-My-Secret-Key') %}
            {% set build = data.get('post', {}) %}
            {% if secret_key == 'F0fAgoQjIT@W' and build.result == True %}
            deploy_my_app:
                cmd.state.sls:
                    - tgt: 'application*'
                    - arg:
                        - myapp.deploy
                    - kwarg:
                        pillar:
                            revision: {{ revision }}
            {% endif %}
        '''
        # Tag = salt/netapi/hook + the request's remaining URL path segments.
        tag = '/'.join(itertools.chain(self.tag_base, args))
        data = cherrypy.serving.request.unserialized_data
        if not data:
            data = {}
        raw_body = getattr(cherrypy.serving.request, 'raw_body', '')
        headers = dict(cherrypy.request.headers)
        # Fire the parsed body, raw body, and request headers onto the bus;
        # the Reactor is responsible for validating the payload.
        ret = self.event.fire_event({
            'body': raw_body,
            'post': data,
            'headers': headers,
        }, tag)
        return {'success': ret}
class Stats(object):
    '''
    Expose statistics on the running CherryPy server
    '''
    exposed = True

    _cp_config = dict(LowDataAdapter._cp_config, **{
        'tools.salt_token.on': True,
        'tools.salt_auth.on': True,
    })

    def GET(self):
        '''
        Return a dump of statistics collected from the CherryPy server

        .. http:get:: /stats

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|

            :resheader Content-Type: |res_ct|

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|
        '''
        # ``logging.statistics`` only exists once CherryPy's cpstats tool
        # has collected something; report an empty dump otherwise.
        if not hasattr(logging, 'statistics'):
            return {}
        return cpstats.extrapolate_statistics(logging.statistics)
class App(object):
    '''
    Class to serve HTML5 apps
    '''
    exposed = True

    def GET(self, *args):
        '''
        Serve a single static file ignoring the remaining path

        This is useful in combination with a browser-based app using the HTML5
        history API.

        .. http:get:: /app

            :reqheader X-Auth-Token: |req_token|

            :status 200: |200|
            :status 401: |401|
        '''
        # Whatever trailing path the client asked for, always hand back the
        # configured single-page-app entry point; client-side routing takes
        # over from there.
        return cherrypy.lib.static.serve_file(cherrypy.config['apiopts']['app'])
class API(object):
    '''
    Collect configuration and URL map for building the CherryPy app
    '''
    url_map = {
        'index': LowDataAdapter,
        'login': Login,
        'logout': Logout,
        'minions': Minions,
        'run': Run,
        'jobs': Jobs,
        'keys': Keys,
        'events': Events,
        'stats': Stats,
    }

    def __init__(self):
        self.opts = cherrypy.config['saltopts']
        self.apiopts = cherrypy.config['apiopts']

        self._update_url_map()
        self._setattr_url_map()

    def _setattr_url_map(self):
        '''
        Set an attribute on the local instance for each key/val in url_map

        CherryPy uses class attributes to resolve URLs.
        '''
        for url, cls in six.iteritems(self.url_map):
            setattr(self, url, cls())

    def _update_url_map(self):
        '''
        Assemble any dynamic or configurable URLs
        '''
        if HAS_WEBSOCKETS:
            self.url_map['ws'] = WebsocketEndpoint

        # Allow the Webhook URL to be overridden from the conf.
        hook_url = self.apiopts.get('webhook_url', 'hook').lstrip('/')
        self.url_map[hook_url] = Webhook

        # Enable the single-page JS app URL.
        if 'app' in self.apiopts:
            app_url = self.apiopts.get('app_path', 'app').lstrip('/')
            self.url_map[app_url] = App

    def get_conf(self):
        '''
        Combine the CherryPy configuration with the rest_cherrypy config values
        pulled from the master config and return the CherryPy configuration
        '''
        apiopts = self.apiopts

        globalconf = {
            'server.socket_host': apiopts.get('host', '0.0.0.0'),
            'server.socket_port': apiopts.get('port', 8000),
            'server.thread_pool': apiopts.get('thread_pool', 100),
            'server.socket_queue_size': apiopts.get('queue_size', 30),
            'engine.timeout_monitor.on': apiopts.get(
                'expire_responses', True),
            'max_request_body_size': apiopts.get(
                'max_request_body_size', 1048576),
            'debug': apiopts.get('debug', False),
        }

        conf = {
            'global': globalconf,

            '/': {
                'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
                'tools.trailing_slash.on': True,
                'tools.gzip.on': True,
                'tools.cpstats.on': apiopts.get('collect_stats', False),
                'tools.html_override.on': True,
                'tools.cors_tool.on': True,
            },
        }

        if 'favicon' in apiopts:
            conf['/favicon.ico'] = {
                'tools.staticfile.on': True,
                'tools.staticfile.filename': apiopts['favicon'],
            }

        if apiopts.get('debug', False) is False:
            globalconf['environment'] = 'production'

        # Serve static media if the directory has been set in the configuration
        if 'static' in apiopts:
            conf[apiopts.get('static_path', '/static')] = {
                'tools.staticdir.on': True,
                'tools.staticdir.dir': apiopts['static'],
            }

        # Add to global config
        cherrypy.config.update(conf['global'])

        return conf
def get_app(opts):
    '''
    Returns a WSGI app and a configuration dictionary
    '''
    # The module lives at <pkg>.rest_cherrypy.app, so the second-to-last
    # dotted component names the rest_cherrypy section of the master config.
    apiopts = opts.get(__name__.rsplit('.', 2)[-2], {})

    # Add Salt and salt-api config options to the main CherryPy config dict
    cherrypy.config['saltopts'] = opts
    cherrypy.config['apiopts'] = apiopts

    # Build the CherryPy application and its configuration.
    root = API()
    cpyopts = root.get_conf()

    return root, apiopts, cpyopts
|
HTTPServer.py | #!/usr/bin/python
"""
HTTPServer
"""
import os
import threading
import json
import zlib
from collections import OrderedDict
try:
from urlparse import urlparse, parse_qs
from SocketServer import ThreadingMixIn, TCPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
except ImportError:
from urllib.parse import urlparse, parse_qs
from socketserver import ThreadingMixIn, TCPServer
from http.server import SimpleHTTPRequestHandler
from bacpypes.debugging import bacpypes_debugging, ModuleLogger
from bacpypes.consolelogging import ConfigArgumentParser
from bacpypes.core import run, deferred
from bacpypes.iocb import IOCB
from bacpypes.pdu import Address, GlobalBroadcast
from bacpypes.apdu import ReadPropertyRequest, WhoIsRequest
from bacpypes.primitivedata import Unsigned, ObjectIdentifier
from bacpypes.constructeddata import Array
from bacpypes.app import BIPSimpleApplication
from bacpypes.object import get_object_class, get_datatype
from bacpypes.local.device import LocalDeviceObject
# some debugging; set _debug truthy to enable the _debug() trace calls below
_debug = 0
_log = ModuleLogger(globals())

# settings, overridable via environment variables (and --host/--port below)
HOST = os.getenv("HOST", "")
PORT = int(os.getenv("PORT", 8080))

# reference a simple application and the HTTP server (both set up in __main__)
this_application = None
server = None
# favorite icon
favicon = zlib.decompress(
b"x\x9c\xb5\x93\xcdN\xdb@\x14\x85\x07\x95\x07\xc8\x8amYv\xc9#\xe4\x11x\x04\x96}"
b'\x8c\x88\x1dl\xa0\x9b\xb6A\xa2)\x0bVTB\xa9"\xa5?*I\x16\xad"\x84d\x84DE\x93'
b"\x14;v\xc01M\xe2$\x988\xb1l\x9d\xde;v\\\x03\x89TU\xea\xb5N\xe4\xb9\x9a\xef"
b"\x1c\xcfO\x84X\xa0'\x95\x12\xf4\xbb,\x9e/\n\xb1$\x84xF\xa2\x16u\xc2>WzQ\xfc"
b"\xf7\xca\xad\xafo\x91T\xd2\x1ai\xe5\x1fx[\xf9\xf4\x01\xc57\xbb\xd8\xdf\xd8"
b"\x00\x8d\x11\xf9\x95\x12\xda\x9a\xc3\xae\xe5_\xbdDpk\x03\xc3\xaeT\xd0\xb3\xd0"
b">?\x83Z\xfd\x86Z\xa5\x84\x1fG_\xa4\xe7\x1c^\xa9W\xbfJ\xfe\xb4\xf0\x0e^\xdb"
b"\x88}0 \xafA\x0f\xa3+c&O\xbd\xf4\xc1\xf6\xb6d\x9d\xc6\x05\xdcVSz\xb0x\x1c\x10"
b"\x0fo\x02\xc7\xd0\xe7\xf1%\xe5\xf3\xc78\xdb\xf9Y\x93\x1eI\x1f\xf8>\xfa\xb5"
b"\x8bG<\x8dW\x0f^\x84\xd9\xee\xb5~\x8f\xe1w\xaf{\x83\x80\xb2\xbd\xe1\x10\x83"
b"\x88'\xa5\x12\xbcZ?9\x8e\xb3%\xd3\xeb`\xd4\xd2\xffdS\xb9\x96\x89!}W!\xfb\x9a"
b"\xf9t\xc4f\x8aos\x92\x9dtn\xe0\xe8Z\xcc\xc8=\xec\xf7d6\x97\xa3]\xc2Q\x1b(\xec"
b"d\x99_\x8dx\xd4\x15%\xce\x96\xf9\xbf\xacP\xd1:\xfc\xf1\x18\xbe\xeb\xe2\xaey"
b"\x89;]\xc5\xf1\xfb<\xf3\x99\xe9\x99\xefon\xa2\xdb6\xe5\x1c\xbb^\x8b}FV\x1b"
b"\x9es+\xb3\xbd\x81M\xeb\xd1\xe0^5\xf1\xbd|\xc4\xfca\xf2\xde\xf0w\x9cW\xabr."
b"\xe7\xd9\x8dFx\x0e\xa6){\x93\x8e\x85\xf1\xb5\x81\x89\xd9\x82\xa1\x9c\xc8;\xf9"
b"\xe0\x0cV\xb8W\xdc\xdb\x83\xa9i\xb1O@g\xa6T*\xd3=O\xeaP\xcc(^\x17\xfb\xe4\xb3"
b"Y\xc9\xb1\x17{N\xf7\xfbo\x8b\xf7\x97\x94\xe3;\xcd\xff)\xd2\xf2\xacy\xa0\x9b"
b"\xd4g=\x11B\x8bT\x8e\x94Y\x08%\x12\xe2q\x99\xd4\x7f*\x84O\xfa\r\xb5\x916R"
)
#
# ThreadedHTTPRequestHandler
#
@bacpypes_debugging
class ThreadedHTTPRequestHandler(SimpleHTTPRequestHandler):
    """Translate HTTP GET requests into BACnet requests.

    URL scheme:
        /read/<addr>/<objid>[/<prop>[/<index>]]  - ReadProperty request
        /whois[/<addr>][/<lolimit>/<hilimit>]    - Who-Is broadcast/unicast
        /favicon.ico                             - serve the embedded icon

    Requests are forwarded to the module-level ``this_application`` on the
    bacpypes core thread via ``deferred``.
    """

    def do_GET(self):
        """Dispatch on the first path component."""
        if _debug:
            ThreadedHTTPRequestHandler._debug("do_GET")
        global favicon

        # get the thread
        cur_thread = threading.current_thread()
        if _debug:
            ThreadedHTTPRequestHandler._debug(" - cur_thread: %r", cur_thread)

        # parse query data and params to find out what was passed
        parsed_params = urlparse(self.path)
        if _debug:
            ThreadedHTTPRequestHandler._debug(" - parsed_params: %r", parsed_params)
        parsed_query = parse_qs(parsed_params.query)
        if _debug:
            ThreadedHTTPRequestHandler._debug(" - parsed_query: %r", parsed_query)

        # find the pieces; a path "/read/a/b" splits to ['', 'read', 'a', 'b']
        args = parsed_params.path.split("/")
        if _debug:
            ThreadedHTTPRequestHandler._debug(" - args: %r", args)

        if args[1] == "read":
            self.do_read(args[2:])
        elif args[1] == "whois":
            self.do_whois(args[2:])
        elif args[1] == "favicon.ico":
            self.send_response(200)
            self.send_header("Content-type", "image/x-icon")
            self.send_header("Content-Length", len(favicon))
            self.end_headers()
            self.wfile.write(favicon)
        else:
            self.send_response(200)
            self.send_header("Content-type", "text/plain")
            self.end_headers()
            self.wfile.write(b"'read' or 'whois' expected")

    def _send_json(self, result):
        """Encode *result* as JSON and write a complete HTTP response.

        The original code wrote the JSON body without a status line or
        headers, which is not valid HTTP/1.x.
        """
        result_bytes = json.dumps(result).encode("utf-8")
        self.send_response(200)
        self.send_header("Content-type", "application/json")
        self.send_header("Content-Length", len(result_bytes))
        self.end_headers()
        self.wfile.write(result_bytes)

    def do_read(self, args):
        """Handle /read/<addr>/<objid>[/<prop>[/<index>]]."""
        if _debug:
            ThreadedHTTPRequestHandler._debug("do_read %r", args)

        try:
            addr, obj_id = args[:2]
            obj_id = ObjectIdentifier(obj_id).value

            # get the object type
            if not get_object_class(obj_id[0]):
                raise ValueError("unknown object type")

            # implement a default property, the bain of committee meetings
            # BUGFIX: was ``len(args) == 3`` which ignored the property when
            # an array index was also supplied
            if len(args) >= 3:
                prop_id = args[2]
            else:
                prop_id = "presentValue"

            # look for its datatype, an easy way to see if the property is
            # appropriate for the object
            datatype = get_datatype(obj_id[0], prop_id)
            if not datatype:
                raise ValueError("invalid property for object type")

            # build a request
            request = ReadPropertyRequest(
                objectIdentifier=obj_id, propertyIdentifier=prop_id
            )
            request.pduDestination = Address(addr)

            # look for an optional array index
            # BUGFIX: args holds at most [addr, objid, prop, index], so the
            # original ``len(args) == 5`` / ``args[4]`` branch was unreachable
            if len(args) >= 4:
                request.propertyArrayIndex = int(args[3])
            if _debug:
                ThreadedHTTPRequestHandler._debug(" - request: %r", request)

            # make an IOCB
            iocb = IOCB(request)
            if _debug:
                ThreadedHTTPRequestHandler._debug(" - iocb: %r", iocb)

            # give it to the application (on the bacpypes core thread)
            deferred(this_application.request_io, iocb)

            # wait for it to complete
            iocb.wait()

            # filter out errors and aborts
            if iocb.ioError:
                if _debug:
                    ThreadedHTTPRequestHandler._debug(" - error: %r", iocb.ioError)
                result = {"error": str(iocb.ioError)}
            else:
                if _debug:
                    ThreadedHTTPRequestHandler._debug(
                        " - response: %r", iocb.ioResponse
                    )
                apdu = iocb.ioResponse

                # find the datatype
                datatype = get_datatype(
                    apdu.objectIdentifier[0], apdu.propertyIdentifier
                )
                if _debug:
                    ThreadedHTTPRequestHandler._debug(" - datatype: %r", datatype)
                if not datatype:
                    raise TypeError("unknown datatype")

                # special case for array parts, others are managed by cast_out
                if issubclass(datatype, Array) and (
                    apdu.propertyArrayIndex is not None
                ):
                    if apdu.propertyArrayIndex == 0:
                        # index 0 of a BACnet array is the element count
                        datatype = Unsigned
                    else:
                        datatype = datatype.subtype
                        if _debug:
                            ThreadedHTTPRequestHandler._debug(
                                " - datatype: %r", datatype
                            )

                # convert the value to a dict if possible
                value = apdu.propertyValue.cast_out(datatype)
                if hasattr(value, "dict_contents"):
                    value = value.dict_contents(as_class=OrderedDict)
                if _debug:
                    ThreadedHTTPRequestHandler._debug(" - value: %r", value)

                result = {"value": value}
        except Exception as err:
            ThreadedHTTPRequestHandler._exception("exception: %r", err)
            result = {"exception": str(err)}

        # write the result (exceptions are reported in-band, status 200)
        self._send_json(result)

    def do_whois(self, args):
        """Handle /whois[/<addr>][/<lolimit>/<hilimit>]."""
        if _debug:
            ThreadedHTTPRequestHandler._debug("do_whois %r", args)

        try:
            # build a request
            request = WhoIsRequest()
            if (len(args) == 1) or (len(args) == 3):
                request.pduDestination = Address(args[0])
                del args[0]
            else:
                request.pduDestination = GlobalBroadcast()
            if len(args) == 2:
                request.deviceInstanceRangeLowLimit = int(args[0])
                request.deviceInstanceRangeHighLimit = int(args[1])
            if _debug:
                ThreadedHTTPRequestHandler._debug(" - request: %r", request)

            # make an IOCB
            iocb = IOCB(request)
            if _debug:
                ThreadedHTTPRequestHandler._debug(" - iocb: %r", iocb)

            # give it to the application on the core thread; calling
            # request_io directly from this handler thread (as the original
            # did) is not thread-safe and is inconsistent with do_read
            deferred(this_application.request_io, iocb)

            # no result -- it would be nice if these were the matching I-Am's
            result = {}
        except Exception as err:
            ThreadedHTTPRequestHandler._exception("exception: %r", err)
            result = {"exception": str(err)}

        # write the result
        self._send_json(result)
class ThreadedTCPServer(ThreadingMixIn, TCPServer):
    # The mix-in spawns one thread per request so a blocking BACnet read
    # (iocb.wait() in the handler) cannot stall other HTTP clients.
    pass
#
# __main__
#

try:
    # parse the command line arguments
    parser = ConfigArgumentParser(description=__doc__)

    # add an option for the server host
    parser.add_argument("--host", type=str, help="server host", default=HOST)

    # add an option for the server port
    parser.add_argument("--port", type=int, help="server port", default=PORT)

    args = parser.parse_args()

    if _debug:
        _log.debug("initialization")
    if _debug:
        _log.debug(" - args: %r", args)

    # make a device object from the BACpypes ini settings
    this_device = LocalDeviceObject(ini=args.ini)
    if _debug:
        _log.debug(" - this_device: %r", this_device)

    # make a simple application bound to the address from the ini file
    this_application = BIPSimpleApplication(this_device, args.ini.address)

    # local host, special port
    server = ThreadedTCPServer((args.host, args.port), ThreadedHTTPRequestHandler)
    if _debug:
        _log.debug(" - server: %r", server)

    # Start a thread with the server -- that thread will then start a thread for each request
    server_thread = threading.Thread(target=server.serve_forever)
    if _debug:
        _log.debug(" - server_thread: %r", server_thread)

    # exit the server thread when the main thread terminates
    server_thread.daemon = True
    server_thread.start()

    if _debug:
        _log.debug("running")

    # run() blocks in the bacpypes core loop until interrupted
    run()
except Exception as err:
    _log.exception("an error has occurred: %s", err)
finally:
    # stop accepting HTTP requests before exiting
    if server:
        server.shutdown()
    if _debug:
        _log.debug("finally")
|
engineer.py | import os
import numpy as np
import pandas as pd
from scipy.stats import skew, kurtosis, mode
# from constant import Constant
from .meta_feature_utils import sample_num_strategy # sample strategy
import random
import re
import easyocr # pip install easyocr
import torch
from facenet_pytorch import MTCNN # pip install mtcnn
# python -m pip install 'git+https://github.com/facebookresearch/detectron2.git'
from PIL import Image
from numba import cuda
class EngineerFeatureData(object):
    """Plain attribute container for engineered dataset meta features.

    Every known feature attribute is pre-declared as ``None`` so callers can
    read any of them safely; the values supplied in *dict* then overwrite
    the matching attributes.
    """

    # statistics kept for each measured quantity
    _STATS = ('mean', 'median', 'mode', 'min', 'max',
              'range', 'std', 'skew', 'kurt')
    # measured quantities: images per class, image height, width and area
    _GROUPS = ('im_per_class', 'height', 'width', 'area')

    def __init__(self, dict):
        # general dataset info
        self.name = None
        self.class_count = None
        self.image_count = None
        self.color_mode = None

        # one attribute per (group, statistic) pair, e.g. height_mean
        for group in self._GROUPS:
            for stat in self._STATS:
                setattr(self, group + '_' + stat, None)

        # overwrite the declared defaults with the supplied values
        # (parameter keeps its historical name even though it shadows the
        # builtin, to stay keyword-compatible with existing callers)
        self.__dict__.update(dict)
import multiprocessing
class EngineerFeature:
    """Engineered (hand-crafted) meta features for an image-classification
    dataset laid out as one sub-folder per class: class/image counts,
    per-class image-count statistics, image height/width/area statistics,
    a dominant colour mode, and optional content checks (text, faces,
    human poses) run on a sample of the images."""

    # Constant.ENGINEER_FEATURES_CSV
    def __init__(self, task_config, csv_path='', save_to_file=False):
        ''' Calculate engineered meta features of a dataset, such as the
        number of classes and the total count of images.

        Args:
            task_config: configs containing job info; must provide the keys
                "data_name" and "data_path"
            csv_path: path to the engineered-features csv file
            save_to_file: whether to save the current data to file, default False

        Attributes:
            data_name (str): name of the dataset
            data_path (str): path to the dataset
            csv_path (str): path to the csv file with info about previous datasets
            df (pd.DataFrame): data loaded from csv_path
            entry (np.ndarray): engineered meta features of the current dataset

        Raises:
            FileNotFoundError: if csv_path does not point to an existing file
        '''
        self._contain_chars = False
        self._contain_faces = False
        self._contain_poses = False
        self._is_xray = False

        self.data_name = task_config["data_name"]
        self.data_path = task_config["data_path"]
        self.csv_path = csv_path
        self.df = self._load_csv()
        self.entry = self._generate_feature(save_to_file)
        # NOTE(review): _judge_special_cases only sets the _contain_* flags
        # and returns None, so self.contains is always None.
        self.contains = self._judge_special_cases(self.data_path)

    def get_engineered_feature(self) -> EngineerFeatureData:
        ''' Wrap the current entry in an EngineerFeatureData namespace.

        Returns:
            arg: an EngineerFeatureData with info about the dataset,
                e.g. arg.name, arg.im_per_class_median
        '''
        # NOTE(review): a freshly generated entry starts with the dataset
        # name while an entry loaded from csv does not, so zipping against
        # df.index may be shifted by one for new datasets -- confirm upstream.
        mapping = {i: j for i, j in zip(self.df.index, self.entry)}
        mapping['name'] = self.data_name
        arg = EngineerFeatureData(mapping)
        return arg

    def contain_chars(self):
        # True once >90% of sampled images contained readable text.
        return self._contain_chars

    def contain_faces(self):
        # True once >90% of sampled images contained at least one face.
        return self._contain_faces

    def contain_poses(self):
        # True once >90% of sampled images contained a detected human pose.
        return self._contain_poses

    def is_xray(self):
        # NOTE(review): never set to True anywhere in this class.
        return self._is_xray

    def _remove_special_chars(self, text):
        '''Strip punctuation, then keep only CJK chars and ASCII alphanumerics.'''
        text = re.sub('[’!"#$%&\'()*+,-./:;<=>?@,。?★、…【】《》?“”‘’![\\]^_`{|}~\s]+', "", text)
        return re.sub(u"([^\u4e00-\u9fa5\u0030-\u0039\u0041-\u005a\u0061-\u007a])", "", text)

    def _init_keypoint_detection_predictor(self):
        '''Build a detectron2 keypoint-RCNN predictor (imported lazily).'''
        # python -m pip install 'git+https://github.com/facebookresearch/detectron2.git'
        from detectron2 import model_zoo
        from detectron2.engine import DefaultPredictor
        from detectron2.config import get_cfg

        cfg = get_cfg()  # get a fresh new config
        cfg.merge_from_file(model_zoo.get_config_file("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml"))
        cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7  # set threshold for this model
        cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml")
        predictor = DefaultPredictor(cfg)
        return predictor

    def _data_has_char(self, images: list, total_sample) -> bool:
        '''Set _contain_chars and return True if >90% of images contain text.'''
        chars = 0
        reader = easyocr.Reader(['ch_sim', 'en'])  # need to run only once to load model into memory
        for im in images:
            res = reader.readtext(im)
            invalid = 0
            for i in res:
                if self._remove_special_chars(i[1]) == "":
                    invalid += 1
            # count the image when at least one non-trivial string was read
            if len(res) - invalid > 0:
                chars += 1
        # set threshold
        if chars / total_sample > 0.9:
            self._contain_chars = True
            return True
        return False

    def _data_has_face(self, images: list, total_sample) -> bool:
        '''Set _contain_faces and return True if >90% of images contain faces.'''
        faces = 0
        device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        detector = MTCNN(keep_all=True, device=device)
        for im in images:
            im = np.array(Image.open(im).convert('RGB'))
            im = Image.fromarray(im)
            boxes, _ = detector.detect(im)
            if boxes is not None:
                faces += 1
        if faces / total_sample > 0.9:
            self._contain_faces = True
            return True
        return False

    def _data_has_pose(self, images: list, total_sample) -> bool:
        '''Set _contain_poses and return True if >90% of images contain poses.'''
        poses = 0
        predictor = self._init_keypoint_detection_predictor()
        for im in images:
            im = np.array(Image.open(im).convert('RGB')).astype(np.float32)
            out = predictor(im)
            if len(out['instances'].get_fields()['pred_boxes'].tensor) > 0:
                poses += 1
        if poses / total_sample > 0.9:
            self._contain_poses = True
            return True
        return False

    def _judge_special_cases(self, ddir: str) -> None:
        ''' Sample images from the dataset and set the _contain_chars /
        _contain_faces / _contain_poses flags.

        Args:
            ddir: path to the dataset (one sub-folder per class)
        '''
        print('Start judging dataset special cases.')
        imPerClass = [len(os.listdir(os.path.join(ddir, i))) for i in os.listdir(ddir)]
        mean = int(np.mean(imPerClass))

        total_sample = 0
        images = []
        for j, c in enumerate(os.listdir(ddir)):
            im_path = os.path.join(ddir, c)  # path to current class folder
            im_files = os.listdir(im_path)  # image names in the class folder
            class_num = len(im_files)
            sample_num = sample_num_strategy(mean, class_num)
            total_sample += sample_num
            index = random.sample(range(class_num), sample_num)
            for i in index:
                images.append(os.path.join(im_path, im_files[i]))

        # multiprocessing.Process(target=self._data_has_face(images, total_sample), )
        # the checks short-circuit: the first positive detector wins
        if self._data_has_pose(images, total_sample):
            return
        if self._data_has_char(images, total_sample):
            return
        # free GPU memory between detector runs
        device = cuda.get_current_device()
        device.reset()
        if self._data_has_face(images, total_sample):
            return
        device = cuda.get_current_device()
        device.reset()

    def _generate_feature(self, save_to_file: bool) -> np.ndarray:
        ''' Load the stored feature vector for this dataset, or compute it.

        Used Params:
            self.data_name,
            self.data_path
        Args:
            save_to_file: whether to persist a newly computed entry
        Returns:
            entry: feature entry for the current dataset
        '''
        if self.data_name in self.df.columns:
            print(f'{self.data_name} already in csv file so stored features will be loaded. '
                  f'Please use another name if you entered a new dataset.')
            return np.array(self.df[self.data_name])

        entry = self._get_data_features(self.data_path, self.data_name)
        if save_to_file:
            # the first element is the dataset name, not a feature value
            self.df[self.data_name] = entry[1:]
            self.df.to_csv(self.csv_path, header=True, index=True)
        return entry

    def _load_csv(self) -> pd.DataFrame:
        ''' Load the engineered-features csv.

        Returns:
            df: dataframe loaded from self.csv_path
        Raises:
            FileNotFoundError: if self.csv_path does not exist
        '''
        if not os.path.isfile(self.csv_path):
            raise FileNotFoundError(f'Cannot find csv file {self.csv_path}')
        df = pd.read_csv(self.csv_path, index_col=0, dtype='str')
        # convert string to float, except the categorical color_mode row
        for i in df.index:
            if i == 'color_mode':
                continue
            df.loc[i] = df.loc[i].astype('float32')
        return df

    def _get_data_features(self, ddir: str, name: str) -> np.ndarray:
        ''' Calculate all the features for one dataset.

        Args:
            ddir: path to the dataset train folder (one sub-folder per class)
            name: name of the dataset
        Returns:
            entry: the engineered features of the dataset laid out as
                [name, class_count, image_count, color_mode,
                 9 im-per-class stats, 9 height stats, 9 width stats,
                 9 area stats]
        '''
        imPerClass = np.asarray(
            [len(os.listdir(os.path.join(ddir, i))) for i in os.listdir(ddir)]
        )
        num_classes = len(os.listdir(ddir))
        num_images = np.sum(imPerClass)

        heights = []
        widths = []
        areas = []
        rgb = 0
        for c in os.listdir(ddir):
            for i in os.listdir(os.path.join(ddir, c)):
                im = Image.open(os.path.join(ddir, c, i))
                # BUGFIX: PIL's Image.size is (width, height); the previous
                # code appended size[0] to heights, swapping the two stats.
                width, height = im.size
                heights.append(height)
                widths.append(width)
                areas.append(width * height)
                if im.mode == 'RGB' or im.mode == 'RGBA':
                    rgb += 1

        # dominant colour mode: RGB if more than half the images are RGB(A)
        cmode = 'RGB' if rgb / num_images > 0.5 else 'L'

        ipc = self._get_list_distribution(imPerClass)
        imh = self._get_list_distribution(np.asarray(heights))
        imw = self._get_list_distribution(np.asarray(widths))
        ima = self._get_list_distribution(np.asarray(areas))
        general = np.asarray([name, num_classes, num_images, cmode], dtype=object)
        entry = np.concatenate((general, ipc, imh, imw, ima))
        return entry

    def _get_list_distribution(self, data: np.ndarray) -> np.ndarray:
        ''' Calculate the statistical info of a list.

        Args:
            data: 1d np array
        Returns:
            out: the following statistics of the input data
                [mean, median, mode, min, max,
                 range, std, skewness, kurtosis]
        '''
        # BUGFIX: scipy.stats.mode changed its return shape in SciPy >= 1.11
        # (scalars instead of length-1 arrays), so ``mode(data)[0][0]`` broke.
        # Compute the mode with numpy instead: np.unique returns sorted
        # values, argmax picks the first (= smallest) value with the highest
        # count, matching scipy's tie-breaking for 1-D input.
        values, counts = np.unique(data, return_counts=True)
        mode_value = values[np.argmax(counts)]
        out = np.array([np.mean(data),
                        np.median(data),
                        mode_value,
                        np.min(data),
                        np.max(data),
                        np.max(data) - np.min(data),
                        np.std(data),
                        skew(data),
                        kurtosis(data)])
        return out
|
device_manager.py | # Copyright 2017 MDSLAB - University of Messina
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
__author__ = "Nicola Peditto <n.peditto@gmail.com>"
import importlib as imp
import inspect
import os
import subprocess
import sys
import threading
import time
from datetime import datetime
from iotronic_lightningrod.config import package_path
from iotronic_lightningrod.lightningrod import RPC_devices
from iotronic_lightningrod.lightningrod import SESSION
from iotronic_lightningrod.modules import Module
from iotronic_lightningrod.modules import utils
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
class DeviceManager(Module.Module):
    """Lightning-rod module that loads the board-specific device driver and
    registers its methods (plus the generic device RPCs below) over WAMP."""

    def __init__(self, board, session):
        # Module declaration
        super(DeviceManager, self).__init__("DeviceManager", board)
        self.device_session = session

        device_type = board.type
        path = package_path + "/devices/" + device_type + ".py"

        # Guard clause: bail out early when no driver module exists.
        if not os.path.exists(path):
            LOG.warning("Device '" + device_type + "' not supported!")
            return

        device_module = imp.import_module(
            "iotronic_lightningrod.devices." + device_type
        )
        LOG.info(" - Device '" + device_type + "' module imported!")

        device = device_module.System()
        dev_meth_list = inspect.getmembers(device, predicate=inspect.ismethod)

        RPC_devices[device_type] = dev_meth_list
        self._deviceWampRegister(dev_meth_list, board)
        board.device = device

    def finalize(self):
        """Nothing to clean up for this module."""
        pass

    def restore(self):
        """Nothing to restore for this module."""
        pass

    def _deviceWampRegister(self, dev_meth_list, board):
        """Register every device method (except dunder init / finalize) as a
        session-scoped WAMP RPC endpoint."""
        LOG.info(" - " + str(board.type).capitalize()
                 + " device registering RPCs:")

        for meth_name, meth_ref in dev_meth_list:
            if meth_name in ("__init__", "finalize"):
                continue
            rpc_addr = u'iotronic.' + str(board.session_id) + '.' + \
                board.uuid + '.' + meth_name
            SESSION.register(meth_ref, rpc_addr)
            LOG.info(" --> " + str(meth_name) + " registered!")

    async def DevicePing(self):
        """RPC: return the current board timestamp (liveness check)."""
        rpc_name = utils.getFuncName()
        LOG.info("RPC " + rpc_name + " CALLED")
        return datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f')

    async def DeviceReboot(self):
        """RPC: reboot the board immediately."""
        rpc_name = utils.getFuncName()
        LOG.info("RPC " + rpc_name + " CALLED")
        subprocess.call("reboot", shell=True)
        return datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f')

    async def DeviceHostname(self):
        """RPC: return "<hostname>@<timestamp>"."""
        rpc_name = utils.getFuncName()
        LOG.info("RPC " + rpc_name + " CALLED")
        out = subprocess.Popen(
            "hostname",
            shell=True,
            stdout=subprocess.PIPE
        )
        output = out.communicate()[0].decode('utf-8').strip()
        print(output)
        return str(output) + "@" + \
            str(datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f'))

    async def DeviceRestartLR(self):
        """RPC: re-exec the Lightning-rod process after a 5 second delay so
        the RPC response can be delivered first."""
        rpc_name = utils.getFuncName()
        LOG.info("RPC " + rpc_name + " CALLED")

        def delayed_restart():
            time.sleep(5)
            python = sys.executable
            os.execl(python, python, *sys.argv)

        threading.Thread(target=delayed_restart).start()

        return "Restarting LR in 5 seconds (" + \
            datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f') + ")..."
|
tello-command.py | #!/usr/bin/python3
import threading
import socket
import time
# IP address of the Tello drone (the drone acts as the Wi-Fi access point).
tello_ip = "192.168.10.1"
"""
Tello port to send command message.
"""
command_port = 8889
"""
@brief Host IP address. 0.0.0.0 referring to current
host/computer IP address.
"""
host_ip = "0.0.0.0"
"""
@brief UDP port to receive response msg from Tello.
Tello command response will send to this port.
"""
response_port = 9000

""" Welcome note """
print("\nTello Command Program\n")
class Tello:
    """Minimal UDP client for the Tello SDK.

    A single datagram socket, bound to ``response_port``, is used both to
    send command strings to the drone and to receive its replies.
    """

    def __init__(self):
        self._running = True
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sock.bind((host_ip, response_port))  # Bind for receiving

    def terminate(self):
        """Stop the receive loop and release the socket."""
        self._running = False
        self.sock.close()

    def recv(self):
        """ Handler for Tello response message """
        while self._running:
            try:
                data, _ = self.sock.recvfrom(1024)  # Read 1024-bytes from UDP socket
                print("response: {}".format(data.decode(encoding="utf-8")))
            except Exception as err:
                # Also triggered once terminate() closes the socket.
                print(err)

    def send(self, msg):
        """ Handler for send message to Tello """
        encoded = msg.encode(encoding="utf-8")
        self.sock.sendto(encoded, (tello_ip, command_port))
        print("message: {}".format(encoded))  # Print message
""" Start new thread for receive Tello response message """
t = Tello()
recvThread = threading.Thread(target=t.recv)
recvThread.start()
while True:
try:
# Get input from CLI
msg = input()
t.send(msg)
# Check for "end"
if msg == "bye":
t.terminate()
recvThread.join()
print("\nGood Bye\n")
break
except KeyboardInterrupt:
t.terminate()
recvThread.join()
break
|
test_transport.py | """Unit tests for the transport module."""
from datetime import datetime
import logging
import queue
import os
import platform
import socket
import ssl
from struct import pack
import sys
import threading
import time
import pytest
from pydicom import dcmread
import pynetdicom
from pynetdicom import AE, evt, _config, debug_logger
from pynetdicom.association import Association
from pynetdicom.events import Event
from pynetdicom._globals import MODE_REQUESTOR
from pynetdicom.pdu_primitives import A_ASSOCIATE
from pynetdicom import transport
from pynetdicom.transport import (
AssociationSocket,
AssociationServer,
ThreadedAssociationServer,
T_CONNECT,
)
from pynetdicom.sop_class import Verification, RTImageStorage
from .encoded_pdu_items import p_data_tf_rq, a_associate_rq
from .hide_modules import hide_modules
from .utils import wait_for_server_socket
# This is the directory that contains test data
TEST_ROOT = os.path.abspath(os.path.dirname(__file__))
CERT_DIR = os.path.join(TEST_ROOT, "cert_files")
DCM_DIR = os.path.join(TEST_ROOT, "dicom_files")

# SSL Testing: certificate/key pairs for the TLS tests
SERVER_CERT, SERVER_KEY = (
    os.path.join(CERT_DIR, "server.crt"),
    os.path.join(CERT_DIR, "server.key"),
)
CLIENT_CERT, CLIENT_KEY = (
    os.path.join(CERT_DIR, "client.crt"),
    os.path.join(CERT_DIR, "client.key"),
)

# Shared dataset fixture used by the storage tests
DATASET = dcmread(os.path.join(DCM_DIR, "RTImageStorage.dcm"))

# debug_logger()
class TestTConnect:
    """Tests for T_CONNECT."""

    def test_bad_addr_raises(self):
        """Test a bad init parameter raises exception"""
        msg = (
            r"'request' must be 'pynetdicom.pdu_primitives.A_ASSOCIATE', not 'NoneType'"
        )
        with pytest.raises(TypeError, match=msg):
            T_CONNECT(None)

    def test_address_request(self):
        """Test init with an A-ASSOCIATE primitive"""
        primitive = A_ASSOCIATE()
        primitive.called_presentation_address = ("123", 12)
        conn = T_CONNECT(primitive)
        assert ("123", 12) == conn.address
        assert conn.request is primitive

        # Reading the result before connecting is an error
        msg = r"A connection attempt has not yet been made"
        with pytest.raises(ValueError, match=msg):
            conn.result

    def test_result_setter(self):
        """Test setting the result value."""
        primitive = A_ASSOCIATE()
        primitive.called_presentation_address = ("123", 12)
        conn = T_CONNECT(primitive)

        # Only the connection state-machine events are accepted
        msg = r"Invalid connection result 'foo'"
        with pytest.raises(ValueError, match=msg):
            conn.result = "foo"
        assert "" == conn._result

        for value in ("Evt2", "Evt17"):
            conn.result = value
            assert value == conn.result
class TestAssociationSocket:
"""Tests for the transport.AssociationSocket class."""
    def setup(self):
        # Fresh requestor-mode Association for every test (pytest setup hook).
        ae = AE()
        self.assoc = Association(ae, MODE_REQUESTOR)
    def get_listen_socket(self):
        """Return a TCP socket listening on localhost:11112."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # SO_RCVTIMEO takes a struct timeval: 1 second, 0 microseconds
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVTIMEO, pack("ll", 1, 0))
        sock.bind(("localhost", 11112))
        sock.listen(5)
        return sock
    def test_init_new(self):
        """Test creating a new AssociationSocket instance."""
        sock = AssociationSocket(self.assoc)
        assert sock.tls_args is None
        assert sock.select_timeout == 0.5
        assert sock._assoc == self.assoc
        assert isinstance(sock.socket, socket.socket)
        assert sock._is_connected is False
        # a fresh, unconnected socket must not have queued any events
        with pytest.raises(queue.Empty):
            sock.event_queue.get(block=False)
    def test_init_address(self):
        """Test creating a new bound AssociationSocket instance."""
        sock = AssociationSocket(self.assoc, address=("127.0.0.1", 11112))
        assert sock.tls_args is None
        assert sock.select_timeout == 0.5
        assert sock._assoc == self.assoc
        # the underlying socket should be bound to the requested address
        assert isinstance(sock.socket, socket.socket)
        assert sock.socket.getsockname()[0] == "127.0.0.1"
        assert sock.socket.getsockname()[1] == 11112
        assert sock._is_connected is False
        with pytest.raises(queue.Empty):
            sock.event_queue.get(block=False)
    def test_init_existing(self):
        """Test creating a new AssociationSocket around existing socket."""
        sock = AssociationSocket(self.assoc, client_socket="abc")
        assert sock.tls_args is None
        assert sock.select_timeout == 0.5
        assert sock._assoc == self.assoc
        assert sock.socket == "abc"
        # wrapping an existing socket marks it connected and queues "Evt5"
        assert sock._is_connected is True
        assert sock.event_queue.get(block=False) == "Evt5"
    def test_init_raises(self, caplog):
        """Test exception is raised if init with client_socket and address."""
        # 'client_socket' and 'address' are mutually exclusive; only a
        # warning is logged and the existing socket is kept as-is.
        msg = (
            r"AssociationSocket instantiated with both a 'client_socket' "
            r"and bind 'address'. The original socket will not be rebound"
        )
        with caplog.at_level(logging.WARNING, logger="pynetdicom"):
            AssociationSocket(
                self.assoc, client_socket="abc", address=("localhost", 11112)
            )
            assert msg in caplog.text
    def test_close_connect(self):
        """Test closing and connecting."""
        sock = AssociationSocket(self.assoc)
        sock._is_connected = True
        assert sock.socket is not None
        sock.close()
        assert sock.socket is None
        # Tries to connect, sets to None if fails
        request = A_ASSOCIATE()
        request.called_presentation_address = ("123", 12)
        sock.connect(T_CONNECT(request))
        # the bogus address is unreachable, so the failed attempt queues
        # "Evt17" and clears the socket
        assert sock.event_queue.get() == "Evt17"
        assert sock.socket is None
    def test_ready_error(self):
        """Test AssociationSocket.ready."""
        sock = AssociationSocket(self.assoc, address=("localhost", 0))
        assert sock.ready is False
        sock._is_connected = True
        # NOTE(review): ready on a bound-but-unconnected socket differs by
        # platform, hence the branch below.
        if platform.system() in ["Windows", "Darwin"]:
            assert sock.ready is False
        else:
            assert sock.ready is True
        sock.socket.close()
        # a closed underlying socket is never ready and queues "Evt17"
        assert sock.ready is False
        assert sock.event_queue.get() == "Evt17"
    def test_print(self):
        """Test str(AssociationSocket)."""
        sock = AssociationSocket(self.assoc)
        # str() should delegate to the wrapped socket's representation
        assert sock.__str__() == sock.socket.__str__()
    def test_close_socket_none(self):
        """Test trying to close a closed socket."""
        def handle_close(event):
            # Simulate the socket having already been cleared before close
            event.assoc.dul.socket.socket = None
        hh = [(evt.EVT_CONN_CLOSE, handle_close)]
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_supported_context(Verification)
        scp = ae.start_server(("localhost", 11113), block=False, evt_handlers=hh)
        ae.add_requested_context(Verification)
        assoc = ae.associate("localhost", 11113)
        assert assoc.is_established
        # Release must complete cleanly even though the handler nulled the
        # socket during EVT_CONN_CLOSE
        assoc.release()
        assert assoc.is_released
        scp.shutdown()
    def test_get_local_addr(self):
        """Test get_local_addr()."""
        # Normal use
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_requested_context(Verification)
        # No SCP is listening on 11113, so the association fails, but the
        # requestor address attribute is still populated
        assoc = ae.associate("localhost", 11113)
        assert not assoc.is_established
        assert isinstance(assoc.requestor.address, str)
        # Exceptional use
        assert not assoc.is_established
        # With an unusable destination the helper still returns the
        # loopback address
        addr = assoc.dul.socket.get_local_addr(("", 111111))
        assert "127.0.0.1" == addr
    def test_multiple_pdu_req(self):
        """Test what happens if two PDUs are sent before the select call."""
        events = []
        def handle_echo(event):
            events.append(event)
            return 0x0000
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_supported_context("1.2.840.10008.1.1")
        ae.add_requested_context("1.2.840.10008.1.1")
        server = ae.start_server(("localhost", 11112), block=False)
        # The echo handler is bound on the *requestor* side here
        assoc = ae.associate(
            "localhost", 11112, evt_handlers=[(evt.EVT_C_ECHO, handle_echo)]
        )
        assert assoc.is_established
        # Send data directly to the requestor
        # Two back-to-back C-ECHO-RQ PDUs in a single send() call
        socket = server.active_associations[0].dul.socket
        socket.send(2 * p_data_tf_rq)
        # Give the requestor time to process both PDUs
        time.sleep(1)
        assoc.release()
        assert assoc.is_released
        server.shutdown()
        # Both PDUs must have been handled despite arriving together
        assert 2 == len(events)
    def test_multiple_pdu_acc(self):
        """Test what happens if two PDUs are sent before the select call."""
        events = []
        def handle_echo(event):
            events.append(event)
            return 0x0000
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_supported_context("1.2.840.10008.1.1")
        ae.add_requested_context("1.2.840.10008.1.1")
        # The echo handler is bound on the *acceptor* side here
        server = ae.start_server(
            ("localhost", 11112),
            block=False,
            evt_handlers=[(evt.EVT_C_ECHO, handle_echo)],
        )
        assoc = ae.associate(
            "localhost",
            11112,
        )
        assert assoc.is_established
        # Send data directly to the requestor
        # Two back-to-back C-ECHO-RQ PDUs in a single send() call
        socket = assoc.dul.socket
        socket.send(2 * p_data_tf_rq)
        # Give the acceptor time to process both PDUs
        time.sleep(1)
        assoc.release()
        assert assoc.is_released
        server.shutdown()
        # Both PDUs must have been handled despite arriving together
        assert 2 == len(events)
@pytest.fixture
def server_context(request):
    """Return a server SSLContext that requires a client certificate.

    TLS v1.3 is not currently supported, so the protocol is capped at
    TLS v1.2.  Which attributes and protocols are available depends on the
    OS, the OpenSSL build and the Python version.
    """
    if hasattr(ssl, "TLSVersion"):
        # Current path: Python 3.7+ with OpenSSL 1.1.0g+ - build a default
        # context and pin its maximum protocol version
        ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
        ctx.maximum_version = ssl.TLSVersion.TLSv1_2
    else:
        # Legacy path for older Python/OpenSSL (e.g. Python 3.6): request
        # TLS v1.2 directly via the protocol argument
        ctx = ssl.SSLContext(protocol=ssl.PROTOCOL_TLSv1_2)
    ctx.verify_mode = ssl.CERT_REQUIRED
    ctx.load_cert_chain(certfile=SERVER_CERT, keyfile=SERVER_KEY)
    ctx.load_verify_locations(cafile=CLIENT_CERT)
    return ctx
@pytest.fixture
def client_context(request):
    """Return a client SSLContext that trusts the test server certificate."""
    ctx = ssl.create_default_context(ssl.Purpose.SERVER_AUTH, cafile=SERVER_CERT)
    ctx.verify_mode = ssl.CERT_REQUIRED
    ctx.load_cert_chain(certfile=CLIENT_CERT, keyfile=CLIENT_KEY)
    # The test certificate isn't issued for the hostname used in tests
    ctx.check_hostname = False
    return ctx
class TestTLS:
    """Test using TLS to wrap the association."""

    def setup(self):
        """Run prior to each test."""
        self.ae = None
        self.has_ssl = transport._HAS_SSL

    def teardown(self):
        """Clean up after each test."""
        if self.ae:
            self.ae.shutdown()
        # Ensure ssl module is available again
        import importlib

        importlib.reload(pynetdicom.transport)

    def test_tls_not_server_not_client(self):
        """Test associating with no TLS on either end."""
        self.ae = ae = AE()
        ae.add_supported_context("1.2.840.10008.1.1")
        server = ae.start_server(("localhost", 11112), block=False)
        ae.add_requested_context("1.2.840.10008.1.1")
        assoc = ae.associate("localhost", 11112)
        assert assoc.is_established
        status = assoc.send_c_echo()
        assert status.Status == 0x0000
        assoc.release()
        assert assoc.is_released
        server.shutdown()
        assert len(server.active_associations) == 0

    def test_tls_not_server_yes_client(self, client_context):
        """Test wrapping the requestor socket with TLS (but not server)."""
        self.ae = ae = AE()
        ae.acse_timeout = 0.5
        ae.dimse_timeout = 0.5
        ae.network_timeout = 0.5
        ae.add_supported_context("1.2.840.10008.1.1")
        server = ae.start_server(("localhost", 11112), block=False)
        ae.add_requested_context("1.2.840.10008.1.1")
        # TLS handshake against a plain-text acceptor fails -> abort
        assoc = ae.associate("localhost", 11112, tls_args=(client_context, None))
        assert assoc.is_aborted
        server.shutdown()
        time.sleep(0.5)
        assert len(server.active_associations) == 0

    def test_tls_yes_server_not_client(self, server_context, caplog):
        """Test wrapping the acceptor socket with TLS (and not client)."""
        with caplog.at_level(logging.ERROR, logger="pynetdicom"):
            self.ae = ae = AE()
            ae.add_supported_context("1.2.840.10008.1.1")
            server = ae.start_server(
                ("localhost", 11112),
                block=False,
                ssl_context=server_context,
            )
            ae.add_requested_context("1.2.840.10008.1.1")
            # Plain-text request against a TLS acceptor fails -> abort
            assoc = ae.associate("localhost", 11112)
            assert assoc.is_aborted
            server.shutdown()
            assert len(server.active_associations) == 0
            assert "Connection closed before the entire PDU was received" in caplog.text

    def test_tls_yes_server_yes_client(self, server_context, client_context):
        """Test associating with TLS on both ends."""
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_supported_context("1.2.840.10008.1.1")
        server = ae.start_server(
            ("localhost", 11112),
            block=False,
            ssl_context=server_context,
        )
        wait_for_server_socket(server, 1)
        ae.add_requested_context("1.2.840.10008.1.1")
        assoc = ae.associate("localhost", 11112, tls_args=(client_context, None))
        assert assoc.is_established
        assoc.release()
        assert assoc.is_released
        server.shutdown()
        assert len(server.active_associations) == 0

    def test_tls_transfer(self, server_context, client_context):
        """Test transferring data after associating with TLS."""
        ds = []

        def handle_store(event):
            ds.append(event.dataset)
            return 0x0000

        handlers = [(evt.EVT_C_STORE, handle_store)]
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_supported_context("1.2.840.10008.1.1")
        ae.add_supported_context(RTImageStorage)
        server = ae.start_server(
            ("localhost", 11112),
            block=False,
            ssl_context=server_context,
            evt_handlers=handlers,
        )
        ae.add_requested_context("1.2.840.10008.1.1")
        ae.add_requested_context(RTImageStorage)
        assoc = ae.associate("localhost", 11112, tls_args=(client_context, None))
        assert assoc.is_established
        status = assoc.send_c_store(DATASET)
        assert status.Status == 0x0000
        assoc.release()
        assert assoc.is_released
        server.shutdown()
        # The full pixel data must survive the encrypted transfer intact
        assert len(ds[0].PixelData) == 2097152

    @hide_modules(["ssl"])
    def test_no_ssl_scp(self):
        """Test exception raised if no SSL available to Python as SCP."""
        # Reload pynetdicom package
        import importlib

        importlib.reload(pynetdicom.transport)

        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_supported_context("1.2.840.10008.1.1")
        msg = r"Your Python installation lacks support for SSL"
        with pytest.raises(RuntimeError, match=msg):
            ae.start_server(
                ("localhost", 11112),
                block=False,
                ssl_context=["random", "object"],
            )

    @hide_modules(["ssl"])
    def test_no_ssl_scu(self):
        """Test exception raised if no SSL available to Python as SCU."""
        # Reload pynetdicom package
        import importlib

        importlib.reload(pynetdicom.transport)

        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_requested_context("1.2.840.10008.1.1")
        msg = r"Your Python installation lacks support for SSL"
        with pytest.raises(RuntimeError, match=msg):
            ae.associate("localhost", 11112, tls_args=(["random", "object"], None))

    def test_multiple_pdu_req(self, server_context, client_context):
        """Test what happens if two PDUs are sent before the select call."""
        events = []

        def handle_echo(event):
            events.append(event)
            return 0x0000

        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_supported_context("1.2.840.10008.1.1")
        ae.add_requested_context("1.2.840.10008.1.1")
        server = ae.start_server(
            ("localhost", 11112),
            block=False,
            ssl_context=server_context,
        )
        assoc = ae.associate(
            "localhost",
            11112,
            tls_args=(client_context, None),
            evt_handlers=[(evt.EVT_C_ECHO, handle_echo)],
        )
        assert assoc.is_established
        # Send data directly to the requestor
        socket = server.active_associations[0].dul.socket
        socket.send(2 * p_data_tf_rq)
        time.sleep(1)
        assoc.release()
        timeout = 0
        while not assoc.is_released and timeout < 5:
            time.sleep(0.05)
            timeout += 0.05
        assert assoc.is_released
        # Fixed: mirror test_multiple_pdu_acc and the non-TLS variant -
        # shut the server down and check both C-ECHO-RQ PDUs were handled
        # (both steps were previously missing from this test)
        server.shutdown()
        assert 2 == len(events)

    def test_multiple_pdu_acc(self, server_context, client_context):
        """Test what happens if two PDUs are sent before the select call."""
        events = []

        def handle_echo(event):
            events.append(event)
            return 0x0000

        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_supported_context("1.2.840.10008.1.1")
        ae.add_requested_context("1.2.840.10008.1.1")
        server = ae.start_server(
            ("localhost", 11112),
            block=False,
            ssl_context=server_context,
            evt_handlers=[(evt.EVT_C_ECHO, handle_echo)],
        )
        assoc = ae.associate("localhost", 11112, tls_args=(client_context, None))
        assert assoc.is_established
        # Send data directly to the requestor
        socket = assoc.dul.socket
        socket.send(2 * p_data_tf_rq)
        time.sleep(1)
        assoc.release()
        timeout = 0
        while not assoc.is_released and timeout < 5:
            time.sleep(0.05)
            timeout += 0.05
        assert assoc.is_released
        server.shutdown()
        assert 2 == len(events)
class TestAssociationServer:
    """Tests for the AssociationServer classes."""

    def setup(self):
        """Run prior to each test."""
        self.ae = None

    def teardown(self):
        """Clean up after each test."""
        if self.ae:
            self.ae.shutdown()

    @pytest.mark.skip()
    def test_multi_assoc_block(self):
        """Test that multiple requestors can associate when blocking."""
        self.ae = ae = AE()
        ae.maximum_associations = 10
        ae.add_supported_context("1.2.840.10008.1.1")
        ae.start_server(("localhost", 11112))

    def test_multi_assoc_non(self):
        """Test that multiple requestors can association when non-blocking."""
        self.ae = ae = AE()
        ae.maximum_associations = 10
        ae.add_supported_context("1.2.840.10008.1.1")
        ae.add_requested_context("1.2.840.10008.1.1")
        scp = ae.start_server(("localhost", 11112), block=False)
        # Ten concurrent associations against one non-blocking server
        assocs = []
        for ii in range(10):
            assoc = ae.associate("localhost", 11112)
            assert assoc.is_established
            assocs.append(assoc)
        for assoc in assocs:
            assoc.release()
        scp.shutdown()

    def test_init_handlers(self):
        """Test AssociationServer.__init__()."""

        def handle(event):
            pass

        def handle_echo(event):
            return 0x0000

        def handle_echo_b(event):
            return 0x0000

        self.ae = ae = AE()
        handlers = [
            (evt.EVT_DATA_RECV, handle),
            (evt.EVT_DATA_RECV, handle),
            (evt.EVT_C_ECHO, handle_echo),
            (evt.EVT_C_ECHO, handle_echo_b),
            (evt.EVT_DATA_SENT, handle_echo_b),
            (evt.EVT_DATA_SENT, handle_echo),
            (evt.EVT_DATA_SENT, handle),
        ]
        ae.add_supported_context(Verification)
        scp = ae.start_server(("localhost", 11112), block=False, evt_handlers=handlers)
        assert evt.EVT_DATA_RECV in scp._handlers
        assert evt.EVT_C_ECHO in scp._handlers
        # Duplicates not added
        assert len(scp._handlers[evt.EVT_DATA_RECV]) == 1
        # Multiples allowed
        assert len(scp._handlers[evt.EVT_DATA_SENT]) == 3
        # Only a single handler allowed - the last bound one wins
        assert scp._handlers[evt.EVT_C_ECHO] == (handle_echo_b, None)

    def test_get_events(self):
        """Test AssociationServer.get_events()."""

        def handle(event):
            pass

        def handle_echo(event):
            return 0x0000

        def handle_echo_b(event):
            return 0x0000

        self.ae = ae = AE()
        handlers = [
            (evt.EVT_DATA_RECV, handle),
            (evt.EVT_DATA_RECV, handle),
            (evt.EVT_C_ECHO, handle_echo),
            (evt.EVT_C_ECHO, handle_echo_b),
            (evt.EVT_DATA_SENT, handle_echo_b),
            (evt.EVT_DATA_SENT, handle_echo),
            (evt.EVT_DATA_SENT, handle),
        ]
        ae.add_supported_context(Verification)
        scp = ae.start_server(("localhost", 11112), block=False, evt_handlers=handlers)
        bound_events = scp.get_events()
        assert evt.EVT_DATA_RECV in bound_events
        assert evt.EVT_DATA_SENT in bound_events
        assert evt.EVT_C_ECHO in bound_events
        scp.shutdown()

    def test_get_handlers(self):
        """Test AssociationServer.get_handlers()."""
        _config.LOG_HANDLER_LEVEL = "none"

        def handle(event):
            pass

        def handle_echo(event):
            return 0x0000

        def handle_echo_b(event):
            return 0x0000

        self.ae = ae = AE()
        handlers = [
            (evt.EVT_DATA_RECV, handle),
            (evt.EVT_DATA_RECV, handle),
            (evt.EVT_C_ECHO, handle_echo),
            (evt.EVT_C_ECHO, handle_echo_b),
            (evt.EVT_DATA_SENT, handle_echo_b),
            (evt.EVT_DATA_SENT, handle_echo),
            (evt.EVT_DATA_SENT, handle),
        ]
        ae.add_supported_context(Verification)
        scp = ae.start_server(("localhost", 11112), block=False, evt_handlers=handlers)
        assert scp.get_handlers(evt.EVT_DATA_RECV) == [(handle, None)]
        assert (handle, None) in scp.get_handlers(evt.EVT_DATA_SENT)
        assert (handle_echo, None) in scp.get_handlers(evt.EVT_DATA_SENT)
        assert (handle_echo_b, None) in scp.get_handlers(evt.EVT_DATA_SENT)
        # Intervention events return a single (handler, args) pair, not a list
        assert scp.get_handlers(evt.EVT_C_ECHO) == (handle_echo_b, None)
        assert scp.get_handlers(evt.EVT_PDU_SENT) == []
        scp.shutdown()

    def test_shutdown(self):
        """test trying to shutdown a socket that's already closed."""
        self.ae = ae = AE()
        ae.add_supported_context(Verification)
        server = ae.start_server(("localhost", 11112), block=False)
        # Closing the listen socket first must not break shutdown()
        server.socket.close()
        server.shutdown()

    def test_exception_in_handler(self):
        """Test exc raised by the handler doesn't shut down the server."""

        class DummyAE:
            network_timeout = 5
            _servers = []

        dummy = DummyAE()
        # DummyAE lacks the attributes a real AE provides, so handling the
        # incoming request raises inside the server thread
        server = ThreadedAssociationServer(dummy, ("localhost", 11112), b"a", [])
        dummy._servers.append(server)
        thread = threading.Thread(target=server.serve_forever)
        thread.daemon = True
        thread.start()
        ae = AE()
        ae.add_requested_context("1.2.840.10008.1.1")
        ae.associate("localhost", 11112)
        # The server must survive the handler exception
        assert server.socket.fileno() != -1
        server.shutdown()
        # pynetdicom is Python-3-only: a closed socket's fileno() is -1
        # (the old `sys.version_info[0] == 2` branch was unreachable)
        assert server.socket.fileno() == -1

    def test_blocking_process_request(self):
        """Test AssociationServer.process_request."""
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_supported_context(Verification)
        # Run the blocking server in a background thread
        t = threading.Thread(
            target=ae.start_server, args=(("localhost", 11112),), kwargs={"block": True}
        )
        t.start()
        ae.add_requested_context(Verification)
        assoc = ae.associate("localhost", 11112)
        assert assoc.is_established
        assoc.release()
        ae.shutdown()

    def test_split_pdu_windows(self):
        """Regression test for #653"""
        events = []

        def handle_echo(event):
            events.append(event)
            return 0x0000

        req_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        req_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        req_sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVTIMEO, pack("ll", 6000, 0))
        req_sock.bind(("localhost", 0))
        self.ae = ae = AE()
        ae.network_timeout = 1
        ae.add_supported_context("1.2.840.10008.1.1")
        ae.add_requested_context("1.2.840.10008.1.1")
        server = ae.start_server(
            ("localhost", 11112),
            block=False,
            evt_handlers=[(evt.EVT_C_ECHO, handle_echo)],
        )
        # Set AE requestor connection timeout
        req_sock.settimeout(30)
        req_sock.connect(("localhost", 11112))
        req_sock.settimeout(None)
        # Send data directly to the acceptor
        req_sock.send(a_associate_rq)
        # Give the acceptor time to send the A-ASSOCIATE-AC
        while not server.active_associations:
            time.sleep(0.0001)
        assoc = server.active_associations[0]
        while not assoc.is_established:
            time.sleep(0.0001)
        # Forcibly split the P-DATA PDU into two TCP segments
        req_sock.send(p_data_tf_rq[:12])
        time.sleep(0.5)
        req_sock.send(p_data_tf_rq[12:])
        # Give the acceptor time to process the C-ECHO-RQ
        while assoc.is_established and not events:
            time.sleep(0.0001)
        server.shutdown()
        req_sock.close()
        assert 1 == len(events)

    def test_gc(self):
        """Test garbage collection."""
        self.ae = ae = AE()
        ae.add_supported_context(Verification)
        server = ae.start_server(("localhost", 11112), block=False)
        server._gc[0] = 59
        # Default poll interval is 0.5 s
        while server._gc[0] == server._gc[1]:
            time.sleep(0.1)
        assert server._gc[0] < server._gc[1]
        server.shutdown()
class TestEventHandlingAcceptor:
"""Test the transport events and handling as acceptor."""
    def setup(self):
        """Run prior to each test."""
        self.ae = None
    def teardown(self):
        """Clean up after each test: shut down any running AE."""
        if self.ae:
            self.ae.shutdown()
    def test_no_handlers(self):
        """Test with no transport event handlers bound."""
        self.ae = ae = AE()
        ae.add_supported_context(Verification)
        ae.add_requested_context(Verification)
        scp = ae.start_server(("localhost", 11112), block=False)
        assert scp.get_handlers(evt.EVT_CONN_OPEN) == []
        assert scp.get_handlers(evt.EVT_CONN_CLOSE) == []
        assoc = ae.associate("localhost", 11112)
        assert assoc.is_established
        assert len(scp.active_associations) == 1
        # Neither the server, the requestor nor the server's child
        # association has any transport handlers
        assert scp.get_handlers(evt.EVT_CONN_OPEN) == []
        assert scp.get_handlers(evt.EVT_CONN_CLOSE) == []
        assert assoc.get_handlers(evt.EVT_CONN_OPEN) == []
        assert assoc.get_handlers(evt.EVT_CONN_CLOSE) == []
        child = scp.active_associations[0]
        assert child.get_handlers(evt.EVT_CONN_OPEN) == []
        assert child.get_handlers(evt.EVT_CONN_CLOSE) == []
        assoc.release()
        scp.shutdown()
    def test_bind_evt_conn_open(self):
        """Test associations as acceptor with EVT_CONN_OPEN bound."""
        triggered_events = []
        def on_conn_open(event):
            triggered_events.append(event)
        self.ae = ae = AE()
        ae.add_supported_context(Verification)
        ae.add_requested_context(Verification)
        scp = ae.start_server(
            ("localhost", 11112),
            block=False,
            evt_handlers=[(evt.EVT_CONN_OPEN, on_conn_open)],
        )
        assert scp.get_handlers(evt.EVT_CONN_OPEN) == [(on_conn_open, None)]
        assert scp.get_handlers(evt.EVT_CONN_CLOSE) == []
        assoc = ae.associate("localhost", 11112)
        assert assoc.is_established
        assert len(scp.active_associations) == 1
        assert scp.get_handlers(evt.EVT_CONN_OPEN) == [(on_conn_open, None)]
        assert scp.get_handlers(evt.EVT_CONN_CLOSE) == []
        # Server-bound handlers don't appear on the requestor side...
        assert assoc.get_handlers(evt.EVT_CONN_OPEN) == []
        assert assoc.get_handlers(evt.EVT_CONN_CLOSE) == []
        # ...but are inherited by the server's child association
        child = scp.active_associations[0]
        assert child.get_handlers(evt.EVT_CONN_OPEN) == [(on_conn_open, None)]
        assert child.get_handlers(evt.EVT_CONN_CLOSE) == []
        assert len(triggered_events) == 1
        # Check the event attributes supplied to the handler
        event = triggered_events[0]
        assert isinstance(event, Event)
        assert isinstance(event.assoc, Association)
        assert isinstance(event.timestamp, datetime)
        assert isinstance(event.address[0], str)
        assert isinstance(event.address[1], int)
        assert event.event.name == "EVT_CONN_OPEN"
        assoc.release()
        scp.shutdown()
    def test_bind_evt_conn_open_running(self):
        """Test binding EVT_CONN_OPEN while running."""
        triggered_events = []
        def on_conn_open(event):
            triggered_events.append(event)
        self.ae = ae = AE()
        ae.add_supported_context(Verification)
        ae.add_requested_context(Verification)
        scp = ae.start_server(("localhost", 11112), block=False)
        assert scp.get_handlers(evt.EVT_CONN_OPEN) == []
        assert scp.get_handlers(evt.EVT_CONN_CLOSE) == []
        assoc = ae.associate("localhost", 11112)
        assert assoc.is_established
        assert len(scp.active_associations) == 1
        assert scp.get_handlers(evt.EVT_CONN_OPEN) == []
        assert scp.get_handlers(evt.EVT_CONN_CLOSE) == []
        assert assoc.get_handlers(evt.EVT_CONN_OPEN) == []
        assert assoc.get_handlers(evt.EVT_CONN_CLOSE) == []
        child = scp.active_associations[0]
        assert child.get_handlers(evt.EVT_CONN_OPEN) == []
        assert child.get_handlers(evt.EVT_CONN_CLOSE) == []
        assert len(scp.active_associations) == 1
        # The first connection opened before binding -> no event recorded
        assert len(triggered_events) == 0
        # Bind
        scp.bind(evt.EVT_CONN_OPEN, on_conn_open)
        assert scp.get_handlers(evt.EVT_CONN_OPEN) == [(on_conn_open, None)]
        assert scp.get_handlers(evt.EVT_CONN_CLOSE) == []
        # Binding on the server also binds on existing child associations
        child = scp.active_associations[0]
        assert child.get_handlers(evt.EVT_CONN_OPEN) == [(on_conn_open, None)]
        assert child.get_handlers(evt.EVT_CONN_CLOSE) == []
        assoc2 = ae.associate("localhost", 11112)
        assert assoc2.is_established
        assert len(scp.active_associations) == 2
        assert scp.get_handlers(evt.EVT_CONN_OPEN) == [(on_conn_open, None)]
        assert scp.get_handlers(evt.EVT_CONN_CLOSE) == []
        assert assoc2.get_handlers(evt.EVT_CONN_OPEN) == []
        assert assoc2.get_handlers(evt.EVT_CONN_CLOSE) == []
        child2 = scp.active_associations[1]
        assert child2.get_handlers(evt.EVT_CONN_OPEN) == [(on_conn_open, None)]
        assert child2.get_handlers(evt.EVT_CONN_CLOSE) == []
        # Only the post-bind connection triggered the handler
        assert len(triggered_events) == 1
        event = triggered_events[0]
        assert isinstance(event, Event)
        assert isinstance(event.assoc, Association)
        assert isinstance(event.timestamp, datetime)
        assert isinstance(event.address[0], str)
        assert isinstance(event.address[1], int)
        assoc.release()
        assoc2.release()
        scp.shutdown()
    def test_unbind_evt_conn_open(self):
        """Test unbinding an event while running."""
        triggered_events = []
        def on_conn_open(event):
            triggered_events.append(event)
        self.ae = ae = AE()
        ae.add_supported_context(Verification)
        ae.add_requested_context(Verification)
        scp = ae.start_server(
            ("localhost", 11112),
            block=False,
            evt_handlers=[(evt.EVT_CONN_OPEN, on_conn_open)],
        )
        assert scp.get_handlers(evt.EVT_CONN_OPEN) == [(on_conn_open, None)]
        assert scp.get_handlers(evt.EVT_CONN_CLOSE) == []
        assoc = ae.associate("localhost", 11112)
        assert assoc.is_established
        assert len(scp.active_associations) == 1
        assert scp.get_handlers(evt.EVT_CONN_OPEN) == [(on_conn_open, None)]
        assert scp.get_handlers(evt.EVT_CONN_CLOSE) == []
        assert assoc.get_handlers(evt.EVT_CONN_OPEN) == []
        assert assoc.get_handlers(evt.EVT_CONN_CLOSE) == []
        child = scp.active_associations[0]
        assert child.get_handlers(evt.EVT_CONN_OPEN) == [(on_conn_open, None)]
        assert child.get_handlers(evt.EVT_CONN_CLOSE) == []
        # The first connection fired the handler while it was bound
        assert len(triggered_events) == 1
        event = triggered_events[0]
        assert isinstance(event, Event)
        assert isinstance(event.assoc, Association)
        assert isinstance(event.timestamp, datetime)
        assert isinstance(event.address[0], str)
        assert isinstance(event.address[1], int)
        # Unbind
        # Unbinding on the server also unbinds on existing children
        scp.unbind(evt.EVT_CONN_OPEN, on_conn_open)
        assert scp.get_handlers(evt.EVT_CONN_OPEN) == []
        assert scp.get_handlers(evt.EVT_CONN_CLOSE) == []
        assert child.get_handlers(evt.EVT_CONN_OPEN) == []
        assert child.get_handlers(evt.EVT_CONN_CLOSE) == []
        assoc2 = ae.associate("localhost", 11112)
        assert assoc2.is_established
        assert len(scp.active_associations) == 2
        assert scp.get_handlers(evt.EVT_CONN_OPEN) == []
        assert scp.get_handlers(evt.EVT_CONN_CLOSE) == []
        assert assoc2.get_handlers(evt.EVT_CONN_OPEN) == []
        assert assoc2.get_handlers(evt.EVT_CONN_CLOSE) == []
        child2 = scp.active_associations[1]
        assert child2.get_handlers(evt.EVT_CONN_OPEN) == []
        assert child2.get_handlers(evt.EVT_CONN_CLOSE) == []
        # No further events after unbinding
        assert len(triggered_events) == 1
        assoc.release()
        assoc2.release()
        scp.shutdown()
def test_unbind_no_event(self):
"""Test unbinding if no event bound."""
def dummy(event):
pass
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("localhost", 11112), block=False)
assert scp.get_handlers(evt.EVT_CONN_CLOSE) == []
scp.unbind(evt.EVT_CONN_CLOSE, dummy)
assert scp.get_handlers(evt.EVT_CONN_CLOSE) == []
scp.shutdown()
def test_unbind_last_handler(self):
"""Test unbinding if no event bound."""
def dummy(event):
pass
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(("localhost", 11112), block=False)
scp.bind(evt.EVT_CONN_CLOSE, dummy)
assert scp.get_handlers(evt.EVT_CONN_CLOSE) == [(dummy, None)]
scp.unbind(evt.EVT_CONN_CLOSE, dummy)
assert scp.get_handlers(evt.EVT_CONN_CLOSE) == []
assert evt.EVT_CONN_CLOSE not in scp._handlers
scp.shutdown()
    def test_conn_open_raises(self, caplog):
        """Test the handler for EVT_CONN_OPEN raising exception."""
        def handle(event):
            raise NotImplementedError("Exception description")
        self.ae = ae = AE()
        ae.add_supported_context(Verification)
        ae.add_requested_context(Verification)
        handlers = [(evt.EVT_CONN_OPEN, handle)]
        scp = ae.start_server(("localhost", 11112), block=False, evt_handlers=handlers)
        with caplog.at_level(logging.ERROR, logger="pynetdicom"):
            # The association must still succeed despite the handler raising
            assoc = ae.associate("localhost", 11112)
            assert assoc.is_established
            assoc.release()
            while scp.active_associations:
                time.sleep(0.05)
            scp.shutdown()
            # The handler's exception is logged, not propagated
            msg = (
                "Exception raised in user's 'evt.EVT_CONN_OPEN' event handler"
                " 'handle'"
            )
            assert msg in caplog.text
            assert "Exception description" in caplog.text
def test_bind_evt_conn_close(self):
"""Test associations as acceptor with EVT_CONN_CLOSE bound."""
triggered_events = []
def on_conn_close(event):
with threading.Lock():
triggered_events.append(event)
self.ae = ae = AE()
ae.add_supported_context(Verification)
ae.add_requested_context(Verification)
scp = ae.start_server(
("localhost", 11112),
block=False,
evt_handlers=[(evt.EVT_CONN_CLOSE, on_conn_close)],
)
assert scp.get_handlers(evt.EVT_CONN_OPEN) == []
assert scp.get_handlers(evt.EVT_CONN_CLOSE) == [(on_conn_close, None)]
assoc = ae.associate("localhost", 11112)
assert assoc.is_established
assert len(scp.active_associations) == 1
assert scp.get_handlers(evt.EVT_CONN_OPEN) == []
assert scp.get_handlers(evt.EVT_CONN_CLOSE) == [(on_conn_close, None)]
assert assoc.get_handlers(evt.EVT_CONN_OPEN) == []
assert assoc.get_handlers(evt.EVT_CONN_CLOSE) == []
child = scp.active_associations[0]
assert child.get_handlers(evt.EVT_CONN_OPEN) == []
assert child.get_handlers(evt.EVT_CONN_CLOSE) == [(on_conn_close, None)]
assoc.release()
while scp.active_associations:
time.sleep(0.05)
assert len(triggered_events) == 1
event = triggered_events[0]
assert isinstance(event, Event)
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime)
assert isinstance(event.address[0], str)
assert isinstance(event.address[1], int)
assert event.event.name == "EVT_CONN_CLOSE"
scp.shutdown()
    def test_bind_evt_conn_close_running(self):
        """Test binding EVT_CONN_CLOSE while running."""
        triggered_events = []
        def on_conn_close(event):
            triggered_events.append(event)
        self.ae = ae = AE()
        ae.add_supported_context(Verification)
        ae.add_requested_context(Verification)
        scp = ae.start_server(
            ("localhost", 11112),
            block=False,
        )
        assert scp.get_handlers(evt.EVT_CONN_OPEN) == []
        assert scp.get_handlers(evt.EVT_CONN_CLOSE) == []
        assoc = ae.associate("localhost", 11112)
        assert assoc.is_established
        assert len(scp.active_associations) == 1
        assert scp.get_handlers(evt.EVT_CONN_OPEN) == []
        assert scp.get_handlers(evt.EVT_CONN_CLOSE) == []
        assert assoc.get_handlers(evt.EVT_CONN_OPEN) == []
        assert assoc.get_handlers(evt.EVT_CONN_CLOSE) == []
        child = scp.active_associations[0]
        assert child.get_handlers(evt.EVT_CONN_OPEN) == []
        assert child.get_handlers(evt.EVT_CONN_CLOSE) == []
        # First association closes with no handler bound -> no event
        assoc.release()
        while scp.active_associations:
            time.sleep(0.05)
        assoc = ae.associate("localhost", 11112)
        assert assoc.is_established
        assert len(scp.active_associations) == 1
        # Bind while the second association is active
        scp.bind(evt.EVT_CONN_CLOSE, on_conn_close)
        assert scp.get_handlers(evt.EVT_CONN_OPEN) == []
        assert scp.get_handlers(evt.EVT_CONN_CLOSE) == [(on_conn_close, None)]
        assert assoc.get_handlers(evt.EVT_CONN_OPEN) == []
        assert assoc.get_handlers(evt.EVT_CONN_CLOSE) == []
        # Binding on the server propagates to the existing child
        child = scp.active_associations[0]
        assert child.get_handlers(evt.EVT_CONN_OPEN) == []
        assert child.get_handlers(evt.EVT_CONN_CLOSE) == [(on_conn_close, None)]
        assoc.release()
        assert assoc.is_released
        time.sleep(0.1)
        # Only the close that happened after binding was recorded
        assert len(triggered_events) == 1
        event = triggered_events[0]
        assert isinstance(event, Event)
        assert isinstance(event.assoc, Association)
        assert isinstance(event.timestamp, datetime)
        scp.shutdown()
    def test_unbind_evt_conn_close(self):
        """Test unbinding EVT_CONN_CLOSE."""
        triggered_events = []
        def on_conn_close(event):
            triggered_events.append(event)
        self.ae = ae = AE()
        ae.add_supported_context(Verification)
        ae.add_requested_context(Verification)
        scp = ae.start_server(
            ("localhost", 11112),
            block=False,
            evt_handlers=[(evt.EVT_CONN_CLOSE, on_conn_close)],
        )
        assert scp.get_handlers(evt.EVT_CONN_OPEN) == []
        assert scp.get_handlers(evt.EVT_CONN_CLOSE) == [(on_conn_close, None)]
        assoc = ae.associate("localhost", 11112)
        assert assoc.is_established
        assert len(scp.active_associations) == 1
        assert scp.get_handlers(evt.EVT_CONN_OPEN) == []
        assert scp.get_handlers(evt.EVT_CONN_CLOSE) == [(on_conn_close, None)]
        assert assoc.get_handlers(evt.EVT_CONN_OPEN) == []
        assert assoc.get_handlers(evt.EVT_CONN_CLOSE) == []
        child = scp.active_associations[0]
        assert child.get_handlers(evt.EVT_CONN_OPEN) == []
        assert child.get_handlers(evt.EVT_CONN_CLOSE) == [(on_conn_close, None)]
        # Unbinding on the server also unbinds on the existing child
        scp.unbind(evt.EVT_CONN_CLOSE, on_conn_close)
        assert scp.get_handlers(evt.EVT_CONN_OPEN) == []
        assert scp.get_handlers(evt.EVT_CONN_CLOSE) == []
        assert child.get_handlers(evt.EVT_CONN_OPEN) == []
        assert child.get_handlers(evt.EVT_CONN_CLOSE) == []
        assoc.release()
        while scp.active_associations:
            time.sleep(0.05)
        # The connection closed after unbinding -> handler never fired
        assert len(triggered_events) == 0
        scp.shutdown()
    def test_conn_close_raises(self, caplog):
        """Test the handler for EVT_CONN_CLOSE raising exception."""
        def handle(event):
            raise NotImplementedError("Exception description")
        self.ae = ae = AE()
        ae.add_supported_context(Verification)
        ae.add_requested_context(Verification)
        handlers = [(evt.EVT_CONN_CLOSE, handle)]
        scp = ae.start_server(("localhost", 11112), block=False, evt_handlers=handlers)
        with caplog.at_level(logging.ERROR, logger="pynetdicom"):
            # Association and release must succeed despite the handler raising
            assoc = ae.associate("localhost", 11112)
            assert assoc.is_established
            assoc.release()
            while scp.active_associations:
                time.sleep(0.05)
            scp.shutdown()
            # The handler's exception is logged, not propagated
            msg = (
                "Exception raised in user's 'evt.EVT_CONN_CLOSE' event handler"
                " 'handle'"
            )
            assert msg in caplog.text
            assert "Exception description" in caplog.text
    def test_data_sent(self):
        """Test binding to EVT_DATA_SENT."""
        triggered = []
        def handle(event):
            triggered.append(event)
        self.ae = ae = AE()
        ae.add_supported_context(Verification)
        ae.add_requested_context(Verification)
        handlers = [(evt.EVT_DATA_SENT, handle)]
        scp = ae.start_server(("localhost", 11112), block=False, evt_handlers=handlers)
        assert scp.get_handlers(evt.EVT_DATA_SENT) == [(handle, None)]
        assoc = ae.associate("localhost", 11112)
        assert assoc.is_established
        assert len(scp.active_associations) == 1
        assert scp.get_handlers(evt.EVT_DATA_SENT) == [(handle, None)]
        assert assoc.get_handlers(evt.EVT_DATA_SENT) == []
        # The server's child association inherits the handler
        child = scp.active_associations[0]
        assert child.get_handlers(evt.EVT_DATA_SENT) == [(handle, None)]
        assoc.release()
        while scp.active_associations:
            time.sleep(0.05)
        # The acceptor sent exactly two PDUs over the association lifetime
        assert len(triggered) == 2
        event = triggered[0]
        assert isinstance(event, Event)
        assert isinstance(event.assoc, Association)
        assert isinstance(event.timestamp, datetime)
        assert isinstance(event.data, bytes)
        assert event.event.name == "EVT_DATA_SENT"
        # The first byte of a PDU identifies its type
        assert triggered[0].data[0:1] == b"\x02"  # A-ASSOCIATE-AC
        assert triggered[1].data[0:1] == b"\x06"  # A-RELEASE-RP
        scp.shutdown()
    def test_data_sent_bind(self):
        """Test binding to EVT_DATA_SENT."""
        triggered = []
        def handle(event):
            triggered.append(event)
        self.ae = ae = AE()
        ae.add_supported_context(Verification)
        ae.add_requested_context(Verification)
        scp = ae.start_server(("localhost", 11112), block=False)
        assert scp.get_handlers(evt.EVT_DATA_SENT) == []
        assoc = ae.associate("localhost", 11112)
        assert assoc.is_established
        # Allow the A-ASSOCIATE-AC to be sent before binding
        time.sleep(0.5)
        scp.bind(evt.EVT_DATA_SENT, handle)
        assert len(scp.active_associations) == 1
        assert scp.get_handlers(evt.EVT_DATA_SENT) == [(handle, None)]
        assert assoc.get_handlers(evt.EVT_DATA_SENT) == []
        child = scp.active_associations[0]
        assert child.get_handlers(evt.EVT_DATA_SENT) == [(handle, None)]
        assoc.release()
        while scp.active_associations:
            time.sleep(0.05)
        # Only the PDU sent after binding was captured
        assert len(triggered) == 1
        event = triggered[0]
        assert isinstance(event, Event)
        assert isinstance(event.assoc, Association)
        assert isinstance(event.timestamp, datetime)
        assert isinstance(event.data, bytes)
        assert event.event.name == "EVT_DATA_SENT"
        assert event.data[0:1] == b"\x06"  # A-RELEASE-RP
        scp.shutdown()
    def test_data_sent_unbind(self):
        """Test unbinding EVT_DATA_SENT."""
        triggered = []
        def handle(event):
            triggered.append(event)
        self.ae = ae = AE()
        ae.add_supported_context(Verification)
        ae.add_requested_context(Verification)
        handlers = [(evt.EVT_DATA_SENT, handle)]
        scp = ae.start_server(("localhost", 11112), block=False, evt_handlers=handlers)
        assert scp.get_handlers(evt.EVT_DATA_SENT) == [(handle, None)]
        assoc = ae.associate("localhost", 11112)
        assert assoc.is_established
        # Allow the A-ASSOCIATE-AC to be sent while still bound
        time.sleep(0.5)
        assert len(scp.active_associations) == 1
        assert scp.get_handlers(evt.EVT_DATA_SENT) == [(handle, None)]
        assert assoc.get_handlers(evt.EVT_DATA_SENT) == []
        child = scp.active_associations[0]
        # Sta6: association established and ready for data
        assert child.dul.state_machine.current_state == "Sta6"
        assert child.get_handlers(evt.EVT_DATA_SENT) == [(handle, None)]
        scp.unbind(evt.EVT_DATA_SENT, handle)
        assoc.release()
        while scp.active_associations:
            time.sleep(0.05)
        time.sleep(0.1)
        # Only the PDU sent before unbinding was captured
        assert len(triggered) == 1
        assert triggered[0].data[0:1] == b"\x02"  # A-ASSOCIATE-AC
        scp.shutdown()
def test_data_sent_raises(self, caplog):
    """Test the handler for EVT_DATA_SENT raising exception."""

    def handle(event):
        raise NotImplementedError("Exception description")

    self.ae = ae = AE()
    ae.add_supported_context(Verification)
    ae.add_requested_context(Verification)
    handlers = [(evt.EVT_DATA_SENT, handle)]
    scp = ae.start_server(("localhost", 11112), block=False, evt_handlers=handlers)

    with caplog.at_level(logging.ERROR, logger="pynetdicom"):
        # The association must complete normally even though the handler
        # raises on every event: the exception is logged, not propagated.
        assoc = ae.associate("localhost", 11112)
        assert assoc.is_established
        assoc.release()
        while scp.active_associations:
            time.sleep(0.05)

        scp.shutdown()

        msg = (
            "Exception raised in user's 'evt.EVT_DATA_SENT' event handler"
            " 'handle'"
        )
        assert msg in caplog.text
        assert "Exception description" in caplog.text
def test_data_recv(self):
    """Test starting bound to EVT_DATA_RECV."""
    triggered = []

    def handle(event):
        triggered.append(event)

    self.ae = ae = AE()
    ae.add_supported_context(Verification)
    ae.add_requested_context(Verification)
    handlers = [(evt.EVT_DATA_RECV, handle)]
    scp = ae.start_server(("localhost", 11112), block=False, evt_handlers=handlers)
    assert scp.get_handlers(evt.EVT_DATA_RECV) == [(handle, None)]
    assoc = ae.associate("localhost", 11112)
    assert assoc.is_established
    assert len(scp.active_associations) == 1
    assert scp.get_handlers(evt.EVT_DATA_RECV) == [(handle, None)]
    assert assoc.get_handlers(evt.EVT_DATA_RECV) == []
    child = scp.active_associations[0]
    assert child.get_handlers(evt.EVT_DATA_RECV) == [(handle, None)]
    assoc.release()
    while scp.active_associations:
        time.sleep(0.05)

    # The acceptor receives the requestor's association and release requests.
    assert len(triggered) == 2
    event = triggered[0]
    assert isinstance(event, Event)
    assert isinstance(event.assoc, Association)
    assert isinstance(event.timestamp, datetime)
    assert isinstance(event.data, bytes)
    assert triggered[0].data[0:1] == b"\x01"  # Should be A-ASSOCIATE-RQ PDU
    assert triggered[1].data[0:1] == b"\x05"  # Should be A-RELEASE-RQ PDU
    assert event.event.name == "EVT_DATA_RECV"

    scp.shutdown()
def test_data_recv_bind(self):
    """Test binding to EVT_DATA_RECV."""
    triggered = []

    def handle(event):
        triggered.append(event)

    self.ae = ae = AE()
    ae.add_supported_context(Verification)
    ae.add_requested_context(Verification)
    scp = ae.start_server(("localhost", 11112), block=False)
    assert scp.get_handlers(evt.EVT_DATA_RECV) == []
    assoc = ae.associate("localhost", 11112)
    assert assoc.is_established
    assert len(scp.active_associations) == 1
    # Bound after negotiation, so only the release request should be seen.
    scp.bind(evt.EVT_DATA_RECV, handle)
    assert scp.get_handlers(evt.EVT_DATA_RECV) == [(handle, None)]
    assert assoc.get_handlers(evt.EVT_DATA_RECV) == []
    child = scp.active_associations[0]
    assert child.get_handlers(evt.EVT_DATA_RECV) == [(handle, None)]
    assoc.release()
    while scp.active_associations:
        time.sleep(0.05)

    assert len(triggered) == 1
    event = triggered[0]
    assert isinstance(event, Event)
    assert isinstance(event.assoc, Association)
    assert isinstance(event.timestamp, datetime)
    assert isinstance(event.data, bytes)
    assert event.data[0:1] == b"\x05"  # Should be A-RELEASE-RQ PDU
    assert event.event.name == "EVT_DATA_RECV"

    scp.shutdown()
def test_data_recv_unbind(self):
    """Test unbinding to EVT_DATA_RECV."""
    triggered = []

    def handle(event):
        triggered.append(event)

    self.ae = ae = AE()
    ae.add_supported_context(Verification)
    ae.add_requested_context(Verification)
    handlers = [(evt.EVT_DATA_RECV, handle)]
    scp = ae.start_server(("localhost", 11112), block=False, evt_handlers=handlers)
    assert scp.get_handlers(evt.EVT_DATA_RECV) == [(handle, None)]
    assoc = ae.associate("localhost", 11112)
    assert assoc.is_established
    # Unbinding removes the handler from the server and its child association.
    scp.unbind(evt.EVT_DATA_RECV, handle)
    assert len(scp.active_associations) == 1
    assert scp.get_handlers(evt.EVT_DATA_RECV) == []
    assert assoc.get_handlers(evt.EVT_DATA_RECV) == []
    child = scp.active_associations[0]
    assert child.get_handlers(evt.EVT_DATA_RECV) == []
    assoc.release()
    while scp.active_associations:
        time.sleep(0.05)

    # Only the association request (received before unbinding) was captured.
    assert len(triggered) == 1
    event = triggered[0]
    assert isinstance(event, Event)
    assert isinstance(event.assoc, Association)
    assert isinstance(event.timestamp, datetime)
    assert isinstance(event.data, bytes)
    assert triggered[0].data[0:1] == b"\x01"  # Should be A-ASSOCIATE-RQ PDU
    assert event.event.name == "EVT_DATA_RECV"

    scp.shutdown()
def test_data_recv_raises(self, caplog):
    """Test the handler for EVT_DATA_RECV raising exception."""

    def handle(event):
        raise NotImplementedError("Exception description")

    self.ae = ae = AE()
    ae.add_supported_context(Verification)
    ae.add_requested_context(Verification)
    handlers = [(evt.EVT_DATA_RECV, handle)]
    scp = ae.start_server(("localhost", 11112), block=False, evt_handlers=handlers)

    with caplog.at_level(logging.ERROR, logger="pynetdicom"):
        # The association must complete normally even though the handler
        # raises on every event: the exception is logged, not propagated.
        assoc = ae.associate("localhost", 11112)
        assert assoc.is_established
        assoc.release()
        while scp.active_associations:
            time.sleep(0.05)

        scp.shutdown()

        msg = (
            "Exception raised in user's 'evt.EVT_DATA_RECV' event handler"
            " 'handle'"
        )
        assert msg in caplog.text
        assert "Exception description" in caplog.text
class TestEventHandlingRequestor:
    """Test the transport events and handling as requestor."""

    # These tests mirror the acceptor-side tests above, but bind the handlers
    # on the requestor side (via `ae.associate(..., evt_handlers=...)` or
    # `assoc.bind(...)`); the server-side associations should stay unaffected.

    def setup(self):
        self.ae = None

    def teardown(self):
        if self.ae:
            self.ae.shutdown()

    def test_no_handlers(self):
        """Test associations as requestor with no handlers bound."""
        self.ae = ae = AE()
        ae.add_supported_context(Verification)
        ae.add_requested_context(Verification)
        scp = ae.start_server(("localhost", 11112), block=False)
        assoc = ae.associate("localhost", 11112)
        assert assoc.is_established
        assert assoc.get_handlers(evt.EVT_CONN_OPEN) == []
        assert assoc.get_handlers(evt.EVT_CONN_CLOSE) == []
        assoc.release()

        scp.shutdown()

    def test_bind_evt_conn_open(self):
        """Test start with a bound EVT_CONN_OPEN"""
        triggered_events = []

        def on_conn_open(event):
            triggered_events.append(event)

        self.ae = ae = AE()
        ae.add_supported_context(Verification)
        ae.add_requested_context(Verification)
        scp = ae.start_server(("localhost", 11112), block=False)
        assoc = ae.associate(
            "localhost", 11112, evt_handlers=[(evt.EVT_CONN_OPEN, on_conn_open)]
        )
        assert assoc.is_established
        assert assoc.get_handlers(evt.EVT_CONN_OPEN) == [(on_conn_open, None)]
        assert assoc.get_handlers(evt.EVT_CONN_CLOSE) == []
        # Handlers bound on the requestor must not leak to the acceptor side.
        child = scp.active_associations[0]
        assert child.get_handlers(evt.EVT_CONN_OPEN) == []
        assert child.get_handlers(evt.EVT_CONN_CLOSE) == []

        assert len(triggered_events) == 1
        event = triggered_events[0]
        assert isinstance(event, Event)
        assert isinstance(event.assoc, Association)
        assert isinstance(event.timestamp, datetime)
        assert isinstance(event.address[0], str)
        assert isinstance(event.address[1], int)

        assoc.release()

        scp.shutdown()

    def test_unbind_evt_conn_open(self):
        """Test unbinding EVT_CONN_OPEN"""
        triggered_events = []

        def on_conn_open(event):
            triggered_events.append(event)

        self.ae = ae = AE()
        ae.add_supported_context(Verification)
        ae.add_requested_context(Verification)
        scp = ae.start_server(("localhost", 11112), block=False)
        assoc = ae.associate(
            "localhost", 11112, evt_handlers=[(evt.EVT_CONN_OPEN, on_conn_open)]
        )
        assert assoc.is_established
        assert assoc.get_handlers(evt.EVT_CONN_OPEN) == [(on_conn_open, None)]
        assert assoc.get_handlers(evt.EVT_CONN_CLOSE) == []

        # The connection already opened, so the single captured event remains
        # after unbinding.
        assoc.unbind(evt.EVT_CONN_OPEN, on_conn_open)

        assert assoc.get_handlers(evt.EVT_CONN_OPEN) == []
        assert assoc.get_handlers(evt.EVT_CONN_CLOSE) == []

        assert len(triggered_events) == 1
        event = triggered_events[0]
        assert isinstance(event, Event)
        assert isinstance(event.assoc, Association)
        assert isinstance(event.timestamp, datetime)
        assert isinstance(event.address[0], str)
        assert isinstance(event.address[1], int)

        assoc.release()

        scp.shutdown()

    def test_bind_evt_conn_close(self):
        """Test start with a bound EVT_CONN_CLOSED"""
        triggered_events = []

        def on_conn_close(event):
            triggered_events.append(event)

        self.ae = ae = AE()
        ae.add_supported_context(Verification)
        ae.add_requested_context(Verification)
        scp = ae.start_server(("localhost", 11112), block=False)
        assoc = ae.associate(
            "localhost", 11112, evt_handlers=[(evt.EVT_CONN_CLOSE, on_conn_close)]
        )
        assert assoc.is_established
        assert assoc.get_handlers(evt.EVT_CONN_OPEN) == []
        assert assoc.get_handlers(evt.EVT_CONN_CLOSE) == [(on_conn_close, None)]
        child = scp.active_associations[0]
        assert child.get_handlers(evt.EVT_CONN_OPEN) == []
        assert child.get_handlers(evt.EVT_CONN_CLOSE) == []

        # Nothing has closed yet.
        assert len(triggered_events) == 0

        assoc.release()
        while scp.active_associations:
            time.sleep(0.05)

        assert len(triggered_events) == 1
        event = triggered_events[0]
        assert isinstance(event, Event)
        assert isinstance(event.assoc, Association)
        assert isinstance(event.timestamp, datetime)

        scp.shutdown()

    def test_bind_evt_conn_close_running(self):
        """Test binding EVT_CONN_CLOSED after assoc running."""
        triggered_events = []

        def on_conn_close(event):
            triggered_events.append(event)

        self.ae = ae = AE()
        ae.add_supported_context(Verification)
        ae.add_requested_context(Verification)
        scp = ae.start_server(("localhost", 11112), block=False)
        assoc = ae.associate("localhost", 11112)
        assert assoc.is_established
        assert assoc.get_handlers(evt.EVT_CONN_OPEN) == []
        assert assoc.get_handlers(evt.EVT_CONN_CLOSE) == []
        child = scp.active_associations[0]
        assert child.get_handlers(evt.EVT_CONN_OPEN) == []
        assert child.get_handlers(evt.EVT_CONN_CLOSE) == []

        assert len(triggered_events) == 0

        # Binding while the association is live still catches the later close.
        assoc.bind(evt.EVT_CONN_CLOSE, on_conn_close)

        assert assoc.get_handlers(evt.EVT_CONN_OPEN) == []
        assert assoc.get_handlers(evt.EVT_CONN_CLOSE) == [(on_conn_close, None)]

        assoc.release()
        while scp.active_associations:
            time.sleep(0.05)

        assert len(triggered_events) == 1
        event = triggered_events[0]
        assert isinstance(event, Event)
        assert isinstance(event.assoc, Association)
        assert isinstance(event.timestamp, datetime)

        scp.shutdown()

    def test_unbind_evt_conn_close(self):
        """Test unbinding EVT_CONN_CLOSED"""
        triggered_events = []

        def on_conn_close(event):
            triggered_events.append(event)

        self.ae = ae = AE()
        ae.add_supported_context(Verification)
        ae.add_requested_context(Verification)
        scp = ae.start_server(("localhost", 11112), block=False)
        assoc = ae.associate(
            "localhost", 11112, evt_handlers=[(evt.EVT_CONN_CLOSE, on_conn_close)]
        )
        assert assoc.is_established
        assert assoc.get_handlers(evt.EVT_CONN_OPEN) == []
        assert assoc.get_handlers(evt.EVT_CONN_CLOSE) == [(on_conn_close, None)]
        child = scp.active_associations[0]
        assert child.get_handlers(evt.EVT_CONN_OPEN) == []
        assert child.get_handlers(evt.EVT_CONN_CLOSE) == []

        # Unbound before the close, so no events should ever fire.
        assoc.unbind(evt.EVT_CONN_CLOSE, on_conn_close)

        assert assoc.get_handlers(evt.EVT_CONN_OPEN) == []
        assert assoc.get_handlers(evt.EVT_CONN_CLOSE) == []

        assoc.release()
        while scp.active_associations:
            time.sleep(0.05)

        assert len(triggered_events) == 0

        scp.shutdown()

    def test_connection_failure_log(self, caplog):
        """Test that a connection failure is logged."""
        self.ae = ae = AE()
        ae.add_supported_context(Verification)
        ae.add_requested_context(Verification)
        scp = ae.start_server(("localhost", 11112), block=False)

        with caplog.at_level(logging.ERROR, logger="pynetdicom"):
            # Port 11113 has no listener, so the connect attempt must fail.
            assoc = ae.associate("localhost", 11113)
            assert assoc.is_aborted

            messages = [
                "Association request failed: unable to connect to remote",
                "TCP Initialisation Error",
            ]
            for msg in messages:
                assert msg in caplog.text

        scp.shutdown()

    def test_data_sent(self):
        """Test binding to EVT_DATA_SENT."""
        triggered = []

        def handle(event):
            triggered.append(event)

        self.ae = ae = AE()
        ae.add_supported_context(Verification)
        ae.add_requested_context(Verification)
        handlers = [(evt.EVT_DATA_SENT, handle)]
        scp = ae.start_server(("localhost", 11112), block=False)
        assert scp.get_handlers(evt.EVT_DATA_SENT) == []
        assoc = ae.associate("localhost", 11112, evt_handlers=handlers)
        assert assoc.is_established
        assert len(scp.active_associations) == 1
        assert scp.get_handlers(evt.EVT_DATA_SENT) == []
        assert assoc.get_handlers(evt.EVT_DATA_SENT) == [(handle, None)]
        child = scp.active_associations[0]
        assert child.get_handlers(evt.EVT_DATA_SENT) == []
        assoc.release()
        while scp.active_associations:
            time.sleep(0.05)

        # The requestor sends the association and release request PDUs.
        assert len(triggered) == 2
        event = triggered[0]
        assert isinstance(event, Event)
        assert isinstance(event.assoc, Association)
        assert isinstance(event.timestamp, datetime)
        assert isinstance(event.data, bytes)
        assert event.event.name == "EVT_DATA_SENT"
        assert triggered[0].data[0:1] == b"\x01"  # A-ASSOCIATE-RQ
        assert triggered[1].data[0:1] == b"\x05"  # A-RELEASE-RQ

        scp.shutdown()

    def test_data_sent_bind(self):
        """Test binding to EVT_DATA_SENT."""
        triggered = []

        def handle(event):
            triggered.append(event)

        self.ae = ae = AE()
        ae.add_supported_context(Verification)
        ae.add_requested_context(Verification)
        scp = ae.start_server(("localhost", 11112), block=False)
        assert scp.get_handlers(evt.EVT_DATA_SENT) == []
        assoc = ae.associate("localhost", 11112)
        assert assoc.is_established
        # Bound after negotiation, so only the release request is captured.
        assoc.bind(evt.EVT_DATA_SENT, handle)
        assert len(scp.active_associations) == 1
        assert scp.get_handlers(evt.EVT_DATA_SENT) == []
        assert assoc.get_handlers(evt.EVT_DATA_SENT) == [(handle, None)]
        child = scp.active_associations[0]
        assert child.get_handlers(evt.EVT_DATA_SENT) == []
        assoc.release()
        while scp.active_associations:
            time.sleep(0.05)

        assert len(triggered) == 1
        event = triggered[0]
        assert isinstance(event, Event)
        assert isinstance(event.assoc, Association)
        assert isinstance(event.timestamp, datetime)
        assert isinstance(event.data, bytes)
        assert event.event.name == "EVT_DATA_SENT"
        assert event.data[0:1] == b"\x05"  # A-RELEASE-RQ

        scp.shutdown()

    def test_data_sent_unbind(self):
        """Test unbinding EVT_DATA_SENT."""
        triggered = []

        def handle(event):
            triggered.append(event)

        self.ae = ae = AE()
        ae.add_supported_context(Verification)
        ae.add_requested_context(Verification)
        handlers = [(evt.EVT_DATA_SENT, handle)]
        scp = ae.start_server(("localhost", 11112), block=False)
        assert scp.get_handlers(evt.EVT_DATA_SENT) == []
        assoc = ae.associate("localhost", 11112, evt_handlers=handlers)
        assert assoc.is_established
        assert len(scp.active_associations) == 1
        assert scp.get_handlers(evt.EVT_DATA_SENT) == []
        assert assoc.get_handlers(evt.EVT_DATA_SENT) == [(handle, None)]
        child = scp.active_associations[0]
        assert child.get_handlers(evt.EVT_DATA_SENT) == []
        # Unbind before release: only the association request was captured.
        assoc.unbind(evt.EVT_DATA_SENT, handle)
        assoc.release()
        while scp.active_associations:
            time.sleep(0.05)

        assert len(triggered) == 1
        assert triggered[0].data[0:1] == b"\x01"  # A-ASSOCIATE-RQ

        scp.shutdown()

    def test_data_recv(self):
        """Test starting bound to EVT_DATA_RECV."""
        triggered = []

        def handle(event):
            triggered.append(event)

        self.ae = ae = AE()
        ae.add_supported_context(Verification)
        ae.add_requested_context(Verification)
        handlers = [(evt.EVT_DATA_RECV, handle)]
        scp = ae.start_server(("localhost", 11112), block=False)
        assert scp.get_handlers(evt.EVT_DATA_RECV) == []
        assoc = ae.associate("localhost", 11112, evt_handlers=handlers)
        assert assoc.is_established
        assert len(scp.active_associations) == 1
        assert scp.get_handlers(evt.EVT_DATA_RECV) == []
        assert assoc.get_handlers(evt.EVT_DATA_RECV) == [(handle, None)]
        child = scp.active_associations[0]
        assert child.get_handlers(evt.EVT_DATA_RECV) == []
        assoc.release()
        while scp.active_associations:
            time.sleep(0.05)

        # The requestor receives the acceptance and release response PDUs.
        assert len(triggered) == 2
        event = triggered[0]
        assert isinstance(event, Event)
        assert isinstance(event.assoc, Association)
        assert isinstance(event.timestamp, datetime)
        assert isinstance(event.data, bytes)
        assert triggered[0].data[0:1] == b"\x02"  # Should be A-ASSOCIATE-AC PDU
        assert triggered[1].data[0:1] == b"\x06"  # Should be A-RELEASE-RP PDU
        assert event.event.name == "EVT_DATA_RECV"

        scp.shutdown()

    def test_data_recv_bind(self):
        """Test binding to EVT_DATA_RECV."""
        triggered = []

        def handle(event):
            triggered.append(event)

        self.ae = ae = AE()
        ae.add_supported_context(Verification)
        ae.add_requested_context(Verification)
        scp = ae.start_server(("localhost", 11112), block=False)
        assert scp.get_handlers(evt.EVT_DATA_RECV) == []
        assoc = ae.associate("localhost", 11112)
        assert assoc.is_established
        assert len(scp.active_associations) == 1
        # Bound after negotiation, so only the release response is captured.
        assoc.bind(evt.EVT_DATA_RECV, handle)
        assert scp.get_handlers(evt.EVT_DATA_RECV) == []
        assert assoc.get_handlers(evt.EVT_DATA_RECV) == [(handle, None)]
        child = scp.active_associations[0]
        assert child.get_handlers(evt.EVT_DATA_RECV) == []
        assoc.release()
        while scp.active_associations:
            time.sleep(0.05)

        assert len(triggered) == 1
        event = triggered[0]
        assert isinstance(event, Event)
        assert isinstance(event.assoc, Association)
        assert isinstance(event.timestamp, datetime)
        assert isinstance(event.data, bytes)
        assert event.data[0:1] == b"\x06"  # Should be A-RELEASE-RP PDU
        assert event.event.name == "EVT_DATA_RECV"

        scp.shutdown()

    def test_data_recv_unbind(self):
        """Test unbinding to EVT_DATA_RECV."""
        triggered = []

        def handle(event):
            triggered.append(event)

        self.ae = ae = AE()
        ae.add_supported_context(Verification)
        ae.add_requested_context(Verification)
        handlers = [(evt.EVT_DATA_RECV, handle)]
        scp = ae.start_server(("localhost", 11112), block=False)
        assert scp.get_handlers(evt.EVT_DATA_RECV) == []
        assoc = ae.associate("localhost", 11112, evt_handlers=handlers)
        assert assoc.is_established
        assert assoc.get_handlers(evt.EVT_DATA_RECV) == [(handle, None)]
        # Unbind after establishment: only the acceptance PDU was captured.
        assoc.unbind(evt.EVT_DATA_RECV, handle)
        assert len(scp.active_associations) == 1
        assert scp.get_handlers(evt.EVT_DATA_RECV) == []
        assert assoc.get_handlers(evt.EVT_DATA_RECV) == []
        child = scp.active_associations[0]
        assert child.get_handlers(evt.EVT_DATA_RECV) == []
        assoc.release()
        while scp.active_associations:
            time.sleep(0.05)

        assert len(triggered) == 1
        event = triggered[0]
        assert isinstance(event, Event)
        assert isinstance(event.assoc, Association)
        assert isinstance(event.timestamp, datetime)
        assert isinstance(event.data, bytes)
        assert triggered[0].data[0:1] == b"\x02"  # Should be A-ASSOCIATE-AC PDU
        assert event.event.name == "EVT_DATA_RECV"

        scp.shutdown()
|
__init__.py | """
Flexible RPC client and server for Python classes based on ZeroMQ and MessagePack.
outrun primarily relies on RPC to expose the local file system by simply executing calls
like open(), readdir(), truncate(), symlink() over the network. With that it has a
couple of requirements that were not met by popular existing libraries.
* Ease of exposing a large number of different functions with minimal boilerplate
* gRPC is not suitable due to its reliance on .proto files and code generation.
* outrun only needs to communicate with itself, so its benefits are not worth it.
* Low overhead per call
* Latency is key with file system operations.
* xmlrpc suffers from HTTP overhead.
* Multithreading support
* zerorpc (https://github.com/0rpc/zerorpc-python) does not function well with
multithreading due to its reliance on gevent.
For that reason outrun ships with a custom RPC implementation that is largely inspired
by zerorpc, but with the following differences:
* Native support for multithreading
* On the server side with multiple workers
* On the client side with a socket per thread
* Automatic serialization and deserialization of dataclasses based on type annotations
* Transport and faithful recreation of builtin exceptions
* As opposed to wrapping all exceptions into a generic RPC exception type
* This makes it easy to forward things like FileNotFoundError.
* Support for shared secret authentication
* There is no need for encryption because the SSH tunnels take care of this.
MessagePack supports fast and compact serialization, and allows for custom types without
the need for writing a custom spec and generating code (like gRPC). ZeroMQ is perfect
for protocol handling thanks to its builtin DEALER/ROUTER and REQUEST/REPLY patterns.
"""
from abc import ABC
import builtins
from dataclasses import is_dataclass
from enum import auto, Enum
import json
import logging
import threading
import time
import typing
from typing import Any, Callable, Dict, IO, List, NoReturn, Optional, Tuple
import msgpack
import zmq
from outrun.logger import log, summarize
class Encoding:
    """
    Serialization and deserialization of objects using JSON or MessagePack.

    MessagePack serialization can be used for efficient network transfer and JSON
    serialization for simple disk storage.
    """

    def __init__(self, *dataclasses: type):
        """Initialize a (de)serializer with support for the given dataclass types."""
        # Maps a dataclass's qualified name to the type itself, used to look
        # the type back up during deserialization.
        self._dataclasses: Dict[str, type] = {}

        for dataclass in dataclasses:
            self.register_dataclasses(dataclass)

    def register_dataclasses(self, seed_type: type) -> None:
        """
        Register all dataclass types used within the specified type.

        This includes the class itself, its class members, nested dataclasses, and
        container types like List and Optional.
        """
        for dataclass in self._discover_dataclasses(seed_type):
            self._dataclasses[dataclass.__qualname__] = dataclass

    def pack(self, obj: Any) -> bytes:
        """Serialize an object using MessagePack."""
        return msgpack.packb(obj, default=self.serialize_obj)

    def unpack(self, data: bytes) -> Any:
        """Deserialize an object using MessagePack."""
        return msgpack.unpackb(data, object_hook=self.deserialize_obj)

    def dump_json(self, obj: Any, fp: IO[str]) -> None:
        """Serialize an object to JSON."""
        json.dump(obj, fp, default=self.serialize_obj)

    def load_json(self, fp: IO[str]) -> Any:
        """Deserialize an object from JSON."""
        return json.load(fp, object_hook=self.deserialize_obj)

    def serialize_obj(self, obj: Any) -> Any:
        """Turn a dataclass or object into a serialization friendly representation."""
        if isinstance(obj, BaseException):
            return self._serialize_exception(obj)
        elif obj.__class__.__qualname__ in self._dataclasses:
            return self._serialize_dataclass(obj)
        else:
            raise ValueError(f"unserializable object {obj}")

    def deserialize_obj(self, obj: Any) -> Any:
        """Reconstruct a dataclass or exception from a serialized representation."""
        if isinstance(obj, dict) and "__exception__" in obj:
            return self._deserialize_exception(obj)
        elif isinstance(obj, dict) and "__data__" in obj:
            return self._deserialize_dataclass(obj)
        else:
            return obj

    #
    # Exception serialization
    #

    @staticmethod
    def _serialize_exception(exc: BaseException) -> Dict:
        """Turn an exception into a serialization friendly dict."""
        return {"__exception__": {"name": exc.__class__.__qualname__, "args": exc.args}}

    @staticmethod
    def _deserialize_exception(obj: Dict) -> BaseException:
        """
        Reconstruct an exception from its serialized representation.

        If it was a builtin exception (like IOError) then it is reconstructed
        faithfully, otherwise as a generic Exception with the original arguments.
        """
        name = obj["__exception__"]["name"]
        args = obj["__exception__"]["args"]

        # type(None) is a deliberate fallback: it is a type, but never a
        # BaseException subclass, so unknown names fail the check below.
        builtin_exc = getattr(builtins, name, type(None))

        if isinstance(builtin_exc, type) and issubclass(builtin_exc, BaseException):
            return builtin_exc(*args)
        else:
            return Exception(*args)

    #
    # Data class serialization
    #

    @classmethod
    def _serialize_dataclass(cls, obj: Any) -> Dict:
        """Turn a dataclass into a serialization friendly dict."""
        return {"__data__": {"type": obj.__class__.__qualname__, "data": obj.__dict__}}

    def _deserialize_dataclass(self, obj: Dict) -> Any:
        """
        Reconstruct a dataclass from its serialized representation.

        Only previously registered dataclass types can be deserialized.

        Raises TypeError for unknown dataclass names or construction failures.
        """
        type_name = obj["__data__"]["type"]
        type_data = obj["__data__"]["data"]

        if type_name in self._dataclasses:
            try:
                return self._dataclasses[type_name](**type_data)
            except Exception as e:
                # Chain the original exception so the root cause is preserved.
                raise TypeError(f"failed to deserialize {type_name}: {e}") from e
        else:
            raise TypeError(f"unknown dataclass '{type_name}'")

    @staticmethod
    def _discover_dataclasses(*seed_types: type) -> List[type]:
        """
        Find all dataclass types used with the specified type.

        This includes the class itself, its class members, nested dataclasses, and
        container types like List and Optional.
        """
        candidates = set(seed_types)
        explored = set()
        dataclasses = set()

        while candidates:
            candidate = candidates.pop()

            # Each candidate only needs examining once, even when referenced
            # from multiple places (prevents cycles from looping forever).
            if candidate in explored:
                continue
            explored.add(candidate)

            if is_dataclass(candidate):
                dataclasses.add(candidate)

                # Discover member types of dataclass
                for subtype in typing.get_type_hints(candidate).values():
                    candidates.add(subtype)
            elif hasattr(candidate, "__origin__"):
                # Discover types nested in constructs like Union[T] and List[T]
                # (No typing.get_args/get_origin for Python 3.7 compatibility)
                for subtype in getattr(candidate, "__args__"):
                    candidates.add(subtype)

        return list(dataclasses)
class ReturnType(Enum):
    """Type of result for an RPC call."""

    NORMAL = auto()       # call succeeded; payload is the return value
    EXCEPTION = auto()    # call raised; payload is the serialized exception
    TOKEN_ERROR = auto()  # authentication token mismatch; payload is None
class InvalidTokenError(RuntimeError):
    """Exception raised when an RPC call is made with a wrong authentication token."""
class Base(ABC):
    """Shared logic between RPC client and server implementation."""

    def __init__(self, service_type: type):
        """Initialize RPC (de)serialization to support the specified service class."""
        self._encoding = Encoding(*self._discover_function_types(service_type))

    @staticmethod
    def _discover_function_types(service_type: type) -> List[type]:
        """Discover all types used as parameters or return values in the RPC service."""
        discovered: List[type] = []

        # Walk every callable attribute of the service class and collect the
        # annotated parameter and return types of each one.
        for attr_name in dir(service_type):
            member = getattr(service_type, attr_name)

            if callable(member):
                discovered.extend(typing.get_type_hints(member).values())

        return discovered
class Server(Base):
    """
    RPC server to expose a service defined through members of a class instance.

    Example:

    ```
    class Foo:
        def bar(a, b):
            return a + b

    server = rpc.Server(Foo())
    server.serve("tcp://0.0.0.0:1234")
    ```
    """

    def __init__(
        self, service: Any, token: Optional[str] = None, worker_count: int = 1
    ):
        """
        Instantiate an RPC server for the given service class instance.

        The server will expose all methods in the class to clients. If a token is
        specified then clients will need to be initialized with that same token to be
        allowed to make calls. Incoming calls will be distributed across the specified
        number of worker threads.
        """
        super().__init__(service.__class__)

        self.context = zmq.Context()

        self.service = service
        self.token = token
        self.worker_count = worker_count

    def serve(self, endpoint: str) -> NoReturn:
        """
        Start listening and handling calls for clients on the specified endpoint.

        The endpoint should have the format of endpoint in zmq_bind
        (http://api.zeromq.org/2-1:zmq-bind), for example "tcp://0.0.0.0:1234".

        Never returns: zmq.proxy blocks forever forwarding client requests to
        the worker threads.
        """
        socket = self.context.socket(zmq.ROUTER)
        socket.bind(endpoint)

        # Workers connect via an in-process transport; id(self) keeps the
        # address unique per server instance within this process.
        workers_socket = self.context.socket(zmq.DEALER)
        workers_socket.bind(f"inproc://{id(self)}")

        # Daemon threads so worker loops don't block interpreter shutdown.
        for _ in range(self.worker_count):
            t = threading.Thread(target=self._run_worker, daemon=True)
            t.start()

        zmq.proxy(socket, workers_socket)

        assert False, "unreachable"

    def _run_worker(self) -> NoReturn:
        """Request/response loop to handle calls for a single worker thread."""
        socket = self.context.socket(zmq.REP)
        socket.connect(f"inproc://{id(self)}")

        while True:
            # Wait for a call to come in
            token, function, *args = self._encoding.unpack(socket.recv())

            if token != self.token:
                # Authentication token mismatch between client/server
                socket.send(self._encoding.pack((ReturnType.TOKEN_ERROR.value, None)))
            else:
                # Invoke the method and return the response (value/raised exception)
                try:
                    if function is None:
                        # A call with no function name is a connectivity check
                        # (see Client.ping) and deliberately does nothing.
                        ret = None
                    else:
                        ret = getattr(self.service, function)(*args)

                    socket.send(self._encoding.pack((ReturnType.NORMAL.value, ret)))
                except Exception as e:
                    socket.send(self._encoding.pack((ReturnType.EXCEPTION.value, e)))
class Client(Base):
    """
    RPC client to invoke methods on a service instance exposed by an RPC server.

    A single client can be used by multiple threads and will internally create multiple
    socket connections as needed.

    Example:

    ```
    foo = rpc.Client(Foo, "tcp://localhost:1234")
    c = foo.bar(1, 2)
    ```
    """

    def __init__(
        self,
        service_type: type,
        endpoint: str,
        token: Optional[str] = None,
        timeout_ms: int = -1,
    ) -> None:
        """
        Instantiate an RPC client for the service type at the given endpoint.

        The endpoint should follow the format of endpoint in zmq_connect
        (http://api.zeromq.org/3-2:zmq-connect), for example "tcp://localhost:1234".
        """
        super().__init__(service_type)

        self.endpoint = endpoint
        self.token = token
        self.timeout_ms = timeout_ms

        self.context = zmq.Context()

        # One REQ socket per calling thread (see _socket below).
        self._socket_pool: Dict[threading.Thread, zmq.Socket] = {}
        self._socket_pool_lock = threading.Lock()

    def _socket(self, timeout_ms: Optional[int] = None) -> zmq.Socket:
        """
        Return a socket to be used for the current thread.

        Each thread needs its own socket because REQUEST-REPLY need to happen in
        lockstep per socket. The (initial) timeout is set to the constructor specified
        timeout, but can be overridden.

        Note: the timeout_ms override only applies when this call creates the
        thread's socket; an existing pooled socket keeps its current timeouts.
        """
        if timeout_ms is None:
            timeout_ms = self.timeout_ms

        t = threading.current_thread()

        with self._socket_pool_lock:
            if t not in self._socket_pool:
                sock = self.context.socket(zmq.REQ)
                sock.setsockopt(zmq.RCVTIMEO, timeout_ms)
                sock.setsockopt(zmq.SNDTIMEO, timeout_ms)
                sock.connect(self.endpoint)

                self._socket_pool[t] = sock

            return self._socket_pool[t]

    def ping(self, timeout_ms: Optional[int] = None) -> None:
        """
        Check if the service is available.

        The check will use the timeout from the constructor by default, but this timeout
        can be overridden using the parameter.

        Raises IOError on timeout and InvalidTokenError on token mismatch
        (propagated from the wrapped call).
        """
        sock = self._socket(timeout_ms)

        # Temporarily override timeout
        if timeout_ms is not None:
            sock.setsockopt(zmq.RCVTIMEO, timeout_ms)
            sock.setsockopt(zmq.SNDTIMEO, timeout_ms)

        try:
            # A call with function name None is a no-op on the server, so it
            # only exercises connectivity and authentication.
            self.__getattr__(None)()
        finally:
            # Restore to the constructor timeout
            if timeout_ms is not None:
                sock.setsockopt(zmq.RCVTIMEO, self.timeout_ms)
                sock.setsockopt(zmq.SNDTIMEO, self.timeout_ms)

    def __del__(self) -> None:
        """Close the client sockets and their ZeroMQ context."""
        with self._socket_pool_lock:
            for sock in self._socket_pool.values():
                sock.close(linger=0)

            self.context.destroy()

    @property
    def socket_count(self) -> int:
        """Return the number of sockets for this client."""
        with self._socket_pool_lock:
            return len(self._socket_pool)

    @staticmethod
    def _summarize_args(args: tuple) -> Tuple[str, ...]:
        """Summarize a tuple of function arguments."""
        return tuple([summarize(arg) for arg in args])

    def __getattr__(self, name: Optional[str]) -> Callable[..., Any]:
        """Retrieve a wrapper to call the specified remote function."""

        def fn(*args: Any) -> Any:
            """
            Call wrapped remote function with the given arguments.

            Serializes the arguments, makes the call and deserializes the resulting
            return value or raises the resulting exception.

            ZeroMQ connections are stateless so the token is sent again with every call.
            """
            sock = self._socket()

            t_call = time.time()

            # Serialize arguments and invoke remote function
            call = self._encoding.pack((self.token, name, *args))
            sock.send(call)

            # Wait for answer (return value, exception, token error, or RPC error)
            try:
                typ, *ret = self._encoding.unpack(sock.recv())
            except zmq.ZMQError:
                raise IOError("rpc call timed out")

            t_return = time.time()

            # Explicit check before logging because _summarize_args is relatively slow
            if log.isEnabledFor(logging.DEBUG):
                t_millis = round((t_return - t_call) * 1000)
                log.debug(f"rpc::{name}{self._summarize_args(args)} - {t_millis} ms")

            if typ == ReturnType.NORMAL.value:
                if len(ret) == 1:
                    return ret[0]
                else:
                    return ret
            elif typ == ReturnType.EXCEPTION.value:
                raise ret[0]
            elif typ == ReturnType.TOKEN_ERROR.value:
                raise InvalidTokenError("token mismatch between client and server")
            else:
                raise ValueError(f"unexpected return type {typ}")

        return fn
|
test.py | #!/usr/bin/env python3
import os, sys, json, time, requests, copy, traceback, tempfile, threading, subprocess
from collections import OrderedDict
from subprocess import check_output
from multiprocessing import Pool
from contextlib import contextmanager
# Directory holding the OpenLambda worker under test.
OLDIR = 'test-dir'

# Accumulated result records for every executed test (written to test.json).
results = OrderedDict({"runs": []})
# The config most recently pushed by put_conf(); recorded with each result.
curr_conf = None
def post(path, data=None):
    """POST JSON-serialized ``data`` to the local OL server endpoint ``path``."""
    url = 'http://localhost:5000/' + path
    return requests.post(url, json.dumps(data))
def raise_for_status(r):
    """Raise when the HTTP response carries a non-200 status code."""
    if r.status_code == 200:
        return
    raise Exception("STATUS %d: %s" % (r.status_code, r.text))
def test_in_filter(name):
if len(sys.argv) < 2:
return True
return name in sys.argv[1:]
def get_mem_stat_mb(stat, meminfo_path='/proc/meminfo'):
    """Return the value of a meminfo field in MiB.

    Args:
        stat: field name without the colon, e.g. "MemAvailable".
        meminfo_path: file to parse; defaults to the live kernel meminfo
            (parameterized so the parser is testable — backward compatible).

    Returns:
        The field value converted from kB to MiB (float).

    Raises:
        Exception: if the field is missing or not reported in kB.
    """
    with open(meminfo_path) as f:
        for line in f:
            if line.startswith(stat + ":"):
                parts = line.strip().split()
                # raise instead of assert so the check survives `python -O`
                if parts[-1] != 'kB':
                    raise Exception('unexpected unit %r for stat %s' % (parts[-1], stat))
                return int(parts[1]) / 1024
    raise Exception('could not get stat')
def ol_oom_killer():
    """Watchdog loop: poll free memory once per second, kill OL when it runs low.

    Runs forever; intended to be started on a daemon thread so a leaking
    worker cannot hang the whole test run.
    """
    while True:
        available_mb = get_mem_stat_mb('MemAvailable')
        if available_mb < 128:
            print("out of memory, trying to kill OL")
            os.system('pkill ol')
        time.sleep(1)
def test(fn):
    """Decorator wrapping a test/benchmark with worker lifecycle + result capture.

    For each call the wrapper:
      1. skips the test unless its name matches the CLI filter,
      2. starts a fresh OL worker, runs ``fn``, then kills the worker,
      3. checks for leaked mounts and collects worker stats and log tail,
      4. appends an OrderedDict record to the global ``results``.

    Positional args are rejected so every parameter appears by name in the
    recorded "params" field.
    """
    def wrapper(*args, **kwargs):
        if len(args):
            raise Exception("positional args not supported for tests")
        name = fn.__name__
        if not test_in_filter(name):
            return None
        print('='*40)
        if len(kwargs):
            print(name, kwargs)
        else:
            print(name)
        print('='*40)
        result = OrderedDict()
        result["test"] = name
        result["params"] = kwargs
        result["pass"] = None
        result["conf"] = curr_conf
        result["seconds"] = None
        result["total_seconds"] = None
        result["stats"] = None
        result["ol-stats"] = None
        result["errors"] = []
        result["worker_tail"] = None
        total_t0 = time.time()
        mounts0 = mounts()
        try:
            # setup worker
            run(['./ol', 'worker', '-p='+OLDIR, '--detach'])
            # run test/benchmark
            test_t0 = time.time()
            rv = fn(**kwargs)
            test_t1 = time.time()
            result["seconds"] = test_t1 - test_t0
            result["pass"] = True
        except Exception:
            rv = None
            result["pass"] = False
            result["errors"].append(traceback.format_exc().split("\n"))
        # cleanup worker (failure to kill also fails the test)
        try:
            run(['./ol', 'kill', '-p='+OLDIR])
        except Exception:
            result["pass"] = False
            result["errors"].append(traceback.format_exc().split("\n"))
        # a changed mount count means the worker leaked sandboxes
        mounts1 = mounts()
        if len(mounts0) != len(mounts1):
            result["pass"] = False
            result["errors"].append(["mounts are leaking (%d before, %d after), leaked: %s"
                                     % (len(mounts0), len(mounts1), str(mounts1 - mounts0))])
        # get internal stats from OL
        if os.path.exists(OLDIR+"/worker/stats.json"):
            with open(OLDIR+"/worker/stats.json") as f:
                olstats = json.load(f)
                result["ol-stats"] = OrderedDict(sorted(list(olstats.items())))
        total_t1 = time.time()
        result["total_seconds"] = total_t1-total_t0
        result["stats"] = rv
        with open(os.path.join(OLDIR, "worker.out")) as f:
            result["worker_tail"] = f.read().split("\n")
        if result["pass"]:
            # truncate because we probably won't use it for debugging
            result["worker_tail"] = result["worker_tail"][-10:]
        results["runs"].append(result)
        print(json.dumps(result, indent=2))
        return rv
    return wrapper
def put_conf(conf):
    """Write ``conf`` to the worker's config.json and remember it globally."""
    global curr_conf
    config_path = os.path.join(OLDIR, "config.json")
    with open(config_path, "w") as f:
        json.dump(conf, f, indent=2)
    curr_conf = conf
def mounts():
    """Return the current mount table as a set of `mount` output lines."""
    raw = str(check_output(["mount"]), "utf-8")
    return set(raw.split("\n"))
@contextmanager
def TestConf(**keywords):
    """Context manager that overlays ``keywords`` onto the worker config file.

    Top-level dict values are merged one level deep; unknown keys raise.
    The original config is restored on exit.

    NOTE(review): there is no try/finally around the yield, so the original
    config is NOT restored if the body raises — confirm this is acceptable
    (the @test wrapper normally swallows test exceptions before they get here).
    """
    with open(os.path.join(OLDIR, "config.json")) as f:
        orig = json.load(f)
    new = copy.deepcopy(orig)
    for k in keywords:
        if not k in new:
            raise Exception("unknown config param: %s" % k)
        if type(keywords[k]) == dict:
            # merge one level deep so sibling sub-keys keep their old values
            for k2 in keywords[k]:
                new[k][k2] = keywords[k][k2]
        else:
            new[k] = keywords[k]
    # setup
    print("PUSH conf:", keywords)
    put_conf(new)
    yield new
    # cleanup
    print("POP conf:", keywords)
    put_conf(orig)
def run(cmd):
    """Run a command (echoed first); raise with its truncated output on failure."""
    print("RUN", " ".join(cmd))
    failed = False
    try:
        output = check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as err:
        output = err.output
        failed = True
    text = str(output, 'utf-8')
    # keep error messages readable by capping captured output
    if len(text) > 500:
        text = text[:500] + "..."
    if failed:
        raise Exception("command (%s) failed: %s" % (" ".join(cmd), text))
@test
def install_tests():
    """Smoke-test pip package installation accounting via the stats endpoint.

    Starts from an empty package cache, confirms a no-dependency lambda
    triggers zero installs, then runs three lambdas sharing the `requests`
    dependency tree and checks the cumulative pull-package counter.
    """
    # we want to make sure we see the expected number of pip installs,
    # so we don't want installs lying around from before
    rc = os.system('rm -rf test-dir/lambda/packages/*')
    assert(rc == 0)
    # try something that doesn't install anything
    msg = 'hello world'
    r = post("run/echo", msg)
    raise_for_status(r)
    if r.json() != msg:
        raise Exception("found %s but expected %s" % (r.json(), msg))
    r = post("stats", None)
    raise_for_status(r)
    installs = r.json().get('pull-package.cnt', 0)
    assert(installs == 0)
    for i in range(3):
        # lambdas are named install, install2, install3
        name = "install"
        if i != 0:
            name += str(i+1)
        r = post("run/"+name, {})
        raise_for_status(r)
        assert r.json() == "imported"
        r = post("stats", None)
        raise_for_status(r)
        installs = r.json()['pull-package.cnt']
        if i < 2:
            # with deps, requests should give us these:
            # certifi, chardet, idna, requests, urllib3
            assert(installs == 5)
        else:
            assert(installs == 6)
@test
def numpy_test():
    """Exercise pip-installed numpy/pandas lambdas.

    Verifies that two different numpy versions can coexist across sandboxes
    and that both version-pinned and unpinned pandas imports work. The five
    hand-rolled status checks were replaced with raise_for_status(), which
    raises the identical "STATUS %d: %s" exception — consistency fix only.
    """
    # try adding the nums in a few different matrixes.  Also make sure
    # we can have two different numpy versions co-existing.
    r = post("run/numpy15", [1, 2])
    raise_for_status(r)
    j = r.json()
    assert j['result'] == 3
    assert j['version'].startswith('1.15')
    r = post("run/numpy16", [[1, 2], [3, 4]])
    raise_for_status(r)
    j = r.json()
    assert j['result'] == 10
    assert j['version'].startswith('1.16')
    r = post("run/numpy15", [[[1, 2], [3, 4]], [[1, 2], [3, 4]]])
    raise_for_status(r)
    j = r.json()
    assert j['result'] == 20
    assert j['version'].startswith('1.15')
    r = post("run/pandas15", [[1, 2, 3], [1, 2, 3]])
    raise_for_status(r)
    j = r.json()
    assert j['result'] == 12
    assert j['version'].startswith('1.15')
    r = post("run/pandas", [[0, 1, 2], [3, 4, 5]])
    raise_for_status(r)
    j = r.json()
    assert j['result'] == 15
    assert float(".".join(j['version'].split('.')[:2])) >= 1.16
def stress_one_lambda_task(args):
    """Hammer the echo lambda until the deadline; return the request count."""
    t0, seconds = args
    deadline = t0 + seconds
    count = 0
    while time.time() < deadline:
        r = post("run/echo", count)
        raise_for_status(r)
        assert r.text == str(count)
        count += 1
    return count
@test
def stress_one_lambda(procs, seconds):
    """Drive one lambda from ``procs`` worker processes for ``seconds``.

    Returns the aggregate request throughput.
    """
    start = time.time()
    jobs = [(start, seconds)] * procs
    with Pool(procs) as pool:
        reqs = sum(pool.map(stress_one_lambda_task, jobs, chunksize=1))
    return {"reqs_per_sec": reqs / seconds}
@test
def call_each_once_exec(lambda_count, alloc_mb):
    """Invoke each of ``lambda_count`` distinct lambdas exactly once.

    Returns the sequential request throughput.
    """
    # TODO: do in parallel
    start = time.time()
    for idx in range(lambda_count):
        r = post("run/L%d" % idx, {"alloc_mb": alloc_mb})
        raise_for_status(r)
        assert r.text == str(idx)
    elapsed = time.time() - start
    return {"reqs_per_sec": lambda_count / elapsed}
def call_each_once(lambda_count, alloc_mb=0):
    """Generate ``lambda_count`` dummy lambdas in a temp registry, call each once.

    Each generated lambda allocates ``alloc_mb`` MB and returns its own index.
    """
    with tempfile.TemporaryDirectory() as reg_dir:
        # create dummy lambdas
        for idx in range(lambda_count):
            with open(os.path.join(reg_dir, "L%d.py" % idx), "w") as f:
                f.write("def f(event):\n")
                f.write(" global s\n")
                f.write(" s = '*' * %d * 1024**2\n" % alloc_mb)
                f.write(" return %d\n" % idx)
        with TestConf(registry=reg_dir):
            call_each_once_exec(lambda_count=lambda_count, alloc_mb=alloc_mb)
@test
def fork_bomb():
    """Ask the fbomb lambda to fork past the proc limit; verify the cap holds."""
    limit = curr_conf["limits"]["procs"]
    r = post("run/fbomb", {"times": limit * 2})
    raise_for_status(r)
    # the lambda reports how many children it managed to fork
    forked = int(r.text)
    assert 1 <= forked <= limit
@test
def max_mem_alloc():
    """Verify the memory cap: allocatable MB must land just under the limit."""
    limit = curr_conf["limits"]["mem_mb"]
    r = post("run/max_mem_alloc", None)
    raise_for_status(r)
    # the lambda reports how many MB it was able to allocate
    allocated = int(r.text)
    assert limit - 16 <= allocated <= limit
@test
def ping_test():
    """Measure no-op request throughput against the /status endpoint."""
    pings = 1000
    start = time.time()
    for _ in range(pings):
        r = requests.get("http://localhost:5000/status")
        raise_for_status(r)
    elapsed = time.time() - start
    return {"pings_per_sec": pings / elapsed}
def sock_churn_task(args):
    """Create and destroy sandboxes in a loop until the deadline.

    Returns the number of create/destroy cycles completed.
    """
    echo_path, parent, t0, seconds = args
    churns = 0
    while time.time() < t0 + seconds:
        req = {"code": echo_path, "leaf": True, "parent": parent}
        r = post("create", req)
        raise_for_status(r)
        sandbox_id = r.text.strip()
        r = post("destroy/" + sandbox_id, {})
        raise_for_status(r)
        churns += 1
    return churns
@test
def sock_churn(baseline, procs, seconds, fork):
    """Benchmark raw SOCK sandbox churn (create + destroy) under load."""
    # baseline: how many sandboxes are sitting idly throughout the experiment
    # procs: how many procs are concurrently creating and deleting other sandboxes
    echo_path = os.path.abspath("test-registry/echo")
    if fork:
        # children are forked from one shared, non-leaf parent sandbox
        r = post("create", {"code": "", "leaf": False})
        raise_for_status(r)
        parent = r.text.strip()
    else:
        parent = ""
    for i in range(baseline):
        r = post("create", {"code": echo_path, "leaf": True, "parent": parent})
        raise_for_status(r)
    t0 = time.time()
    with Pool(procs) as p:
        reqs = sum(p.map(sock_churn_task, [(echo_path, parent, t0, seconds)] * procs, chunksize=1))
    return {"sandboxes_per_sec": reqs/seconds}
@test
def update_code():
    """Verify code updates are picked up within the registry cache window.

    Rewrites version.py three times and polls until the new return value is
    observed, asserting the observed latency stays within ~1 second of the
    configured registry_cache_ms.
    """
    reg_dir = curr_conf['registry']
    cache_seconds = curr_conf['registry_cache_ms'] / 1000
    latencies = []  # NOTE(review): collected nowhere — unused local
    for i in range(3):
        # update function code
        with open(os.path.join(reg_dir, "version.py"), "w") as f:
            f.write("def f(event):\n")
            f.write(" return %d\n" % i)
        # how long does it take for us to start seeing the latest code?
        t0 = time.time()
        while True:
            r = post("run/version", None)
            raise_for_status(r)
            num = int(r.text)
            assert(num >= i-1)
            t1 = time.time()
            # make sure the time to grab new code is about the time
            # specified for the registry cache (within ~1 second)
            assert(t1 - t0 <= cache_seconds + 1)
            if num == i:
                if i > 0:
                    assert(t1 - t0 >= cache_seconds - 1)
                break
@test
def recursive_kill(depth):
    """Build a chain of ``depth`` nested sandboxes, then destroy the root.

    Destroying sandbox "1" must cascade; the Destroy() stats counter is
    checked to confirm exactly ``depth`` destroys happened.
    """
    parent = ""
    for i in range(depth):
        r = post("create", {"code": "", "leaf": False, "parent": parent})
        raise_for_status(r)
        if parent:
            # don't need this parent any more, so pause it to get
            # memory back (so we can run this test with low memory)
            post("pause/"+parent)
        parent = r.text.strip()
    r = post("destroy/1", None)
    raise_for_status(r)
    r = post("stats", None)
    raise_for_status(r)
    destroys = r.json()['Destroy():ms.cnt']
    assert destroys == depth
def tests():
    """Run the whole suite under a progression of worker configurations."""
    test_reg = os.path.abspath("test-registry")
    with TestConf(registry=test_reg):
        ping_test()
        # do smoke tests under various configs
        with TestConf(handler_cache_mb=500, import_cache_mb=0):
            install_tests()
        with TestConf(handler_cache_mb=250, import_cache_mb=250):
            install_tests()
        with TestConf(sandbox="docker", handler_cache_mb=500, import_cache_mb=0):
            install_tests()
        # test resource limits
        fork_bomb()
        max_mem_alloc()
        # numpy pip install needs a larger mem cap
        with TestConf(handler_cache_mb=250, import_cache_mb=250):
            numpy_test()
    # test SOCK directly (without lambdas)
    with TestConf(server_mode="sock", handler_cache_mb=250, import_cache_mb=250):
        sock_churn(baseline=0, procs=1, seconds=5, fork=False)
        sock_churn(baseline=0, procs=1, seconds=15, fork=True)
        sock_churn(baseline=0, procs=15, seconds=15, fork=True)
        # TODO: make these work (we don't have enough mem now)
        #sock_churn(baseline=32, procs=1, seconds=15, fork=True)
        #sock_churn(baseline=32, procs=15, seconds=15, fork=True)
    # make sure code updates get pulled within the cache time
    with tempfile.TemporaryDirectory() as reg_dir:
        with TestConf(sandbox="sock", registry=reg_dir, registry_cache_ms=3000):
            update_code()
    # test heavy load
    with TestConf(sandbox="sock", handler_cache_mb=250, import_cache_mb=250, registry=test_reg):
        stress_one_lambda(procs=1, seconds=15)
        stress_one_lambda(procs=2, seconds=15)
        stress_one_lambda(procs=8, seconds=15)
    with TestConf(sandbox="sock", handler_cache_mb=250, import_cache_mb=250):
        call_each_once(lambda_count=100, alloc_mb=1)
        call_each_once(lambda_count=1000, alloc_mb=10)
def main():
    """Set up a fresh OL cluster, run the full suite, and write test.json.

    Exits with the number of failed tests as the process status code.
    """
    t0 = time.time()
    # so our test script doesn't hang if we have a memory leak
    timerThread = threading.Thread(target=ol_oom_killer, daemon=True)
    timerThread.start()
    # general setup: kill and wipe any leftover cluster, then create a new one
    if os.path.exists(OLDIR):
        try:
            run(['./ol', 'kill', '-p='+OLDIR])
        except:
            # NOTE(review): bare except — best-effort cleanup of a
            # possibly-dead cluster, but it also swallows KeyboardInterrupt
            print('could not kill cluster')
        run(['rm', '-rf', OLDIR])
    run(['./ol', 'new', '-p='+OLDIR])
    # run tests with various configs
    tests()
    # save test results
    passed = len([t for t in results["runs"] if t["pass"]])
    failed = len([t for t in results["runs"] if not t["pass"]])
    results["passed"] = passed
    results["failed"] = failed
    results["seconds"] = time.time() - t0
    print("PASSED: %d, FAILED: %d" % (passed, failed))
    with open("test.json", "w") as f:
        json.dump(results, f, indent=2)
    sys.exit(failed)
# Script entry point: run the suite and exit with the failure count.
if __name__ == '__main__':
    main()
|
get_proxies.py | import json
import requests
import time
from bs4 import BeautifulSoup as Soup
import threading
# Browser-like User-Agent so the proxy listing sites don't reject the scraper.
header = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                        'AppleWebKit/537.36 (KHTML, like Gecko) '
                        'Chrome/64.0.3282.186 Safari/537.36'}
def dict2proxy(dic):
    """Build a requests-style proxies mapping from an {ip, port, type} dict."""
    url = '{}://{}:{}'.format(dic['type'], dic['ip'], dic['port'])
    return {'http': url, 'https': url}
def parse_items(items):
    """Extract {ip, port, type} dicts from the <tr> rows of a proxy table.

    Column layout of each row's <td> cells: index 1 = ip, 2 = port,
    5 = protocol type (lower-cased).
    """
    parsed = []
    for row in items:
        cells = row.find_all('td')
        parsed.append({
            'ip': cells[1].text,
            'port': int(cells[2].text),
            'type': cells[5].text.lower(),
        })
    return parsed
def check_ip(ip, good_proxies):
    """Probe one proxy; append it to ``good_proxies`` when it can fetch the test URL.

    Any failure (timeout, bad status, malformed entry) silently skips the proxy.
    """
    url = 'https://www.ipip.net/'
    try:
        pro = dict2proxy(ip)
        r = requests.get(url, headers=header, proxies=pro, timeout=5)
        r.raise_for_status()
        print(r.status_code, ip['ip'])
    except Exception:
        # dead or slow proxies are simply dropped
        return
    good_proxies.append(ip)
def write_to_json(ips):
    """Dump the proxy list to proxies.json (UTF-8, pretty-printed)."""
    with open('proxies.json', 'w', encoding='utf-8') as fp:
        json.dump(ips, fp, indent=4)
class GetThread(threading.Thread):
    """Thread that scrapes one proxy-listing page and validates every proxy on it.

    Fix: the page number is now stored on our own attribute instead of being
    read back from ``self._args``, a private, undocumented Thread internal.
    The constructor signature is unchanged.
    """

    def __init__(self, args):
        threading.Thread.__init__(self)
        # first (and only) element of args is the listing page number
        self.page = args[0]
        self.good_proxies = []

    def run(self):
        url = 'http://www.xicidaili.com/nt/%d' % self.page
        # fetch the listing page
        r = requests.get(url, headers=header)
        r.encoding = r.apparent_encoding
        r.raise_for_status()
        soup = Soup(r.text, 'lxml')
        # the first <tr> is the table header row — skip it
        items = soup.find_all('tr')[1:]
        ips = parse_items(items)
        checkers = []
        for ip in ips:
            # validate each proxy on its own thread, slightly staggered
            t = threading.Thread(target=check_ip, args=[ip, self.good_proxies])
            t.start()
            time.sleep(0.1)
            checkers.append(t)
        [t.join() for t in checkers]

    def get_result(self):
        """Return the proxies that passed validation (valid only after join())."""
        return self.good_proxies
if __name__ == '__main__':
    # Scrape listing pages 1-29, one GetThread per page, pacing the page
    # fetches 10 seconds apart.
    threads = []
    for i in range(1, 30):
        t = GetThread(args=[i])
        t.start()
        time.sleep(10)
        threads.append(t)
    [t.join() for t in threads]
    for t in threads:
        proxies = t.get_result()
        # NOTE(review): each iteration overwrites ``proxies`` and nothing is
        # ever persisted (write_to_json is unused) — the validated proxies
        # are effectively discarded; confirm intended behavior.
|
1.1_rms_bankers_Speak.py | from functools import reduce
from sys import *
import numpy as np
import random as r
import ping_code as pc
import socket
import struct
import subprocess as sp
from threading import Thread
import paramiko
import ast
import time
import os
import getpass as gp
import data
hosts = {}  # {hostname: ip} — peer MEC nodes discovered via multicast

# UDP multicast endpoint shared by all cooperating MEC nodes.
multicast_group = '224.3.29.71'
server_address = ('', 10000)

# Create the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

# Bind to the server address
sock.bind(server_address)

# Tell the operating system to add the socket to the multicast group
# on all interfaces.
group = socket.inet_aton(multicast_group)
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)

# Static task set: worst-case execution time and period per task.
_tasks = {'t1': {'wcet': 3, 'period': 20},
          't2': {'wcet': 1, 'period': 5},
          't3': {'wcet': 2, 'period': 10},
          't4': {'wcet': 1, 'period': 10},
          't5': {'wcet': 3, 'period': 15}
          }

# mat = {'p0': ['cpu', 'mem', 'storage']}
# Banker's algorithm matrices: remaining need and current allocation per
# task over the resource vector [cpu, mem, storage].
_need = {
    't1': [7, 4, 3],
    't2': [1, 2, 2],
    't3': [6, 0, 0],
    't4': [0, 1, 1],
    't5': [4, 3, 1]
}
allocation = {
    't1': [0, 1, 0],
    't2': [2, 0, 0],
    't3': [3, 0, 2],
    't4': [2, 1, 1],
    't5': [0, 0, 2]
}

mec_waiting_time = {}  # {ip : [moving (waiting time + rtt)]}
offload_register = {}  # {task: host_ip}
discovering = 0  # if discovering == 0 update host
_pos = 0  # counting position of task and time
def ip_address():
    """Return the local IP address used to reach the internet.

    Connecting a UDP socket only asks the kernel for a route — no packets
    are sent. Fix: the socket is now closed via a context manager instead
    of being leaked on every call.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
        s.connect(("8.8.8.8", 80))
        return s.getsockname()[0]
def get_rtt(host):
    """Ping ``host`` and return the measured round-trip time."""
    return pc.verbose_ping(host)
def gcd(a, b):
    """Greatest common divisor via the Euclidean algorithm (iterative form)."""
    while b != 0:
        a, b = b, a % b
    return a
def lcm(a, b):
    """Return the least common multiple of two integers.

    Fix: uses floor division instead of the old ``int(a * b / gcd(a, b))``.
    True division routes through floats and silently loses precision once
    a * b exceeds 2**53; ``//`` stays exact for arbitrary-size ints.
    """
    return a * b // gcd(a, b)
def LCM(values):
    """Fold ``lcm`` over a sequence of integers (the hyperperiod of task periods).

    Fix: the parameter was renamed from ``list`` so it no longer shadows the
    builtin; the only call site passes it positionally, so callers are
    unaffected.
    """
    return reduce(lcm, values)
def gosh_dist(_range):
    """Pseudo-random draw in [0, _range) from modular-exponentiation noise.

    NOTE: r.randrange(1777) may yield 0, in which case the modulo raises
    ZeroDivisionError — unchanged from the original behavior.
    """
    noise = (23 ** r.randrange(1331)) % r.randrange(1777)
    return noise % _range
def get_rms():
    """Pull the next task set from the trace and schedule it with RMS.

    Advances the global trace cursor ``_pos``, initializes per-task timing
    (including tasks offloaded to us by peers), and returns the offloaded
    ids followed by the local RMS execution order.
    """
    global tasks
    global _pos

    tasks = data.task[_pos]
    _pos += 1
    '''
    while len(tasks) < 3:
        a = list(_tasks.keys())[gosh_dist(5)]
        tasks[a] = _tasks[a]
    '''
    print('Running RMS on Tasks: ', tasks, '\n')
    waiting_time_init()
    a = load_tasks()
    return scheduler(a)
def waiting_time_init():
    """Initialize the global per-task [execution_time, latency] table ``t_time``.

    Execution time is drawn uniformly from 0.4-0.8 s; the latency budget is
    period/wcet. Entries for tasks offloaded to this node by peer MECs are
    merged in from check_mec_offload().
    """
    global t_time

    t_time = {i: [round(r.uniform(0.4, 0.8), 3), round((tasks[i]['period']) / (tasks[i]['wcet']), 3)] for i in
              tasks}  # t_time = {'ti': [execution_time, latency], ..}
    t_time = {**t_time, **check_mec_offload()}
    print('[Execution_time, Latency]: ', t_time)
def load_tasks():
    """Compute the hyperperiod (LCM of task periods) and append an idle task.

    The idle task's wcet equals the hyperperiod and its period is one larger,
    giving it the lowest rate-monotonic priority so it soaks up slack time.
    """
    global tasks
    period_list = [tasks[i]['period'] for i in tasks]
    lcm_period = LCM(period_list)
    # insert idle task
    tasks['idle'] = {'wcet': lcm_period, 'period': lcm_period + 1}
    return lcm_period
def scheduler(D):
    """Simulate deadline-driven scheduling of the global task set over ``D`` timestamps.

    Walks one timestamp at a time so preemption can be modelled; a task that
    reaches its deadline without finishing aborts the process (exit(1)).
    Returns the globally offloaded task ids followed by the executed task
    order; 'idle' slots are omitted and preempted slots are marked '*' in
    the internal trace.
    """
    queue = list(tasks.keys())  # initialize task queue
    schedule = []
    rms = []
    curr = ''  # current task
    prev = ''  # previous task
    tmp = {}
    for task in tasks.keys():
        tmp[task] = {}  # temporary data for each task
        tmp[task]['deadline'] = tasks[task]['period']
        tmp[task]['executed'] = 0

    # start scheduling...
    # proceed by one timestamp to handle preemption
    # NOTE(review): the loop variable shadows the imported `time` module and
    # `min` below shadows the builtin — harmless here but confusing.
    for time in range(D):
        # insert new tasks into the queue
        for t in tmp.keys():
            if time == tmp[t]['deadline']:
                if tasks[t]['wcet'] > tmp[t]['executed']:
                    # print('Scheduling Failed at %d' % time)
                    exit(1)
                else:
                    tmp[t]['deadline'] += tasks[t]['period']
                    tmp[t]['executed'] = 0
                    queue.append(t)

        # select next task to be scheduled (smallest deadline wins)
        min = D * 2
        for task in queue:
            if tmp[task]['deadline'] < min:
                min = tmp[task]['deadline']
                curr = task
        tmp[curr]['executed'] += 1
        # print(time, queue, curr)

        # dequeue the execution-completed task
        if tmp[curr]['executed'] == tasks[curr]['wcet']:
            for i in range(len(queue)):
                if curr == queue[i]:
                    del queue[i]
                    break

        # record to the schedule trace
        if prev != curr:
            if prev in queue and prev != 'idle':  # previous task is preempted..
                s = schedule.pop()
                schedule.append([s[0], s[1], '*'])
                rms.append(s[1])
            schedule.append([time, curr])
            if curr != 'idle': rms.append(curr)
        prev = curr
    return offloaded + rms
# safe state or not
def isSafe(processes, avail, need, allot):
    """Banker's-algorithm safety check with task offloading on deadlock.

    Tries to build a safe execution sequence for ``processes`` given the
    available vector ``avail`` and the per-process ``need`` / ``allot``
    matrices. Whenever no process can proceed, the blocked process holding
    the largest allocation is offloaded (its resources reclaimed) and the
    search continues; offloaded tasks are handed to cooperative_mec().

    Relies on globals P (process count) and R (resource count) set by
    get_safe_seq(). Returns the safe sequence, truncated when offloads
    occurred.

    Bug fix: the old resource check used ``if j == R - 1`` after a
    break-able loop, which wrongly accepted a process whose LAST resource
    requirement exceeded work[]. Replaced with for/else, which only fires
    when no resource check failed.
    """
    # tasks to offload if no safe sequence exists
    offload = []
    # completion flag per process
    finish = [0] * P
    # safe sequence under construction
    safeSeq = [0] * P
    # working copy of the available resources
    work = [0] * R
    for i in range(R):
        work[i] = avail[i]

    # keep going until every process is finished (or offloaded)
    count = 0
    while (count < P):
        # find an unfinished process whose entire need fits inside work[]
        found = False
        for p in range(P):
            if (finish[p] == 0):
                for j in range(R):
                    if (need[p][j] > work[j]):
                        break
                else:
                    # all R needs of p were satisfied: release its allocation
                    # back into the working pool and append it to the sequence
                    for k in range(R):
                        work[k] += allot[p][k]
                    safeSeq[count] = processes[p]
                    count += 1
                    finish[p] = 1
                    found = True
        if (found == False):
            print("System is not in safe state")
            # deadlock: offload the still-blocked process with the largest
            # total allocation and reclaim its resources
            a = list(set(processes) - set(safeSeq) - set(offload))
            n = {}
            for i in a:
                n[i] = sum(allocation[i[:2]])
            _max = max(n, key=n.get)
            print('work: ', work, 'need: ', _need[_max[:2]])
            offload.append(_max)
            work = np.array(work) + np.array(allocation[_max[:2]])
            count += 1
            finish[processes.index(_max)] = 1

    if len(offload) > 0:
        # offloaded processes consumed sequence slots; drop the trailing zeros
        safeSeq = safeSeq[:safeSeq.index(0)]
        print('offloading tasks: ', offload)
        cooperative_mec(offload, 0)
    print("System is in safe state.",
          "\nSafe sequence is: ", end=" ")
    print('safe seq: ', safeSeq)

    return safeSeq
def get_safe_seq(pro):
    """Run the Banker's safety check for the task instances in ``pro``.

    Builds uniquely-suffixed process names ('t1_0', ...), looks up each
    task's need/allocation rows by its 2-char base name, and delegates to
    isSafe(). Sets the globals P and R that isSafe() reads.
    """
    global P
    global R
    # Number of processes
    P = len(pro)

    # Number of resources
    R = 3
    processes = ['{}_{}'.format(pro[i], i) for i in range(P)]
    # Available instances of resources
    avail = [5, 5, 5]
    n_need = [_need[i[:2]] for i in pro]
    # print('need', n_need)
    # Resources allocated to processes
    allot = [allocation[i[:2]] for i in pro]
    # print('allocation', allot)
    # Maximum R that can be allocated
    # to processes
    # maxm = [np.array(allot[i]) + np.array(n_need[i]) for i in range(len(n_need))]
    # print('max_matrix:', maxm)
    # Check system is in safe state or not
    return isSafe(processes, avail, n_need, allot)
def calc_wait_time(list_seq):
    """Assign a cumulative completion offset to each queued task instance.

    Returns {task_instance: completion_time} and broadcasts the total
    waiting time (the last task's offset) to the cooperative MECs.
    """
    cumulative = 0
    time_dic = {}
    for entry in list_seq:
        base = '_'.join(entry.split('_')[:-1])  # e.g. 't5_3_3' -> 't5_3'
        time_dic[entry] = round(t_time[base][0] + cumulative, 3)
        cumulative += t_time[base][0]
    # total wait = completion offset of the final queued task
    w_send = time_dic[list(time_dic.keys())[-1]]
    send_message(str(w_send))  # Broadcasting waiting time to cooperative MECs
    return time_dic
def compare_local_mec(list_seq):
    """Split tasks into (execute_mec, execute_locally) lists.

    A task runs locally when its latency budget exceeds its projected local
    completion time; otherwise it is a candidate for MEC offload.
    """
    comparison = {}
    for task in list_seq:
        base = '_'.join(task.split('_')[:-1])
        comparison[task] = t_time[base][1] > list_seq[task]
    print('local vs MEC comparison: ', comparison)
    execute_mec = []
    execute_locally = []
    for task, run_local in comparison.items():
        if run_local:
            execute_locally.append(task)
        else:
            execute_mec.append(task)
    return execute_mec, execute_locally
def calculate_mov_avg(ma1, a1):
    """Fold sample ``a1`` into the running average tracked for host ``ma1``.

    The sample count is derived from the history length in the global
    ``mec_waiting_time`` table; an unknown host starts from zero.
    """
    if ma1 in mec_waiting_time:
        _count = len(mec_waiting_time[ma1])
        prev_avg = mec_waiting_time[ma1][-1]
    else:
        _count = 0
        prev_avg = 0
    _count += 1
    # cumulative average formula: mu_n = ((n - 1) * mu_(n-1) + x_n) / n
    return ((_count - 1) * prev_avg + a1) / _count
def send_message(mg):
    """Multicast a control message to the MEC group.

    'hello' announces this node (hostname appended); 'update' broadcasts the
    full {hostname: ip} membership map including ourselves; anything else
    (e.g. a waiting-time figure) is sent verbatim. Errors are printed, not
    raised.
    """
    _multicast_group = ('224.3.29.71', 10000)
    try:
        # Send data to the multicast group
        if mg == 'hello':
            smg = mg + ' ' + message()
            sock.sendto(str.encode(smg), _multicast_group)
            print('\nHello message sent')
        elif mg == 'update':
            ho = hosts.copy()
            ho[message()] = host_ip
            smg = mg + ' ' + str(ho)
            sock.sendto(str.encode(smg), _multicast_group)
            # print('\n===**====**==update message sent===**======**=========')
        else:
            sock.sendto(str.encode(mg), _multicast_group)
    except Exception as e:
        print(e)
def message():
    """Return this machine's hostname (trailing newline stripped)."""
    raw = sp.check_output(['cat /etc/hostname'], shell=True)
    return str(raw, 'utf-8')[0:-1]
def receive_message():
    """Blocking multicast listener that maintains membership and wait stats.

    Runs forever on its own thread: 'hello' registers a peer, 'update'
    replaces the membership map (only while still discovering), and any
    other datagram from a peer is parsed as its broadcast waiting time and
    folded into that host's moving average (plus measured RTT).
    """
    global hosts

    while True:
        data, address = sock.recvfrom(1024)
        if data.decode()[:5] == 'hello':
            hosts[data.decode()[6:]] = address[0]
        elif (data.decode()[:6] == 'update') and (discovering == 0):
            hosts = ast.literal_eval(data.decode()[7:])
            # print('received: ', hosts)
        elif (data.decode()[:6] != 'update') and (address[0] != host_ip):
            w_time = calculate_mov_avg(address[0], float(data.decode()) + get_rtt(
                address[0]))  # calculate moving average of mec wait time => w_time = wait time + rtt
            if address[0] in mec_waiting_time:
                mec_waiting_time[address[0]].append(w_time)
            else:
                mec_waiting_time[address[0]] = [w_time]
def mec_comparison():
    """Return the peer MEC with the smallest last-recorded wait time, or 0 if none."""
    if not mec_waiting_time:
        return 0
    latest = {host: samples[-1] for host, samples in mec_waiting_time.items()}
    return min(latest, key=latest.get)
def mec_task_unicast(task, host_):
    """Hand a task off to another MEC (or the cloud) over SSH.

    Appends "<our_ip> <task> <timing>" to the remote node's task_share drop
    file; the peer picks it up in check_mec_offload(). Errors are printed,
    not raised.

    NOTE(review): credentials are hard-coded and host keys auto-accepted
    (AutoAddPolicy) — acceptable only on a closed testbed.
    """
    try:
        c = paramiko.SSHClient()
        un = 'mec'
        pw = 'password'
        port = 22
        c.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        c.connect(host_, port, un, pw)
        cmd = ('echo "{} {} {}" >> /home/mec/deadlock_project/temp/task_share.txt'.format(host_ip, task, t_time[
            task[:2]]))  # task share : host ip task
        stdin, stdout, stderr = c.exec_command(cmd)
    except Exception as e:
        print(e)
        print('error line 420')
def cooperative_mec(mec_list, n):
    """Dispatch each offloaded task to the best cooperative MEC or the cloud.

    For every task: if no MEC wait-time data exists, send it to the cloud;
    otherwise send it to the least-loaded MEC when that host's projected
    wait beats the task's latency budget, else fall back to the cloud.

    The ``n`` flag is kept for call compatibility only: the original
    ``elif n == 0`` branch and its ``else`` branch were byte-identical, so
    they are collapsed — behavior is unchanged.
    """
    for task in mec_list:
        _host = mec_comparison()
        if _host == 0:
            # no peer statistics yet: cloud is the only option
            mec_task_unicast(task, cloud_ip)
            print('\n=========SENDING {} TO CLOUD==========='.format(task))
            continue
        base = '_'.join(task.split('_')[:-1])
        # CHECK IF THE MINIMUM MEC WAIT TIME IS LESS THAN TASK LATENCY
        if mec_waiting_time[_host][-1] < t_time[base][1]:
            mec_task_unicast(task, _host)  # SENDS TASK TO MEC FOR EXECUTION
            # extend the host's projected wait by this task's execution time
            mec_waiting_time[_host].append(
                mec_waiting_time[_host][-1] + t_time[base][0])
            print('\n======SENDING {} TO MEC {}========='.format(task, _host))
        else:
            mec_task_unicast(task, cloud_ip)
            print('\n=========SENDING {} TO CLOUD==========='.format(task))
def check_mec_offload():
    """Consume tasks shared by peer MECs from the task_share drop file.

    Returns {task_id: [execution_time, latency]} for each shared line and
    records the sending host in ``offload_register``; the file is deleted
    after a successful read. Missing file (the common case) yields {}.

    Fixes: the file is opened with a context manager (the old handle leaked
    on a parse error), and line positions come from enumerate() — the old
    ``t.index(i)`` was O(n) per line and returned the FIRST occurrence, so
    duplicate lines collided on the same task id.
    """
    global offloaded
    offloaded = []
    t_mec = {}  # {task_id: [execution_time, latency]}
    share_path = '/home/mec/deadlock_project/temp/task_share.txt'
    try:
        with open(share_path, 'r') as fr:
            lines = fr.readlines()
        for pos, line in enumerate(lines):
            fields = line[:-1].split()  # "<host_ip> <task> <timing>"
            ta = fields[1][:2] + '_' + str(pos)
            offloaded.append(ta)
            offload_register[ta] = fields[0]
            t_mec[ta] = ast.literal_eval(''.join(fields[2:]))
        os.system('rm ' + share_path)
        print('Tasks Offloaded to MEC: {}'.format(offloaded))
    except Exception:
        print('no offloaded Task!')
    return t_mec
def execute(local):
    """Simulate local execution by sleeping each task's execution time.

    Returns the subset of base task ids that originated on a peer MEC
    (base names longer than 2 chars carry an offload suffix) so their
    results can be sent back.

    Fix: the progress position comes from enumerate() — the old
    ``local.index(i)`` was O(n) per task and returned the first occurrence,
    mis-numbering duplicates.
    """
    print('\nExecuting :', local)
    send = []
    for pos, task in enumerate(local):
        base = '_'.join(task.split('_')[:-1])
        time.sleep(t_time[base][0])
        print('#' * ((pos + 1) * 3), ' Executed: ', task)
        if len(base) > 2:
            send.append(base)
    print('============== EXECUTION DONE ===============')
    return send
def send_back_task(l_list):
    """Report finished offloaded tasks back to their originating hosts via SSH.

    Appends "<task> <our_ip>" to each origin host's executed.txt drop file
    (origin looked up in ``offload_register``). Per-task errors are printed
    and the loop continues.

    NOTE(review): hard-coded credentials + AutoAddPolicy, same caveat as
    mec_task_unicast().
    """
    _host_ip = ip_address()
    for i in l_list:
        try:
            c = paramiko.SSHClient()
            un = 'mec'
            pw = 'password'
            port = 22
            c.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            c.connect(offload_register[i], port, un, pw)
            cmd = ('echo "{} {}" >> /home/mec/deadlock_project/temp/executed.txt'.format(i,
                _host_ip))  # task share : host ip task
            stdin, stdout, stderr = c.exec_command(cmd)
        except Exception as e:
            print(e)
def receive_executed_task():
    """Consume the executed.txt drop file listing tasks finished remotely.

    Logs each received (task, host) pair and deletes the file. A missing
    file simply means no results have arrived yet.

    Fix: the file handle is managed by a `with` block instead of being
    leaked when a line fails to parse.
    """
    path = '/home/mec/deadlock_project/temp/executed.txt'
    try:
        with open(path, 'r') as fr:
            lines = fr.readlines()
        for line in lines:
            fields = line[:-1].split()  # "<task> <host_ip>"
            print('Received Executed task {} from {}'.format(fields[0], fields[1]))
        os.system('rm ' + path)
    except Exception:
        print('No Executed Tasks from MEC Received')
def run_me():
    """Top-level flow: discover peers, start the gossip thread, run the main loop.

    Blocks until the expected number of MECs (``mec_no``) have said hello,
    removes ourselves from the peer map, marks discovery finished, then
    starts speaking_node() on a thread and enters start_loop().
    """
    global discovering

    initialization()
    while True:
        if len(hosts) == mec_no:
            print('MEC Details: ', hosts)
            # drop ourselves from the peer map; discovery is complete
            del hosts[message()]
            discovering = 1
            break
        time.sleep(2)
    speak = Thread(target=speaking_node)
    speak.start()
    start_loop()
def start_loop():
    """Interactive main loop: schedule, safety-check, split, execute, repeat.

    Each of 500 rounds pulls the next task set (RMS order), runs the
    Banker's safety check, compares local vs MEC completion estimates,
    offloads tasks that cannot finish locally in time, executes the rest,
    returns results for tasks offloaded to us, and collects our own remote
    results. Typing "exit" at the prompt terminates the program.
    """
    print('\n============* WELCOME TO THE DEADLOCK EMULATION PROGRAM *=============\n')

    while True:
        x = gp.getpass('Press any key to Start...').lower()
        if x != 'exit':
            for i in range(500):
                rms_list = get_rms()
                print('RMS List of Processes: ', rms_list, '\n')

                print('\nRunning Bankers Algorithm')
                list_seq = get_safe_seq(rms_list)
                if len(list_seq) > 0:  # do only when there is a task in safe sequence
                    wait_list = calc_wait_time(list_seq)
                    print('\nWaiting Time List: ', wait_list)
                    compare_result = compare_local_mec(wait_list)
                    print('\nExecute Locally: ', compare_result[1])
                    print('\nExecute in MEC: ', compare_result[0])

                    print('\nSending to cooperative platform')
                    if len(compare_result[0]) > 0:
                        cooperative_mec(compare_result[0], 1)
                    local_ = execute(compare_result[1])
                    if len(local_) > 0:  # do only when there is a task to send back
                        send_back_task(local_)
                    receive_executed_task()
                time.sleep(3)
            print('\nEnter "Exit" to stop Programme!')
        if x == 'exit':
            print('\nProgramme Terminated')
            break
def speaking_node():
    """Gossip loop: broadcast a membership update whenever a new MEC appears.

    Polls the peer map every two seconds; when it grows beyond the known
    count, an 'update' message is multicast and the count is refreshed.
    """
    global mec_no
    while True:
        if len(hosts) > mec_no - 1:
            send_message('update')
            mec_no = len(hosts) + 1
        time.sleep(2)
def initialization():
    """Prompt for cluster parameters and kick off peer discovery.

    Reads the expected MEC count and the cloud server IP, starts the
    multicast listener thread, then waits for the operator to confirm the
    hello broadcast. Ctrl-C exits the program cleanly.
    """
    global mec_no
    global host_ip
    global cloud_ip

    host_ip = ip_address()
    try:
        mec_no = int(input('Number of MECs: ').strip())
        cloud_ip = input('Cloud Server IP: ').strip()
        print('\nCompiling MEC Details')
        h1 = Thread(target=receive_message)
        h1.start()
        while True:
            b = input('Send Hello Message (Y/N): ').strip().lower()
            if b == 'y':
                send_message('hello')
                break
            else:
                print('\nPlease Type "y" to send Hello message\n')
    except KeyboardInterrupt:
        print('\nProgramme Terminated')
        exit(0)
def main():
    """Clear the terminal and start this MEC node."""
    os.system('clear')
    run_me()
# Script entry point.
if __name__ == "__main__":
    main()
|
raft.py | import json
import logging
import os
import threading
import time
from patroni.dcs import AbstractDCS, ClusterConfig, Cluster, Failover, Leader, Member, SyncState, TimelineHistory
from ..utils import validate_directory
from pysyncobj import SyncObj, SyncObjConf, replicated, FAIL_REASON
from pysyncobj.transport import Node, TCPTransport, CONNECTION_STATE
logger = logging.getLogger(__name__)
class MessageNode(Node):
    """Minimal Node stand-in whose only state is its address.

    Used to exploit the transport handshake: the "address" sent as the
    first message can carry an arbitrary payload (see
    SyncObjUtility.sendMessage, which installs one of these as _selfNode).
    """

    def __init__(self, address):
        # Deliberately does NOT call Node.__init__; only the address matters.
        self.address = address
class UtilityTransport(TCPTransport):
    """TCPTransport variant with fully manual connection control.

    Automatic (re)connection is disabled so SyncObjUtility can open one
    connection on demand, exchange a single utility message, and drop it.
    """

    def __init__(self, syncObj, selfNode, otherNodes):
        super(UtilityTransport, self).__init__(syncObj, selfNode, otherNodes)
        # never behave as a read-only node during the utility exchange
        self._selfIsReadonlyNode = False

    def _connectIfNecessarySingle(self, node):
        # override: suppress the base class's automatic connection logic
        pass

    def connectionState(self, node):
        """Return the raw CONNECTION_STATE of the link to ``node``."""
        return self._connections[node].state

    def isDisconnected(self, node):
        """True when the link to ``node`` is fully disconnected."""
        return self.connectionState(node) == CONNECTION_STATE.DISCONNECTED

    def connectIfRequiredSingle(self, node):
        """Open the connection to ``node`` unless one is already active."""
        if self.isDisconnected(node):
            return self._connections[node].connect(node.ip, node.port)

    def disconnectSingle(self, node):
        """Drop the connection to ``node``."""
        self._connections[node].disconnect()
class SyncObjUtility(SyncObj):
    """One-shot SyncObj used only to exchange utility messages with a peer.

    Auto-ticking is suppressed during construction and UtilityTransport
    keeps connections manual, so this object never participates in raft
    consensus itself. The pervasive ``self._SyncObj__*`` accesses reach
    into SyncObj's name-mangled internals — this class is tightly coupled
    to the pysyncobj version in use.
    """

    def __init__(self, otherNodes, conf):
        # temporarily disable autoTick while the base class constructs itself
        autoTick = conf.autoTick
        conf.autoTick = False
        super(SyncObjUtility, self).__init__(None, otherNodes, conf, transportClass=UtilityTransport)
        conf.autoTick = autoTick
        self._SyncObj__transport.setOnMessageReceivedCallback(self._onMessageReceived)
        self.__result = None

    def setPartnerNode(self, partner):
        # remember which peer sendMessage() should talk to
        self.__node = partner

    def sendMessage(self, message):
        """Send one utility message to the partner node and return its reply."""
        # Abuse the fact that the node address is sent as the first message:
        # a MessageNode smuggles our payload into the handshake.
        self._SyncObj__transport._selfNode = MessageNode(message)
        self._SyncObj__transport.connectIfRequiredSingle(self.__node)
        # poll until _onMessageReceived captures the reply and disconnects
        while not self._SyncObj__transport.isDisconnected(self.__node):
            self._poller.poll(0.5)
        return self.__result

    def _onMessageReceived(self, _, message):
        # first reply wins; disconnecting unblocks sendMessage's poll loop
        self.__result = message
        self._SyncObj__transport.disconnectSingle(self.__node)
class MyTCPTransport(TCPTransport):
    """TCPTransport that additionally answers utility requests in-band."""

    def _onIncomingMessageReceived(self, conn, message):
        # Encryption handshake: the peer's first message is its random key.
        if self._syncObj.encryptor and not conn.sendRandKey:
            conn.sendRandKey = message
            conn.recvRandKey = os.urandom(32)
            conn.send(conn.recvRandKey)
            return
        # Utility messages
        if isinstance(message, list) and message[0] == 'members':
            conn.send(self._syncObj._get_members())
            return True
        # everything else is regular raft traffic
        return super(MyTCPTransport, self)._onIncomingMessageReceived(conn, message)
class DynMemberSyncObj(SyncObj):
    """SyncObj whose membership is discovered dynamically from existing peers.

    Before starting raft it asks any reachable partner for the current
    member list; if this node is absent from that list it schedules an
    'add' request for itself once startup completes.
    """

    def __init__(self, selfAddress, partnerAddrs, conf):
        add_self = False
        utility = SyncObjUtility(partnerAddrs, conf)
        for node in utility._SyncObj__otherNodes:
            utility.setPartnerNode(node)
            response = utility.sendMessage(['members'])
            if response:
                # drop ourselves from the reported list; if the count did not
                # shrink, the cluster does not know us yet -> add ourselves
                partnerAddrs = [member['addr'] for member in response if member['addr'] != selfAddress]
                add_self = selfAddress and len(partnerAddrs) == len(response)
                break

        super(DynMemberSyncObj, self).__init__(selfAddress, partnerAddrs, conf, transportClass=MyTCPTransport)

        if add_self:
            threading.Thread(target=utility.sendMessage, args=(['add', selfAddress],)).start()

    def _get_members(self):
        """Return [{addr, leader, status}, ...] for every known node, incl. self."""
        ret = [{'addr': node.id, 'leader': node == self._getLeader(),
                'status': CONNECTION_STATE.CONNECTED if node in self._SyncObj__connectedNodes
                else CONNECTION_STATE.DISCONNECTED} for node in self._SyncObj__otherNodes]
        ret.append({'addr': self._SyncObj__selfNode.id, 'leader': self._isLeader(),
                    'status': CONNECTION_STATE.CONNECTED})
        return ret

    def _SyncObj__doChangeCluster(self, request, reverse=False):
        # Appears to skip applying the 'add' of our own address (we already
        # joined with the full member list); every other accepted change
        # forces a log compaction so late joiners see membership in the
        # snapshot. NOTE(review): overrides a name-mangled SyncObj internal —
        # verify against the pinned pysyncobj version.
        ret = False
        if not self._SyncObj__selfNode or request[0] != 'add' or reverse or request[1] != self._SyncObj__selfNode.id:
            ret = super(DynMemberSyncObj, self)._SyncObj__doChangeCluster(request, reverse)
        if ret:
            self.forceLogCompaction()
        return ret
class KVStoreTTL(DynMemberSyncObj):
    def __init__(self, selfAddress, partnerAddrs, conf, on_set=None, on_delete=None):
        """Replicated key-value store with TTL support on top of raft.

        on_set / on_delete are local callbacks fired whenever a key is
        applied or removed on this node.
        """
        self.__on_set = on_set
        self.__on_delete = on_delete
        # NOTE(review): __limb is populated elsewhere (not visible in this
        # chunk) — presumably expired entries awaiting replicated deletion.
        self.__limb = {}
        self.__retry_timeout = None
        # replay the local log before joining when we persist our own state
        self.__early_apply_local_log = selfAddress is not None
        self.applied_local_log = False
        super(KVStoreTTL, self).__init__(selfAddress, partnerAddrs, conf)
        self.__data = {}
@staticmethod
def __check_requirements(old_value, **kwargs):
return ('prevExist' not in kwargs or bool(kwargs['prevExist']) == bool(old_value)) and \
('prevValue' not in kwargs or old_value and old_value['value'] == kwargs['prevValue']) and \
(not kwargs.get('prevIndex') or old_value and old_value['index'] == kwargs['prevIndex'])
def set_retry_timeout(self, retry_timeout):
self.__retry_timeout = retry_timeout
def retry(self, func, *args, **kwargs):
event = threading.Event()
ret = {'result': None, 'error': -1}
def callback(result, error):
ret.update(result=result, error=error)
event.set()
kwargs['callback'] = callback
timeout = kwargs.pop('timeout', None) or self.__retry_timeout
deadline = timeout and time.time() + timeout
while True:
event.clear()
func(*args, **kwargs)
event.wait(timeout)
if ret['error'] == FAIL_REASON.SUCCESS:
return ret['result']
elif ret['error'] == FAIL_REASON.REQUEST_DENIED:
break
elif deadline:
timeout = deadline - time.time()
if timeout <= 0:
break
time.sleep(1)
return False
@replicated
def _set(self, key, value, **kwargs):
old_value = self.__data.get(key, {})
if not self.__check_requirements(old_value, **kwargs):
return False
if old_value and old_value['created'] != value['created']:
value['created'] = value['updated']
value['index'] = self._SyncObj__raftLastApplied + 1
self.__data[key] = value
if self.__on_set:
self.__on_set(key, value)
return True
def set(self, key, value, ttl=None, **kwargs):
old_value = self.__data.get(key, {})
if not self.__check_requirements(old_value, **kwargs):
return False
value = {'value': value, 'updated': time.time()}
value['created'] = old_value.get('created', value['updated'])
if ttl:
value['expire'] = value['updated'] + ttl
return self.retry(self._set, key, value, **kwargs)
def __pop(self, key):
self.__data.pop(key)
if self.__on_delete:
self.__on_delete(key)
@replicated
def _delete(self, key, recursive=False, **kwargs):
if recursive:
for k in list(self.__data.keys()):
if k.startswith(key):
self.__pop(k)
elif not self.__check_requirements(self.__data.get(key, {}), **kwargs):
return False
else:
self.__pop(key)
return True
def delete(self, key, recursive=False, **kwargs):
if not recursive and not self.__check_requirements(self.__data.get(key, {}), **kwargs):
return False
return self.retry(self._delete, key, recursive=recursive, **kwargs)
@staticmethod
def __values_match(old, new):
return all(old.get(n) == new.get(n) for n in ('created', 'updated', 'expire', 'value'))
@replicated
def _expire(self, key, value, callback=None):
current = self.__data.get(key)
if current and self.__values_match(current, value):
self.__pop(key)
def __expire_keys(self):
for key, value in self.__data.items():
if value and 'expire' in value and value['expire'] <= time.time() and \
not (key in self.__limb and self.__values_match(self.__limb[key], value)):
self.__limb[key] = value
def callback(*args):
if key in self.__limb and self.__values_match(self.__limb[key], value):
self.__limb.pop(key)
self._expire(key, value, callback=callback)
def get(self, key, recursive=False):
if not recursive:
return self.__data.get(key)
return {k: v for k, v in self.__data.items() if k.startswith(key)}
def _onTick(self, timeToWait=0.0):
# The SyncObj starts applying the local log only when there is at least one node connected.
# We want to change this behavior and apply the local log even when there is nobody except us.
# It gives us at least some picture about the last known cluster state.
if self.__early_apply_local_log and not self.applied_local_log and self._SyncObj__needLoadDumpFile:
self._SyncObj__raftCommitIndex = self._SyncObj__getCurrentLogIndex()
self._SyncObj__raftCurrentTerm = self._SyncObj__getCurrentLogTerm()
super(KVStoreTTL, self)._onTick(timeToWait)
# The SyncObj calls onReady callback only when cluster got the leader and is ready for writes.
# In some cases for us it is safe to "signal" the Raft object when the local log is fully applied.
# We are using the `applied_local_log` property for that, but not calling the callback function.
if self.__early_apply_local_log and not self.applied_local_log and self._SyncObj__raftCommitIndex != 1 and \
self._SyncObj__raftLastApplied == self._SyncObj__raftCommitIndex:
self.applied_local_log = True
if self._isLeader():
self.__expire_keys()
else:
self.__limb.clear()
class Raft(AbstractDCS):
    """Patroni DCS implementation backed by an embedded Raft (pysyncobj) KV store."""

    def __init__(self, config):
        super(Raft, self).__init__(config)
        self._ttl = int(config.get('ttl') or 30)

        self_addr = config.get('self_addr')
        partner_addrs = config.get('partner_addrs', [])
        if self._ctl:
            # patronictl only talks to the cluster; it never joins as a member.
            if self_addr:
                partner_addrs.append(self_addr)
            self_addr = None

        # Create raft data_dir if necessary
        raft_data_dir = config.get('data_dir', '')
        if raft_data_dir != '':
            validate_directory(raft_data_dir)

        ready_event = threading.Event()
        file_template = os.path.join(config.get('data_dir', ''), (self_addr or ''))
        conf = SyncObjConf(password=config.get('password'), appendEntriesUseBatch=False,
                           bindAddress=config.get('bind_addr'), commandsWaitLeader=False,
                           fullDumpFile=(file_template + '.dump' if self_addr else None),
                           journalFile=(file_template + '.journal' if self_addr else None),
                           onReady=ready_event.set, dynamicMembershipChange=True)

        self._sync_obj = KVStoreTTL(self_addr, partner_addrs, conf, self._on_set, self._on_delete)

        # Block until the cluster is ready or we at least applied the local log.
        while True:
            ready_event.wait(5)
            # BUGFIX: Event.is_set() replaces the isSet() camelCase alias,
            # which was removed in Python 3.12.
            if ready_event.is_set() or self._sync_obj.applied_local_log:
                break
            else:
                logger.info('waiting on raft')

        self._sync_obj.forceLogCompaction()
        self.set_retry_timeout(int(config.get('retry_timeout') or 10))

    def _on_set(self, key, value):
        """Wake the Patroni heartbeat loop on relevant key writes."""
        leader = (self._sync_obj.get(self.leader_path) or {}).get('value')
        # BUGFIX: the old condition started with `key == value['created'] == ...`,
        # comparing the key (a str path) with a time.time() float -- always False,
        # making the first clause dead.  The intent is "the key was just created"
        # (created == updated).  Note: 'and' binds tighter than 'or' below.
        if value['created'] == value['updated'] and \
                (key.startswith(self.members_path) or key == self.leader_path and leader != self._name) or \
                key == self.leader_optime_path and leader != self._name or key in (self.config_path, self.sync_path):
            self.event.set()

    def _on_delete(self, key):
        # Losing the leader key must wake up the heartbeat loop immediately.
        if key == self.leader_path:
            self.event.set()

    def set_ttl(self, ttl):
        self._ttl = ttl

    @property
    def ttl(self):
        return self._ttl

    def set_retry_timeout(self, retry_timeout):
        self._sync_obj.set_retry_timeout(retry_timeout)

    @staticmethod
    def member(key, value):
        """Build a Member from a stored member key/value pair."""
        return Member.from_node(value['index'], os.path.basename(key), None, value['value'])

    def _load_cluster(self):
        """Assemble a Cluster view from all keys below this cluster's prefix."""
        prefix = self.client_path('')
        response = self._sync_obj.get(prefix, recursive=True)
        if not response:
            return Cluster(None, None, None, None, [], None, None, None)
        nodes = {os.path.relpath(key, prefix).replace('\\', '/'): value for key, value in response.items()}

        # get initialize flag
        initialize = nodes.get(self._INITIALIZE)
        initialize = initialize and initialize['value']

        # get global dynamic configuration
        config = nodes.get(self._CONFIG)
        config = config and ClusterConfig.from_node(config['index'], config['value'])

        # get timeline history
        history = nodes.get(self._HISTORY)
        history = history and TimelineHistory.from_node(history['index'], history['value'])

        # get last leader operation
        last_leader_operation = nodes.get(self._LEADER_OPTIME)
        last_leader_operation = 0 if last_leader_operation is None else int(last_leader_operation['value'])

        # get list of members
        members = [self.member(k, n) for k, n in nodes.items() if k.startswith(self._MEMBERS) and k.count('/') == 1]

        # get leader
        leader = nodes.get(self._LEADER)
        if leader:
            # Fall back to a synthetic Member when the leader key does not
            # match any registered member.
            member = Member(-1, leader['value'], None, {})
            member = ([m for m in members if m.name == leader['value']] or [member])[0]
            leader = Leader(leader['index'], None, member)

        # failover key
        failover = nodes.get(self._FAILOVER)
        if failover:
            failover = Failover.from_node(failover['index'], failover['value'])

        # get synchronization state
        sync = nodes.get(self._SYNC)
        sync = SyncState.from_node(sync and sync['index'], sync and sync['value'])

        return Cluster(initialize, config, leader, last_leader_operation, members, failover, sync, history)

    def _write_leader_optime(self, last_operation):
        return self._sync_obj.set(self.leader_optime_path, last_operation, timeout=1)

    def _update_leader(self):
        # Refresh our leader key; if it vanished (e.g. expired), try to reacquire.
        ret = self._sync_obj.set(self.leader_path, self._name, ttl=self._ttl, prevValue=self._name)
        if not ret and self._sync_obj.get(self.leader_path) is None:
            ret = self.attempt_to_acquire_leader()
        return ret

    def attempt_to_acquire_leader(self, permanent=False):
        return self._sync_obj.set(self.leader_path, self._name, prevExist=False,
                                  ttl=None if permanent else self._ttl)

    def set_failover_value(self, value, index=None):
        return self._sync_obj.set(self.failover_path, value, prevIndex=index)

    def set_config_value(self, value, index=None):
        return self._sync_obj.set(self.config_path, value, prevIndex=index)

    def touch_member(self, data, permanent=False):
        data = json.dumps(data, separators=(',', ':'))
        return self._sync_obj.set(self.member_path, data, None if permanent else self._ttl, timeout=2)

    def take_leader(self):
        return self._sync_obj.set(self.leader_path, self._name, ttl=self._ttl)

    def initialize(self, create_new=True, sysid=''):
        return self._sync_obj.set(self.initialize_path, sysid, prevExist=(not create_new))

    def _delete_leader(self):
        return self._sync_obj.delete(self.leader_path, prevValue=self._name, timeout=1)

    def cancel_initialization(self):
        return self._sync_obj.delete(self.initialize_path)

    def delete_cluster(self):
        return self._sync_obj.delete(self.client_path(''), recursive=True)

    def set_history_value(self, value):
        return self._sync_obj.set(self.history_path, value)

    def set_sync_state_value(self, value, index=None):
        return self._sync_obj.set(self.sync_path, value, prevIndex=index)

    def delete_sync_state(self, index=None):
        return self._sync_obj.delete(self.sync_path, prevIndex=index)

    def watch(self, leader_index, timeout):
        try:
            return super(Raft, self).watch(leader_index, timeout)
        finally:
            # The event is one-shot; always rearm it after a watch cycle.
            self.event.clear()
|
pyshell.py | #! /usr/bin/env python3
import sys
if __name__ == "__main__":
    # Make "import idlelib.pyshell" resolve to this already-running module,
    # so its objects are shared no matter how the module is reached.
    sys.modules['idlelib.pyshell'] = sys.modules['__main__']

try:
    from tkinter import *
except ImportError:
    print("** IDLE can't import Tkinter.\n"
          "Your Python may not be configured for Tk. **", file=sys.__stderr__)
    raise SystemExit(1)

# Valid arguments for the ...Awareness call below are defined in the following.
# https://msdn.microsoft.com/en-us/library/windows/desktop/dn280512(v=vs.85).aspx
if sys.platform == 'win32':
    # Must run before any Tk window is created for DPI scaling to apply.
    try:
        import ctypes
        PROCESS_SYSTEM_DPI_AWARE = 1  # Int required.
        ctypes.OleDLL('shcore').SetProcessDpiAwareness(PROCESS_SYSTEM_DPI_AWARE)
    except (ImportError, AttributeError, OSError):
        pass

import tkinter.messagebox as tkMessageBox

if TkVersion < 8.5:
    # Too-old tcl/tk: show a dialog (needs a hidden root) and bail out.
    root = Tk()  # otherwise create root in main
    root.withdraw()
    from idlelib.run import fix_scaling
    fix_scaling(root)
    tkMessageBox.showerror("Idle Cannot Start",
            "Idle requires tcl/tk 8.5+, not %s." % TkVersion,
            parent=root)
    raise SystemExit(1)
from code import InteractiveInterpreter
import linecache
import os
import os.path
from platform import python_version
import re
import socket
import subprocess
from textwrap import TextWrapper
import threading
import time
import tokenize
import warnings
from idlelib.colorizer import ColorDelegator
from idlelib.config import idleConf
from idlelib import debugger
from idlelib import debugger_r
from idlelib.editor import EditorWindow, fixwordbreaks
from idlelib.filelist import FileList
from idlelib.outwin import OutputWindow
from idlelib import rpc
from idlelib.run import idle_formatwarning, StdInputFile, StdOutputFile
from idlelib.undo import UndoDelegator
HOST = '127.0.0.1' # python execution server on localhost loopback
PORT = 0 # someday pass in host, port for remote debug capability
# Override warnings module to write to warning_stream. Initialize to send IDLE
# internal warnings to the console. ScriptBinding.check_syntax() will
# temporarily redirect the stream to the shell window to display warnings when
# checking user's code.
warning_stream = sys.__stderr__ # None, at least on Windows, if no console.
def idle_showwarning(
        message, category, filename, lineno, file=None, line=None):
    """Show Idle-format warning (after replacing warnings.showwarning).

    The differences are the formatter called, the file=None replacement,
    which can be None, the capture of the consequence AttributeError,
    and the output of a hard-coded prompt.
    """
    target = warning_stream if file is None else file
    try:
        formatted = idle_formatwarning(
            message, category, filename, lineno, line=line)
        target.write(formatted)
        target.write(">>> ")
    except (AttributeError, OSError):
        pass  # if file (probably __stderr__) is invalid, skip warning.
# The saved stock warnings.showwarning, so capture_warnings(False) can restore it.
_warnings_showwarning = None

def capture_warnings(capture):
    "Replace warning.showwarning with idle_showwarning, or reverse."

    global _warnings_showwarning
    if capture:
        if _warnings_showwarning is None:
            # Save the original hook exactly once; repeated True calls are no-ops.
            _warnings_showwarning = warnings.showwarning
            warnings.showwarning = idle_showwarning
    else:
        if _warnings_showwarning is not None:
            # Restore the original hook and mark ourselves as uninstalled.
            warnings.showwarning = _warnings_showwarning
            _warnings_showwarning = None

capture_warnings(True)
def extended_linecache_checkcache(filename=None,
                                  orig_checkcache=linecache.checkcache):
    """Extend linecache.checkcache to preserve the <pyshell#...> entries

    The shell's pseudo-files exist only in the cache, so stash them away,
    let the stock checkcache() purge stale real-file entries, then put
    them back.  orig_checkcache is bound at definition time to the
    original method, allowing it to be patched.
    """
    cache = linecache.cache
    saved = {key: cache.pop(key)
             for key in list(cache)
             if len(key) > 1 and key[0] == '<' and key[-1] == '>'}
    orig_checkcache(filename)
    cache.update(saved)
# Patch linecache.checkcache():
linecache.checkcache = extended_linecache_checkcache
class PyShellEditorWindow(EditorWindow):
    "Regular text edit window in IDLE, supports breakpoints"

    def __init__(self, *args):
        # Breakpoint line numbers for the file shown in this window.
        self.breakpoints = []
        EditorWindow.__init__(self, *args)
        self.text.bind("<<set-breakpoint-here>>", self.set_breakpoint_here)
        self.text.bind("<<clear-breakpoint-here>>", self.clear_breakpoint_here)
        self.text.bind("<<open-python-shell>>", self.flist.open_shell)

        #TODO: don't read/write this from/to .idlerc when testing
        self.breakpointPath = os.path.join(
                idleConf.userdir, 'breakpoints.lst')

        # whenever a file is changed, restore breakpoints
        def filename_changed_hook(old_hook=self.io.filename_change_hook,
                                  self=self):
            self.restore_file_breaks()
            old_hook()
        self.io.set_filename_change_hook(filename_changed_hook)
        if self.io.filename:
            self.restore_file_breaks()
        self.color_breakpoint_text()

    rmenu_specs = [
        ("Cut", "<<cut>>", "rmenu_check_cut"),
        ("Copy", "<<copy>>", "rmenu_check_copy"),
        ("Paste", "<<paste>>", "rmenu_check_paste"),
        (None, None, None),
        ("Set Breakpoint", "<<set-breakpoint-here>>", None),
        ("Clear Breakpoint", "<<clear-breakpoint-here>>", None)
    ]

    def color_breakpoint_text(self, color=True):
        "Turn colorizing of breakpoint text on or off"
        if self.io is None:
            # possible due to update in restore_file_breaks
            return
        if color:
            theme = idleConf.CurrentTheme()
            cfg = idleConf.GetHighlight(theme, "break")
        else:
            cfg = {'foreground': '', 'background': ''}
        self.text.tag_config('BREAK', cfg)

    def set_breakpoint(self, lineno):
        "Tag *lineno* as a breakpoint and notify the debugger, if active."
        text = self.text
        filename = self.io.filename
        text.tag_add("BREAK", "%d.0" % lineno, "%d.0" % (lineno+1))
        if lineno not in self.breakpoints:  # only add if missing, i.e. do once
            self.breakpoints.append(lineno)
        try:    # update the subprocess debugger
            debug = self.flist.pyshell.interp.debugger
            debug.set_breakpoint_here(filename, lineno)
        except Exception:    # but debugger may not be active right now....
            pass

    def set_breakpoint_here(self, event=None):
        "Set a breakpoint on the line holding the insert cursor."
        text = self.text
        filename = self.io.filename
        if not filename:
            text.bell()
            return
        lineno = int(float(text.index("insert")))
        self.set_breakpoint(lineno)

    def clear_breakpoint_here(self, event=None):
        "Remove the breakpoint on the line holding the insert cursor."
        text = self.text
        filename = self.io.filename
        if not filename:
            text.bell()
            return
        lineno = int(float(text.index("insert")))
        try:
            self.breakpoints.remove(lineno)
        except ValueError:  # no breakpoint was recorded for this line
            pass
        text.tag_remove("BREAK", "insert linestart",
                        "insert lineend +1char")
        try:
            debug = self.flist.pyshell.interp.debugger
            debug.clear_breakpoint_here(filename, lineno)
        except Exception:  # debugger may not be active right now
            pass

    def clear_file_breaks(self):
        "Forget every breakpoint set in this window."
        if self.breakpoints:
            text = self.text
            filename = self.io.filename
            if not filename:
                text.bell()
                return
            self.breakpoints = []
            text.tag_remove("BREAK", "1.0", END)
            try:
                debug = self.flist.pyshell.interp.debugger
                debug.clear_file_breaks(filename)
            except Exception:  # debugger may not be active right now
                pass

    def store_file_breaks(self):
        "Save breakpoints when file is saved"
        # XXX 13 Dec 2002 KBK Currently the file must be saved before it can
        #     be run.  The breaks are saved at that time.  If we introduce
        #     a temporary file save feature the save breaks functionality
        #     needs to be re-verified, since the breaks at the time the
        #     temp file is created may differ from the breaks at the last
        #     permanent save of the file.  Currently, a break introduced
        #     after a save will be effective, but not persistent.
        #     This is necessary to keep the saved breaks synched with the
        #     saved file.
        #
        #     Breakpoints are set as tagged ranges in the text.
        #     Since a modified file has to be saved before it is
        #     run, and since self.breakpoints (from which the subprocess
        #     debugger is loaded) is updated during the save, the visible
        #     breaks stay synched with the subprocess even if one of these
        #     unexpected breakpoint deletions occurs.
        breaks = self.breakpoints
        filename = self.io.filename
        try:
            with open(self.breakpointPath, "r") as fp:
                lines = fp.readlines()
        except OSError:
            lines = []
        try:
            # Rewrite the whole file, replacing this file's entry (if any).
            with open(self.breakpointPath, "w") as new_file:
                for line in lines:
                    if not line.startswith(filename + '='):
                        new_file.write(line)
                self.update_breakpoints()
                breaks = self.breakpoints
                if breaks:
                    new_file.write(filename + '=' + str(breaks) + '\n')
        except OSError as err:
            # Warn only once per session about an unwritable breakpoint file.
            if not getattr(self.root, "breakpoint_error_displayed", False):
                self.root.breakpoint_error_displayed = True
                tkMessageBox.showerror(title='IDLE Error',
                        message='Unable to update breakpoint list:\n%s'
                            % str(err),
                        parent=self.text)

    def restore_file_breaks(self):
        "Reload this file's breakpoints from breakpoints.lst."
        self.text.update()  # this enables setting "BREAK" tags to be visible
        if self.io is None:
            # can happen if IDLE closes due to the .update() call
            return
        filename = self.io.filename
        if filename is None:
            return
        if os.path.isfile(self.breakpointPath):
            import ast  # local import, matching this file's style elsewhere
            with open(self.breakpointPath, "r") as fp:
                lines = fp.readlines()
            for line in lines:
                if line.startswith(filename + '='):
                    # The stored value is a plain list literal; parse it with
                    # ast.literal_eval instead of eval() so a tampered
                    # breakpoints.lst cannot execute arbitrary code.
                    breakpoint_linenumbers = ast.literal_eval(line[len(filename)+1:])
                    for breakpoint_linenumber in breakpoint_linenumbers:
                        self.set_breakpoint(breakpoint_linenumber)

    def update_breakpoints(self):
        "Retrieves all the breakpoints in the current window"
        text = self.text
        ranges = text.tag_ranges("BREAK")
        linenumber_list = self.ranges_to_linenumbers(ranges)
        self.breakpoints = linenumber_list

    def ranges_to_linenumbers(self, ranges):
        "Expand Tk (start, end) tag range pairs into a flat list of line numbers."
        lines = []
        for index in range(0, len(ranges), 2):
            lineno = int(float(ranges[index].string))
            end = int(float(ranges[index+1].string))
            while lineno < end:
                lines.append(lineno)
                lineno += 1
        return lines

    # XXX 13 Dec 2002 KBK Not used currently
    #     def saved_change_hook(self):
    #         "Extend base method - clear breaks if module is modified"
    #         if not self.get_saved():
    #             self.clear_file_breaks()
    #         EditorWindow.saved_change_hook(self)

    def _close(self):
        "Extend base method - clear breaks when module is closed"
        self.clear_file_breaks()
        EditorWindow._close(self)
class PyShellFileList(FileList):
    "Extend base class: IDLE supports a shell and breakpoints"

    # override FileList's class variable, instances return PyShellEditorWindow
    # instead of EditorWindow when new edit windows are created.
    EditorWindow = PyShellEditorWindow

    pyshell = None

    def open_shell(self, event=None):
        """Raise the existing shell window, or create one; return it."""
        if self.pyshell:
            self.pyshell.top.wakeup()
        else:
            self.pyshell = PyShell(self)
        shell = self.pyshell
        if shell and not shell.begin():
            return None
        return shell
class ModifiedColorDelegator(ColorDelegator):
    "Extend base class: colorizer for the shell window itself"

    def __init__(self):
        ColorDelegator.__init__(self)
        self.LoadTagDefs()

    def recolorize_main(self):
        # Everything before the I/O mark is history: mark it SYNC so the
        # base colorizer never touches it again.
        self.tag_remove("TODO", "1.0", "iomark")
        self.tag_add("SYNC", "1.0", "iomark")
        ColorDelegator.recolorize_main(self)

    def LoadTagDefs(self):
        ColorDelegator.LoadTagDefs(self)
        theme = idleConf.CurrentTheme()
        shell_tags = {
            "stdin": {'background': None, 'foreground': None},
            "stdout": idleConf.GetHighlight(theme, "stdout"),
            "stderr": idleConf.GetHighlight(theme, "stderr"),
            "console": idleConf.GetHighlight(theme, "console"),
        }
        self.tagdefs.update(shell_tags)

    def removecolors(self):
        # Don't remove shell color tags before "iomark"
        for tag in self.tagdefs:
            self.tag_remove(tag, "iomark", "end")
class ModifiedUndoDelegator(UndoDelegator):
    "Extend base class: forbid insert/delete before the I/O mark"

    def __blocked(self, index):
        """Return True (after beeping) when *index* lies before the I/O mark."""
        try:
            if self.delegate.compare(index, "<", "iomark"):
                self.delegate.bell()
                return True
        except TclError:
            pass
        return False

    def insert(self, index, chars, tags=None):
        if not self.__blocked(index):
            UndoDelegator.insert(self, index, chars, tags)

    def delete(self, index1, index2=None):
        if not self.__blocked(index1):
            UndoDelegator.delete(self, index1, index2)
class MyRPCClient(rpc.RPCClient):

    def handle_EOF(self):
        "Override the base class - just re-raise EOFError"
        # The shell needs to notice a dead subprocess (poll_subprocess catches
        # EOFError and restarts it), so EOF must propagate instead of being
        # handled here.
        raise EOFError
def restart_line(width, filename):  # See bpo-38141.
    """Return width long restart line formatted with filename.

    Fill line with balanced '='s, with any extras and at least one at
    the beginning.  Do not end with a trailing space.
    """
    tag = f"= RESTART: {filename or 'Shell'} ="
    if width < len(tag):
        return tag[:-2]  # too narrow: drop the trailing ' ='
    pad, extra = divmod(width - len(tag), 2)
    return "=" * (pad + extra) + tag + "=" * pad
class ModifiedInterpreter(InteractiveInterpreter):
def __init__(self, tkconsole):
self.tkconsole = tkconsole
locals = sys.modules['__main__'].__dict__
InteractiveInterpreter.__init__(self, locals=locals)
self.restarting = False
self.subprocess_arglist = None
self.port = PORT
self.original_compiler_flags = self.compile.compiler.flags
_afterid = None
rpcclt = None
rpcsubproc = None
def spawn_subprocess(self):
if self.subprocess_arglist is None:
self.subprocess_arglist = self.build_subprocess_arglist()
self.rpcsubproc = subprocess.Popen(self.subprocess_arglist)
def build_subprocess_arglist(self):
assert (self.port!=0), (
"Socket should have been assigned a port number.")
w = ['-W' + s for s in sys.warnoptions]
# Maybe IDLE is installed and is being accessed via sys.path,
# or maybe it's not installed and the idle.py script is being
# run from the IDLE source directory.
del_exitf = idleConf.GetOption('main', 'General', 'delete-exitfunc',
default=False, type='bool')
command = "__import__('idlelib.run').run.main(%r)" % (del_exitf,)
return [sys.executable] + w + ["-c", command, str(self.port)]
    def start_subprocess(self):
        """Spawn the execution server and establish the RPC link.

        Returns the RPC client on success, None on failure.
        """
        addr = (HOST, self.port)
        # GUI makes several attempts to acquire socket, listens for connection
        for i in range(3):
            time.sleep(i)
            try:
                self.rpcclt = MyRPCClient(addr)
                break
            except OSError:
                pass
        else:
            self.display_port_binding_error()
            return None
        # if PORT was 0, system will assign an 'ephemeral' port. Find it out:
        self.port = self.rpcclt.listening_sock.getsockname()[1]
        # if PORT was not 0, probably working with a remote execution server
        if PORT != 0:
            # To allow reconnection within the 2MSL wait (cf. Stevens TCP
            # V1, 18.6), set SO_REUSEADDR. Note that this can be problematic
            # on Windows since the implementation allows two active sockets on
            # the same address!
            self.rpcclt.listening_sock.setsockopt(socket.SOL_SOCKET,
                                                  socket.SO_REUSEADDR, 1)
        self.spawn_subprocess()
        #time.sleep(20) # test to simulate GUI not accepting connection
        # Accept the connection from the Python execution server
        self.rpcclt.listening_sock.settimeout(10)
        try:
            self.rpcclt.accept()
        except socket.timeout:
            self.display_no_subprocess_error()
            return None
        # Expose GUI-side objects to the subprocess by registering them
        # under well-known names on the RPC channel.
        self.rpcclt.register("console", self.tkconsole)
        self.rpcclt.register("stdin", self.tkconsole.stdin)
        self.rpcclt.register("stdout", self.tkconsole.stdout)
        self.rpcclt.register("stderr", self.tkconsole.stderr)
        self.rpcclt.register("flist", self.tkconsole.flist)
        self.rpcclt.register("linecache", linecache)
        self.rpcclt.register("interp", self)
        self.transfer_path(with_cwd=True)
        self.poll_subprocess()
        return self.rpcclt
    def restart_subprocess(self, with_cwd=False, filename=''):
        """Tear down and relaunch the execution server, keeping debugger state.

        Returns the new RPC client, or None if the new connection failed.
        """
        if self.restarting:
            # Re-entrant call (e.g. from poll_subprocess) - nothing to do.
            return self.rpcclt
        self.restarting = True
        # close only the subprocess debugger
        debug = self.getdebugger()
        if debug:
            try:
                # Only close subprocess debugger, don't unregister gui_adap!
                debugger_r.close_subprocess_debugger(self.rpcclt)
            except:
                pass
        # Kill subprocess, spawn a new one, accept connection.
        self.rpcclt.close()
        self.terminate_subprocess()
        console = self.tkconsole
        was_executing = console.executing  # NOTE(review): currently unused
        console.executing = False
        self.spawn_subprocess()
        try:
            self.rpcclt.accept()
        except socket.timeout:
            self.display_no_subprocess_error()
            return None
        self.transfer_path(with_cwd=with_cwd)
        console.stop_readline()
        # annotate restart in shell window and mark it
        console.text.delete("iomark", "end-1c")
        console.write('\n')
        console.write(restart_line(console.width, filename))
        console.text.mark_set("restart", "end-1c")
        console.text.mark_gravity("restart", "left")
        if not filename:
            console.showprompt()
        # restart subprocess debugger
        if debug:
            # Restarted debugger connects to current instance of debug GUI
            debugger_r.restart_subprocess_debugger(self.rpcclt)
            # reload remote debugger breakpoints for all PyShellEditWindows
            debug.load_breakpoints()
        # Reset __future__ imports accumulated by the previous session.
        self.compile.compiler.flags = self.original_compiler_flags
        self.restarting = False
        return self.rpcclt
    def __request_interrupt(self):
        # Runs in a helper thread; remotecall blocks until the server replies.
        self.rpcclt.remotecall("exec", "interrupt_the_server", (), {})

    def interrupt_subprocess(self):
        """Ask the server to interrupt the running code, without blocking the GUI."""
        threading.Thread(target=self.__request_interrupt).start()
    def kill_subprocess(self):
        """Unconditionally shut down the RPC link and the execution server."""
        if self._afterid is not None:
            # Stop the poll_subprocess() after-loop first.
            self.tkconsole.text.after_cancel(self._afterid)
        try:
            self.rpcclt.listening_sock.close()
        except AttributeError:  # no socket
            pass
        try:
            self.rpcclt.close()
        except AttributeError:  # no socket
            pass
        self.terminate_subprocess()
        self.tkconsole.executing = False
        self.rpcclt = None
    def terminate_subprocess(self):
        "Make sure subprocess is terminated"
        try:
            self.rpcsubproc.kill()
        except OSError:
            # process already terminated
            return
        else:
            try:
                # Reap the child so it does not linger as a zombie.
                self.rpcsubproc.wait()
            except OSError:
                return
def transfer_path(self, with_cwd=False):
if with_cwd: # Issue 13506
path = [''] # include Current Working Directory
path.extend(sys.path)
else:
path = sys.path
self.runcommand("""if 1:
import sys as _sys
_sys.path = %r
del _sys
\n""" % (path,))
    # Sequence number of the currently executing user command, or None.
    active_seq = None

    def poll_subprocess(self):
        """Pump one response from the subprocess and reschedule itself.

        Runs on the Tk event loop every pollinterval ms while the shell is
        open; handles command completion, errors and lost connections.
        """
        clt = self.rpcclt
        if clt is None:
            return
        try:
            response = clt.pollresponse(self.active_seq, wait=0.05)
        except (EOFError, OSError, KeyboardInterrupt):
            # lost connection or subprocess terminated itself, restart
            # [the KBI is from rpc.SocketIO.handle_EOF()]
            if self.tkconsole.closing:
                return
            response = None
            self.restart_subprocess()
        if response:
            self.tkconsole.resetoutput()
            self.active_seq = None
            how, what = response
            console = self.tkconsole.console
            if how == "OK":
                if what is not None:
                    print(repr(what), file=console)
            elif how == "EXCEPTION":
                if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
                    self.remote_stack_viewer()
            elif how == "ERROR":
                errmsg = "pyshell.ModifiedInterpreter: Subprocess ERROR:\n"
                print(errmsg, what, file=sys.__stderr__)
                print(errmsg, what, file=console)
            # we received a response to the currently active seq number:
            try:
                self.tkconsole.endexecuting()
            except AttributeError:  # shell may have closed
                pass
        # Reschedule myself
        if not self.tkconsole.closing:
            self._afterid = self.tkconsole.text.after(
                self.tkconsole.pollinterval, self.poll_subprocess)
    # Currently attached debugger object (set by the debugger GUI), or None.
    debugger = None

    def setdebugger(self, debugger):
        self.debugger = debugger

    def getdebugger(self):
        return self.debugger
    def open_remote_stack_viewer(self):
        """Initiate the remote stack viewer from a separate thread.

        This method is called from the subprocess, and by returning from this
        method we allow the subprocess to unblock.  After a bit the shell
        requests the subprocess to open the remote stack viewer which returns a
        static object looking at the last exception.  It is queried through
        the RPC mechanism.
        """
        # Defer on the Tk event loop so this RPC call can return first.
        self.tkconsole.text.after(300, self.remote_stack_viewer)
        return
    def remote_stack_viewer(self):
        """Open a tree window browsing the subprocess' last traceback."""
        from idlelib import debugobj_r
        oid = self.rpcclt.remotequeue("exec", "stackviewer", ("flist",), {})
        if oid is None:
            # Server reported nothing to view.
            self.tkconsole.root.bell()
            return
        item = debugobj_r.StubObjectTreeItem(self.rpcclt, oid)
        from idlelib.tree import ScrolledCanvas, TreeNode
        top = Toplevel(self.tkconsole.root)
        theme = idleConf.CurrentTheme()
        background = idleConf.GetHighlight(theme, 'normal')['background']
        sc = ScrolledCanvas(top, bg=background, highlightthickness=0)
        sc.frame.pack(expand=1, fill="both")
        node = TreeNode(sc.canvas, None, item)
        node.expand()
        # XXX Should GC the remote tree when closing the window
    # Counter for generating unique <pyshell#N> pseudo-filenames.
    gid = 0

    def execsource(self, source):
        "Like runsource() but assumes complete exec source"
        filename = self.stuffsource(source)
        self.execfile(filename, source)
    def execfile(self, filename, source=None):
        "Execute an existing file"
        if source is None:
            # tokenize.open() honors the source file's coding cookie.
            with tokenize.open(filename) as fp:
                source = fp.read()
        if use_subprocess:
            # Give user code a correct __file__, then clean it up afterwards.
            source = (f"__file__ = r'''{os.path.abspath(filename)}'''\n"
                      + source + "\ndel __file__")
        try:
            code = compile(source, filename, "exec")
        except (OverflowError, SyntaxError):
            self.tkconsole.resetoutput()
            print('*** Error in script or command!\n'
                  'Traceback (most recent call last):',
                  file=self.tkconsole.stderr)
            InteractiveInterpreter.showsyntaxerror(self, filename)
            self.tkconsole.showprompt()
        else:
            self.runcode(code)
    def runsource(self, source):
        "Extend base class method: Stuff the source in the line cache first"
        # The cached pseudo-file lets tracebacks display the typed source.
        filename = self.stuffsource(source)
        # at the moment, InteractiveInterpreter expects str
        assert isinstance(source, str)
        # InteractiveInterpreter.runsource() calls its runcode() method,
        # which is overridden (see below)
        return InteractiveInterpreter.runsource(self, source, filename)
def stuffsource(self, source):
"Stuff source in the filename cache"
filename = "<pyshell#%d>" % self.gid
self.gid = self.gid + 1
lines = source.split("\n")
linecache.cache[filename] = len(source)+1, 0, lines, filename
return filename
    def prepend_syspath(self, filename):
        "Prepend sys.path with file's directory if not already included"
        # The command runs inside the subprocess, so it must be self-contained;
        # the 'if 1:' wrapper allows an indented block inside the string.
        self.runcommand("""if 1:
            _filename = %r
            import sys as _sys
            from os.path import dirname as _dirname
            _dir = _dirname(_filename)
            if not _dir in _sys.path:
                _sys.path.insert(0, _dir)
            del _filename, _sys, _dirname, _dir
            \n""" % (filename,))
    def showsyntaxerror(self, filename=None):
        """Override Interactive Interpreter method: Use Colorizing

        Color the offending position instead of printing it and pointing at it
        with a caret.
        """
        tkconsole = self.tkconsole
        text = tkconsole.text
        text.tag_remove("ERROR", "1.0", "end")
        type, value, tb = sys.exc_info()
        msg = getattr(value, 'msg', '') or value or "<no detail available>"
        lineno = getattr(value, 'lineno', '') or 1
        offset = getattr(value, 'offset', '') or 0
        if offset == 0:
            lineno += 1  #mark end of offending line
        # Position is computed relative to "iomark", where the user's
        # input for this command starts in the shell window.
        if lineno == 1:
            pos = "iomark + %d chars" % (offset-1)
        else:
            pos = "iomark linestart + %d lines + %d chars" % \
                  (lineno-1, offset-1)
        tkconsole.colorize_syntax_error(text, pos)
        tkconsole.resetoutput()
        self.write("SyntaxError: %s\n" % msg)
        tkconsole.showprompt()
    def showtraceback(self):
        "Extend base class method to reset output properly"
        self.tkconsole.resetoutput()
        # Purge stale real-file entries so the traceback shows fresh source.
        self.checklinecache()
        InteractiveInterpreter.showtraceback(self)
        if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
            self.tkconsole.open_stack_viewer()
def checklinecache(self):
c = linecache.cache
for key in list(c.keys()):
if key[:1] + key[-1:] != "<>":
del c[key]
    def runcommand(self, code):
        "Run the code without invoking the debugger"
        # The code better not raise an exception!
        if self.tkconsole.executing:
            self.display_executing_dialog()
            return 0
        if self.rpcclt:
            # Queue in the subprocess; replies arrive via poll_subprocess().
            self.rpcclt.remotequeue("exec", "runcode", (code,), {})
        else:
            exec(code, self.locals)
        return 1
    def runcode(self, code):
        "Override base class method"
        # A previous command still running means the subprocess is wedged;
        # restart it before running anything new.
        if self.tkconsole.executing:
            self.interp.restart_subprocess()
        self.checklinecache()
        debugger = self.debugger
        try:
            self.tkconsole.beginexecuting()
            if not debugger and self.rpcclt is not None:
                # Normal subprocess path: run asynchronously, remember the
                # sequence id so the poll loop can match the reply.
                self.active_seq = self.rpcclt.asyncqueue("exec", "runcode",
                                                        (code,), {})
            elif debugger:
                debugger.run(code, self.locals)
            else:
                # No-subprocess mode: execute in-process.
                exec(code, self.locals)
        except SystemExit:
            if not self.tkconsole.closing:
                # User code called exit(); confirm before taking IDLE down.
                if tkMessageBox.askyesno(
                        "Exit?",
                        "Do you want to exit altogether?",
                        default="yes",
                        parent=self.tkconsole.text):
                    raise
                else:
                    self.showtraceback()
            else:
                # Shell is already closing; let SystemExit propagate.
                raise
        except:
            if use_subprocess:
                # With a subprocess, exceptions here are IDLE's own bug,
                # not the user's.
                print("IDLE internal error in runcode()",
                      file=self.tkconsole.stderr)
                self.showtraceback()
                self.tkconsole.endexecuting()
            else:
                if self.tkconsole.canceled:
                    self.tkconsole.canceled = False
                    print("KeyboardInterrupt", file=self.tkconsole.stderr)
                else:
                    self.showtraceback()
        finally:
            if not use_subprocess:
                try:
                    self.tkconsole.endexecuting()
                except AttributeError:  # shell may have closed
                    pass
def write(self, s):
"Override base class method"
return self.tkconsole.stderr.write(s)
    def display_port_binding_error(self):
        """Explain that IDLE could not bind its local RPC socket."""
        tkMessageBox.showerror(
            "Port Binding Error",
            "IDLE can't bind to a TCP/IP port, which is necessary to "
            "communicate with its Python execution server. This might be "
            "because no networking is installed on this computer. "
            "Run IDLE with the -n command line switch to start without a "
            "subprocess and refer to Help/IDLE Help 'Running without a "
            "subprocess' for further details.",
            parent=self.tkconsole.text)

    def display_no_subprocess_error(self):
        """Report that the spawned execution subprocess never connected."""
        tkMessageBox.showerror(
            "Subprocess Connection Error",
            "IDLE's subprocess didn't make connection.\n"
            "See the 'Startup failure' section of the IDLE doc, online at\n"
            "https://docs.python.org/3/library/idle.html#startup-failure",
            parent=self.tkconsole.text)

    def display_executing_dialog(self):
        """Tell the user the shell is still busy with a previous command."""
        tkMessageBox.showerror(
            "Already executing",
            "The Python Shell window is already executing a command; "
            "please wait until it is finished.",
            parent=self.tkconsole.text)
class PyShell(OutputWindow):
shell_title = "IDLE Shell " + python_version()
# Override classes
ColorDelegator = ModifiedColorDelegator
UndoDelegator = ModifiedUndoDelegator
# Override menus
menu_specs = [
("file", "_File"),
("edit", "_Edit"),
("debug", "_Debug"),
("options", "_Options"),
("window", "_Window"),
("help", "_Help"),
]
# Extend right-click context menu
rmenu_specs = OutputWindow.rmenu_specs + [
("Squeeze", "<<squeeze-current-text>>"),
]
allow_line_numbers = False
# New classes
from idlelib.history import History
    def __init__(self, flist=None):
        """Create the shell window, its interpreter, and its std streams.

        If no file list is given, a hidden Tk root and a fresh
        PyShellFileList are created (standalone-shell startup).
        """
        # The Shell menu (restart etc.) only makes sense with a subprocess.
        if use_subprocess:
            ms = self.menu_specs
            if ms[2][0] != "shell":
                ms.insert(2, ("shell", "She_ll"))
        self.interp = ModifiedInterpreter(self)
        if flist is None:
            root = Tk()
            fixwordbreaks(root)
            root.withdraw()
            flist = PyShellFileList(root)
        OutputWindow.__init__(self, flist, None, None)
        self.usetabs = True
        # indentwidth must be 8 when using tabs. See note in EditorWindow:
        self.indentwidth = 8
        self.sys_ps1 = sys.ps1 if hasattr(sys, 'ps1') else '>>> '
        self.prompt_last_line = self.sys_ps1.split('\n')[-1]
        self.prompt = self.sys_ps1  # Changes when debug active
        text = self.text
        text.configure(wrap="char")
        # Shell-specific key/event bindings override the editor defaults.
        text.bind("<<newline-and-indent>>", self.enter_callback)
        text.bind("<<plain-newline-and-indent>>", self.linefeed_callback)
        text.bind("<<interrupt-execution>>", self.cancel_callback)
        text.bind("<<end-of-file>>", self.eof_callback)
        text.bind("<<open-stack-viewer>>", self.open_stack_viewer)
        text.bind("<<toggle-debugger>>", self.toggle_debugger)
        text.bind("<<toggle-jit-stack-viewer>>", self.toggle_jit_stack_viewer)
        if use_subprocess:
            text.bind("<<view-restart>>", self.view_restart_mark)
            text.bind("<<restart-shell>>", self.restart_shell)
        squeezer = self.Squeezer(self)
        text.bind("<<squeeze-current-text>>",
                  squeezer.squeeze_current_text_event)
        # Save the real std streams so _close() can restore them.
        self.save_stdout = sys.stdout
        self.save_stderr = sys.stderr
        self.save_stdin = sys.stdin
        from idlelib import iomenu
        self.stdin = StdInputFile(self, "stdin",
                                  iomenu.encoding, iomenu.errors)
        self.stdout = StdOutputFile(self, "stdout",
                                    iomenu.encoding, iomenu.errors)
        self.stderr = StdOutputFile(self, "stderr",
                                    iomenu.encoding, "backslashreplace")
        self.console = StdOutputFile(self, "console",
                                     iomenu.encoding, iomenu.errors)
        if not use_subprocess:
            # In-process execution: user code shares this process's streams.
            sys.stdout = self.stdout
            sys.stderr = self.stderr
            sys.stdin = self.stdin
        try:
            # page help() text to shell.
            import pydoc  # import must be done here to capture i/o rebinding.
            # XXX KBK 27Dec07 use text viewer someday, but must work w/o subproc
            pydoc.pager = pydoc.plainpager
        except:
            sys.stderr = sys.__stderr__
            raise
        # Command history for Alt-P / Alt-N recall.
        self.history = self.History(self.text)
        # Poll interval for subprocess replies, in milliseconds.
        self.pollinterval = 50  # millisec
    def get_standard_extension_names(self):
        "Override: the shell loads only the shell-enabled extensions."
        return idleConf.GetExtensions(shell_only=True)
reading = False
executing = False
canceled = False
endoffile = False
closing = False
_stop_readline_flag = False
    def set_warning_stream(self, stream):
        """Redirect idlelib warning output (module-global, not per-shell)."""
        global warning_stream
        warning_stream = stream

    def get_warning_stream(self):
        """Return the current module-global warning stream."""
        return warning_stream

    def toggle_debugger(self, event=None):
        """<<toggle-debugger>>: open or close the debugger (idle only)."""
        if self.executing:
            tkMessageBox.showerror("Don't debug now",
                "You can only toggle the debugger when idle",
                parent=self.text)
            self.set_debugger_indicator()
            return "break"
        else:
            db = self.interp.getdebugger()
            if db:
                self.close_debugger()
            else:
                self.open_debugger()

    def set_debugger_indicator(self):
        # Keep the Debug-menu checkmark in sync with the actual state.
        db = self.interp.getdebugger()
        self.setvar("<<toggle-debugger>>", not not db)

    def toggle_jit_stack_viewer(self, event=None):
        pass # All we need is the variable

    def close_debugger(self):
        """Detach and close the debugger, restoring the normal prompt."""
        db = self.interp.getdebugger()
        if db:
            self.interp.setdebugger(None)
            db.close()
            if self.interp.rpcclt:
                debugger_r.close_remote_debugger(self.interp.rpcclt)
            self.resetoutput()
            self.console.write("[DEBUG OFF]\n")
            self.prompt = self.sys_ps1
            self.showprompt()
        self.set_debugger_indicator()

    def open_debugger(self):
        """Start a (possibly remote) debugger and switch to the debug prompt."""
        if self.interp.rpcclt:
            dbg_gui = debugger_r.start_remote_debugger(self.interp.rpcclt,
                                                       self)
        else:
            dbg_gui = debugger.Debugger(self)
        self.interp.setdebugger(dbg_gui)
        dbg_gui.load_breakpoints()
        self.prompt = "[DEBUG ON]\n" + self.sys_ps1
        self.showprompt()
        self.set_debugger_indicator()

    def beginexecuting(self):
        "Helper for ModifiedInterpreter"
        self.resetoutput()
        self.executing = True

    def endexecuting(self):
        "Helper for ModifiedInterpreter"
        self.executing = False
        self.canceled = False
        self.showprompt()
    def close(self):
        "Extend EditorWindow.close()"
        if self.executing:
            # Confirm killing a still-running program before closing.
            response = tkMessageBox.askokcancel(
                "Kill?",
                "Your program is still running!\n Do you want to kill it?",
                default="ok",
                parent=self.text)
            if response is False:
                return "cancel"
        self.stop_readline()
        self.canceled = True
        self.closing = True
        return EditorWindow.close(self)

    def _close(self):
        "Extend EditorWindow._close(), shut down debugger and execution server"
        self.close_debugger()
        if use_subprocess:
            self.interp.kill_subprocess()
        # Restore std streams
        sys.stdout = self.save_stdout
        sys.stderr = self.save_stderr
        sys.stdin = self.save_stdin
        # Break cycles
        self.interp = None
        self.console = None
        self.flist.pyshell = None
        self.history = None
        EditorWindow._close(self)

    def ispythonsource(self, filename):
        "Override EditorWindow method: never remove the colorizer"
        return True

    def short_title(self):
        """Return the fixed shell window title."""
        return self.shell_title
COPYRIGHT = \
'Type "help", "copyright", "credits" or "license()" for more information.'
    def begin(self):
        """Print the banner, start the subprocess, and show the first prompt.

        Returns False if the execution subprocess could not be started.
        """
        self.text.mark_set("iomark", "insert")
        self.resetoutput()
        if use_subprocess:
            nosub = ''
            client = self.interp.start_subprocess()
            if not client:
                self.close()
                return False
        else:
            nosub = ("==== No Subprocess ====\n\n" +
                     "WARNING: Running IDLE without a Subprocess is deprecated\n" +
                     "and will be removed in a later version. See Help/IDLE Help\n" +
                     "for details.\n\n")
            sys.displayhook = rpc.displayhook
        self.write("Python %s on %s\n%s\n%s" %
                   (sys.version, sys.platform, self.COPYRIGHT, nosub))
        self.text.focus_force()
        self.showprompt()
        import tkinter
        tkinter._default_root = None # 03Jan04 KBK What's this?
        return True

    def stop_readline(self):
        # Break out of a nested readline() mainloop, if one is active.
        if not self.reading:  # no nested mainloop to exit.
            return
        self._stop_readline_flag = True
        self.top.quit()

    def readline(self):
        """Block in a nested Tk mainloop until a line of input is available.

        Returns "" for EOF/stop, "\\n" for an empty entry; may raise
        KeyboardInterrupt in no-subprocess mode when input was canceled.
        """
        save = self.reading
        try:
            self.reading = True
            self.top.mainloop()  # nested mainloop()
        finally:
            self.reading = save
        if self._stop_readline_flag:
            self._stop_readline_flag = False
            return ""
        line = self.text.get("iomark", "end-1c")
        if len(line) == 0:  # may be EOF if we quit our mainloop with Ctrl-C
            line = "\n"
        self.resetoutput()
        if self.canceled:
            self.canceled = False
            if not use_subprocess:
                raise KeyboardInterrupt
        if self.endoffile:
            self.endoffile = False
            line = ""
        return line

    def isatty(self):
        """The shell's std streams behave like a terminal."""
        return True
    def cancel_callback(self, event=None):
        """<<interrupt-execution>> (Ctrl-C): cancel input or running code."""
        try:
            if self.text.compare("sel.first", "!=", "sel.last"):
                return # Active selection -- always use default binding
        except:
            pass
        if not (self.executing or self.reading):
            # Idle shell: just print KeyboardInterrupt and re-prompt.
            self.resetoutput()
            self.interp.write("KeyboardInterrupt\n")
            self.showprompt()
            return "break"
        self.endoffile = False
        self.canceled = True
        if (self.executing and self.interp.rpcclt):
            # Debugger can't survive an interrupt; restart the subprocess.
            if self.interp.getdebugger():
                self.interp.restart_subprocess()
            else:
                self.interp.interrupt_subprocess()
        if self.reading:
            self.top.quit() # exit the nested mainloop() in readline()
        return "break"

    def eof_callback(self, event):
        """<<end-of-file>> (Ctrl-D): send EOF to readline() or close shell."""
        if self.executing and not self.reading:
            return # Let the default binding (delete next char) take over
        if not (self.text.compare("iomark", "==", "insert") and
                self.text.compare("insert", "==", "end-1c")):
            return # Let the default binding (delete next char) take over
        if not self.executing:
            self.resetoutput()
            self.close()
        else:
            self.canceled = False
            self.endoffile = True
            self.top.quit()
        return "break"

    def linefeed_callback(self, event):
        # Insert a linefeed without entering anything (still autoindented)
        if self.reading:
            self.text.insert("insert", "\n")
            self.text.see("insert")
        else:
            self.newline_and_indent_event(event)
        return "break"
    def enter_callback(self, event):
        """<<newline-and-indent>> (Return): recall old input or submit new."""
        if self.executing and not self.reading:
            return # Let the default binding (insert '\n') take over
        # If some text is selected, recall the selection
        # (but only if this before the I/O mark)
        try:
            sel = self.text.get("sel.first", "sel.last")
            if sel:
                if self.text.compare("sel.last", "<=", "iomark"):
                    self.recall(sel, event)
                    return "break"
        except:
            pass
        # If we're strictly before the line containing iomark, recall
        # the current line, less a leading prompt, less leading or
        # trailing whitespace
        if self.text.compare("insert", "<", "iomark linestart"):
            # Check if there's a relevant stdin range -- if so, use it
            prev = self.text.tag_prevrange("stdin", "insert")
            if prev and self.text.compare("insert", "<", prev[1]):
                self.recall(self.text.get(prev[0], prev[1]), event)
                return "break"
            next = self.text.tag_nextrange("stdin", "insert")
            if next and self.text.compare("insert lineend", ">=", next[0]):
                self.recall(self.text.get(next[0], next[1]), event)
                return "break"
            # No stdin mark -- just get the current line, less any prompt
            indices = self.text.tag_nextrange("console", "insert linestart")
            if indices and \
               self.text.compare(indices[0], "<=", "insert linestart"):
                self.recall(self.text.get(indices[1], "insert lineend"), event)
            else:
                self.recall(self.text.get("insert linestart", "insert lineend"), event)
            return "break"
        # If we're between the beginning of the line and the iomark, i.e.
        # in the prompt area, move to the end of the prompt
        if self.text.compare("insert", "<", "iomark"):
            self.text.mark_set("insert", "iomark")
        # If we're in the current input and there's only whitespace
        # beyond the cursor, erase that whitespace first
        s = self.text.get("insert", "end-1c")
        if s and not s.strip():
            self.text.delete("insert", "end-1c")
        # If we're in the current input before its last line,
        # insert a newline right at the insert point
        if self.text.compare("insert", "<", "end-1c linestart"):
            self.newline_and_indent_event(event)
            return "break"
        # We're in the last line; append a newline and submit it
        self.text.mark_set("insert", "end-1c")
        if self.reading:
            self.text.insert("insert", "\n")
            self.text.see("insert")
        else:
            self.newline_and_indent_event(event)
        # Tag the submitted text as stdin so it can be recalled later.
        self.text.tag_add("stdin", "iomark", "end-1c")
        self.text.update_idletasks()
        if self.reading:
            self.top.quit() # Break out of recursive mainloop()
        else:
            self.runit()
        return "break"
    def recall(self, s, event):
        """Paste previous input *s* at the prompt, re-indenting its lines."""
        # remove leading and trailing empty or whitespace lines
        s = re.sub(r'^\s*\n', '' , s)
        s = re.sub(r'\n\s*$', '', s)
        lines = s.split('\n')
        self.text.undo_block_start()
        try:
            self.text.tag_remove("sel", "1.0", "end")
            self.text.mark_set("insert", "end-1c")
            prefix = self.text.get("insert linestart", "insert")
            # If the current line ends with a colon, open an indented
            # continuation line first so the recalled block nests correctly.
            if prefix.rstrip().endswith(':'):
                self.newline_and_indent_event(event)
                prefix = self.text.get("insert linestart", "insert")
            self.text.insert("insert", lines[0].strip())
            if len(lines) > 1:
                orig_base_indent = re.search(r'^([ \t]*)', lines[0]).group(0)
                new_base_indent = re.search(r'^([ \t]*)', prefix).group(0)
                for line in lines[1:]:
                    if line.startswith(orig_base_indent):
                        # replace orig base indentation with new indentation
                        line = new_base_indent + line[len(orig_base_indent):]
                    self.text.insert('insert', '\n'+line.rstrip())
        finally:
            self.text.see("insert")
            self.text.undo_block_stop()
def runit(self):
line = self.text.get("iomark", "end-1c")
# Strip off last newline and surrounding whitespace.
# (To allow you to hit return twice to end a statement.)
i = len(line)
while i > 0 and line[i-1] in " \t":
i = i-1
if i > 0 and line[i-1] == "\n":
i = i-1
while i > 0 and line[i-1] in " \t":
i = i-1
line = line[:i]
self.interp.runsource(line)
    def open_stack_viewer(self, event=None):
        """<<open-stack-viewer>>: browse the last traceback, if any."""
        if self.interp.rpcclt:
            return self.interp.remote_stack_viewer()
        try:
            sys.last_traceback
        except:
            tkMessageBox.showerror("No stack trace",
                "There is no stack trace yet.\n"
                "(sys.last_traceback is not defined)",
                parent=self.text)
            return
        from idlelib.stackviewer import StackBrowser
        StackBrowser(self.root, self.flist)

    def view_restart_mark(self, event=None):
        """Scroll the "restart" marker and the I/O mark into view."""
        self.text.see("iomark")
        self.text.see("restart")

    def restart_shell(self, event=None):
        "Callback for Run/Restart Shell Cntl-F6"
        self.interp.restart_subprocess(with_cwd=True)

    def showprompt(self):
        """Write the prompt and position the cursor for new input."""
        self.resetoutput()
        self.console.write(self.prompt)
        self.text.mark_set("insert", "end-1c")
        self.set_line_and_column()
        self.io.reset_undo()
    def show_warning(self, msg):
        """Wrap *msg* to the shell width and insert it before the prompt."""
        width = self.interp.tkconsole.width
        wrapper = TextWrapper(width=width, tabsize=8, expand_tabs=True)
        wrapped_msg = '\n'.join(wrapper.wrap(msg))
        if not wrapped_msg.endswith('\n'):
            wrapped_msg += '\n'
        # Insert below the undo/colorizer delegators so the warning text is
        # not treated as user input.
        self.per.bottom.insert("iomark linestart", wrapped_msg, "stderr")
    def resetoutput(self):
        """End the current input region and move iomark to a fresh line."""
        source = self.text.get("iomark", "end-1c")
        if self.history:
            # Remember the finished input for Alt-P/Alt-N recall.
            self.history.store(source)
        if self.text.get("end-2c") != "\n":
            self.text.insert("end-1c", "\n")
        self.text.mark_set("iomark", "end-1c")
        self.set_line_and_column()
        self.ctip.remove_calltip_window()

    def write(self, s, tags=()):
        """Write *s* at iomark; raise KeyboardInterrupt if input was canceled."""
        try:
            # Flip iomark's gravity so it stays after the inserted text.
            self.text.mark_gravity("iomark", "right")
            count = OutputWindow.write(self, s, tags, "iomark")
            self.text.mark_gravity("iomark", "left")
        except:
            raise ###pass  # ### 11Aug07 KBK if we are expecting exceptions
            # let's find out what they are and be specific.
        if self.canceled:
            self.canceled = False
            if not use_subprocess:
                raise KeyboardInterrupt
        return count

    def rmenu_check_cut(self):
        """Disable Cut when the selection includes read-only history."""
        try:
            if self.text.compare('sel.first', '<', 'iomark'):
                return 'disabled'
        except TclError: # no selection, so the index 'sel.first' doesn't exist
            return 'disabled'
        return super().rmenu_check_cut()

    def rmenu_check_paste(self):
        """Disable Paste when the cursor is before the input region."""
        if self.text.compare('insert','<','iomark'):
            return 'disabled'
        return super().rmenu_check_paste()
def fix_x11_paste(root):
    """Make <<Paste>> replace the selection on x11. See issue #5124.

    On other windowing systems Tk already behaves this way, so nothing
    is changed there.
    """
    if root._windowingsystem != 'x11':
        return
    for widget_class in ('Text', 'Entry', 'Spinbox'):
        # Read the current binding, then prepend a Tcl snippet that
        # deletes the selection (guarded by catch for no-selection).
        old_script = root.bind_class(widget_class, '<<Paste>>')
        root.bind_class(
            widget_class,
            '<<Paste>>',
            'catch {%W delete sel.first sel.last}\n' + old_script)
usage_msg = """\
USAGE: idle [-deins] [-t title] [file]*
idle [-dns] [-t title] (-c cmd | -r file) [arg]*
idle [-dns] [-t title] - [arg]*
-h print this help message and exit
-n run IDLE without a subprocess (DEPRECATED,
see Help/IDLE Help for details)
The following options will override the IDLE 'settings' configuration:
-e open an edit window
-i open a shell window
The following options imply -i and will open a shell:
-c cmd run the command in a shell, or
-r file run script from file
-d enable the debugger
-s run $IDLESTARTUP or $PYTHONSTARTUP before anything else
-t title set title of shell window
A default edit window will be bypassed when -c, -r, or - are used.
[arg]* are passed to the command (-c) or script (-r) in sys.argv[1:].
Examples:
idle
Open an edit window or shell depending on IDLE's configuration.
idle foo.py foobar.py
Edit the files, also open a shell if configured to start with shell.
idle -est "Baz" foo.py
Run $IDLESTARTUP or $PYTHONSTARTUP, edit foo.py, and open a shell
window with the title "Baz".
idle -c "import sys; print(sys.argv)" "foo"
Open a shell window and run the command, passing "-c" in sys.argv[0]
and "foo" in sys.argv[1].
idle -d -s -r foo.py "Hello World"
Open a shell window, run a startup script, enable the debugger, and
run foo.py, passing "foo.py" in sys.argv[0] and "Hello World" in
sys.argv[1].
echo "import sys; print(sys.argv)" | idle - "foobar"
Open a shell window, run the script piped in, passing '' in sys.argv[0]
and "foobar" in sys.argv[1].
"""
def main():
    """IDLE entry point: parse options, set up Tk, open shell/editor windows."""
    import getopt
    from platform import system
    from idlelib import testing  # bool value
    from idlelib import macosx

    global flist, root, use_subprocess

    capture_warnings(True)
    use_subprocess = True
    enable_shell = False
    enable_edit = False
    debug = False
    cmd = None
    script = None
    startup = False
    try:
        opts, args = getopt.getopt(sys.argv[1:], "c:deihnr:st:")
    except getopt.error as msg:
        print("Error: %s\n%s" % (msg, usage_msg), file=sys.stderr)
        sys.exit(2)
    # Sequential ifs (not elif): several options may be combined.
    for o, a in opts:
        if o == '-c':
            cmd = a
            enable_shell = True
        if o == '-d':
            debug = True
            enable_shell = True
        if o == '-e':
            enable_edit = True
        if o == '-h':
            sys.stdout.write(usage_msg)
            sys.exit()
        if o == '-i':
            enable_shell = True
        if o == '-n':
            print(" Warning: running IDLE without a subprocess is deprecated.",
                  file=sys.stderr)
            use_subprocess = False
        if o == '-r':
            script = a
            if os.path.isfile(script):
                pass
            else:
                print("No script file: ", script)
                sys.exit()
            enable_shell = True
        if o == '-s':
            startup = True
            enable_shell = True
        if o == '-t':
            PyShell.shell_title = a
            enable_shell = True
    if args and args[0] == '-':
        # "-" means read the command from stdin.
        cmd = sys.stdin.read()
        enable_shell = True
    # process sys.argv and sys.path:
    for i in range(len(sys.path)):
        sys.path[i] = os.path.abspath(sys.path[i])
    if args and args[0] == '-':
        sys.argv = [''] + args[1:]
    elif cmd:
        sys.argv = ['-c'] + args
    elif script:
        sys.argv = [script] + args
    elif args:
        enable_edit = True
        pathx = []
        for filename in args:
            pathx.append(os.path.dirname(filename))
        for dir in pathx:
            dir = os.path.abspath(dir)
            if not dir in sys.path:
                sys.path.insert(0, dir)
    else:
        dir = os.getcwd()
        if dir not in sys.path:
            sys.path.insert(0, dir)
    # check the IDLE settings configuration (but command line overrides)
    edit_start = idleConf.GetOption('main', 'General',
                                    'editor-on-startup', type='bool')
    enable_edit = enable_edit or edit_start
    enable_shell = enable_shell or not enable_edit

    # Setup root.  Don't break user code run in IDLE process.
    # Don't change environment when testing.
    if use_subprocess and not testing:
        NoDefaultRoot()
    root = Tk(className="Idle")
    root.withdraw()
    from idlelib.run import fix_scaling
    fix_scaling(root)

    # set application icon
    icondir = os.path.join(os.path.dirname(__file__), 'Icons')
    if system() == 'Windows':
        iconfile = os.path.join(icondir, 'idle.ico')
        root.wm_iconbitmap(default=iconfile)
    elif not macosx.isAquaTk():
        # Tk 8.6 supports PNG; older versions need GIF icons.
        if TkVersion >= 8.6:
            ext = '.png'
            sizes = (16, 32, 48, 256)
        else:
            ext = '.gif'
            sizes = (16, 32, 48)
        iconfiles = [os.path.join(icondir, 'idle_%d%s' % (size, ext))
                     for size in sizes]
        icons = [PhotoImage(master=root, file=iconfile)
                 for iconfile in iconfiles]
        root.wm_iconphoto(True, *icons)

    # start editor and/or shell windows:
    fixwordbreaks(root)
    fix_x11_paste(root)
    flist = PyShellFileList(root)
    macosx.setupApp(root, flist)

    if enable_edit:
        if not (cmd or script):
            for filename in args[:]:
                if flist.open(filename) is None:
                    # filename is a directory actually, disconsider it
                    args.remove(filename)
            if not args:
                flist.new()

    if enable_shell:
        shell = flist.open_shell()
        if not shell:
            return # couldn't open shell
        if macosx.isAquaTk() and flist.dict:
            # On OSX: when the user has double-clicked on a file that causes
            # IDLE to be launched the shell window will open just in front of
            # the file she wants to see. Lower the interpreter window when
            # there are open files.
            shell.top.lower()
    else:
        shell = flist.pyshell

    # Handle remaining options. If any of these are set, enable_shell
    # was set also, so shell must be true to reach here.
    if debug:
        shell.open_debugger()
    if startup:
        filename = os.environ.get("IDLESTARTUP") or \
                   os.environ.get("PYTHONSTARTUP")
        if filename and os.path.isfile(filename):
            shell.interp.execfile(filename)
    if cmd or script:
        # Forward the computed sys.argv into the execution namespace.
        shell.interp.runcommand("""if 1:
            import sys as _sys
            _sys.argv = %r
            del _sys
            \n""" % (sys.argv,))
        if cmd:
            shell.interp.execsource(cmd)
        elif script:
            shell.interp.prepend_syspath(script)
            shell.interp.execfile(script)
    elif shell:
        # If there is a shell window and no cmd or script in progress,
        # check for problematic issues and print warning message(s) in
        # the IDLE shell window; this is less intrusive than always
        # opening a separate window.

        # Warn if using a problematic OS X Tk version.
        tkversionwarning = macosx.tkVersionWarning(root)
        if tkversionwarning:
            shell.show_warning(tkversionwarning)

        # Warn if the "Prefer tabs when opening documents" system
        # preference is set to "Always".
        prefer_tabs_preference_warning = macosx.preferTabsPreferenceWarning()
        if prefer_tabs_preference_warning:
            shell.show_warning(prefer_tabs_preference_warning)

    while flist.inversedict:  # keep IDLE running while files are open.
        root.mainloop()
    root.destroy()
    capture_warnings(False)
if __name__ == "__main__":
main()
capture_warnings(False) # Make sure turned off; see issue 18081
|
bug_state.py | """
This script goes over lists of bugs per predefined bugzilla query and outputs
a CSV with data to be digested elsewhere.
"""
from os import path
from os import remove
from sys import argv
from threading import Thread

import bug_state_statistics
import update_sheet
import common_functions
from bug_state_functions import get_totals
import bug_state_data
import common_data

if '--help' in argv:
    print("{}".format(bug_state_data.HELP))
    exit(0)

# Setting a default name for the CSV file.
LOG_FILE = common_functions.get_log_name(argv, 'bug_state.csv')
# Start from a clean file: the rest of the script only appends.
if path.isfile(LOG_FILE):
    remove(LOG_FILE)

# These lists are globals shared with the worker threads and need a fixed
# size: each worker writes its result into its own RESULTS slot.
THREADS = [None] * (len(common_data.DFGS) * len(common_data.VERSIONS))
RESULTS = [None] * (len(common_data.DFGS) * len(common_data.VERSIONS))

# Spawn one daemon worker per (DFG, version) pair.
THREAD_INDEX = 0
for dfg in common_data.DFGS:
    for version in common_data.VERSIONS:
        STATS = bug_state_statistics.BugStatistics(
            version, dfg, RESULTS, THREAD_INDEX)
        THREADS[THREAD_INDEX] = Thread(target=STATS.main)
        THREADS[THREAD_INDEX].daemon = True
        print("Starting thread for {} in {}".format(dfg, version[0]))
        THREADS[THREAD_INDEX].start()
        THREAD_INDEX += 1

print("Waiting for threads to finish.")
# Iterate the threads directly instead of indexing with range(len(...)).
for worker in THREADS:
    worker.join()

print("Writing to {}".format(LOG_FILE))
# Context managers guarantee the file is closed even if a write fails.
with open(LOG_FILE, "a") as log:
    log.write("{}\n".format(bug_state_data.HEADERS))
    log.write("".join(RESULTS))

for version in common_data.VERSIONS:
    with open(LOG_FILE, "a") as log:
        # get_totals re-reads LOG_FILE, so the per-version rows must
        # already be on disk at this point (they are: written above).
        TOTALS = get_totals(LOG_FILE, version[0])
        log.write("Total averages, {},{}\n".format(version[0], TOTALS))
        log.write("\n{}\n".format(common_functions.get_time_now()))

# Push the finished CSV to the spreadsheet.
UPDATE = update_sheet.UpdateSheet(
    bug_state_data.SHEET,
    common_data.API_SECRET,
    common_data.API_TOKEN,
    LOG_FILE,
    common_data.RANGE,
)
UPDATE()

# Finally
print("DONE!")
|
stim_server_client.py | # Author: Mainak Jas <mainak@neuro.hut.fi>
# License: BSD (3-clause)
import queue
import time
import socket
import socketserver
import threading
import numpy as np
from ..utils import logger, verbose, fill_doc, deprecated
RT_MSG = ('The realtime module is being deprecated from `mne-python` '
'and moved to its own package, `mne-realtime`. '
'To install, please use `$ pip install mne_realtime`.')
class _ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    """Create a threaded TCP server.

    Parameters
    ----------
    server_address : str
        Address on which server is listening
    request_handler_class : subclass of BaseRequestHandler
        _TriggerHandler which defines the handle method
    stim_server : instance of StimServer
        object of StimServer class
    """

    def __init__(self, server_address, request_handler_class,
                 stim_server):  # noqa: D102
        # Basically, this server is the same as a normal TCPServer class
        # except that it has an additional attribute stim_server

        # Create the server and bind it to the desired server address.
        # bind_and_activate=False: binding is deferred until
        # StimServer.__enter__ sets allow_reuse_address and then calls
        # server_bind()/server_activate() itself.
        socketserver.TCPServer.__init__(self, server_address,
                                        request_handler_class,
                                        False)

        self.stim_server = stim_server
class _TriggerHandler(socketserver.BaseRequestHandler):
"""Request handler on the server side."""
def handle(self):
"""Handle requests on the server side."""
self.request.settimeout(None)
while self.server.stim_server._running:
data = self.request.recv(1024) # clip input at 1Kb
data = data.decode() # need to turn it into a string (Py3k)
if data == 'add client':
# Add stim_server._client
client_id = self.server.stim_server \
._add_client(self.client_address[0],
self)
# Instantiate queue for communication between threads
# Note: new queue for each handler
if not hasattr(self, '_tx_queue'):
self._tx_queue = queue.Queue()
self.request.sendall("Client added".encode('utf-8'))
# Mark the client as running
for client in self.server.stim_server._clients:
if client['id'] == client_id:
client['running'] = True
elif data == 'get trigger':
# Pop triggers and send them
if (self._tx_queue.qsize() > 0 and
self.server.stim_server, '_clients'):
trigger = self._tx_queue.get()
self.request.sendall(str(trigger).encode('utf-8'))
else:
self.request.sendall("Empty".encode('utf-8'))
@deprecated(RT_MSG)
class StimServer(object):
    """Stimulation Server.

    Server to communicate with StimClient(s).

    Parameters
    ----------
    port : int
        The port to which the stimulation server must bind to.
    n_clients : int
        The number of clients which will connect to the server.

    See Also
    --------
    StimClient
    """

    def __init__(self, port=4218, n_clients=1):  # noqa: D102
        # Start a threaded TCP server, binding to localhost on specified port.
        # The socket is created but NOT bound/activated here; that happens
        # in __enter__ so allow_reuse_address can be set first.
        self._data = _ThreadedTCPServer(('', port),
                                        _TriggerHandler, self)
        self.n_clients = n_clients

    def __enter__(self):  # noqa: D105
        # This is done to avoid "[Errno 98] Address already in use"
        self._data.allow_reuse_address = True
        self._data.server_bind()
        self._data.server_activate()

        # Start a thread for the server
        self._thread = threading.Thread(target=self._data.serve_forever)

        # Ctrl-C will cleanly kill all spawned threads
        # Once the main thread exits, other threads will exit
        self._thread.daemon = True
        self._thread.start()

        self._running = False
        self._clients = list()
        return self

    def __exit__(self, type, value, traceback):  # noqa: D105
        self.shutdown()

    @verbose
    def start(self, timeout=np.inf, verbose=None):
        """Start the server.

        Parameters
        ----------
        timeout : float
            Maximum time to wait for clients to be added.
        %(verbose)s
        """
        # Start server
        if not self._running:
            logger.info('RtServer: Start')
            self._running = True

            start_time = time.time()  # init delay counter.

            # wait till n_clients are added
            while (len(self._clients) < self.n_clients):
                current_time = time.time()

                # NOTE(review): StopIteration is an unusual choice for a
                # timeout signal; kept for backward compatibility since
                # callers may catch it.
                if (current_time > start_time + timeout):
                    raise StopIteration

                time.sleep(0.1)

    @verbose
    def _add_client(self, ip, sock, verbose=None):
        """Add client.

        Parameters
        ----------
        ip : str
            IP address of the client.
        sock : instance of socket.socket
            The client socket.
        %(verbose)s
        """
        logger.info("Adding client with ip = %s" % ip)

        # ids are assigned sequentially in connection order.
        client = dict(ip=ip, id=len(self._clients), running=False, socket=sock)
        self._clients.append(client)

        return client['id']

    @verbose
    def shutdown(self, verbose=None):
        """Shutdown the client and server.

        Parameters
        ----------
        %(verbose)s
        """
        logger.info("Shutting down ...")

        # stop running all the clients
        # hasattr guard: shutdown() may be called before __enter__ ran.
        if hasattr(self, '_clients'):
            for client in self._clients:
                client['running'] = False

        self._running = False

        self._data.shutdown()
        self._data.server_close()
        self._data.socket.close()

    @verbose
    def add_trigger(self, trigger, verbose=None):
        """Add a trigger.

        Parameters
        ----------
        trigger : int
            The trigger to be added to the queue for sending to StimClient.
        %(verbose_meth)s

        See Also
        --------
        StimClient.get_trigger
        """
        # Fan the trigger out to every connected client's handler queue.
        for client in self._clients:
            client_id = client['id']
            logger.info("Sending trigger %d to client %d"
                        % (trigger, client_id))
            client['socket']._tx_queue.put(trigger)
@fill_doc
@deprecated(RT_MSG)
class StimClient(object):
    """Stimulation Client.

    Client to communicate with StimServer

    Parameters
    ----------
    host : str
        Hostname (or IP address) of the host where StimServer is running.
    port : int
        Port to use for the connection.
    timeout : float
        Communication timeout in seconds.
    %(verbose)s

    See Also
    --------
    StimServer
    """

    @verbose
    def __init__(self, host, port=4218, timeout=5.0,
                 verbose=None):  # noqa: D102
        try:
            logger.info("Setting up client socket")
            self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self._sock.settimeout(timeout)
            self._sock.connect((host, port))

            logger.info("Establishing connection with server")
            data = "add client".encode('utf-8')
            n_sent = self._sock.send(data)
            if n_sent != len(data):
                raise RuntimeError('Could not communicate with server')
            resp = self._sock.recv(1024).decode()  # turn bytes into str (Py3k)

            if resp == 'Client added':
                logger.info("Connection established")
            else:
                raise RuntimeError('Client not added')
        except Exception:
            raise RuntimeError('Setting up acquisition <-> stimulation '
                               'computer connection (host: %s '
                               'port: %d) failed. Make sure StimServer '
                               'is running.' % (host, port))

    def close(self):
        """Close the socket object."""
        self._sock.close()

    @verbose
    def get_trigger(self, timeout=5.0, verbose=None):
        """Get triggers from StimServer.

        Parameters
        ----------
        timeout : float
            maximum time to wait for a valid trigger from the server
        %(verbose_meth)s

        See Also
        --------
        StimServer.add_trigger
        """
        start_time = time.time()  # init delay counter. Will stop iterations

        while True:
            try:
                current_time = time.time()

                # Raise timeout error
                if current_time > (start_time + timeout):
                    logger.info("received nothing")
                    return None

                self._sock.send("get trigger".encode('utf-8'))
                # BUG FIX: recv() returns bytes, so the comparison with the
                # str 'Empty' was always True in Python 3, making
                # int(b'Empty') raise an uncaught ValueError on an empty
                # queue.  Decode first, as __init__ does for its response.
                trigger = self._sock.recv(1024).decode()

                if trigger != 'Empty':
                    logger.info("received trigger %s" % str(trigger))
                    return int(trigger)
            except RuntimeError as err:
                logger.info('Cannot receive triggers: %s' % (err))
|
wallet_multiwallet.py | #!/usr/bin/env python3
# Copyright (c) 2017-2020 The Mantle Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiwallet.
Verify that a mantled node can load multiple wallet files
"""
from decimal import Decimal
from threading import Thread
import os
import shutil
import stat
import time
from test_framework.authproxy import JSONRPCException
from test_framework.test_framework import MantleTestFramework
from test_framework.test_node import ErrorMatch
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
get_rpc_proxy,
)
got_loading_error = False
def test_load_unload(node, name):
global got_loading_error
for _ in range(10):
if got_loading_error:
return
try:
node.loadwallet(name)
node.unloadwallet(name)
except JSONRPCException as e:
if e.error['code'] == -4 and 'Wallet already being loading' in e.error['message']:
got_loading_error = True
return
class MultiWalletTest(MantleTestFramework):
    def set_test_params(self):
        """Two nodes on a clean chain; node0 starts without a default wallet."""
        self.setup_clean_chain = True
        self.num_nodes = 2
        # Generous timeout: multiwallet load/unload churn can be slow on CI.
        self.rpc_timeout = 120
        self.extra_args = [["-nowallet"], []]

    def skip_test_if_missing_module(self):
        """Skip the whole test when the node was built without wallet support."""
        self.skip_if_no_wallet()

    def add_options(self, parser):
        """Add --data_wallets_dir pointing at the bundled wallet fixtures."""
        parser.add_argument(
            '--data_wallets_dir',
            default=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/wallets/'),
            help='Test data with wallet directories (default: %(default)s)',
        )
def run_test(self):
node = self.nodes[0]
data_dir = lambda *p: os.path.join(node.datadir, self.chain, *p)
wallet_dir = lambda *p: data_dir('wallets', *p)
wallet = lambda name: node.get_wallet_rpc(name)
def wallet_file(name):
if name == self.default_wallet_name:
return wallet_dir(self.default_wallet_name, self.wallet_data_filename)
if os.path.isdir(wallet_dir(name)):
return wallet_dir(name, "wallet.dat")
return wallet_dir(name)
assert_equal(self.nodes[0].listwalletdir(), { 'wallets': [{ 'name': self.default_wallet_name }] })
# check wallet.dat is created
self.stop_nodes()
assert_equal(os.path.isfile(wallet_dir(self.default_wallet_name, self.wallet_data_filename)), True)
# create symlink to verify wallet directory path can be referenced
# through symlink
os.mkdir(wallet_dir('w7'))
os.symlink('w7', wallet_dir('w7_symlink'))
os.symlink('..', wallet_dir('recursive_dir_symlink'))
os.mkdir(wallet_dir('self_walletdat_symlink'))
os.symlink('wallet.dat', wallet_dir('self_walletdat_symlink/wallet.dat'))
# rename wallet.dat to make sure plain wallet file paths (as opposed to
# directory paths) can be loaded
# create another dummy wallet for use in testing backups later
self.start_node(0)
node.createwallet("empty")
node.createwallet("plain")
node.createwallet("created")
self.stop_nodes()
empty_wallet = os.path.join(self.options.tmpdir, 'empty.dat')
os.rename(wallet_file("empty"), empty_wallet)
shutil.rmtree(wallet_dir("empty"))
empty_created_wallet = os.path.join(self.options.tmpdir, 'empty.created.dat')
os.rename(wallet_dir("created", self.wallet_data_filename), empty_created_wallet)
shutil.rmtree(wallet_dir("created"))
os.rename(wallet_file("plain"), wallet_dir("w8"))
shutil.rmtree(wallet_dir("plain"))
# restart node with a mix of wallet names:
# w1, w2, w3 - to verify new wallets created when non-existing paths specified
# w - to verify wallet name matching works when one wallet path is prefix of another
# sub/w5 - to verify relative wallet path is created correctly
# extern/w6 - to verify absolute wallet path is created correctly
# w7_symlink - to verify symlinked wallet path is initialized correctly
# w8 - to verify existing wallet file is loaded correctly. Not tested for SQLite wallets as this is a deprecated BDB behavior.
# '' - to verify default wallet file is created correctly
to_create = ['w1', 'w2', 'w3', 'w', 'sub/w5', 'w7_symlink']
in_wallet_dir = [w.replace('/', os.path.sep) for w in to_create] # Wallets in the wallet dir
in_wallet_dir.append('w7') # w7 is not loaded or created, but will be listed by listwalletdir because w7_symlink
to_create.append(os.path.join(self.options.tmpdir, 'extern/w6')) # External, not in the wallet dir, so we need to avoid adding it to in_wallet_dir
to_load = [self.default_wallet_name]
if not self.options.descriptors:
to_load.append('w8')
wallet_names = to_create + to_load # Wallet names loaded in the wallet
in_wallet_dir += to_load # The loaded wallets are also in the wallet dir
self.start_node(0)
for wallet_name in to_create:
self.nodes[0].createwallet(wallet_name)
for wallet_name in to_load:
self.nodes[0].loadwallet(wallet_name)
os.mkdir(wallet_dir('no_access'))
os.chmod(wallet_dir('no_access'), 0)
try:
with self.nodes[0].assert_debug_log(expected_msgs=['Too many levels of symbolic links', 'Error scanning']):
walletlist = self.nodes[0].listwalletdir()['wallets']
finally:
# Need to ensure access is restored for cleanup
os.chmod(wallet_dir('no_access'), stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
assert_equal(sorted(map(lambda w: w['name'], walletlist)), sorted(in_wallet_dir))
assert_equal(set(node.listwallets()), set(wallet_names))
# should raise rpc error if wallet path can't be created
err_code = -4 if self.options.descriptors else -1
assert_raises_rpc_error(err_code, "boost::filesystem::create_directory:", self.nodes[0].createwallet, "w8/bad")
# check that all requested wallets were created
self.stop_node(0)
for wallet_name in wallet_names:
assert_equal(os.path.isfile(wallet_file(wallet_name)), True)
self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" does not exist')
self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" is a relative path', cwd=data_dir())
self.nodes[0].assert_start_raises_init_error(['-walletdir=debug.log'], 'Error: Specified -walletdir "debug.log" is not a directory', cwd=data_dir())
self.start_node(0, ['-wallet=w1', '-wallet=w1'])
self.stop_node(0, 'Warning: Ignoring duplicate -wallet w1.')
if not self.options.descriptors:
# Only BDB doesn't open duplicate wallet files. SQLite does not have this limitation. While this may be desired in the future, it is not necessary
# should not initialize if one wallet is a copy of another
shutil.copyfile(wallet_dir('w8'), wallet_dir('w8_copy'))
in_wallet_dir.append('w8_copy')
exp_stderr = r"BerkeleyDatabase: Can't open database w8_copy \(duplicates fileid \w+ from w8\)"
self.nodes[0].assert_start_raises_init_error(['-wallet=w8', '-wallet=w8_copy'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
# should not initialize if wallet file is a symlink
os.symlink('w8', wallet_dir('w8_symlink'))
self.nodes[0].assert_start_raises_init_error(['-wallet=w8_symlink'], r'Error: Invalid -wallet path \'w8_symlink\'\. .*', match=ErrorMatch.FULL_REGEX)
# should not initialize if the specified walletdir does not exist
self.nodes[0].assert_start_raises_init_error(['-walletdir=bad'], 'Error: Specified -walletdir "bad" does not exist')
# should not initialize if the specified walletdir is not a directory
not_a_dir = wallet_dir('notadir')
open(not_a_dir, 'a', encoding="utf8").close()
self.nodes[0].assert_start_raises_init_error(['-walletdir=' + not_a_dir], 'Error: Specified -walletdir "' + not_a_dir + '" is not a directory')
self.log.info("Do not allow -upgradewallet with multiwallet")
self.nodes[0].assert_start_raises_init_error(['-upgradewallet'], "Error: Error parsing command line arguments: Invalid parameter -upgradewallet")
# if wallets/ doesn't exist, datadir should be the default wallet dir
wallet_dir2 = data_dir('walletdir')
os.rename(wallet_dir(), wallet_dir2)
self.start_node(0)
self.nodes[0].createwallet("w4")
self.nodes[0].createwallet("w5")
assert_equal(set(node.listwallets()), {"w4", "w5"})
w5 = wallet("w5")
node.generatetoaddress(nblocks=1, address=w5.getnewaddress())
# now if wallets/ exists again, but the rootdir is specified as the walletdir, w4 and w5 should still be loaded
os.rename(wallet_dir2, wallet_dir())
self.restart_node(0, ['-nowallet', '-walletdir=' + data_dir()])
self.nodes[0].loadwallet("w4")
self.nodes[0].loadwallet("w5")
assert_equal(set(node.listwallets()), {"w4", "w5"})
w5 = wallet("w5")
w5_info = w5.getwalletinfo()
assert_equal(w5_info['immature_balance'], 50)
competing_wallet_dir = os.path.join(self.options.tmpdir, 'competing_walletdir')
os.mkdir(competing_wallet_dir)
self.restart_node(0, ['-nowallet', '-walletdir=' + competing_wallet_dir])
self.nodes[0].createwallet(self.default_wallet_name)
if self.options.descriptors:
exp_stderr = r"Error: SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another mantled?"
else:
exp_stderr = r"Error: Error initializing wallet database environment \"\S+competing_walletdir\S*\"!"
self.nodes[1].assert_start_raises_init_error(['-walletdir=' + competing_wallet_dir], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
self.restart_node(0)
for wallet_name in wallet_names:
self.nodes[0].loadwallet(wallet_name)
assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), sorted(in_wallet_dir))
wallets = [wallet(w) for w in wallet_names]
wallet_bad = wallet("bad")
# check wallet names and balances
node.generatetoaddress(nblocks=1, address=wallets[0].getnewaddress())
for wallet_name, wallet in zip(wallet_names, wallets):
info = wallet.getwalletinfo()
assert_equal(info['immature_balance'], 50 if wallet is wallets[0] else 0)
assert_equal(info['walletname'], wallet_name)
# accessing invalid wallet fails
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", wallet_bad.getwalletinfo)
# accessing wallet RPC without using wallet endpoint fails
assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
w1, w2, w3, w4, *_ = wallets
node.generatetoaddress(nblocks=101, address=w1.getnewaddress())
assert_equal(w1.getbalance(), 100)
assert_equal(w2.getbalance(), 0)
assert_equal(w3.getbalance(), 0)
assert_equal(w4.getbalance(), 0)
w1.sendtoaddress(w2.getnewaddress(), 1)
w1.sendtoaddress(w3.getnewaddress(), 2)
w1.sendtoaddress(w4.getnewaddress(), 3)
node.generatetoaddress(nblocks=1, address=w1.getnewaddress())
assert_equal(w2.getbalance(), 1)
assert_equal(w3.getbalance(), 2)
assert_equal(w4.getbalance(), 3)
batch = w1.batch([w1.getblockchaininfo.get_request(), w1.getwalletinfo.get_request()])
assert_equal(batch[0]["result"]["chain"], self.chain)
assert_equal(batch[1]["result"]["walletname"], "w1")
self.log.info('Check for per-wallet settxfee call')
assert_equal(w1.getwalletinfo()['paytxfee'], 0)
assert_equal(w2.getwalletinfo()['paytxfee'], 0)
w2.settxfee(0.001)
assert_equal(w1.getwalletinfo()['paytxfee'], 0)
assert_equal(w2.getwalletinfo()['paytxfee'], Decimal('0.00100000'))
self.log.info("Test dynamic wallet loading")
self.restart_node(0, ['-nowallet'])
assert_equal(node.listwallets(), [])
assert_raises_rpc_error(-18, "No wallet is loaded. Load a wallet using loadwallet or create a new one with createwallet. (Note: A default wallet is no longer automatically created)", node.getwalletinfo)
self.log.info("Load first wallet")
loadwallet_name = node.loadwallet(wallet_names[0])
assert_equal(loadwallet_name['name'], wallet_names[0])
assert_equal(node.listwallets(), wallet_names[0:1])
node.getwalletinfo()
w1 = node.get_wallet_rpc(wallet_names[0])
w1.getwalletinfo()
self.log.info("Load second wallet")
loadwallet_name = node.loadwallet(wallet_names[1])
assert_equal(loadwallet_name['name'], wallet_names[1])
assert_equal(node.listwallets(), wallet_names[0:2])
assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
w2 = node.get_wallet_rpc(wallet_names[1])
w2.getwalletinfo()
self.log.info("Concurrent wallet loading")
threads = []
for _ in range(3):
n = node.cli if self.options.usecli else get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)
t = Thread(target=test_load_unload, args=(n, wallet_names[2], ))
t.start()
threads.append(t)
for t in threads:
t.join()
global got_loading_error
assert_equal(got_loading_error, True)
self.log.info("Load remaining wallets")
for wallet_name in wallet_names[2:]:
loadwallet_name = self.nodes[0].loadwallet(wallet_name)
assert_equal(loadwallet_name['name'], wallet_name)
assert_equal(set(self.nodes[0].listwallets()), set(wallet_names))
# Fail to load if wallet doesn't exist
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "wallets")
assert_raises_rpc_error(-18, "Wallet file verification failed. Failed to load database path '{}'. Path does not exist.".format(path), self.nodes[0].loadwallet, 'wallets')
# Fail to load duplicate wallets
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "w1", "wallet.dat")
if self.options.descriptors:
assert_raises_rpc_error(-4, "Wallet file verification failed. SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another mantled?", self.nodes[0].loadwallet, wallet_names[0])
else:
assert_raises_rpc_error(-4, "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(path), self.nodes[0].loadwallet, wallet_names[0])
# This tests the default wallet that BDB makes, so SQLite wallet doesn't need to test this
# Fail to load duplicate wallets by different ways (directory and filepath)
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "wallet.dat")
assert_raises_rpc_error(-4, "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(path), self.nodes[0].loadwallet, 'wallet.dat')
# Only BDB doesn't open duplicate wallet files. SQLite does not have this limitation. While this may be desired in the future, it is not necessary
# Fail to load if one wallet is a copy of another
assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
# Fail to load if one wallet is a copy of another, test this twice to make sure that we don't re-introduce #14304
assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
# Fail to load if wallet file is a symlink
assert_raises_rpc_error(-4, "Wallet file verification failed. Invalid -wallet path 'w8_symlink'", self.nodes[0].loadwallet, 'w8_symlink')
# Fail to load if a directory is specified that doesn't contain a wallet
os.mkdir(wallet_dir('empty_wallet_dir'))
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "empty_wallet_dir")
assert_raises_rpc_error(-18, "Wallet file verification failed. Failed to load database path '{}'. Data is not in recognized format.".format(path), self.nodes[0].loadwallet, 'empty_wallet_dir')
self.log.info("Test dynamic wallet creation.")
# Fail to create a wallet if it already exists.
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "w2")
assert_raises_rpc_error(-4, "Failed to create database path '{}'. Database already exists.".format(path), self.nodes[0].createwallet, 'w2')
# Successfully create a wallet with a new name
loadwallet_name = self.nodes[0].createwallet('w9')
in_wallet_dir.append('w9')
assert_equal(loadwallet_name['name'], 'w9')
w9 = node.get_wallet_rpc('w9')
assert_equal(w9.getwalletinfo()['walletname'], 'w9')
assert 'w9' in self.nodes[0].listwallets()
# Successfully create a wallet using a full path
new_wallet_dir = os.path.join(self.options.tmpdir, 'new_walletdir')
new_wallet_name = os.path.join(new_wallet_dir, 'w10')
loadwallet_name = self.nodes[0].createwallet(new_wallet_name)
assert_equal(loadwallet_name['name'], new_wallet_name)
w10 = node.get_wallet_rpc(new_wallet_name)
assert_equal(w10.getwalletinfo()['walletname'], new_wallet_name)
assert new_wallet_name in self.nodes[0].listwallets()
self.log.info("Test dynamic wallet unloading")
# Test `unloadwallet` errors
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].unloadwallet)
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", self.nodes[0].unloadwallet, "dummy")
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", node.get_wallet_rpc("dummy").unloadwallet)
assert_raises_rpc_error(-8, "Both the RPC endpoint wallet and wallet_name parameter were provided (only one allowed)", w1.unloadwallet, "w2"),
assert_raises_rpc_error(-8, "Both the RPC endpoint wallet and wallet_name parameter were provided (only one allowed)", w1.unloadwallet, "w1"),
# Successfully unload the specified wallet name
self.nodes[0].unloadwallet("w1")
assert 'w1' not in self.nodes[0].listwallets()
# Successfully unload the wallet referenced by the request endpoint
# Also ensure unload works during walletpassphrase timeout
w2.encryptwallet('test')
w2.walletpassphrase('test', 1)
w2.unloadwallet()
time.sleep(1.1)
assert 'w2' not in self.nodes[0].listwallets()
# Successfully unload all wallets
for wallet_name in self.nodes[0].listwallets():
self.nodes[0].unloadwallet(wallet_name)
assert_equal(self.nodes[0].listwallets(), [])
assert_raises_rpc_error(-18, "No wallet is loaded. Load a wallet using loadwallet or create a new one with createwallet. (Note: A default wallet is no longer automatically created)", self.nodes[0].getwalletinfo)
# Successfully load a previously unloaded wallet
self.nodes[0].loadwallet('w1')
assert_equal(self.nodes[0].listwallets(), ['w1'])
assert_equal(w1.getwalletinfo()['walletname'], 'w1')
assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), sorted(in_wallet_dir))
# Test backing up and restoring wallets
self.log.info("Test wallet backup")
self.restart_node(0, ['-nowallet'])
for wallet_name in wallet_names:
self.nodes[0].loadwallet(wallet_name)
for wallet_name in wallet_names:
rpc = self.nodes[0].get_wallet_rpc(wallet_name)
addr = rpc.getnewaddress()
backup = os.path.join(self.options.tmpdir, 'backup.dat')
if os.path.exists(backup):
os.unlink(backup)
rpc.backupwallet(backup)
self.nodes[0].unloadwallet(wallet_name)
shutil.copyfile(empty_created_wallet if wallet_name == self.default_wallet_name else empty_wallet, wallet_file(wallet_name))
self.nodes[0].loadwallet(wallet_name)
assert_equal(rpc.getaddressinfo(addr)['ismine'], False)
self.nodes[0].unloadwallet(wallet_name)
shutil.copyfile(backup, wallet_file(wallet_name))
self.nodes[0].loadwallet(wallet_name)
assert_equal(rpc.getaddressinfo(addr)['ismine'], True)
# Test .walletlock file is closed
self.start_node(1)
wallet = os.path.join(self.options.tmpdir, 'my_wallet')
self.nodes[0].createwallet(wallet)
if self.options.descriptors:
assert_raises_rpc_error(-4, "Unable to obtain an exclusive lock", self.nodes[1].loadwallet, wallet)
else:
assert_raises_rpc_error(-4, "Error initializing wallet database environment", self.nodes[1].loadwallet, wallet)
self.nodes[0].unloadwallet(wallet)
self.nodes[1].loadwallet(wallet)
if __name__ == '__main__':
    # Script entry point: run the functional test via the framework's main().
    MultiWalletTest().main()
|
map_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.map()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from collections import namedtuple
import threading
import time
import warnings
from absl.testing import parameterized
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.experimental.ops import threading_options
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_concat_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import test
def _test_combinations_with_mode_v1(mode):
  """Returns TF 1.x test combinations covering both map variants in `mode`."""

  def apply_new(dataset, *args, **kwargs):
    return dataset.map(*args, **kwargs)

  def apply_legacy(dataset, *args, **kwargs):
    return dataset.map_with_legacy_function(*args, **kwargs)

  named_fns = [
      combinations.NamedObject("map_fn", apply_new),
      combinations.NamedObject("legacy_map_fn", apply_legacy),
  ]
  result = []
  for named_fn in named_fns:
    result += combinations.combine(
        tf_api_version=1, mode=mode, apply_map=named_fn)
  return result
def _test_combinations_with_mode_v2(mode):
  """Returns TF 2.x test combinations for `Dataset.map` in `mode`."""

  def apply_new(dataset, *args, **kwargs):
    return dataset.map(*args, **kwargs)

  named_fn = combinations.NamedObject("map_fn", apply_new)
  return combinations.combine(
      tf_api_version=2, mode=mode, apply_map=named_fn)
def _test_combinations_with_mode(mode):
  """Concatenates the v1 and v2 combinations for one execution mode."""
  v1_combos = _test_combinations_with_mode_v1(mode)
  v2_combos = _test_combinations_with_mode_v2(mode)
  return v1_combos + v2_combos
def _test_combinations():
  """All map-test combinations, across eager then graph mode."""
  result = []
  for mode in ("eager", "graph"):
    result += _test_combinations_with_mode(mode)
  return result
def _short_circuit_test_cases():
  """Builds named (structure, fn) combinations for short-circuit map cases."""
  cases = [
      ("Identity", None, lambda x: x),
      ("Replicate", None, lambda x: (x, x)),
      ("Swap", (None, None), lambda x, y: (y, x)),
      ("Project", (None, None), lambda x, y: x)
  ]
  result = []
  for name, structure, fn in cases:
    result += combinations.combine(
        structure=structure, fn=combinations.NamedObject(name, fn))
  return result
def _make_coordinated_sloppy_dataset(apply_map, num_elements,
                                     num_parallel_calls):
  """Produces a dataset and events to control the order of elements.

  Args:
    apply_map: method that applies the `map` transformation
    num_elements: the number of input elements
    num_parallel_calls: the degree of map parallelism

  Returns:
    A tuple of (dataset, coordination_events): the (non-deterministic) mapped
    dataset, and a dict of per-element `threading.Event`s that can be used to
    control the order of output elements.
  """
  # Set up threading events used to sequence when items are produced that
  # are subsequently interleaved. These events allow us to deterministically
  # simulate slowdowns and force sloppiness.
  coordination_events = {i: threading.Event() for i in range(num_elements)}

  def map_py_fn(x):
    # Block until the test signals element `x` may proceed, then reset the
    # event so it can be reused.
    coordination_events[x].wait()
    coordination_events[x].clear()
    return x * x

  def fn(x):
    return script_ops.py_func(map_py_fn, [x], x.dtype)

  # Disabling determinism allows elements to be produced out of order.
  options = dataset_ops.Options()
  options.experimental_deterministic = False
  dataset = dataset_ops.Dataset.range(num_elements)
  dataset = apply_map(dataset, fn, num_parallel_calls).with_options(options)
  return dataset, coordination_events
class Foo(object):
  """Dummy class used for invalid return value tests."""

  def __init__(self):
    """Instances carry no state; the type itself is the test fixture."""
class MapTest(test_base.DatasetTestBase, parameterized.TestCase):
  def _map_dataset_factory(self, components, apply_map, count):
    """Builds `from_tensor_slices(components)` mapped by square, repeated.

    Also asserts that the map preserves the per-element output shapes.
    """

    def _map_fn(x, y, z):
      return math_ops.square(x), math_ops.square(y), math_ops.square(z)

    dataset = dataset_ops.Dataset.from_tensor_slices(components)
    dataset = apply_map(dataset, _map_fn).repeat(count)
    # Element shapes (minus the leading slice dimension) must survive map.
    self.assertEqual(
        [c.shape[1:] for c in components],
        [shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
    return dataset
  @combinations.generate(_test_combinations())
  def testMapDataset(self, apply_map):
    """Test a dataset that maps a TF function across its input elements."""
    # The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
    # RepeatDataset(count).
    components = (np.arange(7),
                  np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
                  np.array(37.0) * np.arange(7))

    # Test single-threaded access to the iterator.
    get_next = self.getNext(
        self._map_dataset_factory(components, apply_map, count=14))
    for _ in range(14):
      for i in range(7):
        result = self.evaluate(get_next())
        for component, result_component in zip(components, result):
          # Each output must be the elementwise square of the input slice.
          self.assertAllEqual(component[i]**2, result_component)
    # After count * 7 elements the iterator must be exhausted.
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())
# TODO(b/117581999): add eager coverage
  @combinations.generate(_test_combinations_with_mode("graph"))
  def testMapDatasetMultiThreaded(self, apply_map):
    """Tests that one map iterator can be shared by many consumer threads."""
    # Test multi-threaded access to the same iterator.
    components = (np.arange(7),
                  np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
                  np.array(37.0) * np.arange(7))
    get_next = self.getNext(
        self._map_dataset_factory(components, apply_map, count=18))
    results = []
    with self.cached_session() as sess:

      def iterator_thread():
        # Drain the shared iterator until exhaustion; list.append is
        # thread-safe under the GIL.
        while True:
          try:
            results.append(sess.run(get_next()))
          except errors.OutOfRangeError:
            return

      threads = [self.checkedThread(target=iterator_thread) for _ in range(8)]
      for t in threads:
        t.start()
      for t in threads:
        t.join()

      # `results` will contain the same elements components**2
      # repeated 18 times, but in a non-deterministic order. Sort the
      # results, and assert that each element of components**2 is
      # produced 18 times.
      results.sort(key=lambda x: x[0])
      for i in range(7):
        for j in range(18):
          for component, result_component in zip(components,
                                                 results[i * 18 + j]):
            self.assertAllEqual(component[i]**2, result_component)
  def _parallel_map_dataset_factory(self, components, apply_map, count,
                                    num_parallel_calls, buffer_size):
    """Like `_map_dataset_factory` but with parallel map plus a prefetch.

    Also asserts that the map preserves the per-element output shapes.
    """

    def _map_fn(x, y, z):
      return math_ops.square(x), math_ops.square(y), math_ops.square(z)

    dataset = dataset_ops.Dataset.from_tensor_slices(components)
    dataset = apply_map(dataset, _map_fn, num_parallel_calls=num_parallel_calls)
    dataset = dataset.prefetch(buffer_size).repeat(count)

    # Element shapes (minus the leading slice dimension) must survive map.
    self.assertEqual(
        [c.shape[1:] for c in components],
        [shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
    return dataset
  @combinations.generate(
      combinations.times(
          _test_combinations(),
          combinations.combine(num_parallel_calls=1, buffer_size=1) +
          combinations.combine(num_parallel_calls=1, buffer_size=2) +
          combinations.combine(num_parallel_calls=2, buffer_size=2) +
          combinations.combine(num_parallel_calls=2, buffer_size=4) +
          combinations.combine(num_parallel_calls=8, buffer_size=8) +
          combinations.combine(num_parallel_calls=8, buffer_size=16)))
  def testParallelMapDataset(self, apply_map, num_parallel_calls, buffer_size):
    """Test a dataset that maps a TF function across its input elements.

    Exercised across several (num_parallel_calls, buffer_size) settings.
    """
    # The pipeline is TensorSliceDataset -> ParallelMapDataset(square_3) ->
    # RepeatDataset(count).
    components = (np.arange(7),
                  np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
                  np.array(37.0) * np.arange(7))
    # Test single-threaded access to the iterator.
    get_next = self.getNext(
        self._parallel_map_dataset_factory(components, apply_map, 14,
                                           num_parallel_calls, buffer_size))
    for _ in range(14):
      for i in range(7):
        result = self.evaluate(get_next())
        for component, result_component in zip(components, result):
          # Parallelism must not change the values, only the scheduling.
          self.assertAllEqual(component[i]**2, result_component)
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())
# TODO(b/117581999): add eager coverage
  @combinations.generate(
      combinations.times(
          _test_combinations_with_mode("graph"),
          combinations.combine(num_parallel_calls=1, buffer_size=1) +
          combinations.combine(num_parallel_calls=1, buffer_size=2) +
          combinations.combine(num_parallel_calls=2, buffer_size=2) +
          combinations.combine(num_parallel_calls=2, buffer_size=4) +
          combinations.combine(num_parallel_calls=8, buffer_size=8) +
          combinations.combine(num_parallel_calls=8, buffer_size=16)))
  def testParallelMapDatasetMultiThreaded(self, apply_map, num_parallel_calls,
                                          buffer_size):
    """Tests many threads draining one parallel-map iterator concurrently."""

    # Test multi-threaded access to the same iterator.
    components = (np.arange(7),
                  np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
                  np.array(37.0) * np.arange(7))
    get_next = self.getNext(
        self._parallel_map_dataset_factory(components, apply_map, 18,
                                           num_parallel_calls, buffer_size))
    results = []
    with self.cached_session() as sess:

      def iterator_thread():
        # Drain the shared iterator until exhaustion.
        while True:
          try:
            results.append(sess.run(get_next()))
          except errors.OutOfRangeError:
            return

      threads = [self.checkedThread(target=iterator_thread) for _ in range(64)]
      for t in threads:
        t.start()
      for t in threads:
        t.join()

      # `results` will contain the same elements components**2
      # repeated 18 times, but in a non-deterministic order. Sort the
      # results, and assert that each element of components**2 is
      # produced 18 times.
      results.sort(key=lambda x: x[0])
      for i in range(7):
        for j in range(18):
          for component, result_component in zip(components,
                                                 results[i * 18 + j]):
            self.assertAllEqual(component[i]**2, result_component)
  @combinations.generate(_test_combinations())
  def testImplicitDisposeParallelMapDataset(self, apply_map):
    """Tests cleanup of a parallel map pipeline abandoned mid-iteration."""
    # Tests whether a parallel map dataset will be cleaned up correctly when
    # the pipeline does not run it until exhaustion.
    # The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
    # RepeatDataset(1000).
    components = (np.arange(1000),
                  np.array([[1, 2, 3]]) * np.arange(1000)[:, np.newaxis],
                  np.array(37.0) * np.arange(1000))

    dataset = self._parallel_map_dataset_factory(components, apply_map, 1000,
                                                 100, 100)
    # NOTE(mrry): Also test that the prefetching thread is cancelled correctly.
    dataset = dataset.prefetch(100)
    get_next = self.getNext(dataset)

    # Consume only a few elements, then let the iterator go out of scope.
    for _ in range(3):
      self.evaluate(get_next())
  @combinations.generate(_test_combinations())
  def testParallelMapUnspecifiedOutputSize(self, apply_map):
    """Tests parallel map whose op (check_numerics) has no static size info."""
    components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)

    dataset = dataset_ops.Dataset.from_tensor_slices(components)
    dataset = apply_map(
        dataset,
        lambda x: array_ops.check_numerics(x, "message"),
        num_parallel_calls=2)
    get_next = self.getNext(dataset)

    # Only the first 3 elements are read, so the NaN is never evaluated.
    for _ in range(3):
      self.evaluate(get_next())
  @combinations.generate(_test_combinations())
  def testParallelMapError(self, apply_map):
    """Tests that a failing element surfaces its error, then iteration resumes."""
    components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)

    dataset = dataset_ops.Dataset.from_tensor_slices(components)
    dataset = apply_map(
        dataset,
        lambda x: array_ops.check_numerics(x, "message"),
        num_parallel_calls=2)
    get_next = self.getNext(dataset)

    for _ in range(3):
      self.evaluate(get_next())
    # The 4th element is NaN, so `array_ops.check_numerics()` should fail.
    with self.assertRaises(errors.InvalidArgumentError):
      self.evaluate(get_next())
    # The failing element is skipped; the 5th element is still produced.
    self.evaluate(get_next())
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())
  @combinations.generate(_test_combinations())
  def testPrefetchError(self, apply_map):
    """Tests that an error raised under prefetch surfaces at the right element."""
    components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)

    dataset = dataset_ops.Dataset.from_tensor_slices(components)
    dataset = apply_map(
        dataset, lambda x: array_ops.check_numerics(x, "message")).prefetch(2)
    get_next = self.getNext(dataset)

    for _ in range(3):
      self.evaluate(get_next())
    # The 4th element is NaN, so `array_ops.check_numerics()` should fail.
    with self.assertRaises(errors.InvalidArgumentError):
      self.evaluate(get_next())
    # The failing element is skipped; the 5th element is still produced.
    self.evaluate(get_next())
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())
  @combinations.generate(_test_combinations())
  def testCaptureIterator(self, apply_map):
    """Tests a map function that captures and advances another iterator."""

    def _build_ds(iterator):

      def _map_fn(x):
        # Each map call pulls the next value from the captured iterator,
        # pairing range(10) with range(10) element-wise.
        get_next = iterator.get_next()
        return x * get_next

      return apply_map(dataset_ops.Dataset.range(10), _map_fn)

    def _build_graph():
      if context.executing_eagerly():
        captured_iterator = iter(dataset_ops.Dataset.range(10))
      else:
        captured_iterator = dataset_ops.make_initializable_iterator(
            dataset_ops.Dataset.range(10))
      ds = _build_ds(captured_iterator)
      return captured_iterator, ds

    captured_iter, ds = _build_graph()
    if not context.executing_eagerly():
      self.evaluate(captured_iter.initializer)
    get_next = self.getNext(ds, requires_initialization=True)
    for i in range(10):
      # Element i of both ranges is i, so the product is i * i.
      self.assertEqual(i * i, self.evaluate(get_next()))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())
  @combinations.generate(_test_combinations())
  def testCaptureHashTable(self, apply_map):
    """Tests a map function that captures a lookup table resource."""
    # NOTE(mrry): We must use the V2 variants of `HashTable`
    # etc. because these produce a `tf.resource`-typed output that is
    # compatible with the in-graph function implementation.
    default_val = -1
    keys = constant_op.constant(["brain", "salad", "surgery"])
    values = constant_op.constant([0, 1, 2], dtypes.int64)
    table = lookup_ops.HashTable(
        lookup_ops.KeyValueTensorInitializer(keys, values), default_val)

    input_sentences = dataset_ops.Dataset.from_tensor_slices(
        ["brain brain tank salad surgery", "surgery brain"])

    # Tokenize each sentence, then look every token up in the table.
    dataset = apply_map(input_sentences,
                        lambda x: string_ops.string_split([x]).values)
    dataset = apply_map(dataset, table.lookup)

    get_next = self.getNext(dataset, requires_initialization=True)

    self.evaluate(table.initializer)
    self.evaluate(get_next())
    self.evaluate(get_next())
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())
# TODO(b/123904513)
@combinations.generate(_test_combinations_with_mode_v1("graph"))
def testCaptureQueue(self, apply_map):
elements = np.random.randint(100, size=[200])
queue = data_flow_ops.FIFOQueue(200, dtypes.int64, shapes=[])
enqueue_op = queue.enqueue_many(elements)
close_op = queue.close()
dataset = dataset_ops.Dataset.from_tensors(0).repeat(-1)
dataset = apply_map(dataset, lambda _: queue.dequeue())
get_next = self.getNext(dataset, requires_initialization=True)
self.evaluate(enqueue_op)
self.evaluate(close_op)
for element in elements:
self.assertEqual(element, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# TODO(b/117581999): Possible deadlock in eager mode, debug.
@combinations.generate(_test_combinations_with_mode_v1("graph"))
def testCaptureSameResourceMultipleTimes(self, apply_map):
elements = np.random.randint(100, size=[200])
queue = data_flow_ops.FIFOQueue(
200, dtypes.int64, shapes=[], shared_name="shared_queue")
queue_2 = data_flow_ops.FIFOQueue(
200, dtypes.int64, shapes=[], shared_name="shared_queue")
enqueue_op = queue.enqueue_many(elements)
close_op = queue.close()
dataset = dataset_ops.Dataset.from_tensors(0).repeat(-1)
dataset = apply_map(dataset, lambda _: (queue.dequeue(), queue_2.dequeue()))
self.evaluate(enqueue_op)
self.evaluate(close_op)
get_next = self.getNext(dataset, requires_initialization=True)
for i in range(100):
self.assertCountEqual([elements[i * 2], elements[i * 2 + 1]],
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testSeededStatefulOperatorIsProperlyStateful(self, apply_map):
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
fn = lambda _: random_ops.random_uniform((), seed=11)
dataset = apply_map(dataset, fn).batch(2)
get_next = self.getNext(dataset, requires_initialization=True)
random_values = []
with self.assertRaises(errors.OutOfRangeError):
while True:
random_values.extend(self.evaluate(get_next()))
self.assertLen(random_values, 10)
self.assertGreater(np.abs(np.diff(random_values)).max(), 1e-6)
get_next = self.getNext(dataset, requires_initialization=True)
random_values_2 = []
with self.assertRaises(errors.OutOfRangeError):
while True:
random_values_2.extend(self.evaluate(get_next()))
# Randomness is repeatable given same seed
self.assertAllClose(random_values, random_values_2)
@combinations.generate(_test_combinations())
def testStatefulMapKeepsStateAcrossIterators(self, apply_map):
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
fn = lambda _: random_ops.random_uniform((), seed=11)
dataset = apply_map(dataset, fn).repeat(1000).batch(10)
get_next = self.getNext(dataset)
random_values = self.evaluate(get_next())
# Assert that one of the next 99 batches yielded by the iterator is
# different from the first.
i = 0
while i < 99:
if np.any(random_values != self.evaluate(get_next())):
break
i += 1
self.assertLess(i, 99)
@combinations.generate(_test_combinations())
def testStatefulOperationInShortCircuit(self, apply_map):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
def increment_fn(x):
counter_var.assign_add(1)
return x
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, increment_fn)
get_next = self.getNext(dataset, requires_initialization=True)
self.evaluate(counter_var.initializer)
for i in range(10):
self.assertEqual(i, self.evaluate(counter_var))
self.assertEqual(i, self.evaluate(get_next()))
self.assertEqual(10, self.evaluate(counter_var))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertEqual(10, self.evaluate(counter_var))
@combinations.generate(_test_combinations())
def testMapDict(self, apply_map):
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, lambda x: {"foo": x * 2, "bar": x**2})
dataset = apply_map(dataset, lambda d: d["foo"] + d["bar"])
self.assertDatasetProduces(
dataset, expected_output=[i * 2 + i**2 for i in range(10)])
@combinations.generate(_test_combinations())
def testMapNamedtuple(self, apply_map):
# construct dataset of tuples
labels = dataset_ops.Dataset.range(10)
images = apply_map(labels, lambda l: -l)
dataset_tuple = dataset_ops.Dataset.zip((labels, images))
# convert dataset of tuples to dataset of namedtuples
example = namedtuple("Example", ["label", "image"])
dataset_namedtuple = apply_map(dataset_tuple, example)
def preprocess_tuple(label, image):
image = 2 * image
return label, image
def preprocess_namedtuple(example):
return example._replace(image=2 * example.image)
# preprocess both datasets
dataset_tuple = apply_map(dataset_tuple, preprocess_tuple)
dataset_namedtuple = apply_map(dataset_namedtuple, preprocess_namedtuple)
next_tuple = self.getNext(dataset_tuple)
next_namedtuple = self.getNext(dataset_namedtuple)
# make sure both datasets contain the same data
for i in range(10):
tuple_, namedtuple_ = self.evaluate([next_tuple(), next_namedtuple()])
self.assertEqual(tuple_, namedtuple_)
self.assertEqual(tuple_, (i, -2 * i))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_namedtuple())
@combinations.generate(_test_combinations())
def testUseStepContainerInMap(self, apply_map):
row = np.arange(6)
dataset = dataset_ops.Dataset.from_tensors(row)
dataset = apply_map(dataset,
lambda elems: map_fn.map_fn(lambda x: x * x, elems))
self.assertDatasetProduces(dataset, expected_output=[row**2])
@combinations.generate(_test_combinations())
def testCaseAndCondInMap(self, apply_map):
def control_map_fn(x, y):
def multiply():
return x * 2
def divide():
return x // 2
def defaults_two():
return control_flow_ops.cond(
math_ops.equal(math_ops.mod(x, 2), 0),
multiply,
divide,
name="cond_mult")
pred_fn_pairs = [
(math_ops.logical_or(math_ops.equal(y, 2),
math_ops.equal(y, 3)), defaults_two),
]
return control_flow_ops.case(
pred_fn_pairs, default=multiply, exclusive=True)
def build_dataset(row, num):
dataset = dataset_ops.Dataset.from_tensor_slices(row)
return apply_map(dataset, lambda x: control_map_fn(x, num))
row = np.arange(6)
for num in [2, 3, 4]:
get_next = self.getNext(build_dataset(row, num))
for i in range(6):
self.assertEqual(
(i // 2 if i % 2 else i * 2) if (num == 2 or num == 3) else i * 2,
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testCaseInWhileInMap(self, apply_map):
def control_map_fn(x, y):
def multiply():
return x * 2
def divide():
return x // 2
pred_fn_pairs = [
(math_ops.logical_or(math_ops.equal(y, 2),
math_ops.equal(y, 3)), divide),
]
return control_flow_ops.case(
pred_fn_pairs, default=multiply, exclusive=True)
def build_dataset(row, num):
dataset = dataset_ops.Dataset.from_tensors(row)
return apply_map(
dataset,
lambda elems: map_fn.map_fn(lambda x: control_map_fn(x, num), elems))
row = np.arange(6)
for num in [2, 3, 4]:
get_next = self.getNext(build_dataset(row, num))
self.assertAllEqual(
[x // 2 if (num == 2 or num == 3) else x * 2 for x in row],
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testCaseAndCondInWhileInMap(self, apply_map):
def control_map_fn(x, y):
def multiply():
return x * 2
def divide():
return x // 2
def defaults_two():
return control_flow_ops.cond(
math_ops.equal(math_ops.mod(x, 2), 0),
multiply,
divide,
name="cond_mult")
pred_fn_pairs = [
(math_ops.logical_or(math_ops.equal(y, 2),
math_ops.equal(y, 3)), defaults_two),
]
return control_flow_ops.case(
pred_fn_pairs, default=multiply, exclusive=True)
row = np.arange(6)
num = 2
dataset = dataset_ops.Dataset.from_tensors(row)
dataset = apply_map(
dataset,
lambda elems: map_fn.map_fn(lambda x: control_map_fn(x, num), elems))
get_next = self.getNext(dataset)
self.assertAllEqual([(x // 2 if x % 2 else x * 2) if
(num == 2 or num == 3) else x * 2 for x in row],
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testNestedListMapDataset(self, apply_map):
dataset = dataset_ops.Dataset.from_tensors([0, 1, 2]).repeat(10)
dataset = apply_map(dataset, lambda a: ([a[1], a[0] + a[2]], a[1]))
expected_output = [(np.array([1, 2]), 1)] * 10
self.assertDatasetProduces(dataset, expected_output=expected_output)
  @combinations.generate(
      combinations.times(_test_combinations(),
                         combinations.combine(buffer_size=[1, 2, 3, 4])))
  def testPrefetch(self, apply_map, buffer_size):
    """Checks that `prefetch(buffer_size)` runs the map fn ahead of demand."""
    # We will use this event to test that `_map_py_func()` has been invoked a
    # certain number of times (6 times, to be exact) after consuming fewer
    # elements from the iterator.
    ev = threading.Event()
    set_event_during_invocation = 5

    def _map_py_func(x):
      if x == set_event_during_invocation:
        ev.set()
      return x * x

    def _map_fn(x):
      return script_ops.py_func(_map_py_func, [x], x.dtype)

    # We can indirectly observe that varying the buffer size has the intended
    # effect by observing when `ev` is set (on the 6th invocation of
    # `_map_py_func()`).
    # NOTE(mrry): We do not test with `buffer_size ==
    # set_event_during_invocation`, because we must consume at least one element
    # to start the prefetching.
    dataset = dataset_ops.Dataset.range(100)
    dataset = apply_map(dataset, _map_fn).prefetch(buffer_size)
    get_next = self.getNext(dataset)

    event_will_be_set_after_consuming = (
        set_event_during_invocation - buffer_size + 1)

    ev.clear()
    # Until enough elements have been consumed, the prefetch buffer cannot
    # yet have reached element 5, so the event must still be unset.
    for i in range(event_will_be_set_after_consuming):
      self.assertFalse(ev.is_set())
      self.assertEqual(i * i, self.evaluate(get_next()))
    ev.wait()
    # Drain the rest of the dataset and verify the squared values.
    for i in range(event_will_be_set_after_consuming, 100):
      self.assertEqual(i * i, self.evaluate(get_next()))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testReturnList(self, apply_map):
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, lambda x: [x, constant_op.constant(37.0)])
self.assertDatasetProduces(
dataset, expected_output=[(i, 37.0) for i in range(10)])
@combinations.generate(_test_combinations())
def testMultiOutputPyFunc(self, apply_map):
# The `tf.py_func()` op returns a list of tensors for its outputs.
def _map_fn(x_tensor):
def _map_py_func(x):
return x, np.array(37.0, dtype=np.float64)
return script_ops.py_func(
_map_py_func, [x_tensor], [dtypes.int64, dtypes.float64])
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, _map_fn)
self.assertDatasetProduces(
dataset, expected_output=[(i, 37.0) for i in range(10)])
@combinations.generate(_test_combinations())
def testSparse(self, apply_map):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, _sparse)
self.assertDatasetProduces(
dataset, expected_output=[_sparse(i) for i in range(10)])
@combinations.generate(_test_combinations())
def testSparseChain(self, apply_map):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
def _check(i):
self.assertTrue(sparse_tensor.is_sparse(i))
return sparse_ops.sparse_concat(0, [i, i])
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, _sparse)
dataset = apply_map(dataset, _check)
self.assertDatasetProduces(
dataset,
expected_output=[self.evaluate(_check(_sparse(i))) for i in range(10)])
@combinations.generate(_test_combinations_with_mode("eager"))
def testSparseMapShapeInference(self, apply_map):
row_lengths = np.random.randint(0, 4, size=128)
values = np.ones(np.sum(row_lengths))
sparse = ragged_tensor.RaggedTensor.from_row_lengths(
values, row_lengths).to_sparse()
dataset = dataset_ops.Dataset.from_tensor_slices(sparse)
dataset = dataset.batch(32, drop_remainder=True)
dataset = apply_map(dataset, lambda x: x)
self.assertEqual((32, 3), dataset.element_spec.shape)
@combinations.generate(_test_combinations_with_mode("eager"))
def testSparseMapShapeInferencePartial(self, apply_map):
row_lengths = np.random.randint(0, 4, size=128)
values = np.ones(np.sum(row_lengths))
sparse = ragged_tensor.RaggedTensor.from_row_lengths(
values, row_lengths).to_sparse()
dataset = dataset_ops.Dataset.from_tensor_slices(sparse)
dataset = dataset.batch(32, drop_remainder=False)
dataset = apply_map(dataset, lambda x: x)
self.assertEqual([None, 3], dataset.element_spec.shape.as_list())
@combinations.generate(_test_combinations())
def testTensorArray(self, apply_map):
def _tensor_array(i):
i = math_ops.cast(i, dtypes.int32)
return (
tensor_array_ops.TensorArray(dtypes.int32, element_shape=(), size=i)
.unstack(math_ops.range(i, dtype=dtypes.int32)))
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, _tensor_array)
self.assertDatasetProduces(
dataset, expected_output=[list(range(i)) for i in range(10)])
@combinations.generate(_test_combinations())
def testTensorArrayChain(self, apply_map):
def _tensor_array(i):
i = math_ops.cast(i, dtypes.int32)
return (
tensor_array_ops.TensorArray(dtypes.int32, element_shape=(), size=i)
.unstack(math_ops.range(i, dtype=dtypes.int32)))
def _check(x):
self.assertIsInstance(x, tensor_array_ops.TensorArray)
return x.identity()
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, _tensor_array)
dataset = apply_map(dataset, _check)
self.assertDatasetProduces(
dataset,
expected_output=[list(range(i)) for i in range(10)])
@combinations.generate(_test_combinations())
def testRagged(self, apply_map):
def _ragged(i):
return ragged_tensor.RaggedTensor.from_tensor(i * [[1]])
dataset = dataset_ops.Dataset.range(5)
dataset = apply_map(dataset, _ragged)
self.assertDatasetProduces(
dataset,
expected_output=[ragged_factory_ops.constant([[i]]) for i in range(5)])
@combinations.generate(_test_combinations())
def testRaggedChain(self, apply_map):
def _ragged(i):
return ragged_tensor.RaggedTensor.from_tensor(i * [[1]])
def _concat(i):
self.assertTrue(ragged_tensor.is_ragged(i))
return ragged_concat_ops.concat([i, i], 0)
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, _ragged)
dataset = apply_map(dataset, _concat)
self.assertDatasetProduces(
dataset,
expected_output=[
self.evaluate(_concat(ragged_factory_ops.constant([[i]])))
for i in range(10)
])
# TODO(b/123904513)
@combinations.generate(_test_combinations_with_mode_v1("graph"))
def testParallelMapOutOfRangeError(self, apply_map):
def raising_py_func(i):
if i == 100:
raise StopIteration()
else:
return i
dataset = dataset_ops.Dataset.range(105)
dataset = apply_map(
dataset,
lambda x: script_ops.py_func(raising_py_func, [x], dtypes.int64),
num_parallel_calls=2)
get_next = self.getNext(dataset)
for i in range(100):
self.assertEqual(i, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testConstantOutput(self, apply_map):
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, lambda x: [x, "hello", 10])
self.assertDatasetProduces(dataset, [(i, b"hello", 10) for i in range(10)])
@combinations.generate(_test_combinations())
def testWarnOnLookupTable(self, apply_map):
def collecting_function(x):
_ = lookup_ops.HashTable(
lookup_ops.KeyValueTensorInitializer(["a"], [1.]), 0.0, name="t1")
return x
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
dataset = dataset_ops.Dataset.range(10)
_ = apply_map(dataset, collecting_function)
# NOTE(mrry): Python 3 prints other warnings in addition to the one we are
# testing, so we search for the expected warning.
self.assertGreaterEqual(len(w), 1)
found_warning = False
for warning in w:
if ("Creating resources inside a function passed to Dataset.map() is "
"not supported." in str(warning)):
found_warning = True
break
self.assertTrue(found_warning)
@combinations.generate(test_base.default_test_combinations())
def testWarnOnSeedFromOuterGraph(self):
with ops.Graph().as_default() as g:
g.seed = 10
warnings.simplefilter("always")
def _check_warning(caught_warnings, expected_result):
found_warning = False
for warning in caught_warnings:
if ("Explicitly set the seed in the function if this is not the "
"intended behavior" in str(warning)):
found_warning = True
break
self.assertEqual(found_warning, expected_result)
# map_fun doesn't use seed, so no warning is generated.
with warnings.catch_warnings(record=True) as w:
_ = dataset_ops.Dataset.range(10).map(math_ops.square)
_check_warning(w, False)
def random_func(x):
x = math_ops.add(x, 1)
random_ops.random_shuffle([x, math_ops.square(x)])
return x
with warnings.catch_warnings(record=True) as w:
_ = dataset_ops.Dataset.range(10).map(random_func)
_check_warning(w, True)
def random_func_seeded(x):
ops.get_default_graph().seed = None
random_ops.random_shuffle(x)
return x
with warnings.catch_warnings(record=True) as w:
_ = dataset_ops.Dataset.range(10).batch(2).map(random_func_seeded)
_check_warning(w, False)
with warnings.catch_warnings(record=True) as w:
_ = dataset_ops.Dataset.range(10).batch(2).map(
lambda x: random_ops.random_shuffle(x, seed=37))
_check_warning(w, False)
  @combinations.generate(_test_combinations())
  def testNestedDatasetMap(self, apply_map):
    """Checks that a map fn may return a (nested) dataset as its output."""
    dataset = dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0])
    # The first map turns the single vector element into a nested dataset of
    # its scalars; the second batches that nested dataset back into a vector,
    # which `flat_map` then flattens into the outer dataset.
    dataset = apply_map(dataset, dataset_ops.Dataset.from_tensor_slices)
    dataset = apply_map(dataset, lambda ds: ds.batch(3)).flat_map(lambda x: x)
    self.assertDatasetProduces(dataset, expected_output=[[1.0, 2.0, 3.0]])
@combinations.generate(_test_combinations())
def testReturnValueError(self, apply_map):
dataset = dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0])
with self.assertRaisesRegex(
TypeError, r"Unsupported return value from function passed to "
r"Dataset.map\(\)"):
_ = apply_map(dataset, lambda x: Foo)
@combinations.generate(test_base.default_test_combinations())
def testBrokenFunctionErrorOnInitialization(self):
dataset = dataset_ops.Dataset.from_tensor_slices([1.0, 2.0, 3.0])
def broken_function(_):
"""A function deliberately designed to fail on instantiation."""
value = []
tensor_value = attr_value_pb2.AttrValue()
tensor_value.tensor.CopyFrom(
tensor_util.make_tensor_proto(
value, dtype=dtypes.float32, shape=[0], verify_shape=False))
dtype_value = attr_value_pb2.AttrValue(type=dtypes.int32.as_datatype_enum)
# Create a "Const" op with a `tf.float32` value and a `tf.int32` type.
const_tensor = ops.get_default_graph().create_op(
"Const", [], [dtypes.int32],
attrs={
"value": tensor_value,
"dtype": dtype_value
},
name="BrokenConst").outputs[0]
return const_tensor
dataset = dataset.map(broken_function)
self.assertDatasetProduces(
dataset, expected_error=(errors.InvalidArgumentError, "BrokenConst"))
@combinations.generate(
combinations.times(
_test_combinations_with_mode("graph"),
combinations.combine(num_parallel_calls=[None, 12])))
def testNoInterOpParallelism(self, apply_map, num_parallel_calls):
dataset = dataset_ops.Dataset.from_tensors(0)
def _get_tid():
return np.int64(threading.current_thread().ident)
def _map_fn(_):
tids = []
for _ in range(10):
tids.append(script_ops.py_func(_get_tid, [], dtypes.int64))
return tids
dataset = apply_map(dataset, _map_fn)
dataset._variant_tensor.op._set_attr("use_inter_op_parallelism",
attr_value_pb2.AttrValue(b=False))
get_next = self.getNext(dataset)
tids = self.evaluate(get_next())
self.assertTrue(all(tids[0] == tid for tid in tids))
@combinations.generate(
combinations.times(_test_combinations(), _short_circuit_test_cases(),
combinations.combine(num_parallel_calls=[None, 12])))
def testShortCircuit(self, apply_map, structure, fn, num_parallel_calls):
dataset = self.structuredDataset(structure).repeat()
dataset = apply_map(dataset, fn, num_parallel_calls=num_parallel_calls)
get_next = self.getNext(dataset)
if isinstance(structure, tuple):
expected = fn(*self.evaluate(self.structuredElement(structure)))
else:
expected = fn(self.evaluate(self.structuredElement(structure)))
self.assertEqual(expected, self.evaluate(get_next()))
@combinations.generate(
combinations.times(_test_combinations(),
combinations.combine(num_parallel_calls=[None, 12])))
def testShortCircuitCapturedInput(self, apply_map, num_parallel_calls):
captured_t = variables.Variable(42)
dataset = self.structuredDataset(None).repeat()
dataset = apply_map(
dataset, lambda x: captured_t, num_parallel_calls=num_parallel_calls)
self.evaluate(variables.global_variables_initializer())
get_next = self.getNext(dataset, requires_initialization=True)
self.assertEqual(42, self.evaluate(get_next()))
@combinations.generate(
combinations.times(
_test_combinations(),
combinations.combine(num_elements=1, num_parallel_calls=1) +
combinations.combine(num_elements=10, num_parallel_calls=1) +
combinations.combine(num_elements=10, num_parallel_calls=10) +
combinations.combine(num_elements=100, num_parallel_calls=1) +
combinations.combine(num_elements=100, num_parallel_calls=10) +
combinations.combine(num_elements=100, num_parallel_calls=100)))
def testSloppyInterleaveInOrder(self, apply_map, num_elements,
num_parallel_calls):
dataset, coordination_events = _make_coordinated_sloppy_dataset(
apply_map, num_elements, num_parallel_calls)
options = dataset_ops.Options()
options.experimental_threading = threading_options.ThreadingOptions()
options.experimental_threading.private_threadpool_size = (
num_parallel_calls + 1)
dataset = dataset.with_options(options)
get_next = self.getNext(dataset, requires_initialization=True)
for i in range(num_elements):
coordination_events[i].set()
self.assertEqual(i * i, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(
combinations.times(
_test_combinations(),
combinations.combine(num_elements=10, num_parallel_calls=10) +
combinations.combine(num_elements=100, num_parallel_calls=10) +
combinations.combine(num_elements=100, num_parallel_calls=100)))
def testSloppyInterleaveOutOfOrder(self, apply_map, num_elements,
num_parallel_calls):
dataset, coordination_events = _make_coordinated_sloppy_dataset(
apply_map, num_elements, num_parallel_calls)
options = dataset_ops.Options()
options.experimental_threading = threading_options.ThreadingOptions()
options.experimental_threading.private_threadpool_size = (
num_parallel_calls + 1)
dataset = dataset.with_options(options)
get_next = self.getNext(dataset, requires_initialization=True)
elements = [x for x in range(num_elements)]
for i in [1, 4, 7]:
elements[i], elements[i + 1] = elements[i + 1], elements[i]
for element in elements:
coordination_events[element].set()
self.assertEqual(element * element, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(
combinations.combine(
tf_api_version=2,
mode=["eager", "graph"],
num_parallel_calls=[None, 12]))
def testPreserveCardinality(self, num_parallel_calls):
def py_fn(_):
raise StopIteration()
dataset = dataset_ops.Dataset.from_tensors(0).map(
lambda x: script_ops.py_func(py_fn, [x], dtypes.int64),
num_parallel_calls=num_parallel_calls)
get_next = self.getNext(dataset)
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
@combinations.generate(_test_combinations_with_mode("graph"))
def testCollectionCopy(self, apply_map):
w = variable_scope.get_variable("w", [])
self.assertIn(w, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
def func(x):
self.assertIn(w, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
return x
dataset = dataset_ops.Dataset.from_tensors(constant_op.constant(1.0))
_ = apply_map(dataset, func)
  @combinations.generate(
      combinations.times(
          _test_combinations_with_mode_v1("graph"),
          combinations.combine(num_parallel_calls=[None, 12])))
  def testMapCancellation(self, apply_map, num_parallel_calls):
    # Checks that session cancellation is threaded through to the map
    # transformation: the blocked `queue.dequeue()` inside the map fn must
    # not prevent the iteration from being cancelled when the session closes.
    queue = data_flow_ops.FIFOQueue(10, dtypes.int32, ())

    def fn(_):
      # Blocks indefinitely: nothing is ever enqueued.
      return queue.dequeue()

    dataset = dataset_ops.Dataset.range(1)
    dataset = apply_map(dataset, fn, num_parallel_calls=num_parallel_calls)
    get_next = self.getNext(dataset, requires_initialization=True)

    with self.cached_session() as sess:
      thread = self.checkedThread(self.assert_op_cancelled, args=(get_next(),))
      thread.start()
      time.sleep(0.2)
      # Closing the session should cancel the blocked `get_next()` op.
      sess.close()
      thread.join()
# TODO(b/126553094): map doesn't work with a variable defined inside the
# function in eager mode; Graph tensors possibly leak out of the
# function-building context, since variables are created in init_scope.
@combinations.generate(test_base.graph_only_combinations())
def testCreateVariableInsideFunctionWithGetter(self):
def func(_):
with variable_scope.variable_scope(
"variable", reuse=variable_scope.AUTO_REUSE):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
return counter_var.assign_add(1)
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
if hasattr(dataset, "map_with_legacy_function"):
# NOTE: In the legacy function, resource is captured by value.
with self.assertRaisesWithPredicateMatch(
AttributeError, "'Tensor' object has no attribute 'assign_add'"):
dataset.map_with_legacy_function(func)
dataset = dataset.map(func)
self.evaluate(variables.global_variables_initializer())
get_next = self.getNext(dataset, requires_initialization=True)
for i in range(10):
self.assertEqual(i + 1, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testCaptureVariable(self, apply_map):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
dataset = apply_map(dataset, lambda _: counter_var.assign_add(1))
get_next = self.getNext(dataset, requires_initialization=True)
self.evaluate(counter_var.initializer)
for i in range(10):
self.assertEqual(i, self.evaluate(counter_var))
self.assertEqual(i + 1, self.evaluate(get_next()))
self.assertEqual(10, self.evaluate(counter_var))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertEqual(10, self.evaluate(counter_var))
@combinations.generate(_test_combinations_with_mode_v1("graph"))
def testCaptureUninitializedVariableError(self, apply_map):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
dataset = apply_map(dataset, lambda _: counter_var.assign_add(1))
get_next = self.getNext(dataset, requires_initialization=True)
with self.assertRaises(errors.NotFoundError):
self.evaluate(get_next())
# TODO(b/121264236): add eager mode coverage when we have multi-device setup.
@combinations.generate(_test_combinations_with_mode_v1("graph"))
def testCaptureConstantsWithConflictingDevices(self, apply_map):
config = config_pb2.ConfigProto(device_count={"CPU": 3})
with self.cached_session(config=config):
with ops.device("/device:CPU:0"):
a = constant_op.constant(3.0)
with ops.device("/device:CPU:1"):
b = constant_op.constant(5.0)
def func(_):
return math_ops.add(a, b)
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
dataset = apply_map(dataset, func)
expected_output = [8.0] * 10
self.assertDatasetProduces(dataset, expected_output=expected_output)
# TODO(b/121264236): add eager mode coverage when we have multi-device setup.
@combinations.generate(_test_combinations_with_mode_v1("graph"))
def testReferenceVariablesWithMultipleDevices(self, apply_map):
config = config_pb2.ConfigProto(device_count={"CPU": 3})
with self.cached_session(config=config):
def func(_):
with ops.device("/device:CPU:0"):
a = variables.VariableV1(3.0)
with ops.device("/device:CPU:1"):
b = variables.VariableV1(5.0)
return math_ops.add(a, b)
# NOTE: Use the legacy function implementation as eager function will
# convert RefVariables to ResourceVariables.
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
dataset = apply_map(dataset, func)
self.evaluate(variables.global_variables_initializer())
expected_output = [8.0] * 10
self.assertDatasetProduces(
dataset,
expected_output=expected_output,
requires_initialization=True)
# TODO(b/121264236): add eager mode coverage when we have multi-device setup.
@combinations.generate(_test_combinations_with_mode_v1("graph"))
def testResourceVariablesWithMultipleDevices(self, apply_map):
config = config_pb2.ConfigProto(device_count={"CPU": 3})
def func(_):
with variable_scope.variable_scope(
"variable", reuse=variable_scope.AUTO_REUSE):
with ops.device("/device:CPU:0"):
a_var = variable_scope.get_variable(
"a", (), dtypes.int32, use_resource=True)
a_var = math_ops.add(a_var, 1)
with ops.device("/device:CPU:1"):
b_var = variable_scope.get_variable(
"b", (), dtypes.int32, use_resource=True)
return math_ops.add(a_var, b_var)
g = ops.Graph()
with self.session(config=config, graph=g):
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
dataset = apply_map(dataset, func)
self.evaluate(variables.global_variables_initializer())
expected_output = [1] * 10
self.assertDatasetProduces(
dataset,
expected_output=expected_output,
requires_initialization=True)
@combinations.generate(
combinations.times(
_test_combinations(),
combinations.combine(
local_determinism=[None, True, False],
global_determinism=[True, False])))
def testDeterminismConfiguration(self, apply_map, local_determinism,
global_determinism):
expect_determinism = local_determinism or (local_determinism is None and
global_determinism)
elements = list(range(1000))
def dataset_fn(delay_ms):
def sleep(x):
time.sleep(delay_ms / 1000)
return x
def map_function(x):
if math_ops.equal(x, 0):
return script_ops.py_func(sleep, [x], x.dtype)
else:
return x
dataset = dataset_ops.Dataset.from_tensor_slices(elements)
dataset = apply_map(
dataset,
map_function,
num_parallel_calls=2,
deterministic=local_determinism)
opts = dataset_ops.Options()
opts.experimental_deterministic = global_determinism
dataset = dataset.with_options(opts)
return dataset
self.checkDeterminism(
dataset_fn, expect_determinism, expected_elements=elements)
  @combinations.generate(_test_combinations())
  def testNoneComponent(self, apply_map):
    """Checks that `None` components are passed through to the map fn."""
    dataset = dataset_ops.Dataset.from_tensors((42, None))

    def map_function(x, y):
      # `None` is visible as a plain Python value at graph-construction time,
      # so this branch is resolved while tracing the function.
      if y is None:
        return x / 2
      return x

    dataset = apply_map(dataset, map_function)
    self.assertDatasetProduces(dataset, expected_output=[21])
if __name__ == "__main__":
test.main()
|
cluster_monitor_2_test.py | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import parl
from parl.remote.master import Master
from parl.remote.worker import Worker
from parl.remote.monitor import ClusterMonitor
import time
import threading
from parl.remote.client import disconnect
from parl.remote import exceptions
import subprocess
from parl.utils import get_free_tcp_port
@parl.remote_class
class Actor(object):
    """Minimal remote actor used to exercise the PARL cluster monitor.

    Holds two optional attributes with getters/setters, plus a slow method
    and a method that deliberately raises, for exception-path tests.
    """

    def __init__(self, arg1=None, arg2=None):
        self.arg1 = arg1
        self.arg2 = arg2

    def get_arg1(self):
        return self.arg1

    def get_arg2(self):
        return self.arg2

    def set_arg1(self, value):
        self.arg1 = value

    def set_arg2(self, value):
        self.arg2 = value

    def add_one(self, value):
        return value + 1

    def add(self, x, y):
        # Simulate a slow remote computation.
        time.sleep(3)
        return x + y

    def will_raise_exception_func(self):
        # Intentionally trigger ZeroDivisionError.
        1 / 0
class TestClusterMonitor(unittest.TestCase):
    """End-to-end check that ClusterMonitor tracks clients, actors and CPUs.

    Spins up a real Master/Worker pair, so the sleeps below give the
    cluster's periodic status reporting time to propagate — do not shorten
    them without understanding the monitor's update interval.
    """

    def tearDown(self):
        # Drop the client connection so later tests start from a clean slate.
        disconnect()

    def test_add_actor(self):
        port = get_free_tcp_port()
        master = Master(port=port)
        # Master.run blocks, so it gets its own thread.
        th = threading.Thread(target=master.run)
        th.start()
        time.sleep(1)
        # One worker offering a single CPU.
        worker = Worker('localhost:{}'.format(port), 1)
        cluster_monitor = ClusterMonitor('localhost:{}'.format(port))
        time.sleep(1)
        # No client has connected yet.
        self.assertEqual(0, len(cluster_monitor.data['clients']))
        parl.connect('localhost:{}'.format(port))
        time.sleep(10)
        self.assertEqual(1, len(cluster_monitor.data['clients']))
        self.assertEqual(1, cluster_monitor.data['workers'][0]['vacant_cpus'])
        # Creating an actor should consume the worker's only CPU.
        actor = Actor()
        time.sleep(20)
        self.assertEqual(0, cluster_monitor.data['workers'][0]['vacant_cpus'])
        self.assertEqual(1, cluster_monitor.data['workers'][0]['used_cpus'])
        self.assertEqual(1, cluster_monitor.data['clients'][0]['actor_num'])
        # Deleting the actor should eventually free the CPU again.
        del actor
        time.sleep(40)
        self.assertEqual(0, cluster_monitor.data['clients'][0]['actor_num'])
        self.assertEqual(1, cluster_monitor.data['workers'][0]['vacant_cpus'])
        worker.exit()
        master.exit()
# Run the tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
|
client.py | # client.py
#
# Common client. Measure the response rate of the echo server
from socket import *
import time
from threading import Thread
import atexit
import sys
# Message size comes from the first CLI argument; defaults to 1 byte.
if len(sys.argv) > 1:
    MSGSIZE = int(sys.argv[1])
else:
    MSGSIZE = 1
msg = b'x'*MSGSIZE
sock = socket(AF_INET, SOCK_STREAM)
sock.connect(('localhost', 25000))
# Disable Nagle's algorithm so each small request is sent immediately
# instead of being coalesced — we are measuring request/response rate.
sock.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
N = 0  # requests completed in the current one-second window
results = []  # per-second request counts, averaged at exit
def monitor():
    """Once per second, report and reset the shared request counter N."""
    global N
    while True:
        time.sleep(1)
        print(N, 'requests/sec')
        results.append(N)
        # Benign race with the main loop's `N += 1`; precision loss of a
        # request or two per window is acceptable for this benchmark.
        N = 0
# Daemon thread: must not keep the process alive after the main loop exits.
Thread(target=monitor, daemon=True).start()
def print_average():
    """Print the mean requests/sec over all completed one-second windows.

    Guards against `results` being empty (program killed within the first
    second), which previously raised statistics.StatisticsError at exit.
    """
    import statistics
    if results:
        print('Average', statistics.mean(results), 'requests/sec')
    else:
        print('Average unavailable: no full one-second window completed')
atexit.register(print_average)
# Main benchmark loop: send one message, then read back the full echo
# before sending the next (strictly one request in flight).
while True:
    sock.sendall(msg)
    nrecv = 0
    # The echo may arrive split across several TCP segments; keep reading
    # until the whole MSGSIZE-byte response has been consumed.
    while nrecv < MSGSIZE:
        resp = sock.recv(MSGSIZE)
        if not resp:
            # Server closed the connection.
            raise SystemExit()
        nrecv += len(resp)
    N += 1
|
voice_client.py | """
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
Some documentation to refer to:
- Our main web socket (mWS) sends opcode 4 with a guild ID and channel ID.
- The mWS receives VOICE_STATE_UPDATE and VOICE_SERVER_UPDATE.
- We pull the session_id from VOICE_STATE_UPDATE.
- We pull the token, endpoint and server_id from VOICE_SERVER_UPDATE.
- Then we initiate the voice web socket (vWS) pointing to the endpoint.
- We send opcode 0 with the user_id, server_id, session_id and token using the vWS.
- The vWS sends back opcode 2 with an ssrc, port, modes(array) and heartbeat_interval.
- We send a UDP discovery packet to endpoint:port and receive our IP and our port in LE.
- Then we send our IP and port via vWS with opcode 1.
- When that's all done, we receive opcode 4 from the vWS.
- Finally we can transmit data to endpoint:port.
"""
from __future__ import annotations
import asyncio
import socket
import logging
import struct
import threading
import select
import time
from typing import Any, Callable, List, Optional, TYPE_CHECKING, Tuple
from . import opus, utils
from .backoff import ExponentialBackoff
from .gateway import *
from .errors import ClientException, ConnectionClosed, RecordingException
from .player import AudioPlayer, AudioSource
from .sink import Sink, RawData
from .utils import MISSING
if TYPE_CHECKING:
from .client import Client
from .guild import Guild
from .state import ConnectionState
from .user import ClientUser
from .opus import Encoder
from . import abc
from .types.voice import (
GuildVoiceState as GuildVoiceStatePayload,
VoiceServerUpdate as VoiceServerUpdatePayload,
SupportedModes,
)
has_nacl: bool
try:
import nacl.secret # type: ignore
has_nacl = True
except ImportError:
has_nacl = False
__all__ = (
'VoiceProtocol',
'VoiceClient',
)
log: logging.Logger = logging.getLogger(__name__)
class VoiceProtocol:
    """A class that represents the Discord voice protocol.

    This is an abstract class. The library provides a concrete implementation
    under :class:`VoiceClient`.

    This class allows you to implement a protocol to allow for an external
    method of sending voice, such as Lavalink_ or a native library implementation.

    These classes are passed to :meth:`abc.Connectable.connect <VoiceChannel.connect>`.

    .. _Lavalink: https://github.com/freyacodes/Lavalink

    Parameters
    ------------
    client: :class:`Client`
        The client (or its subclasses) that started the connection request.
    channel: :class:`abc.Connectable`
        The voice channel that is being connected to.
    """

    def __init__(self, client: Client, channel: abc.Connectable) -> None:
        self.client: Client = client
        self.channel: abc.Connectable = channel

    async def on_voice_state_update(self, data: GuildVoiceStatePayload) -> None:
        """|coro|

        An abstract method that is called when the client's voice state
        has changed. This corresponds to ``VOICE_STATE_UPDATE``.

        Parameters
        ------------
        data: :class:`dict`
            The raw `voice state payload`__.

            .. _voice_state_update_payload: https://discord.com/developers/docs/resources/voice#voice-state-object

            __ voice_state_update_payload_
        """
        raise NotImplementedError

    async def on_voice_server_update(self, data: VoiceServerUpdatePayload) -> None:
        """|coro|

        An abstract method that is called when initially connecting to voice.
        This corresponds to ``VOICE_SERVER_UPDATE``.

        Parameters
        ------------
        data: :class:`dict`
            The raw `voice server update payload`__.

            .. _voice_server_update_payload: https://discord.com/developers/docs/topics/gateway#voice-server-update-voice-server-update-event-fields

            __ voice_server_update_payload_
        """
        raise NotImplementedError

    async def connect(self, *, timeout: float, reconnect: bool) -> None:
        """|coro|

        An abstract method called when the client initiates the connection request.

        When a connection is requested initially, the library calls the constructor
        under ``__init__`` and then calls :meth:`connect`. If :meth:`connect` fails at
        some point then :meth:`disconnect` is called.

        Within this method, to start the voice connection flow it is recommended to
        use :meth:`Guild.change_voice_state` to start the flow. After which,
        :meth:`on_voice_server_update` and :meth:`on_voice_state_update` will be called.
        The order that these two are called is unspecified.

        Parameters
        ------------
        timeout: :class:`float`
            The timeout for the connection.
        reconnect: :class:`bool`
            Whether reconnection is expected.
        """
        raise NotImplementedError

    async def disconnect(self, *, force: bool) -> None:
        """|coro|

        An abstract method called when the client terminates the connection.

        See :meth:`cleanup`.

        Parameters
        ------------
        force: :class:`bool`
            Whether the disconnection was forced.
        """
        raise NotImplementedError

    def cleanup(self) -> None:
        """This method *must* be called to ensure proper clean-up during a disconnect.

        It is advisable to call this from within :meth:`disconnect` when you are
        completely done with the voice protocol instance.

        This method removes it from the internal state cache that keeps track of
        currently alive voice clients. Failure to clean-up will cause subsequent
        connections to report that it's still connected.
        """
        # Remove ourselves from the ConnectionState's voice-client cache so a
        # later connect on the same key does not think we are still active.
        key_id, _ = self.channel._get_voice_client_key()
        self.client._connection._remove_voice_client(key_id)
class VoiceClient(VoiceProtocol):
    """Represents a Discord voice connection.

    You do not create these, you typically get them from
    e.g. :meth:`VoiceChannel.connect`.

    Warning
    --------
    In order to use PCM based AudioSources, you must have the opus library
    installed on your system and loaded through :func:`opus.load_opus`.
    Otherwise, your AudioSources must be opus encoded (e.g. using :class:`FFmpegOpusAudio`)
    or the library will not be able to transmit audio.

    Attributes
    -----------
    session_id: :class:`str`
        The voice connection session ID.
    token: :class:`str`
        The voice connection token.
    endpoint: :class:`str`
        The endpoint we are connecting to.
    channel: :class:`abc.Connectable`
        The voice channel connected to.
    loop: :class:`asyncio.AbstractEventLoop`
        The event loop that the voice client is running on.
    """

    endpoint_ip: str
    voice_port: int
    secret_key: List[int]
    ssrc: int

    def __init__(self, client: Client, channel: abc.Connectable):
        if not has_nacl:
            raise RuntimeError("PyNaCl library needed in order to use voice")
        super().__init__(client, channel)
        state = client._connection
        self.token: str = MISSING
        self.socket = MISSING
        self.loop: asyncio.AbstractEventLoop = state.loop
        self._state: ConnectionState = state
        # this will be used in the AudioPlayer
        self._connected: threading.Event = threading.Event()
        self._handshaking: bool = False
        self._potentially_reconnecting: bool = False
        self._voice_state_complete: asyncio.Event = asyncio.Event()
        self._voice_server_complete: asyncio.Event = asyncio.Event()
        self.mode: str = MISSING
        self._connections: int = 0
        # RTP sequence/timestamp counters for outgoing audio packets.
        self.sequence: int = 0
        self.timestamp: int = 0
        self.timeout: float = 0
        self._runner: asyncio.Task = MISSING
        self._player: Optional[AudioPlayer] = None
        self.encoder: Encoder = MISSING
        self.decoder = None
        self._lite_nonce: int = 0
        self.ws: DiscordVoiceWebSocket = MISSING
        # Recording state.
        self.paused = False
        self.recording = False
        self.user_timestamps = {}
        self.sink = None
        self.starting_time = None
        self.stopping_time = None

    warn_nacl = not has_nacl
    # Encryption modes we can negotiate, in preference order.
    supported_modes: Tuple[SupportedModes, ...] = (
        'xsalsa20_poly1305_lite',
        'xsalsa20_poly1305_suffix',
        'xsalsa20_poly1305',
    )

    @property
    def guild(self) -> Optional[Guild]:
        """Optional[:class:`Guild`]: The guild we're connected to, if applicable."""
        return getattr(self.channel, 'guild', None)

    @property
    def user(self) -> ClientUser:
        """:class:`ClientUser`: The user connected to voice (i.e. ourselves)."""
        return self._state.user

    def checked_add(self, attr, value, limit):
        # Increment `attr` by `value`, resetting to 0 once `limit` would be
        # exceeded (wrap behavior used for RTP counters and the lite nonce).
        val = getattr(self, attr)
        if val + value > limit:
            setattr(self, attr, 0)
        else:
            setattr(self, attr, val + value)

    # connection related

    async def on_voice_state_update(self, data: GuildVoiceStatePayload) -> None:
        self.session_id = data['session_id']
        channel_id = data['channel_id']
        if not self._handshaking or self._potentially_reconnecting:
            # If we're done handshaking then we just need to update ourselves
            # If we're potentially reconnecting due to a 4014, then we need to differentiate
            # a channel move and an actual force disconnect
            if channel_id is None:
                # We're being disconnected so cleanup
                await self.disconnect()
            else:
                guild = self.guild
                self.channel = channel_id and guild and guild.get_channel(int(channel_id))  # type: ignore
        else:
            self._voice_state_complete.set()

    async def on_voice_server_update(self, data: VoiceServerUpdatePayload) -> None:
        if self._voice_server_complete.is_set():
            log.info('Ignoring extraneous voice server update.')
            return
        self.token = data.get('token')
        self.server_id = int(data['guild_id'])
        endpoint = data.get('endpoint')
        if endpoint is None or self.token is None:
            log.warning('Awaiting endpoint... This requires waiting. ' \
                        'If timeout occurred considering raising the timeout and reconnecting.')
            return
        # Strip the port off the endpoint; the port is provided separately.
        self.endpoint, _, _ = endpoint.rpartition(':')
        if self.endpoint.startswith('wss://'):
            # Just in case, strip it off since we're going to add it later
            self.endpoint = self.endpoint[6:]
        # This gets set later
        self.endpoint_ip = MISSING
        # Non-blocking UDP socket used for actual voice data.
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.socket.setblocking(False)
        if not self._handshaking:
            # If we're not handshaking then we need to terminate our previous connection in the websocket
            await self.ws.close(4000)
            return
        self._voice_server_complete.set()

    async def voice_connect(self) -> None:
        # Ask the main gateway to move us into the target channel.
        await self.channel.guild.change_voice_state(channel=self.channel)

    async def voice_disconnect(self) -> None:
        log.info('The voice handshake is being terminated for Channel ID %s (Guild ID %s)', self.channel.id, self.guild.id)
        await self.channel.guild.change_voice_state(channel=None)

    def prepare_handshake(self) -> None:
        self._voice_state_complete.clear()
        self._voice_server_complete.clear()
        self._handshaking = True
        log.info('Starting voice handshake... (connection attempt %d)', self._connections + 1)
        self._connections += 1

    def finish_handshake(self) -> None:
        log.info('Voice handshake complete. Endpoint found %s', self.endpoint)
        self._handshaking = False
        self._voice_server_complete.clear()
        self._voice_state_complete.clear()

    async def connect_websocket(self) -> DiscordVoiceWebSocket:
        ws = await DiscordVoiceWebSocket.from_client(self)
        self._connected.clear()
        # Pump events until SESSION_DESCRIPTION gives us the secret key.
        while ws.secret_key is None:
            await ws.poll_event()
        self._connected.set()
        return ws

    async def connect(self, *, reconnect: bool, timeout: float) -> None:
        log.info('Connecting to voice...')
        self.timeout = timeout
        # Up to 5 handshake attempts before giving up.
        for i in range(5):
            self.prepare_handshake()
            # This has to be created before we start the flow.
            futures = [
                self._voice_state_complete.wait(),
                self._voice_server_complete.wait(),
            ]
            # Start the connection flow
            await self.voice_connect()
            try:
                await utils.sane_wait_for(futures, timeout=timeout)
            except asyncio.TimeoutError:
                await self.disconnect(force=True)
                raise
            self.finish_handshake()
            try:
                self.ws = await self.connect_websocket()
                break
            except (ConnectionClosed, asyncio.TimeoutError):
                if reconnect:
                    log.exception('Failed to connect to voice... Retrying...')
                    await asyncio.sleep(1 + i * 2.0)
                    await self.voice_disconnect()
                    continue
                else:
                    raise
        if self._runner is MISSING:
            self._runner = self.loop.create_task(self.poll_voice_ws(reconnect))

    async def potential_reconnect(self) -> bool:
        # Attempt to stop the player thread from playing early
        self._connected.clear()
        self.prepare_handshake()
        self._potentially_reconnecting = True
        try:
            # We only care about VOICE_SERVER_UPDATE since VOICE_STATE_UPDATE can come before we get disconnected
            await asyncio.wait_for(self._voice_server_complete.wait(), timeout=self.timeout)
        except asyncio.TimeoutError:
            self._potentially_reconnecting = False
            await self.disconnect(force=True)
            return False
        self.finish_handshake()
        self._potentially_reconnecting = False
        try:
            self.ws = await self.connect_websocket()
        except (ConnectionClosed, asyncio.TimeoutError):
            return False
        else:
            return True

    @property
    def latency(self) -> float:
        """:class:`float`: Latency between a HEARTBEAT and a HEARTBEAT_ACK in seconds.

        This could be referred to as the Discord Voice WebSocket latency and is
        an analogue of user's voice latencies as seen in the Discord client.

        .. versionadded:: 1.4
        """
        ws = self.ws
        return float("inf") if not ws else ws.latency

    @property
    def average_latency(self) -> float:
        """:class:`float`: Average of most recent 20 HEARTBEAT latencies in seconds.

        .. versionadded:: 1.4
        """
        ws = self.ws
        return float("inf") if not ws else ws.average_latency

    async def poll_voice_ws(self, reconnect: bool) -> None:
        # Main read loop for the voice websocket; reconnects with exponential
        # backoff on unexpected disconnects.
        backoff = ExponentialBackoff()
        while True:
            try:
                await self.ws.poll_event()
            except (ConnectionClosed, asyncio.TimeoutError) as exc:
                if isinstance(exc, ConnectionClosed):
                    # The following close codes are undocumented so I will document them here.
                    # 1000 - normal closure (obviously)
                    # 4014 - voice channel has been deleted.
                    # 4015 - voice server has crashed
                    if exc.code in (1000, 4015):
                        log.info('Disconnecting from voice normally, close code %d.', exc.code)
                        await self.disconnect()
                        break
                    if exc.code == 4014:
                        log.info('Disconnected from voice by force... potentially reconnecting.')
                        successful = await self.potential_reconnect()
                        if not successful:
                            log.info('Reconnect was unsuccessful, disconnecting from voice normally...')
                            await self.disconnect()
                            break
                        else:
                            continue
                if not reconnect:
                    await self.disconnect()
                    raise
                retry = backoff.delay()
                log.exception('Disconnected from voice... Reconnecting in %.2fs.', retry)
                self._connected.clear()
                await asyncio.sleep(retry)
                await self.voice_disconnect()
                try:
                    await self.connect(reconnect=True, timeout=self.timeout)
                except asyncio.TimeoutError:
                    # at this point we've retried 5 times... let's continue the loop.
                    log.warning('Could not connect to voice... Retrying...')
                    continue

    async def disconnect(self, *, force: bool = False) -> None:
        """|coro|

        Disconnects this voice client from voice.
        """
        if not force and not self.is_connected():
            return
        self.stop()
        self._connected.clear()
        try:
            if self.ws:
                await self.ws.close()
            await self.voice_disconnect()
        finally:
            # Always purge ourselves from the state cache and close the UDP
            # socket, even if the websocket close raised.
            self.cleanup()
            if self.socket:
                self.socket.close()

    async def move_to(self, channel: abc.Snowflake) -> None:
        """|coro|

        Moves you to a different voice channel.

        Parameters
        -----------
        channel: :class:`abc.Snowflake`
            The channel to move to. Must be a voice channel.
        """
        await self.channel.guild.change_voice_state(channel=channel)

    def is_connected(self) -> bool:
        """Indicates if the voice client is connected to voice."""
        return self._connected.is_set()

    # audio related

    def _get_voice_packet(self, data):
        header = bytearray(12)
        # Formulate rtp header
        header[0] = 0x80
        header[1] = 0x78
        struct.pack_into('>H', header, 2, self.sequence)
        struct.pack_into('>I', header, 4, self.timestamp)
        struct.pack_into('>I', header, 8, self.ssrc)
        # Dispatch to the negotiated encryption mode.
        encrypt_packet = getattr(self, '_encrypt_' + self.mode)
        return encrypt_packet(header, data)

    def _encrypt_xsalsa20_poly1305(self, header: bytes, data) -> bytes:
        # Nonce is the 12-byte RTP header zero-padded to 24 bytes.
        box = nacl.secret.SecretBox(bytes(self.secret_key))
        nonce = bytearray(24)
        nonce[:12] = header
        return header + box.encrypt(bytes(data), bytes(nonce)).ciphertext

    def _encrypt_xsalsa20_poly1305_suffix(self, header: bytes, data) -> bytes:
        # Random nonce appended to the packet.
        box = nacl.secret.SecretBox(bytes(self.secret_key))
        nonce = nacl.utils.random(nacl.secret.SecretBox.NONCE_SIZE)
        return header + box.encrypt(bytes(data), nonce).ciphertext + nonce

    def _encrypt_xsalsa20_poly1305_lite(self, header: bytes, data) -> bytes:
        # 4-byte incrementing nonce appended to the packet.
        box = nacl.secret.SecretBox(bytes(self.secret_key))
        nonce = bytearray(24)
        nonce[:4] = struct.pack('>I', self._lite_nonce)
        self.checked_add('_lite_nonce', 1, 4294967295)
        return header + box.encrypt(bytes(data), bytes(nonce)).ciphertext + nonce[:4]

    def _decrypt_xsalsa20_poly1305(self, header, data):
        box = nacl.secret.SecretBox(bytes(self.secret_key))
        nonce = bytearray(24)
        nonce[:12] = header
        return self.strip_header_ext(box.decrypt(bytes(data), bytes(nonce)))

    def _decrypt_xsalsa20_poly1305_suffix(self, header, data):
        box = nacl.secret.SecretBox(bytes(self.secret_key))
        nonce_size = nacl.secret.SecretBox.NONCE_SIZE
        nonce = data[-nonce_size:]
        return self.strip_header_ext(box.decrypt(bytes(data[:-nonce_size]), nonce))

    def _decrypt_xsalsa20_poly1305_lite(self, header, data):
        box = nacl.secret.SecretBox(bytes(self.secret_key))
        nonce = bytearray(24)
        nonce[:4] = data[-4:]
        data = data[:-4]
        return self.strip_header_ext(box.decrypt(bytes(data), bytes(nonce)))

    @staticmethod
    def strip_header_ext(data):
        # Drop the RTP one-byte header extension (0xBEDE profile) if present.
        if data[0] == 0xbe and data[1] == 0xde and len(data) > 4:
            _, length = struct.unpack_from('>HH', data)
            offset = 4 + length * 4
            data = data[offset:]
        return data

    def get_ssrc(self, user_id):
        # Reverse lookup: user id -> ssrc, via the websocket's ssrc map.
        return {info['user_id']: ssrc for ssrc, info in self.ws.ssrc_map.items()}[user_id]

    def play(self, source: AudioSource, *, after: Optional[Callable[[Optional[Exception]], Any]] = None) -> None:
        """Plays an :class:`AudioSource`.

        The finalizer, ``after`` is called after the source has been exhausted
        or an error occurred.

        If an error happens while the audio player is running, the exception is
        caught and the audio player is then stopped. If no after callback is
        passed, any caught exception will be displayed as if it were raised.

        Parameters
        -----------
        source: :class:`AudioSource`
            The audio source we're reading from.
        after: Callable[[Optional[:class:`Exception`]], Any]
            The finalizer that is called after the stream is exhausted.
            This function must have a single parameter, ``error``, that
            denotes an optional exception that was raised during playing.

        Raises
        -------
        ClientException
            Already playing audio or not connected.
        TypeError
            Source is not a :class:`AudioSource` or after is not a callable.
        OpusNotLoaded
            Source is not opus encoded and opus is not loaded.
        """
        if not self.is_connected():
            raise ClientException('Not connected to voice.')
        if self.is_playing():
            raise ClientException('Already playing audio.')
        if not isinstance(source, AudioSource):
            raise TypeError(f'source must be an AudioSource not {source.__class__.__name__}')
        # Only PCM sources need our own opus encoder.
        if not self.encoder and not source.is_opus():
            self.encoder = opus.Encoder()
        self._player = AudioPlayer(source, self, after=after)
        self._player.start()

    def is_playing(self) -> bool:
        """Indicates if we're currently playing audio."""
        return self._player is not None and self._player.is_playing()

    def is_paused(self) -> bool:
        """Indicates if we're playing audio, but if we're paused."""
        return self._player is not None and self._player.is_paused()

    def stop(self) -> None:
        """Stops playing audio."""
        if self._player:
            self._player.stop()
            self._player = None

    def pause(self) -> None:
        """Pauses the audio playing."""
        if self._player:
            self._player.pause()

    def resume(self) -> None:
        """Resumes the audio playing."""
        if self._player:
            self._player.resume()

    @property
    def source(self) -> Optional[AudioSource]:
        """Optional[:class:`AudioSource`]: The audio source being played, if playing.

        This property can also be used to change the audio source currently being played.
        """
        return self._player.source if self._player else None

    @source.setter
    def source(self, value: AudioSource) -> None:
        if not isinstance(value, AudioSource):
            raise TypeError(f'expected AudioSource not {value.__class__.__name__}.')
        if self._player is None:
            raise ValueError('Not playing anything.')
        self._player._set_source(value)

    def send_audio_packet(self, data: bytes, *, encode: bool = True) -> None:
        """Sends an audio packet composed of the data.

        You must be connected to play audio.

        Parameters
        ----------
        data: :class:`bytes`
            The :term:`py:bytes-like object` denoting PCM or Opus voice data.
        encode: :class:`bool`
            Indicates if ``data`` should be encoded into Opus.

        Raises
        -------
        ClientException
            You are not connected.
        opus.OpusError
            Encoding the data failed.
        """
        self.checked_add('sequence', 1, 65535)
        if encode:
            encoded_data = self.encoder.encode(data, self.encoder.SAMPLES_PER_FRAME)
        else:
            encoded_data = data
        packet = self._get_voice_packet(encoded_data)
        try:
            self.socket.sendto(packet, (self.endpoint_ip, self.voice_port))
        except BlockingIOError:
            log.warning('A packet has been dropped (seq: %s, timestamp: %s)', self.sequence, self.timestamp)
        self.checked_add('timestamp', opus.Encoder.SAMPLES_PER_FRAME, 4294967295)

    def unpack_audio(self, data):
        """Takes an audio packet received from Discord and decodes it into pcm audio data.
        If there are no users talking in the channel, `None` will be returned.

        You must be connected to receive audio.

        Parameters
        ---------
        data: :class:`bytes`
            Bytes received by Discord via the UDP connection used for sending and receiving voice data.
        """
        if 200 <= data[1] <= 204:
            # RTCP received.
            # RTCP provides information about the connection
            # as opposed to actual audio data, so it's not
            # important at the moment.
            return
        if self.paused:
            return
        data = RawData(data, self)
        if data.decrypted_data == b'\xf8\xff\xfe':  # Frame of silence
            return
        self.decoder.decode(data)

    def start_recording(self, sink, callback, *args):
        """The bot will begin recording audio from the current voice channel it is in.
        This function uses a thread so the current code line will not be stopped.
        Must be in a voice channel to use.
        Must not be already recording.

        Parameters
        ----------
        sink: :class:`Sink`
            A Sink which will "store" all the audio data.
        callback: :class:`asynchronous function`
            A function which is called after the bot has stopped recording.
        *args:
            Args which will be passed to the callback function.

        Raises
        ------
        RecordingException
            Not connected to a voice channel.
        RecordingException
            Already recording.
        RecordingException
            Must provide a Sink object.
        """
        if not self.is_connected():
            raise RecordingException('Not connected to voice channel.')
        if self.recording:
            raise RecordingException("Already recording.")
        # NOTE(review): the Sink type check below was deliberately disabled
        # upstream; left as-is to allow duck-typed sinks.
        #if not isinstance(sink, Sink):
        #    raise RecordingException("Must provide a Sink object.")
        # Drain stale packets so the recording starts clean.
        self.empty_socket()
        self.decoder = opus.DecodeManager(self)
        self.decoder.start()
        self.recording = True
        self.sink = sink
        sink.init(self)
        t = threading.Thread(target=self.recv_audio, args=(sink, callback, *args,))
        t.start()

    def stop_recording(self):
        """Stops the recording.
        Must be already recording.

        Raises
        ------
        RecordingException
            Not currently recording.
        """
        if not self.recording:
            raise RecordingException("Not currently recording audio.")
        self.decoder.stop()
        self.recording = False
        self.paused = False

    def toggle_pause(self):
        """Pauses or unpauses the recording.
        Must be already recording.

        Raises
        ------
        RecordingException
            Not currently recording.
        """
        if not self.recording:
            raise RecordingException("Not currently recording audio.")
        self.paused = not self.paused

    def empty_socket(self):
        # Discard any datagrams already queued on the UDP socket.
        while True:
            ready, _, _ = select.select([self.socket], [], [], 0.0)
            if not ready:
                break
            for s in ready:
                s.recv(4096)

    def recv_audio(self, sink, callback, *args):
        # Gets data from _recv_audio and sorts
        # it by user, handles pcm files and
        # silence that should be added.
        self.user_timestamps = {}
        self.starting_time = time.perf_counter()
        while self.recording:
            ready, _, err = select.select([self.socket], [],
                                          [self.socket], 0.01)
            if not ready:
                if err:
                    print(f"Socket error: {err}")
                continue
            try:
                data = self.socket.recv(4096)
            except OSError:
                # Socket closed underneath us; end the recording cleanly.
                self.stop_recording()
                continue
            self.unpack_audio(data)
        self.stopping_time = time.perf_counter()
        self.sink.cleanup()
        # Hand the finished sink to the user's async callback on the loop.
        callback = asyncio.run_coroutine_threadsafe(callback(self.sink, *args), self.loop)
        result = callback.result()
        if result is not None:
            print(result)

    def recv_decoded_audio(self, data):
        # Group decoded audio by user (ssrc) and pad with silence so each
        # user's stream stays aligned in time.
        if data.ssrc not in self.user_timestamps:
            self.user_timestamps.update({data.ssrc: data.timestamp})
            # Add silence of when they were not being recorded.
            data.decoded_data = struct.pack('<h', 0) * round(
                self.decoder.CHANNELS * self.decoder.SAMPLING_RATE * (time.perf_counter() - self.starting_time)
            ) + data.decoded_data
        else:
            # BUGFIX: compute the gap against the *previous* timestamp before
            # overwriting it. The old code updated the stored timestamp first,
            # which made `silence` always -960; multiplying bytes by a
            # negative yields b'', so no silence was ever inserted. Clamp to
            # >= 0 so out-of-order packets don't produce negative gaps.
            silence = max(0, data.timestamp - self.user_timestamps[data.ssrc] - 960)
            data.decoded_data = struct.pack('<h', 0) * silence + data.decoded_data
            self.user_timestamps[data.ssrc] = data.timestamp
        # Wait until the websocket has learned which user owns this ssrc.
        while data.ssrc not in self.ws.ssrc_map:
            time.sleep(0.05)
        self.sink.write(data.decoded_data, self.ws.ssrc_map[data.ssrc]['user_id'])
|
StreamWebVideo.py | # import the necessary packages
from threading import Thread
import cv2
import time
from CameraMemory import CameraMemory
import logging
"""
Implement a URL based video stream.
Note: for some reason, reading / writing OPENCV cv2.CAP_PROPS* gives an error using
Python 3.7 and OpenCV 4.1. For now, don't support settings.
"""
class StreamWebVideo: #----------------------
    """URL-based (e.g. HTTP/RTSP) video stream reader.

    Frames are read on a daemon thread and pushed into a CameraMemory buffer
    as ``(frame_number, frame)`` tuples. Sentinels written to the buffer:
    ``(-2, None)`` — the URL could not be opened; ``(-1, None)`` — the
    stream ended.
    """

    def __init__(self, url, camera_settings, camera_memory):
        self.name = "StreamWebVideo:%s" % (url)
        self.logger = logging.getLogger(__name__)
        self.webcamera = cv2.VideoCapture(url)
        time.sleep(0.2)
        self.camera_memory = CameraMemory(camera_memory)
        # do not mess with settings on web stream. Causes many problems.
        # self.camera_settings = CameraSettings(self.webcamera,camera_settings)
        self.starttime = 0
        self.stoptime = 0
        self.stopped = False
        self.frame_count = 0
        if not self.webcamera.isOpened():
            self.logger.error(self.name + " couldn't open url :" + url)
            self.camera_memory.write((-2, None))

    def settings(self, camera_settings=None):
        # NOTE(review): self.camera_settings is never assigned (the line in
        # __init__ is intentionally commented out for web streams), so this
        # raises AttributeError if called — confirm whether it should be
        # removed or guarded.
        ret = self.camera_settings.settings(self.webcamera, camera_settings)
        time.sleep(0.1)
        return ret

    def memory(self, camera_memory=None):
        """Read or reconfigure the CameraMemory buffer."""
        ret = self.camera_memory.memory(camera_memory)
        time.sleep(0.1)
        return ret

    def start(self):
        """Start the background reader thread and return self for chaining."""
        # start a thread to read frames from the file video stream
        t = Thread(target=self.update, args=())
        t.daemon = True
        self.starttime = time.time()
        t.start()
        time.sleep(1)
        return self

    def update(self):
        """Reader loop: pull frames until stopped or the stream ends."""
        self.stopped = False
        while True:
            if self.stopped:
                break
            (ret, frame) = self.webcamera.read()
            if not ret:
                # End of stream (or read error): signal consumers.
                self.camera_memory.write((-1, None))
                break
            # always store frame number with frame as Tuple (,)
            self.frame_count += 1
            self.camera_memory.write((self.frame_count, frame))
        self.stoptime = time.time()

    def read(self):
        """Return the latest ``(frame_number, frame)`` tuple from the buffer."""
        return self.camera_memory.read()

    def stop(self):
        # indicate that the thread should be stopped
        self.stopped = True
        time.sleep(0.2)
        # BUGFIX: actually invoke release(); the previous code referenced the
        # bound method without calling it, leaking the capture handle.
        self.webcamera.release()

    def stats(self):
        """Return capture statistics.

        FPS is reported as 0.0 when no time has elapsed (e.g. stop() was
        never reached), avoiding the previous ZeroDivisionError.
        """
        duration = self.stoptime - self.starttime
        fps = self.frame_count / duration if duration > 0 else 0.0
        return {"duration": round(duration, 2), "frame_count": self.frame_count, "FPS": round(fps, 2)}
# Test -------------------------------------
def main():
    """Smoke test: stream a public MP4 over HTTP and display it with OpenCV."""
    cv2.namedWindow("TestStreamWebVideo", cv2.WINDOW_NORMAL)
    cv2.resizeWindow("TestStreamWebVideo", 640,540)
    cv2.moveWindow("TestStreamWebVideo", 100,100)
    # read from a url
    camera_memory = ['queue',30,True,3,1]
    camera_settings = []
    webcamera = StreamWebVideo("http://clips.vorwaerts-gmbh.de/big_buck_bunny.mp4", camera_settings, camera_memory)
    webcamera.start()
    previous = -3  # sentinel below any real frame number
    i=0
    while True:
        (num, frame) = webcamera.read()
        if num == 0:
            # Buffer not populated yet.
            continue
        if previous==num:
            # Same frame as last time; wait briefly for a fresh one.
            time.sleep(0.02)
            continue
        previous = num
        if num == -1: # finished
            break
        cv2.imshow("TestStreamWebVideo",frame)
        key = cv2.waitKey(1) & 0xFF
        # Quit on 'q' or ESC.
        if key == ord('q') or key == 27:
            break
        i += 1
    webcamera.stop()
    cv2.destroyAllWindows()
    print(webcamera.stats())
    print("main read ",i)
if __name__ == '__main__':
    main()
|
cli.py | # Leave a space between each Stock Symbol
import Stocks #Main API
import sys #To get Command Line arguments
import config #Config file API
import multiprocessing #It's obvious :)
import time
# Module-level singletons shared by SimpleGet / MultiStockPrice below.
StartEndString = "------------------------------------" #For neat printing.
StockApp = Stocks.Stocks() #Initialize API
bcolors = Stocks.bcolors() #Initialize terminal colours
Config = config.Config() #Initialize Config file API
# Change this value to control the coloring in the terminal.
thresh = 2.5 #Doesn't matter for windows. (At least for now)
def SimpleGet(StocksList = sys.argv):
    """Sequentially fetch and pretty-print a quote for each symbol.

    When called without an argument, symbols are taken from the command line;
    otherwise every entry of StocksList is treated as a symbol.
    """
    #Check if argument passed or not and set start and end values accordingly.
    if StocksList == sys.argv:
        # NOTE(review): index 0 is the script name; starting at 2 also skips
        # argv[1] — presumably argv[1] selects a CLI mode. TODO confirm
        # against the program's entry point.
        start = 2
        end = len(sys.argv)
    else:
        start = 0
        end = len(StocksList)
    #Loop each stock symbol from StocksList
    for i in range (start, end):
        try:
            StockApp.ExtractStockPrice(StocksList[i])
            RequestComplete = True
        except Exception as e:
            #No internet, stock symbol which doesn't exist, etc.
            print ("Can't get", StocksList[i])
            print (e)
            RequestComplete = False
        try:
            #Get values from API.
            # NOTE(review): these attributes persist on StockApp between
            # iterations, so after a failed request they may hold the
            # previous stock's values; RequestComplete guards the printing.
            lastPrice = StockApp.lastPrice
            pChange = StockApp.pChange
            change = StockApp.change
            lastUpdateTime = StockApp.lastUpdateTime
            companyName = StockApp.companyName
        except AttributeError:
            #Attribute error when above values do not exist.
            pass
        End = bcolors.ENDC #To close off ANSI codes.
        if RequestComplete:
            #Checks stock values and sets ANSI codes for colour, bold, etc.
            if float(pChange) > thresh:
                Start = bcolors.OKGREEN + bcolors.BOLD #Green and bold
            elif float(pChange) < -thresh:
                Start = bcolors.FAIL #Red
            elif float(pChange) < 0 and float(pChange) > -thresh:
                Start = bcolors.HEADER #Purple
            elif float(pChange) > 0 and float(pChange) < thresh:
                Start = bcolors.WARNING #Yellow
            else:
                Start = ""
            print (StartEndString) #For neat printing.
            print ("Stock:", Start, companyName, End)
            print ("Last Price:", bcolors.BOLD, lastPrice, End)
            print ("Percentage Change:", Start, pChange, End)
            print ("Absolute Change:", Start, change, End)
            print ("Last Updated Time:", lastUpdateTime)
            print ()
def MultiStockPrice(Stock, lock): #Lock needed for print lock or else printing gets messed up.
    """Fetch a single stock and print its data; intended to run in its own
    multiprocessing.Process.

    Stock -- a single stock symbol string.
    lock  -- a multiprocessing.Lock serializing console output across
             worker processes.
    """
    try:
        StockApp.ExtractStockPrice(Stock)
        RequestComplete = True
    except Exception:
        #No internet, stock symbol which doesn't exist, etc.
        print ("Can't get", Stock)
        print ()
        RequestComplete = False
    try:
        #Assign values
        lastPrice = StockApp.lastPrice
        pChange = StockApp.pChange
        change = StockApp.change
        lastUpdateTime = StockApp.lastUpdateTime
        companyName = StockApp.companyName
    except AttributeError:
        print ("AttributeError")
    End = bcolors.ENDC
    if RequestComplete:
        #Checks stock values and sets ANSI codes for colour, bold, etc.
        if float(pChange) > thresh:
            Start = bcolors.OKGREEN + bcolors.BOLD
        elif float(pChange) < -thresh:
            Start = bcolors.FAIL
        elif float(pChange) < 0 and float(pChange) > -thresh:
            Start = bcolors.HEADER
        elif float(pChange) > 0 and float(pChange) < thresh:
            Start = bcolors.WARNING
        else:
            Start = ""
        # `with lock` guarantees the lock is released even if a print raises;
        # the previous manual acquire/release could leave the lock held and
        # deadlock the sibling processes. It also avoids the pointless
        # acquire/release that used to happen when there was nothing to print.
        with lock:
            print (StartEndString)
            print ("Stock:", Start, companyName, End)
            print ("Last Price:", bcolors.BOLD, lastPrice, End)
            print ("Percentage Change:", Start, pChange, End)
            print ("Absolute Change:", Start, change, End)
            print ("Last Updated Time:", lastUpdateTime)
            print ()
def MultiGet(StocksList = sys.argv):
    """Fetch stock data in parallel, one process per symbol.

    Output order is nondeterministic; a print lock keeps each stock's
    block of lines together. Warns once if the run exceeds 10 seconds.
    """
    lock = multiprocessing.Lock() #For print lock argument.
    #Check if argument passed or not and set start and end values accordingly.
    if StocksList == sys.argv:
        start = 2
        end = len(sys.argv)
    else:
        start = 0
        end = len(StocksList)
    Processes = [] #To store each process.
    StartTime = time.time()
    SlowInternetMessagePrinted = False
    for i in range (start, end):
        #Create a process for each stock symbol.
        Processes.append (multiprocessing.Process(target = MultiStockPrice, args = (StocksList[i], lock, )))
    for p in Processes:
        p.start() #Start all processes
    for p in Processes:
        while p.is_alive(): #Standby till every process is complete.
            # join() with a timeout blocks instead of busy-spinning, so we
            # no longer burn a full CPU core while waiting for the children.
            p.join(0.5)
            if time.time() - StartTime > 10 and not SlowInternetMessagePrinted:
                print ("Internet seems to be very slow")
                print ("Please check your internet connection and try again")
                print ("Press Ctrl + C to exit or wait for program to finish")
                SlowInternetMessagePrinted = True
def ExecuteDefault():
    """Run the command stored under "default" in the config when the user
    supplied no command on the command line."""
    default_cmd = Config.GetAllSettings()["default"]
    print (bcolors.OKBLUE + "No command found. Executing default: " + default_cmd + bcolors.ENDC)
    # Re-enter the dispatcher with a synthetic argv-style list.
    CommandLineArgs([" ", *default_cmd.split()])
def CommandLineArgs(argslist = sys.argv):
    """Dispatch the CLI command in argslist[1] (get/add/remove/status/help).

    Defaults to sys.argv; ExecuteDefault() passes a synthetic list instead.
    Falls back to the configured default command when no command is given.
    """
    if len(argslist) > 1: #If some command has been given.
        if argslist[1] == "get":
            try:
                if argslist[2] == "all-m": #Multiprocessing
                    MultiGet(Config.GetAllStockSymbols())
                elif argslist[2] == "all": #Sequential
                    SimpleGet(Config.GetAllStockSymbols())
                else:
                    SimpleGet() #Default: symbols straight from the command line
            except IndexError:
                # Bare "get" with no sub-argument: argslist[2] is missing.
                print ()
        elif argslist[1] == "add":
            # Every remaining argument is a symbol to store in the config.
            for i in range (2, len(argslist)):
                Config.AddStockSymbol(argslist[i])
        elif argslist[1] == "remove":
            for i in range (2, len(argslist)):
                Config.RemoveStockSymbol(argslist[i])
        elif argslist[1] == "status":
            print ("Following stock symbols have been added:-")
            for i in Config.GetAllStockSymbols():
                print (" ", i) #Neat printing
        elif argslist[1] == "help":
            message = '''
List of commands available: -
1) get all
2) get
3) add
4) remove
5) status
1) get all:-
Use 'get all' to show all your stock values.
Use `get all-m` to get faster results, but stocks are shown randomly.
2) get:-
Use 'get STOCKSYMBOLS' to get the values of particular stocks.
3) add:-
Use 'add STOCKSYMBOLS' to add stocks to the list of all your stocks.
4) remove:-
Use 'remove STOCKSYMBOLS' to remove stocks from your list of stocks.
5) status:-
Use 'status' to see your list of stocks
Wherever STOCKSYMBOLS have been used, it means you can use a single stock symbol or multiple stock symbols seperated by spaces
To call a command, type:-
python cli.py COMMAND
if you are in a system which has python2 by default, type:-
python3 cli.py COMMAND'''
            print (message)
        else:
            #Wrong command
            print (bcolors.OKBLUE + "I don't know that command!" + bcolors.ENDC)
    else:
        #If no command passed
        ExecuteDefault()
if __name__ == "__main__": #Absolutely needed for windows.
CommandLineArgs()
print (StartEndString) #Prints right at the end of program. |
test_user_secrets.py | import json
import os
import subprocess
import threading
import unittest
from http.server import BaseHTTPRequestHandler, HTTPServer
from test.support import EnvironmentVarGuard
from urllib.parse import urlparse
from datetime import datetime, timedelta
import mock
from google.auth.exceptions import DefaultCredentialsError
from google.cloud import bigquery
from kaggle_secrets import (GcpTarget, UserSecretsClient,
NotFoundError, ValidationError)
from kaggle_web_client import (_KAGGLE_URL_BASE_ENV_VAR_NAME,
_KAGGLE_USER_SECRETS_TOKEN_ENV_VAR_NAME,
CredentialError, BackendError)
_TEST_JWT = 'test-secrets-key'
class UserSecretsHTTPHandler(BaseHTTPRequestHandler):
    """Abstract HTTP handler used by the tests to fake the secrets backend.

    Subclasses record the incoming request via set_request() and supply the
    JSON payload to return via get_response().
    """

    def set_request(self):
        """Record details of the incoming request; must be overridden."""
        raise NotImplementedError()

    def get_response(self):
        """Return the JSON-serializable response body; must be overridden."""
        raise NotImplementedError()

    # Renamed the handler parameter from the non-standard `s` to the
    # idiomatic `self` (PEP 8); behavior is unchanged.
    def do_HEAD(self):
        self.send_response(200)

    def do_POST(self):
        # Capture the request first, then reply with the subclass's JSON.
        self.set_request()
        self.send_response(200)
        self.send_header("Content-type", "application/json")
        self.end_headers()
        self.wfile.write(json.dumps(self.get_response()).encode("utf-8"))
class TestUserSecrets(unittest.TestCase):
    """Exercises UserSecretsClient against a fake in-process HTTP backend."""

    # Address the fake secrets server binds to; overridable via env var.
    SERVER_ADDRESS = urlparse(os.getenv(_KAGGLE_URL_BASE_ENV_VAR_NAME, default="http://127.0.0.1:8001"))

    def _test_client(self, client_func, expected_path, expected_body, secret=None, success=True):
        """Run client_func against a fake server and verify the request it sent.

        client_func   -- zero-arg callable performing the client operation.
        expected_path -- URL path the client is expected to hit.
        expected_body -- JSON body the client is expected to send.
        secret        -- secret value the fake server returns on success.
        success       -- whether the fake server reports success.
        """
        _request = {}

        class AccessTokenHandler(UserSecretsHTTPHandler):
            def set_request(self):
                # Capture path, parsed JSON body and headers for assertions.
                _request['path'] = self.path
                content_len = int(self.headers.get('Content-Length'))
                _request['body'] = json.loads(self.rfile.read(content_len))
                _request['headers'] = self.headers

            def get_response(self):
                if success:
                    return {'result': {'secret': secret, 'secretType': 'refreshToken', 'secretProvider': 'google', 'expiresInSeconds': 3600}, 'wasSuccessful': "true"}
                else:
                    return {'wasSuccessful': "false", 'errors': ['No user secrets exist for kernel']}

        env = EnvironmentVarGuard()
        env.set(_KAGGLE_USER_SECRETS_TOKEN_ENV_VAR_NAME, _TEST_JWT)
        with env:
            with HTTPServer((self.SERVER_ADDRESS.hostname, self.SERVER_ADDRESS.port), AccessTokenHandler) as httpd:
                threading.Thread(target=httpd.serve_forever).start()
                try:
                    client_func()
                finally:
                    # Always stop the server thread, even if the client raised.
                    httpd.shutdown()
        path, headers, body = _request['path'], _request['headers'], _request['body']
        self.assertEqual(
            path,
            expected_path,
            msg="Fake server did not receive the right request from the UserSecrets client.")
        self.assertEqual(
            body,
            expected_body,
            msg="Fake server did not receive the right body from the UserSecrets client.")

    def test_no_token_fails(self):
        # Without the JWT env var the client must refuse to construct.
        env = EnvironmentVarGuard()
        env.unset(_KAGGLE_USER_SECRETS_TOKEN_ENV_VAR_NAME)
        with env:
            with self.assertRaises(CredentialError):
                client = UserSecretsClient()

    def test_get_secret_succeeds(self):
        secret = '12345'
        def call_get_secret():
            client = UserSecretsClient()
            secret_response = client.get_secret("secret_label")
            self.assertEqual(secret_response, secret)
        self._test_client(call_get_secret,
                          '/requests/GetUserSecretByLabelRequest', {'Label': "secret_label"},
                          secret=secret)

    def test_get_secret_handles_unsuccessful(self):
        def call_get_secret():
            client = UserSecretsClient()
            with self.assertRaises(BackendError):
                secret_response = client.get_secret("secret_label")
        self._test_client(call_get_secret,
                          '/requests/GetUserSecretByLabelRequest', {'Label': "secret_label"},
                          success=False)

    def test_get_secret_validates_label(self):
        # An empty label must be rejected client-side, before any request.
        env = EnvironmentVarGuard()
        env.set(_KAGGLE_USER_SECRETS_TOKEN_ENV_VAR_NAME, _TEST_JWT)
        with env:
            client = UserSecretsClient()
            with self.assertRaises(ValidationError):
                secret_response = client.get_secret("")

    def test_get_gcloud_secret_succeeds(self):
        secret = '{"client_id":"gcloud","type":"authorized_user"}'
        def call_get_secret():
            client = UserSecretsClient()
            secret_response = client.get_gcloud_credential()
            self.assertEqual(secret_response, secret)
        self._test_client(call_get_secret,
                          '/requests/GetUserSecretByLabelRequest', {'Label': "__gcloud_sdk_auth__"},
                          secret=secret)

    def test_get_gcloud_secret_handles_unsuccessful(self):
        def call_get_secret():
            client = UserSecretsClient()
            with self.assertRaises(NotFoundError):
                secret_response = client.get_gcloud_credential()
        self._test_client(call_get_secret,
                          '/requests/GetUserSecretByLabelRequest', {'Label': "__gcloud_sdk_auth__"},
                          success=False)

    def test_set_gcloud_credentials_succeeds(self):
        # NOTE(review): shells out to the real `gcloud` binary — this test
        # assumes the gcloud CLI is installed in the test environment.
        secret = '{"client_id":"gcloud","type":"authorized_user","refresh_token":"refresh_token"}'
        project = 'foo'
        account = 'bar'
        def get_gcloud_config_value(field):
            result = subprocess.run(['gcloud', 'config', 'get-value', field], capture_output=True)
            result.check_returncode()
            return result.stdout.strip().decode('ascii')
        def test_fn():
            client = UserSecretsClient()
            client.set_gcloud_credentials(project=project, account=account)
            self.assertEqual(project, os.environ['GOOGLE_CLOUD_PROJECT'])
            self.assertEqual(project, get_gcloud_config_value('project'))
            self.assertEqual(account, os.environ['GOOGLE_ACCOUNT'])
            self.assertEqual(account, get_gcloud_config_value('account'))
            expected_creds_file = '/tmp/gcloud_credential.json'
            self.assertEqual(expected_creds_file, os.environ['GOOGLE_APPLICATION_CREDENTIALS'])
            self.assertEqual(expected_creds_file, get_gcloud_config_value('auth/credential_file_override'))
            with open(expected_creds_file, 'r') as f:
                self.assertEqual(secret, '\n'.join(f.readlines()))
        self._test_client(test_fn, '/requests/GetUserSecretByLabelRequest', {'Label': "__gcloud_sdk_auth__"}, secret=secret)

    @mock.patch('kaggle_secrets.datetime')
    def test_get_access_token_succeeds(self, mock_dt):
        secret = '12345'
        # Freeze "now" so the expiry timestamp is deterministic.
        now = datetime(1993, 4, 24)
        mock_dt.utcnow = mock.Mock(return_value=now)
        def call_get_bigquery_access_token():
            client = UserSecretsClient()
            secret_response = client.get_bigquery_access_token()
            self.assertEqual(secret_response, (secret, now + timedelta(seconds=3600)))
        def call_get_gcs_access_token():
            client = UserSecretsClient()
            secret_response = client._get_gcs_access_token()
            self.assertEqual(secret_response, (secret, now + timedelta(seconds=3600)))
        self._test_client(call_get_bigquery_access_token,
                          '/requests/GetUserSecretRequest', {'Target': GcpTarget.BIGQUERY.target},
                          secret=secret)
        self._test_client(call_get_gcs_access_token,
                          '/requests/GetUserSecretRequest', {'Target': GcpTarget.GCS.target},
                          secret=secret)

    def test_get_access_token_handles_unsuccessful(self):
        def call_get_access_token():
            client = UserSecretsClient()
            with self.assertRaises(BackendError):
                client.get_bigquery_access_token()
        self._test_client(call_get_access_token,
                          '/requests/GetUserSecretRequest', {'Target': GcpTarget.BIGQUERY.target}, success=False)
|
camera361.py | import time
import picamera
import picamera.array
from picamera.array import PiRGBAnalysis
import cv2
from gpiozero import Button
import threading
import sys
import random
import math
import numpy as np
import traceback
capture =False
running = True
analyze = False
text = False
cv2.namedWindow("window_name", cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty("window_name", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
button1 = Button(18)
button2 = Button(23)
button3 = Button(24)
button4 = Button(21)
modes = [0,1,2]
modeSwitch = 0
mode = modes[modeSwitch]
xCrop = []
font = cv2.FONT_HERSHEY_SIMPLEX
def generateSinFunction():
    """Precompute angular weight lookup tables for the fisheye lens.

    Maps each pixel of the cropped, circular fisheye image to spherical
    angles (phi, theta) and returns six per-pixel weight arrays plus the
    circular lens mask. Relies on the module-level xCrop list
    ([xMin, xMax, yMin, yMax, xCenter, yCenter, RMax]) being populated.
    Assumes a frame height of 976 rows (matches the 1296x976 camera mode).
    """
    # Normalized coordinate grids over the crop window (xCrop[-1] is RMax).
    x=np.linspace(-1,1,xCrop[-1]*2);
    X=np.tile(x,(976,1))
    Y=np.tile(x,(xCrop[-1]*2,1))
    Y=Y.T
    # Align the vertical grid with the 976-row frame using yMin (xCrop[2]).
    Y=Y[-xCrop[2]:-xCrop[2]+976,:]
    if np.shape(Y)[0]<976:
        # Pad Y with zero rows so it matches X's 976-row height.
        Y2=np.zeros((np.shape(X)[0]-np.shape(Y)[0],np.shape(X)[1]))
        Y=np.concatenate((Y2,Y),axis = 0)
    # Half field of view of the 220-degree lens, in radians.
    fov = math.pi/2.0 * 220/180.0
    R=np.sqrt(np.power(X,2)+np.power(Y,2));
    circle = R<.99  # pixels inside the fisheye image circle
    R2=np.copy(R)
    R[R>1]=0  # zero out radii outside the lens so the angle maps stay finite
    # Equidistant fisheye model: radius maps linearly to polar angle.
    phi2 = (np.arctan2(X,Y))
    theta2 = (fov * R) - math.pi/2
    phi = np.arctan2(np.sin(theta2),np.sin(phi2)*np.cos(theta2))
    theta = np.arcsin(np.cos(theta2)*np.cos(phi2))
    # Per-pixel angular increments (central differences via roll).
    dPhi = np.abs((np.roll(phi,1,axis=1)-np.roll(phi,-1,axis=1)))
    # Unwrap jumps across the +-pi seam.
    dPhi[dPhi>math.pi] = np.abs(dPhi[dPhi>math.pi]-2*math.pi)
    #dPhi = dPhi*circle
    dTheta = np.abs((np.roll(theta,-1,axis=0)-np.roll(theta,1,axis=0)))
    dTheta = circle*(dTheta)
    dPhi = dPhi*circle
    # Slightly tighter mask to exclude the rim artifacts of the roll() edges.
    circularMask = R2<.98
    # Cartesian components of the viewing direction, weighted by the
    # angular increments ("A" = area-masked, "R" = solid-angle weighted).
    Vcoscos = np.cos(phi)*np.cos(theta)*dTheta
    Vcossin = np.sin(phi)*np.cos(theta)*dTheta
    Vsin = np.sin(theta)*dTheta
    VcoscosA = np.array(Vcoscos*circularMask)
    VcoscosR = np.array(Vcoscos*dPhi)
    VcossinA = np.array(Vcossin*circularMask)
    VcossinR = np.array(Vcossin*dPhi)
    VsinA = Vsin*dPhi*circularMask
    VsinR = Vsin*dPhi*dTheta
    #im = np.array(Vcoscos * 255, dtype = np.uint8)
    #cv2.imwrite('/home/pi/imTest/Vcoscos.jpg',im)
    return VcoscosA,VcoscosR,VcossinA,VcossinR,VsinA,VsinR,circularMask
class visionAnalyzer(PiRGBAnalysis):
    """picamera analysis sink: thresholds each frame for red-dominant pixels
    and integrates the mask against the precomputed spherical weight tables.
    """
    # Integrated vision outputs, refreshed every analyzed frame.
    duV = 0
    dudV = 0
    dpV = 0
    dpdV = 0
    newVision = False  # set True once a frame has been processed
    i=0  # frame counter
    thresholdRed = 20
    thresholdBlue = 200  # NOTE(review): unused in this class as written
    xCrop = []  # crop window, see sectionCrop()
    t0 = 0
    fov = 220.0/180.0
    circularMask = []
    VcoscosA = []
    VcossinA = []
    VsinA = []
    VcoscosR = []
    VcossinR = []
    VsinR = []
    circle = []

    def __init__(self,camera,section):
        """section -- xCrop-style window list; weight tables are built once
        here (slow, hence the progress prints)."""
        super(visionAnalyzer,self).__init__(camera)
        self.xCrop = section
        print('generateSinFunction')
        self.VcoscosA,self.VcoscosR,self.VcossinA,self.VcossinR,self.VsinA,self.VsinR,self.circle=generateSinFunction()
        print('generatedSinFunction')

    def analyze(self,frame):
        """Called by picamera for every recorded frame (BGR array)."""
        global capture
        # Crop to the lens region.
        frameC = frame[:,self.xCrop[0]:self.xCrop[1],:]
        #t0=time.time()
        if not analyze:
            # Passthrough mode: just display (and optionally save) the crop.
            cv2.imshow("window_name", frameC)
            cv2.waitKey(1)
            if capture:
                timestr = time.strftime("%Y%m%d-%H%M%S")
                cv2.imwrite('/home/pi/Pictures/Blue'+timestr+'.jpg',frameC)
                capture = False
        else:
            # Red-minus-green difference, limited to the lens circle.
            maskRB = np.subtract(frameC[:,:,2], frameC[:,:,1].astype(np.int16))*self.circle
            maskRB=maskRB>self.thresholdRed
            # Horizontal edges of the red mask.
            maskdRB = (np.roll(maskRB,1,axis=1) != np.roll(maskRB,-1,axis=1))
            frameD = cv2.bitwise_and(frameC,frameC,mask = maskRB.astype(np.uint8))
            n = str(self.i).zfill(5)
            # Integrate mask and its edges against the weight tables.
            self.duV = np.sum(self.VcoscosR[maskRB])
            self.dudV = np.sum(self.VcoscosA[maskdRB])/2.0
            self.dpV = np.sum(self.VcossinR[maskRB])
            self.dpdV = np.sum(self.VcossinA[maskdRB])/2.0
            if text:
                # Overlay the four integrals for on-screen debugging.
                cv2.putText(frameD,"%10.4f"%(self.duV),(10,30),font,1,(0,255,0),2)
                cv2.putText(frameD,"%10.4f"%(self.dudV),(10,60),font,1,(0,255,0),2)
                cv2.putText(frameD,"%10.4f"%(self.dpV),(10,90),font,1,(0,255,0),2)
                cv2.putText(frameD,"%10.4f"%(self.dpdV),(10,120),font,1,(0,255,0),2)
            cv2.imshow("window_name", frameD)
            cv2.waitKey(1)
            if capture:
                # Dump raw crop, masked frame and threshold mask for offline review.
                timestr = time.strftime("%Y%m%d-%H%M%S")
                cv2.imwrite('/home/pi/Pictures/Blue'+timestr+'.jpg',frameC)
                cv2.imwrite('/home/pi/Pictures/Cut'+timestr+'.jpg',frameD)
                cv2.imwrite('/home/pi/Pictures/Thres'+timestr+'.jpg',maskRB.astype(np.uint8)*255)
                capture = False
            self.i=self.i+1
            t1=time.time()
            self.t0 = t1
        self.newVision = True
        time.sleep(.01)
def sectionCrop(crop):
    """Build the crop window list from a (RMax, xCenter, yCenter) triple.

    Returns [xMin, xMax, yMin, yMax, xCenter, yCenter, RMax], the layout
    expected by the module-level xCrop variable.
    """
    radius = int(crop[0])
    cx = int(crop[1])
    cy = int(crop[2])
    return [cx - radius, cx + radius, cy - radius, cy + radius, cx, cy, radius]
def buttonLogger():
    """Poll the four GPIO buttons and update the shared mode flags.

    Runs forever; intended to be started as a daemon thread.
    button1 cycles modes, button2 toggles analysis, button3 toggles the
    on-screen text overlay, button4 requests a still capture.
    """
    global capture
    global analyze
    global modeSwitch
    global mode
    global text
    while True:
        #print("1 : "+str(button1.is_pressed))
        #print("2 : "+str(button2.is_pressed))
        #print("3 : "+str(button3.is_pressed))
        if button4.is_pressed:
            capture = True
        if button1.is_pressed:
            modeSwitch = (modeSwitch+1)%len(modes)
            mode = modes[modeSwitch]
            print(mode)
            # Debounce: wait (up to 1s) for the button to be released.
            button1.wait_for_release(1)
        if button2.is_pressed:
            analyze = not analyze
            button2.wait_for_release(1)
        if button3.is_pressed:
            text = not text
            button3.wait_for_release(1)
        time.sleep(.1)
def captureImage():
    """Take one full-resolution still to /home/pi/Pictures and clear the
    global capture request flag."""
    global capture
    with picamera.PiCamera() as cam:
        cam.resolution = (2592, 1944)
        cam.start_preview()
        # Camera warm-up time
        time.sleep(2)
        stamp = time.strftime("%Y%m%d-%H%M%S")
        cam.capture('/home/pi/Pictures/image' + stamp + '.jpg')
        capture = False
def streamCamera():
    """Mode 0 idle loop: show a live 320x240 preview window until a capture
    is requested or the mode changes away from 0."""
    with picamera.PiCamera() as camera:
        camera.resolution = (320, 240)
        camera.framerate = 30
        camera.led = False
        #camera.start_preview()
        # Camera warm-up.
        time.sleep(2)
        with picamera.array.PiRGBArray(camera) as stream:
            while not capture and mode == 0:
                camera.capture(stream, format='bgr', use_video_port=True)
                # At this point the image is available as stream.array
                frame = stream.array
                cv2.imshow("window_name", frame)
                cv2.waitKey(1)
                # Reset the buffer for the next capture.
                stream.seek(0)
                stream.truncate()
def cameraDrone():
    """Mode 1: record continuously, feeding frames to visionAnalyzer.

    Uses fixed ISO/shutter, then freezes exposure and white balance once
    the sensor gains have settled so the red threshold stays stable.
    """
    with picamera.PiCamera() as camera:
        camera.resolution = (1296,976)
        #camera.zoom = ( .3563 , 0.2875 , 228/640 , 228/480 )
        camera.framerate = 3
        camera.iso = 800
        camera.shutter_speed = 50000
        camera.awb_mode = 'off'
        camera.awb_gains=(2.5,6)
        #camera.exposure_speed = 100
        #camera.exposure_mode = 'night'
        camera.exposure_compensation = 20
        firstRound = True
        # NOTE(review): this `running` is local and shadows the module-level
        # flag read by main(); it has no effect here.
        running =True
        print('here')
        try:
            k = 0
            with visionAnalyzer(camera,xCrop) as anal:
                camera.start_recording(anal, 'bgr')
                #for frame in enumerate(camera.capture_continuous(rawCapture, 'rgb')):#,resize=(228,228))):
                while mode == 1:
                    k=k+1
                    #print('analog gain : '+str(camera.analog_gain)+' digital_gain : '+str(camera.digital_gain))
                    camera.wait_recording(1.0/camera.framerate)
                    if camera.analog_gain >7 and camera.digital_gain > 1 and firstRound:
                        # Gains have settled: lock exposure and white balance.
                        camera.exposure_mode = 'off'
                        camera.awb_mode = 'off'
                        b=0
                        r=0
                        camera.awb_gains=(1, 0)
                        firstRound = False
                        print(camera.awb_gains)
                    # Dead code: gain-sweep experiment disabled via `and False`.
                    if not firstRound and False:
                        r=r+.1
                        if r>8:
                            r=0
                            b=b+.1
                            if b>8:
                                break
                        camera.awb_gains=(r,b)
                        print(camera.awb_gains)
                camera.stop_recording()
        except:
            # Recording errors are logged but never crash the main loop.
            traceback.print_exc()
def wbCalib():
    """Mode 2: sweep white-balance gains, saving one test image per (r, b)
    gain pair so the best combination can be chosen by eye.

    A capture-button press starts the sweep; each subsequent frame saves an
    image and advances the gains by stepWB until both exceed 8.
    """
    global capture
    with picamera.PiCamera() as camera:
        camera.resolution = (1296,976)
        #camera.zoom = ( .3563 , 0.2875 , 228/640 , 228/480 )
        camera.framerate = 3
        camera.iso = 800
        camera.shutter_speed = 50000
        camera.awb_mode = 'off'
        camera.awb_gains=(2.5,6)
        #camera.exposure_speed = 100
        #camera.exposure_mode = 'night'
        camera.exposure_compensation = 20
        firstRound = True
        firstWB = True
        # NOTE(review): local, shadows the module-level running flag; unused.
        running =True
        print('here')
        time.sleep(2)
        stepWB = .3  # gain increment per captured frame
        with picamera.array.PiRGBArray(camera) as stream:
            while mode == 2:
                camera.capture(stream, format='bgr', use_video_port=True)
                # At this point the image is available as stream.array
                frame = stream.array
                frame = frame[:,xCrop[0]:xCrop[1],:]
                cv2.imshow("window_name", frame)
                cv2.waitKey(1)
                stream.seek(0)
                stream.truncate()
                if camera.analog_gain >7 and camera.digital_gain > 1 and firstRound:
                    # Gains have settled: lock exposure and white balance.
                    camera.exposure_mode = 'off'
                    camera.awb_mode = 'off'
                    b=0
                    r=0
                    camera.awb_gains=(1, 1)
                    firstRound = False
                    print(camera.awb_gains)
                if capture and not firstRound:
                    if firstWB:
                        # First press: start the sweep at (0, 0).
                        timestr = time.strftime("%Y%m%d-%H%M%S")
                        r = 0
                        b = 0
                        camera.awb_gains=(0,0)
                        firstWB = False
                    else:
                        rName = '%.1f' % r
                        bName = '%.1f' % b
                        cv2.imwrite('/home/pi/Pictures/WB/WB-'+timestr+'-r_'+rName+'-b_'+bName+'.jpg',frame)
                        r=r+stepWB
                        if r>8:
                            # Red gain exhausted: wrap and advance blue.
                            r=0
                            b=b+stepWB
                        if b>8:
                            # Sweep finished: restore neutral gains and stop.
                            capture = False
                            firstWB = True
                            camera.awb_gains=(1,1)
                        else:
                            camera.awb_gains=(r,b)
def main():
    """Entry point: start the button-poll thread, load the lens crop spec,
    then run whichever camera mode the buttons select."""
    global xCrop
    buttonThread = threading.Thread(target=buttonLogger)
    buttonThread.daemon = True  # dies with the main thread
    buttonThread.start()
    # droneSpecs.csv holds (RMax, xCenter, yCenter) for the lens crop.
    eyeProp = np.loadtxt('/home/pi/droneSpecs.csv')
    xCrop = sectionCrop(eyeProp)
    while running:
        time.sleep(.1)
        if mode == 0:
            if capture:
                captureImage()
            else:
                streamCamera()
        if mode == 1:
            cameraDrone()
        if mode == 2:
            wbCalib()
if __name__ == "__main__":
main() |
TestAll.py | #!/usr/bin/env python
#coding: utf-8
import os
import re
import json
import time
import subprocess
import threading
from datetime import datetime
import psutil
import requests
TEST_SERVER_HOSTS = ['192.168.40.215', '192.168.40.91']
TEST_SERVER_PORT = 8999
TEST_REQ_TMPL = 'http://%(host)s:%(port)d/test'
APP_SERVER_IP = '192.168.3.235'
APP_SERVER_PATH_TMPL = 'http://%(ip)s:%(port)d/hello'
TARGET_REQUEST = {
'path_tmpl': '',
'headers': {
},
'params' : {
}
}
SECONDS = 10
CONCURRENTS = [400, 600, 800, 1000, 1600]
PROCESSES_LST = [1, 4, 8, 16, 32]
HEADERS = {'Content-type': 'application/json', 'Accept': 'text/plain'}
REGEXPS = {
'availability(%)' : r'^Availability.*\b(\d+\.\d+)\b.*',
'transaction-rate(trans/sec)': r'^Transaction rate.*\b(\d+\.\d+)\b.*'
}
SUMMARY = {
'INFO': {
'TAG' : 'None',
'SECONDS': SECONDS,
'CONCURRENTS': CONCURRENTS,
'PROCESSES_LST': PROCESSES_LST,
'TEST_SERVER_HOSTS': TEST_SERVER_HOSTS,
'APP_SERVER_IP' : APP_SERVER_IP
},
'tests': [
{
'app': 'test_http.go',
'cmd_tmpl': './webapps/test_http.bin -port=%(port)d -size=%(processes)d 2>/dev/null 1>/dev/null',
'port' : 9001,
'results': []
},
{
'app': 'test_martini.go',
'cmd_tmpl': './webapps/test_martini.bin -port=%(port)d -size=%(processes)d 2>/dev/null 1>/dev/null',
'port': 9002,
'results': []
},
{
'app': 'test_tornado.py',
'port': 8001,
'cmd_tmpl': './webapps/test_tornado.py --port=%(port)d --processes=%(processes)d 2>/dev/null 1>/dev/null',
'results': []
},
{
'app': 'test_webpy_gevent.py',
'port': 8002,
'cmd_tmpl': 'cd webapps && gunicorn -k gevent -w %(processes)d -b 0.0.0.0:%(port)d test_webpy_gevent:wsgiapp 2>/dev/null 1>/dev/null',
'results': []
}
]
}
time_now = lambda: datetime.now().strftime("%m-%d_%H:%M:%S")
results_lock = threading.Lock()
def kill_proc_tree(pid, including_parent=True):
    """Kill a process and all of its descendants.

    pid              -- root process id.
    including_parent -- also kill the root process itself.
    Processes that already exited are silently ignored.
    """
    parent = psutil.Process(pid)
    for child in parent.children(recursive=True):
        try:
            child.kill()
        except psutil.NoSuchProcess:
            pass
    if including_parent:
        try:
            parent.kill()
        except psutil.NoSuchProcess:
            pass
def ping(url):
    """Return True if *url* answers HTTP 200 within 2 seconds.

    On any failure, prints the error and sleeps 30s (gives a crashed app
    server operator time to notice) before returning False.
    """
    status = False
    req = None
    try:
        req = requests.get(url, verify=False, timeout=2)
    except Exception as e:
        print 'Ping failed:', url, e
        time.sleep(30)
    if req and req.status_code == 200:
        status = True
    return status
def extract_test(data):
    """Parse a benchmark response's text output into numeric metrics.

    Scans each output line against the REGEXPS patterns (availability,
    transaction rate) and returns a dict with the raw output plus one
    float per matched metric.
    """
    output = data['output']
    result = {
        'output': output
    }
    for line in output.split('\n'):
        for name, regexp in REGEXPS.iteritems():
            m = re.match(regexp, line)
            if m:
                match_result = m.groups()[0]
                result[name] = float(match_result)
                # A line can only match one metric; move to the next line.
                break
    return result
def test_request(results, url, data, timeout):
    """POST one load-test job to a test server and collect its result.

    Retries up to 3 times on timeout only; other exceptions propagate.
    The parsed result is appended to the shared *results* list under
    results_lock, since several of these run in parallel threads.
    """
    retry = 3
    resp_data = None
    while retry > 0:
        try:
            req = requests.post(url, headers=HEADERS, data=json.dumps(data), timeout=timeout)
            resp_data = req.json()
            retry = 0 # !!! success: force the retry loop to exit
        except requests.Timeout as e:
            print (3-retry), e
            retry -= 1
    if resp_data:
        result = extract_test(resp_data)
        results_lock.acquire()
        results.append(result)
        results_lock.release()
def merge_test(datas):
    """Combine per-server results into one summary dict.

    Concatenates the raw outputs and sums each REGEXPS metric into a
    '<name>_TOTAL' key. Returns None when there are no results at all.
    """
    if len(datas) == 0: return None
    result = {}
    outputs = []
    keys = []
    for key in REGEXPS.keys():
        keys.append(key)
        # result[key] = []
        result[key + '_TOTAL'] = 0
    for data in datas:
        outputs.append(data['output'])
        for key in keys:
            # A metric may be absent if its regexp never matched.
            if key not in data: continue
            # result[key].append(data[key])
            result[key + '_TOTAL'] = result[key + '_TOTAL'] + data[key]
    result['output'] = '\n\n'.join(outputs)
    return result
def do_test(app_url, concurrent, seconds=20):
    """Fan the same load-test job out to every test server in parallel.

    Each server hammers *app_url* with *concurrent* connections for
    *seconds*; returns the merged result dict (see merge_test).
    """
    data = {
        'url': app_url,
        'concurrent': concurrent,
        'seconds': seconds,
    }
    # Give the remote test servers some slack beyond the test duration.
    timeout = seconds + 10
    results = []
    threads = []
    for host in TEST_SERVER_HOSTS:
        port = TEST_SERVER_PORT
        # %-template reads `host` and `port` from the local namespace.
        test_req_url = TEST_REQ_TMPL % locals()
        t = threading.Thread(target=test_request, args=(results, test_req_url, data, timeout))
        t.start()
        threads.append(t)
    [t.join() for t in threads]
    return merge_test(results)
def gen_server_results(cmd_tmpl, port, app_url):
    """Generator: start the app server at each process count and yield one
    result dict per (processes, concurrent) combination.

    Yields a sentinel result with concurrent == -1 when the freshly
    started server never answers the ping. The spawned server tree is
    killed after each process-count round.
    """
    for processes in PROCESSES_LST:
        # %-template reads `port` and `processes` from the local namespace.
        cmd = cmd_tmpl % locals()
        print 'Server:', cmd
        p = subprocess.Popen(cmd, shell=True,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        # Give the server a moment to bind its port.
        time.sleep(0.5)
        if not ping(app_url):
            yield {
                'processes': processes,
                'concurrent': -1,
                'output': 'PingError'
            }
            kill_proc_tree(p.pid)
            continue
        for concurrent in CONCURRENTS:
            result = do_test(app_url, concurrent, seconds=SECONDS)
            result['processes'] = processes
            # Total load is per-server concurrency times server count.
            result['concurrent'] = concurrent * len(TEST_SERVER_HOSTS)
            yield result
        kill_proc_tree(p.pid)
        time.sleep(3)
def main():
    """Run every configured benchmark, print progress, and dump SUMMARY to
    results/<timestamp>_summary.json."""
    def cmp_res(a, b):
        # Sort by concurrent, then processes.
        # NOTE(review): never returns 0 for equal items (final branch is
        # `p1 <= p2`), which list.sort tolerates but a true cmp would not.
        c1, c2 = a['concurrent'], b['concurrent']
        if c1 > c2: return 1
        if c1 < c2: return -1
        p1, p2 = a['processes'], b['processes']
        if p1 > p2: return 1
        if p1 <= p2: return -1
    for info in SUMMARY['tests']:
        cmd_tmpl = info['cmd_tmpl']
        port = info['port']
        ip = APP_SERVER_IP
        # %-template reads `ip` and `port` from the local namespace.
        app_url = APP_SERVER_PATH_TMPL % locals()
        results = info['results']
        print 'Section:', info['app'], app_url
        print time_now()
        print '=================='
        for result in gen_server_results(cmd_tmpl, port, app_url):
            print 'section: {0}, processes: {1}, concurrent: {2}'.format(info['app'], result['processes'], result['concurrent'])
            # Raw output is printed but stripped from the stored summary.
            output = result.pop('output')
            print '--------------------'
            print output
            print '--------------------'
            print time_now(), info['app']
            print '----------------------------------------\n'
            results.append(result)
        results.sort(cmp=cmp_res)
        print '======================================================\n\n'
    with open(os.path.join('results', '{0}_summary.json'.format(time_now())), 'w') as f:
        f.write(json.dumps(SUMMARY, indent=4))
|
main.py | """
The entry point to the Graphical user user interface for the capser project
trial software.
Author: Jonah Yolles-Murphy
...
FIXME: talk about the project architecture here.
...
"""
# import normal packages
import threading
import sys
#import threads
import user_interface as ui
from user_interface import *
import radio_communications as radio
import trial_interpreter as ti
#FIXME: import debug_...
#initialize the message_que for communitcation
#que is a shortening of the word queue
# Shared message queue for inter-thread communication ("que" = queue).
message_que = [] # might become a dqueue, depends
#### define the task threads ####
# each thread is a daemon so it terminates when this file exits
threads = []
# Every worker loop receives the same shared queue.
args = (message_que,)
ui_thread = threading.Thread(target=ui.loop, args=args, daemon=True)
threads.append(ui_thread)
radio_thread = threading.Thread(target=radio.loop, args=args, daemon=True)
threads.append(radio_thread)
ti_thread = threading.Thread(target=ti.loop, args=args, daemon=True)
threads.append(ti_thread)
### define main task threads resources and loop ###
#this is pretending to be a loop
def loop(que):
    """Monitor the message que for commands addressed to 'main'.

    que -- shared list of (address, return_address, args) messages.
    Only the 'main.exit' command is handled for now; it terminates the
    process (the worker threads are daemons, so they die with it).
    """
    import time  # local import: this module does not import time at top level
    while True:
        # Only act when the head message is addressed to main.
        if len(que) and que[0][0].startswith('main'):
            # fetch the message from the top of the que
            addr, retaddr, args = que.pop(0)
            # parse the address into the command path after the 'main' prefix
            cmd = addr.split('.')[1:]
            # `cmd and` guards against a bare 'main' address with no command
            # part, which previously raised IndexError.
            if cmd and cmd[0] == 'exit':
                exit() #b/c the threads are daemons they will die along with this one
        else:
            # Sleep briefly so an empty queue does not busy-spin a CPU core.
            time.sleep(0.01)
# If this is run as the main file, start all worker threads and enter the
# UI event loop on the main thread.
if __name__ == '__main__':
    ui_thread.start()
    radio_thread.start()
    ti_thread.start()
    # The monitor loop also runs as a daemon so the process can exit cleanly.
    threading.Thread(target=loop, args=args, daemon=True).start()
    # Hand the main thread over to the UI event loop (blocks until exit).
    ui.qt_loop()
|
tts.py |
import threading
import subprocess
import os
def say(text):
    """Speak *text* asynchronously: fire-and-forget worker thread that
    invokes espeak (see run_say)."""
    worker = threading.Thread(target=run_say, args=(text,))
    worker.start()
def run_say(text):
    """Blockingly speak *text* through the espeak CLI, discarding output.

    subprocess.DEVNULL replaces the previous open(os.devnull, 'w'), whose
    file handle was never closed and leaked one descriptor per call.
    """
    subprocess.call(["espeak", text], stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
tumblr.py | #!/usr/bin/env python2
# vim: set fileencoding=utf8
from __future__ import unicode_literals
import os
import sys
import re
import json
import collections
import multiprocessing
import requests
requests.packages.urllib3.disable_warnings()
import argparse
import random
import time
import select
import signal
API_KEY = 'fuiKNFp9vQFvjLNvx4sUwti4Yb5yGutBN4Xh10LXZhhRKjWlV4'
PID_PATH = '/tmp/tumblr.py.pid'
# statistic parameters
NET_ERRORS = multiprocessing.Value('i', 0)
UNCOMPLETION = multiprocessing.Value('i', 0)
DOWNLOAD_ERRORS = multiprocessing.Value('i', 0)
DOWNLOADS = multiprocessing.Value('i', 0)
COMPLETION = multiprocessing.Value('i', 0)
OFFSET = multiprocessing.Value('i', 0)
############################################################
# wget exit status
wget_es = {
0: "No problems occurred.",
2: "User interference.",
1<<8: "Generic error code.",
2<<8: "Parse error - for instance, when parsing command-line " \
"optio.wgetrc or .netrc...",
3<<8: "File I/O error.",
4<<8: "Network failure.",
5<<8: "SSL verification failure.",
6<<8: "Username/password authentication failure.",
7<<8: "Protocol errors.",
8<<8: "Server issued an error response."
}
############################################################
s = '\x1b[%d;%dm%s\x1b[0m' # terminual color template
headers = {
"Accept":"text/html,application/xhtml+xml,application/xml; " \
"q=0.9,image/webp,*/*;q=0.8",
"Accept-Encoding":"text/html",
"Accept-Language":"en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4,zh-TW;q=0.2",
"Content-Type":"application/x-www-form-urlencoded",
"Referer":"https://api.tumblr.com/console//calls/blog/posts",
"User-Agent":"Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.36 " \
"(KHTML, like Gecko) Chrome/32.0.1700.77 Safari/537.36"
}
ss = requests.session()
ss.headers.update(headers)
class Error(Exception):
    """Application-level error whose str() is the supplied message."""
    def __init__(self, msg):
        self.msg = msg
    def __str__(self):
        return self.msg
def reset_statistic_params():
    """Zero all shared multiprocessing statistic counters before a new run."""
    NET_ERRORS.value = 0
    UNCOMPLETION.value = 0
    DOWNLOAD_ERRORS.value = 0
    DOWNLOADS.value = 0
    COMPLETION.value = 0
    OFFSET.value = 0
def play(urls, args):
    """Stream media items from each blog URL page by page until a page
    yields no items. Tumblr is defined elsewhere in this module."""
    for url in urls:
        tumblr = Tumblr(args, url)
        while True:
            items = tumblr.get_item_generator()
            if not items:
                break
            play_do(items, args.quiet)
def play_do(items, quiet):
    """Play each item's URL with mpv.

    After each item, waits 1 second for any input on stdin; pressing a key
    (Enter) exits the program, otherwise playback continues.
    """
    for item in items:
        # Random bright terminal colour for the URL line.
        num = random.randint(0, 7) % 8
        col = s % (2, num + 90, item['durl'])
        print ' ++ play:', col
        quiet = ' --really-quiet' if quiet else ''
        cmd = 'mpv%s --no-ytdl --cache-default 20480 --cache-secs 120 ' \
            '--http-header-fields "User-Agent:%s" ' \
            '"%s"' \
            % (quiet, headers['User-Agent'], item['durl'])
        os.system(cmd)
        timeout = 1
        # Poll stdin: any pending input means the user wants to stop.
        ii, _, _ = select.select([sys.stdin], [], [], timeout)
        if ii:
            sys.exit(0)
        else:
            pass
def remove_downloaded_items(items):
    """Drop items whose target file already exists on disk.

    Rotates the deque once: each item is popped from the right and pushed
    back on the left only if its file is missing, so the relative order of
    the remaining items is preserved.
    """
    N = len(items)
    for i in range(N):
        item = items.pop()
        filepath = os.path.join(item['dir_'], item['subdir'], item['filename'])
        if not os.path.exists(filepath):
            items.appendleft(item)
def download_run(item):
    """Download one item with wget into '<filepath>.tmp'.

    Returns (wget_exit_status, filepath); see the module-level wget_es map
    for the meaning of non-zero statuses. The .tmp suffix lets callback()
    promote only fully downloaded files.
    """
    filepath = os.path.join(item['dir_'], item['subdir'], item['filename'])
    # if os.path.exists(filepath):
    #     return None
    # num = random.randint(0, 7) % 8
    # col = s % (1, num + 90, filepath)
    # print ' ++ download: %s' % col
    cmd = ' '.join([
        'wget', '-c', '-q', '-T', '10',
        '-O', '"%s.tmp"' % filepath,
        '--user-agent', '"%s"' % headers['User-Agent'],
        '"%s"' % item['durl'].replace('http:', 'https:')
    ])
    status = os.system(cmd)
    return status, filepath
def callback(filepath):
    """Promote a completed '<filepath>.tmp' download to its final name."""
    os.rename('%s.tmp' % filepath, filepath)
class Downloader(multiprocessing.Process):
    """Daemon worker process that pulls download jobs from a shared queue.

    A falsy queue item (e.g. None) acts as a poison pill and stops the
    worker. Statistic counters are shared Values guarded by *lock*.
    """
    def __init__(self, queue, lock):
        super(Downloader, self).__init__()
        self.queue = queue  # queue of item dicts (see download_run)
        self.daemon = True  # dies with the parent process
        self.lock = lock    # guards the shared statistic counters

    def run(self):
        while True:
            item = self.queue.get()
            self.queue.task_done()
            if not item:
                break  # poison pill: shut this worker down
            status = download_run(item)
            if not status: # file was downloaded.
                # NOTE(review): only reachable if download_run's disabled
                # early-return (returning None) is re-enabled.
                continue
            status, filepath = status
            if status != 0:
                # print s % (1, 93, '[Error %s] at wget' % status), wget_es[status]
                self.lock.acquire()
                UNCOMPLETION.value += 1
                DOWNLOAD_ERRORS.value += 1
                self.lock.release()
            else:
                self.lock.acquire()
                DOWNLOADS.value += 1
                self.lock.release()
                # Promote the .tmp file only on a clean wget exit.
                callback(filepath)
class TumblrAPI(object):
    def _request(self, base_hostname, target, type, params):
        """Issue one Tumblr v2 blog API GET and return its 'response' field.

        Retries forever (5s backoff) on any network/JSON error, counting
        each failure in NET_ERRORS; only Ctrl-C breaks out. Raises Error
        when the API meta message is not 'OK'. Note: mutates *params* by
        injecting the api_key, so it must be a dict (not None).
        """
        api_url = '/'.join(['https://api.tumblr.com/v2/blog',
                            base_hostname, target, type])
        params['api_key'] = API_KEY
        while True:
            try:
                res = ss.get(api_url, params=params, timeout=10)
                json_data = res.json()
                break
            except KeyboardInterrupt:
                sys.exit()
            except Exception as e:
                NET_ERRORS.value += 1 # count errors
                # print s % (1, 93, '[Error at requests]:'), e
                time.sleep(5)
        if json_data['meta']['msg'].lower() != 'ok':
            raise Error(s % (1, 91, json_data['meta']['msg']))
        return json_data['response']
def _info(self, base_hostname):
return self._request(base_hostname, 'info', '', None)
    def _photo(self, base_hostname, offset='', tag='', post_id='', to_items=True):
        """Fetch photo posts from a blog.

        Returns a deque of download-item dicts (one per photo, filenames
        '<post_id>_<index>.<ext>'), or the raw API response when
        to_items is False.
        """
        def make_items(raw_data):
            items = collections.deque()
            for i in raw_data['posts']:
                # Per-post photo counter used in the filename.
                index = 1
                if i.get('photos'):
                    for ii in i['photos']:
                        durl = ii['original_size']['url'].replace('http:', 'https:')
                        filename = os.path.join(
                            '%s_%s.%s' % (i['id'], index, durl.split('.')[-1]))
                        t = {
                            'durl': durl,
                            'filename': filename,
                            'key': i['timestamp'],
                            'subdir': 'photos',
                        }
                        index += 1
                        items.append(t)
            return items
        params = {
            'offset': offset,
            # Tag queries page by timestamp ('before') instead of offset.
            'before': offset if tag else '',
            'tag': tag,
            'id': post_id,
            'limit': 20 if not tag and not post_id else '',
            'filter': 'text'
        }
        raw_data = self._request(base_hostname, 'posts', 'photo', params)
        if to_items:
            return make_items(raw_data)
        else:
            return raw_data
def _audio(self, base_hostname, offset='', tag='', post_id='', to_items=True):
def make_items(raw_data):
items = collections.deque()
for i in raw_data['posts']:
durl = i['audio_url'].replace('http:', 'https:')
filename = os.path.join(
'%s_%s.%s' % (i['id'], i['track_name'], durl.split('.')[-1]))
t = {
'durl': durl,
'filename': filename,
'timestamp': i['timestamp'] if tag else '',
'subdir': 'audios'
}
items.append(t)
return items
params = {
'offset': offset,
'before': offset if tag else '',
'tag': tag,
'id': post_id,
'limit': 20 if not tag and not post_id else '',
'filter': 'text'
}
raw_data = self._request(base_hostname, 'posts', 'audio', params)
if to_items:
return make_items(raw_data)
else:
return raw_data
def _video(self, base_hostname, offset='', tag='', post_id='', to_items=True):
def make_items(raw_data):
items = collections.deque()
for i in raw_data['posts']:
if not i.get('video_url'):
continue
durl = i['video_url'].replace('http:', 'https:')
filename = os.path.join(
'%s.%s' % (i['id'], durl.split('.')[-1]))
t = {
'durl': durl,
'filename': filename,
'timestamp': i['timestamp'] if tag else '',
'subdir': 'videos'
}
items.append(t)
return items
params = {
'offset': offset,
'before': offset if tag else '',
'tag': tag,
'id': post_id,
'limit': 20 if not tag and not post_id else '',
'filter': 'text'
}
raw_data = self._request(base_hostname, 'posts', 'video', params)
if to_items:
return make_items(raw_data)
else:
return raw_data
class Tumblr(TumblrAPI):
def __init__(self, args, url):
self.args = args
self.offset = self.args.offset
self.make_items = self.parse_urls(url)
def save_json(self):
with open(self.json_path, 'w') as g:
g.write(json.dumps(
{'offset': self.offset}, indent=4, sort_keys=True))
def init_infos(self, base_hostname, target_type, tag=''):
self.infos = {'host': base_hostname}
if not tag:
dir_ = os.path.join(os.getcwd(), self.infos['host'])
json_path = os.path.join(dir_, 'json.json')
if not os.path.exists(dir_):
if not self.args.play:
os.makedirs(dir_)
else:
if os.path.exists(json_path):
self.offset = json.load(open(json_path))['offset'] - 60 \
if not self.args.update else self.args.offset
if self.offset < 0: self.offset = 0
else:
dir_ = os.path.join(os.getcwd(), 'tumblr-%s' % tag)
json_path = os.path.join(dir_, 'json.json')
if not os.path.exists(dir_):
if not self.args.play:
os.makedirs(dir_)
self.offset = int(time.time())
else:
if os.path.exists(json_path):
self.offset = json.load(open(json_path))['offset'] \
if not self.args.update else int(time.time())
self.infos['dir_'] = dir_
self.json_path = json_path
subdir = os.path.join(dir_, target_type)
if not os.path.exists(subdir) and not self.args.play:
os.makedirs(subdir)
if not self.args.play:
for fl in os.listdir(subdir):
if not fl.endswith('.tmp'):
COMPLETION.value += 1
else:
UNCOMPLETION.value += 1
if self.args.offset:
self.offset = self.args.offset
print s % (1, 92, '## begin:'), 'offset = %s,' % self.offset, base_hostname
print s % (1, 97, 'INFO:\n') + \
'D = Downloads, R = Repair_Need\n' + \
'C = Completion, NE = Net_Errors, O = Offset'
def download_photos_by_offset(self, base_hostname, post_id):
self.init_infos(base_hostname, 'photos')
def do():
items = self._photo(
base_hostname, offset=self.offset if not post_id else '', post_id=post_id)
if not items:
return []
self.offset += 20
self.save_json()
return items
return do
def download_photos_by_tag(self, base_hostname, tag):
self.init_infos(base_hostname, 'photos', tag=tag)
def do():
items = self._photo(base_hostname, tag=tag, before=self.offset)
if not items:
return []
self.offset = items[-1]['timestamp']
self.save_json()
return items
return do
def download_videos_by_offset(self, base_hostname, post_id):
self.init_infos(base_hostname, 'videos')
def do():
items = self._video(
base_hostname, offset=self.offset, post_id=post_id)
if not items:
return []
self.offset += 20
if not self.args.play:
self.save_json()
return items
return do
def download_videos_by_tag(self, base_hostname, tag):
self.init_infos(base_hostname, 'videos', tag)
def do():
items = self._video(
base_hostname, before=self.offset, tag=tag)
if not items:
return []
self.offset = items[-1]['timestamp']
if not self.args.play:
self.save_json()
return items
return do
def download_audios_by_offset(self, base_hostname, post_id):
self.init_infos(base_hostname, 'audios')
def do():
items = self._audio(
base_hostname, offset=self.offset if not post_id else '', post_id=post_id)
if not items:
return []
self.offset += 20
if not self.args.play:
self.save_json()
return items
return do
def download_audios_by_tag(self, base_hostname, tag):
self.init_infos(base_hostname, 'audios', tag)
def do():
items = self._audio(
base_hostname, before=self.offset, tag=tag)
if not self.infos['items']:
return []
self.offset = self.infos['items'][-1]['timestamp']
if not self.args.play:
self.save_json()
return items
return do
def download_photos(self, base_hostname, post_id='', tag=''):
if tag:
return self.download_photos_by_tag(base_hostname, tag)
else:
return self.download_photos_by_offset(base_hostname, post_id=post_id)
def download_videos(self, base_hostname, post_id='', tag=''):
if tag:
return self.download_videos_by_tag(base_hostname, tag)
else:
return self.download_videos_by_offset(base_hostname, post_id=post_id)
def download_audios(self, base_hostname, post_id='', tag=''):
if tag:
return self.download_audios_by_tag(base_hostname, tag)
else:
return self.download_audios_by_offset(base_hostname, post_id=post_id)
def fix_photos(self, base_hostname):
self.init_infos(base_hostname, 'photos')
t = os.listdir(os.path.join(self.infos['dir_'], 'photos'))
t = [i[:i.find('_')] for i in t if i.endswith('.tmp')]
self.post_ids = list(set(t))
def do():
if len(self.post_ids):
post_id = self.post_ids.pop()
return self._photo(base_hostname, post_id=post_id)
else:
return []
return do
def parse_urls(self, url):
_mod = re.search(r'(http://|https://|)(?P<hostname>.+\.tumblr.com)', url)
if not _mod:
print s % (1, 91, '[Error]:'), 'url is illegal.', '\n' + url.decode('utf8', 'ignore')
return lambda: []
base_hostname = _mod.group('hostname')
if self.args.check:
return self.fix_photos(base_hostname)
if re.search(r'post/(\d+)', url):
post_id = re.search(r'post/(\d+)', url).group(1)
else:
post_id = ''
if self.args.video:
return self.download_videos(base_hostname, post_id=post_id, tag=self.args.tag)
elif self.args.audio:
return self.download_audios(base_hostname, post_id=post_id, tag=self.args.tag)
else:
return self.download_photos(base_hostname, post_id=post_id, tag=self.args.tag)
def get_item_generator(self):
OFFSET.value = self.offset
items = self.make_items()
for item in items:
item['dir_'] = self.infos['dir_']
return items
def args_handler(argv):
    """Parse the command line.

    Returns ``(args, xxx)`` where ``xxx`` is the list of positional targets
    (blog URLs). ``--redownload`` implies ``--update``.
    """
    parser = argparse.ArgumentParser(
        description='download from tumblr.com')
    parser.add_argument('xxx', type=str, nargs='*', help='命令对象.')
    parser.add_argument('-p', '--processes', action='store', type=int, default=10,
                        help='指定多进程数,默认为10个,最多为20个 eg: -p 20')
    parser.add_argument('-f', '--offset', action='store', type=int, default=0,
                        help='offset')
    parser.add_argument('-q', '--quiet', action='store_true',
                        help='quiet')
    parser.add_argument('-c', '--check', action='store_true',
                        help='尝试修复未下载成功的图片')
    parser.add_argument('-P', '--play', action='store_true',
                        help='play with mpv')
    parser.add_argument('-V', '--video', action='store_true',
                        help='download videos')
    parser.add_argument('-A', '--audio', action='store_true',
                        help='download audios')
    parser.add_argument('-t', '--tag', action='store',
                        default=None, type=str,
                        help='下载特定tag的图片, eg: -t beautiful')
    parser.add_argument('--update', action='store_true',
                        help='update new things')
    parser.add_argument('--redownload', action='store_true',
                        help='redownload all things')
    args = parser.parse_args(argv[1:])
    if args.redownload:
        args.update = True
    return args, args.xxx
def print_msg(check):
    """Render a one-line status bar on stdout every 2 seconds, forever.

    Reads the shared multiprocessing counters (DOWNLOADS, UNCOMPLETION,
    DOWNLOAD_ERRORS, COMPLETION, NET_ERRORS, OFFSET) and the ANSI colour
    formatter ``s``. Runs as a daemon process; never returns.
    """
    time.sleep(2)  # initial interval
    while True:
        if check:
            repair = UNCOMPLETION.value - DOWNLOAD_ERRORS.value - DOWNLOADS.value
        else:
            repair = UNCOMPLETION.value
        line = "\r%s, %s, %s, %s, %s " % (
            'D: ' + s % (1, 92, DOWNLOADS.value),
            'R: ' + s % (1, 93, repair),
            'C: ' + s % (1, 97, COMPLETION.value + DOWNLOADS.value),
            'NE: ' + s % (1, 91, NET_ERRORS.value),
            'O: %s' % OFFSET.value,
        )
        sys.stdout.write(line)
        sys.stdout.flush()
        time.sleep(2)
def sighandler(signum, frame):
    """Terminate the process cleanly when any fatal signal is received."""
    # signum/frame are required by the signal API but intentionally unused.
    raise SystemExit
def handle_signal():
    """Route fatal signals to sighandler(); restore default SIGPIPE.

    SIGPIPE is reset to SIG_DFL to avoid IOError on broken pipes; see
    http://stackoverflow.com/questions/14207708/ioerror-errno-32-broken-pipe-python
    """
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
    fatal_signals = (
        signal.SIGBUS, signal.SIGHUP, signal.SIGQUIT, signal.SIGSYS,
        signal.SIGABRT, signal.SIGFPE, signal.SIGILL, signal.SIGINT,
        signal.SIGSEGV, signal.SIGTERM,
    )
    for sig in fatal_signals:
        signal.signal(sig, sighandler)
def main(argv):
    """Entry point: spawn download workers, then feed them items per URL.

    Relies on helpers defined elsewhere in this script: play(),
    reset_statistic_params(), remove_downloaded_items(), the shared
    counters and the ANSI formatter ``s``. (Python 2 print syntax.)
    """
    handle_signal()
    args, xxx = args_handler(argv)
    if args.play:
        play(xxx, args)
    lock = multiprocessing.Lock()
    # Bounded queue: the producer blocks instead of buffering everything.
    queue = multiprocessing.JoinableQueue(maxsize=args.processes)
    thrs = []
    for i in range(args.processes):
        thr = Downloader(queue, lock)
        thr.start()
        thrs.append(thr)
    # massage thread
    msg_thr = multiprocessing.Process(target=print_msg, args=(args.check,))
    msg_thr.daemon = True
    msg_thr.start()
    for url in xxx:
        reset_statistic_params()
        tumblr = Tumblr(args, url)
        not_add = 0
        while True:
            items = tumblr.get_item_generator()
            if not items:
                break
            # Check the downloaded items.
            # It will be exited, if there is no new item to download
            # in 5 loops, unless with --redownload
            remove_downloaded_items(items)
            if not args.redownload:
                if not items:
                    not_add += 1
                    if not_add > 5:
                        print s % (1, 93, '\n[Warning]:'), \
                            'There is nothing new to download in 5 loops.\n', \
                            'If you want to scan all resources, using --redownload\n' \
                            'or running the script again to next 5 loops.'
                        break
                    continue
                else:
                    not_add = 0
            for item in items:
                queue.put(item)
        # Poll until all dispatched work has been taken off the queue.
        while not queue.empty():
            time.sleep(2)
    # One None sentinel per worker triggers a clean shutdown.
    for i in range(args.processes):
        queue.put(None)
    queue.join()
    for thr in thrs:
        thr.join()
    msg_thr.terminate()
if __name__ == '__main__':
    # Script entry point.
    main(sys.argv)
|
TAsyncioServerTest.py | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import functools
import socket
import threading
import unittest
from contextlib import contextmanager
from unittest.mock import Mock
from thrift.protocol.THeaderProtocol import THeaderProtocol
from thrift.server.TAsyncioServer import (
ThriftAsyncServerFactory,
ThriftClientProtocolFactory,
ThriftHeaderClientProtocol,
ThriftHeaderServerProtocol,
)
from thrift.Thrift import TApplicationException
from thrift.transport.TTransport import TTransportException
from thrift.util.asyncio import create_client
from thrift_asyncio.sleep import Sleep
from thrift_asyncio.tutorial import Calculator
from .handler import (
AsyncCalculatorHandler,
AsyncSleepHandler,
)
def server_loop_runner(loop, sock, handler, protocol_factory=None):
    """Synchronously create a thrift asyncio server bound to ``sock``.

    Returns the server object produced by ThriftAsyncServerFactory.
    """
    factory_coro = ThriftAsyncServerFactory(
        handler,
        port=None,
        loop=loop,
        sock=sock,
        protocol_factory=protocol_factory,
    )
    return loop.run_until_complete(factory_coro)
async def test_server_with_client(sock, loop, factory=ThriftClientProtocolFactory):
    """Connect a Calculator client to the server on ``sock`` and call add(1, 2).

    Returns the result so callers can assert on it.
    """
    port = sock.getsockname()[1]
    (transport, protocol) = await loop.create_connection(
        factory(Calculator.Client, loop=loop),
        host="localhost",
        port=port,
    )
    client = protocol.client
    # BUG FIX: asyncio.wait_for() lost its ``loop`` parameter in Python 3.10;
    # the running loop is used automatically.
    add_result = await asyncio.wait_for(
        client.add(1, 2),
        timeout=None,
    )
    transport.close()
    protocol.close()
    return add_result
async def test_echo_timeout(sock, loop, factory=ThriftClientProtocolFactory):
    """Trigger a client-side timeout: 1s budget vs a 30s server delay."""
    port = sock.getsockname()[1]
    (transport, protocol) = await loop.create_connection(
        factory(Sleep.Client, loop=loop, timeouts={"echo": 1}),
        host="localhost",
        port=port,
    )
    client = protocol.client
    # Ask the server to delay for 30 seconds.
    # However, we told the client factory above to use a 1 second timeout
    # for the echo() function.
    # BUG FIX: asyncio.wait_for() lost its ``loop`` parameter in Python 3.10.
    await asyncio.wait_for(
        client.echo("test", 30),
        timeout=None,
    )
    transport.close()
    protocol.close()
async def test_overflow(sock, value, loop, factory=ThriftClientProtocolFactory):
    """Call Sleep.overflow(value); out-of-range bytes raise server-side."""
    port = sock.getsockname()[1]
    (transport, protocol) = await loop.create_connection(
        factory(Sleep.Client, loop=loop, timeouts={"echo": 1}),
        host="localhost",
        port=port,
    )
    client = protocol.client
    # BUG FIX: asyncio.wait_for() lost its ``loop`` parameter in Python 3.10.
    await asyncio.wait_for(
        client.overflow(value),
        timeout=None,
    )
    transport.close()
    protocol.close()
class TestTHeaderProtocol(THeaderProtocol):
    """THeaderProtocol that notifies ``probe`` on each readMessageBegin()."""

    def __init__(self, probe, *args, **kwargs):
        super(TestTHeaderProtocol, self).__init__(*args, **kwargs)
        self.probe = probe

    def readMessageBegin(self):
        # Tell the probe we were invoked, then defer to the real protocol.
        self.probe.touch()
        return super(TestTHeaderProtocol, self).readMessageBegin()
class TestTHeaderProtocolFactory(object):
    """Factory producing TestTHeaderProtocol instances wired to ``probe``."""

    def __init__(self, probe, *args, **kwargs):
        self.probe = probe
        self.args = args
        self.kwargs = kwargs

    def getProtocol(self, trans):
        """Build a probe-aware protocol around ``trans``."""
        return TestTHeaderProtocol(self.probe, trans, *self.args, **self.kwargs)
class TestThriftClientProtocol(ThriftHeaderClientProtocol):
    """Client protocol whose header-protocol factory reports to ``probe``."""

    THEADER_PROTOCOL_FACTORY = None

    def __init__(self, probe, *args, **kwargs):
        ThriftHeaderClientProtocol.__init__(self, *args, **kwargs)

        def make_factory(*factory_args, **factory_kwargs):
            # Close over ``probe`` so every protocol produced can touch it.
            return TestTHeaderProtocolFactory(probe, *factory_args, **factory_kwargs)

        self.THEADER_PROTOCOL_FACTORY = make_factory
class TAsyncioServerTest(unittest.TestCase):
    """Integration tests for TAsyncioServer client/server protocols."""

    def test_THEADER_PROTOCOL_FACTORY_readMessageBegin(self):
        """The custom header-protocol factory is used when reading messages."""
        loop = asyncio.get_event_loop()
        loop.set_debug(True)
        sock = socket.socket()
        server_loop_runner(loop, sock, AsyncCalculatorHandler())

        class Probe(object):
            def __init__(self):
                self.touched = False

            def touch(self):
                self.touched = True

        probe = Probe()

        def factory(*args, **kwargs):
            return functools.partial(
                TestThriftClientProtocol,
                probe,
                *args,
                **kwargs,
            )

        add_result = loop.run_until_complete(
            test_server_with_client(
                sock,
                loop,
                factory=factory,
            )
        )
        self.assertTrue(probe.touched)
        self.assertEqual(42, add_result)

    def test_read_error(self):
        """Test the behavior if readMessageBegin() throws an exception"""
        loop = asyncio.get_event_loop()
        loop.set_debug(True)
        sock = socket.socket()
        server_loop_runner(loop, sock, AsyncCalculatorHandler())

        # A helper Probe class that will raise an exception when
        # it is invoked by readMessageBegin()
        class Probe(object):
            def touch(self):
                raise TTransportException(
                    TTransportException.INVALID_TRANSFORM, "oh noes"
                )

        probe = Probe()

        def factory(*args, **kwargs):
            return functools.partial(
                TestThriftClientProtocol,
                probe,
                *args,
                **kwargs,
            )

        try:
            add_result = loop.run_until_complete(
                test_server_with_client(
                    sock,
                    loop,
                    factory=factory,
                )
            )
            self.fail(
                "expected client method to throw; instead returned %r" % (add_result,)
            )
        except TTransportException as ex:
            self.assertEqual(str(ex), "oh noes")
            self.assertEqual(ex.type, TTransportException.INVALID_TRANSFORM)

    def _test_using_event_loop(self, loop):
        """Run one full add() round-trip on ``loop`` and check the result."""
        sock = socket.socket()
        server_loop_runner(loop, sock, AsyncCalculatorHandler())
        add_result = loop.run_until_complete(test_server_with_client(sock, loop))
        self.assertEqual(42, add_result)

    def test_default_event_loop(self):
        loop = asyncio.get_event_loop()
        loop.set_debug(True)
        self._test_using_event_loop(loop)

    def test_custom_event_loop(self):
        loop = asyncio.new_event_loop()
        loop.set_debug(True)
        self.assertIsNot(loop, asyncio.get_event_loop())
        self._test_using_event_loop(loop)

    def _start_server_thread(self, server, loop):
        """Run ``server`` on ``loop`` in a background thread; return the thread."""
        def _run(server, loop):
            loop.run_until_complete(server.wait_closed())
        t = threading.Thread(target=functools.partial(_run, server, loop))
        t.start()
        return t

    def test_server_in_separate_thread(self):
        sock = socket.socket()
        server_loop = asyncio.new_event_loop()
        server_loop.set_debug(True)
        server = server_loop_runner(server_loop, sock, AsyncCalculatorHandler())
        server_thread = self._start_server_thread(server, server_loop)
        client_loop = asyncio.new_event_loop()
        client_loop.set_debug(True)
        add_result = client_loop.run_until_complete(
            test_server_with_client(sock, client_loop),
        )
        self.assertEqual(42, add_result)
        server_loop.call_soon_threadsafe(server.close)
        server_thread.join()

    async def _make_out_of_order_calls(self, sock, loop):
        """Echo with decreasing delays; results must arrive in delay order."""
        port = sock.getsockname()[1]
        client_manager = await create_client(
            Sleep.Client,
            host="localhost",
            port=port,
            loop=loop,
        )
        with client_manager as client:
            futures = [client.echo(str(delay), delay * 0.1) for delay in [3, 2, 1]]
            results_in_arrival_order = []
            # BUG FIX: asyncio.as_completed() lost its ``loop`` parameter in
            # Python 3.10; the running loop is picked up automatically.
            for f in asyncio.as_completed(futures):
                result = await f
                results_in_arrival_order.append(result)
            # BUG FIX: assertEquals is a long-deprecated alias of assertEqual.
            self.assertEqual(["1", "2", "3"], results_in_arrival_order)

    @contextmanager
    def server_in_background_thread(self, sock):
        """Context manager: a Sleep-handler server on its own loop/thread."""
        server_loop = asyncio.new_event_loop()
        server_loop.set_debug(True)
        handler = AsyncSleepHandler(server_loop)
        server = server_loop_runner(server_loop, sock, handler)
        server_thread = self._start_server_thread(server, server_loop)
        try:
            yield server
        finally:
            server_loop.call_soon_threadsafe(server.close)
            server_thread.join()

    def test_out_of_order_calls(self):
        sock = socket.socket()
        with self.server_in_background_thread(sock):
            client_loop = asyncio.new_event_loop()
            client_loop.set_debug(True)
            client_loop.run_until_complete(
                self._make_out_of_order_calls(sock, client_loop),
            )

    async def _assert_transport_is_closed_on_error(self, sock, loop):
        """Raising inside the client context must close its transport."""
        port = sock.getsockname()[1]
        client_manager = await create_client(
            Sleep.Client,
            host="localhost",
            port=port,
            loop=loop,
        )
        try:
            with client_manager as client:
                raise Exception("expected exception from test")
        except Exception:
            self.assertFalse(client._oprot.trans.isOpen())

    def test_close_client_on_error(self):
        sock = socket.socket()
        with self.server_in_background_thread(sock):
            loop = asyncio.new_event_loop()
            loop.set_debug(True)
            loop.run_until_complete(
                self._assert_transport_is_closed_on_error(sock, loop),
            )

    def test_overflow_failure(self):
        loop = asyncio.get_event_loop()
        loop.set_debug(True)
        sock = socket.socket()
        server_loop_runner(loop, sock, AsyncSleepHandler(loop))
        with self.assertRaises(TTransportException, msg="Connection closed"):
            # This will raise an exception on the server. The
            # OverflowResult.value is byte and 0xffff will result in exception
            #
            # struct.error('byte format requires -128 <= number <= 127',)
            loop.run_until_complete(test_overflow(sock, 0xFFFF, loop))

    def test_overflow_success(self):
        loop = asyncio.get_event_loop()
        loop.set_debug(True)
        sock = socket.socket()
        server_loop_runner(loop, sock, AsyncSleepHandler(loop))
        # This shouldn't raise any exceptions
        loop.run_until_complete(test_overflow(sock, 0x7F, loop))

    def test_timeout(self):
        loop = asyncio.get_event_loop()
        loop.set_debug(True)
        sock = socket.socket()
        server_loop_runner(loop, sock, AsyncSleepHandler(loop))
        with self.assertRaisesRegex(TApplicationException, "Call to echo timed out"):
            loop.run_until_complete(test_echo_timeout(sock, loop))

    def test_custom_protocol_factory(self):
        """A wrapper protocol factory must see connection/data callbacks."""
        loop = asyncio.get_event_loop()
        sock = socket.socket()
        wrapper_protocol = None

        def wrapper_protocol_factory(*args, **kwargs):
            nonlocal wrapper_protocol
            wrapper_protocol = Mock(wraps=ThriftHeaderServerProtocol(*args, **kwargs))
            return wrapper_protocol

        server_loop_runner(
            loop, sock, AsyncCalculatorHandler(), wrapper_protocol_factory
        )
        add_result = loop.run_until_complete(test_server_with_client(sock, loop))
        self.assertEqual(42, add_result)
        wrapper_protocol.connection_made.assert_called_once()
        wrapper_protocol.data_received.assert_called()
|
master.py | """
A instagram user for posting medias.
"""
import os
import shutil
import settings
from instagram.base import BaseInstagram, MediaTypes
from common.tools import LockDir
from common.logger import logger
class MasterInstagram(BaseInstagram):
    """Master instagram user: watches the downloads folder and posts media."""

    def _start(self):
        # threading.Thread(target=self._start_cleaner).start()
        self._start_sharing()

    def _start_cleaner(self):
        # Placeholder: periodic cleanup is not implemented yet.
        pass

    def _start_sharing(self):
        """Poll settings.DOWNLOADS forever, sharing each sub-folder found."""
        os.makedirs(settings.DOWNLOADS, exist_ok=True)
        os.makedirs(settings.SHARED, exist_ok=True)
        logger.info("Listening the downloads folder")
        while self.is_active:
            logger.debug("Checking downloads folder")
            for name in os.listdir(settings.DOWNLOADS):
                folder = os.path.join(settings.DOWNLOADS, name)
                self.share_from_folder(folder)
            logger.debug("Downloads folder check is done.")
            self._wait_with_log("LISTENER_WAIT_TIME")

    def share_from_folder(self, downloads):
        "Shares file in the given folder"
        # NOTE(review): os.chdir() is process-global; this assumes a single
        # sharing thread — confirm before adding concurrency.
        os.chdir(downloads)
        logger.info("New folder %s found.", downloads)
        with LockDir(downloads, wait_until_release=True):
            # Avoid shadowing the ``file`` builtin name.
            to_shared = [fname for fname in os.listdir(".")
                         if MediaTypes.is_known_extension(fname)]
            self.share(to_shared)
            os.chdir(settings.BASE_DIR)
            self.move_to_shared(downloads)

    def share(self, share_list):
        "Shares the given files"
        if not share_list:
            # Empty folder: nothing to do.
            return
        if len(share_list) == 1:
            self._share_single(share_list[0])
        else:
            self._share_carousel(share_list)

    def _share_single(self, filename):
        """Upload one photo or video; log an error for unknown types."""
        media_type = MediaTypes.get_media_type(filename, ignore_error=True)
        if media_type == MediaTypes.PHOTO:
            self.uploadPhoto(filename)
        elif media_type == MediaTypes.VIDEO:
            self.upload_video(filename, settings.DEFAULT_THUMBNAIL)
        else:
            # BUG FIX: corrected "Unkown" typo in the log message.
            logger.error("Unknown media type: %s", filename)

    def _share_carousel(self, carousel_media):
        """Upload several files as one album (photos and videos mixed)."""
        album = []
        for media in carousel_media:
            if MediaTypes.is_type_of(media, MediaTypes.PHOTO):
                type_ = "photo"
            else:
                type_ = "video"
            album.append({
                "file": media,
                "type": type_
            })
        self.uploadAlbum(album)

    @staticmethod
    def move_to_shared(path):
        "Moves the downloaded file under shared folder and renames"
        basename = os.path.basename(path)
        basename = basename.replace("downloaded", "shared")
        shutil.move(path, os.path.join(settings.SHARED, basename))
|
threading.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (unicode_literals, print_function, absolute_import,
division)
import sys
import threading
import time
import Queue
from time import sleep
from itertools import izip_longest
import logging
from qalib.qabase.formatters import human_readable_time_from_seconds as hrts
class Parallel(object):
    """
    A helper class that makes it simpler to run tasks multiple times
    in parallel. If you have multiple tasks you want to run in parallel
    you need to encapsulate them in a single function that accepts a variety
    of arguments.

    NOTE: Python 2 only (Queue, izip_longest, xrange, unicode and the
    three-argument raise form below).
    """
    def __init__(self, funcs, args_list=None, kwargs_list=None, max_workers=5,
                 timeout=3600, run_over_exceptions=False,
                 i_know_what_im_doing=False, output_interval=None):
        """
        :param funcs: A list of functions to be used by the workers
        :param args_list: A list of tuples of arguments required by each
            function in `funcs`
        :param kwargs_list: A list of dictionaries of kwargs accepted
            by each function in `funcs`
        :param max_workers: The maximum number of simultaneous threads
        :param timeout: Seconds each thread join waits during shutdown
        :param run_over_exceptions: Will ignore given exceptions and just keep
            running threads. Don't use this. If you do use it, I will not be
            responsible for the monster/monsters you create.
        :param i_know_what_im_doing: Must also be True for
            run_over_exceptions to take effect (deliberate double opt-in).
        :param output_interval: Int,
            the interval at which status will be output.
        """
        self.logger = logging.getLogger(__name__)
        if not self.logger.handlers:
            self.logger.addHandler(logging.NullHandler())
        self.funcs = funcs
        self.args_list = args_list if args_list else []
        self.kwargs_list = kwargs_list if kwargs_list else []
        self.max_workers = max_workers
        # Work items and captured worker exceptions (sys.exc_info tuples).
        self.queue = Queue.Queue()
        self.exceptions = Queue.Queue()
        self.threads = []
        self.timeout = timeout
        # Workers poll this flag; it is cleared on first failure unless
        # run_over_exceptions was (doubly) enabled.
        self.keep_running = True
        self.run_over_exceptions = run_over_exceptions and i_know_what_im_doing
        self.output_interval = output_interval
    @staticmethod
    def _set_current_thread_name_from_func_name(func):
        """ Renames the current thread to reflect the name of func """
        orig_thread_number = threading.current_thread().name.split('-')[-1]
        threading.current_thread().name = "Parallel-" + \
            func.__module__ + '.' + func.__name__ + "-" + orig_thread_number
    def _wrapped(self):
        # Worker body: drain the queue until it is empty or a failure
        # clears self.keep_running.
        threading.current_thread().name = "Parallel-Worker-" + \
            threading.current_thread().name
        while self.keep_running:
            try:
                func, args, kwargs = self.queue.get(block=False)
            except Queue.Empty:
                break
            self.logger.debug(
                "Running {} with args: {} and kwargs {} with thread {}".format(
                    func, args, kwargs, threading.current_thread()))
            try:
                # Rename this thread to reflect the function we're running
                orig_name = threading.current_thread().name
                self._set_current_thread_name_from_func_name(func)
                # Call the function:
                func(*args, **kwargs)
                # Reset this thread name to its original (e.g. "Thread-9")
                threading.current_thread().name = orig_name
            except Exception:
                # Record the full exc_info so run_threads() can re-raise it
                # with the original traceback.
                self.keep_running = self.run_over_exceptions
                self.logger.exception("Exception occurred in thread {}".format(
                    threading.current_thread()))
                self.exceptions.put(sys.exc_info())
            self.queue.task_done()
    def run_threads(self):
        """
        Call this function to start the worker threads. They will continue
        running until all args/kwargs are consumed. This is a blocking call.
        """
        try:
            # Missing args/kwargs entries are padded with {} by izip_longest.
            for func, args, kwargs in izip_longest(
                    self.funcs, self.args_list, self.kwargs_list,
                    fillvalue={}):
                # Flag a common (and confusing) user error:
                if isinstance(args, str) or isinstance(args, unicode):
                    msg = "args_list must be list of lists not list of strings"
                    raise ValueError(msg)
                self.queue.put((func, args, kwargs))
            for _ in xrange(self.max_workers):
                thread = threading.Thread(target=self._wrapped)
                thread.setDaemon(True)
                thread.start()
                self.threads.append(thread)
            # NOTE(review): this validation runs after the workers have
            # already started consuming the queue.
            if (len(self.funcs) < len(self.args_list) or
                    len(self.funcs) < len(self.kwargs_list)):
                raise ValueError(
                    "List of functions passed into a Parallel object must "
                    "be longer or equal in length to the list of args "
                    "and/or kwargs passed to the object. {}, {}, {"
                    "}".format(self.funcs, self.args_list, self.kwargs_list))
            start_time = time.time()
            last_output_time = start_time
            while self.queue.unfinished_tasks:
                # Check if exception has been generated by a thread and raise
                # if found one is found
                try:
                    exc = self.exceptions.get(block=False)
                    self.keep_running = self.run_over_exceptions
                    raise exc[0], exc[1], exc[2]
                except Queue.Empty:
                    pass
                if self.output_interval is not None:
                    if time.time() - last_output_time > self.output_interval:
                        msg = ("After {} {} functions pending execution of"
                               " {} total".format(
                                   hrts(time.time() - start_time),
                                   self.queue.qsize(),
                                   len(self.funcs)
                               ))
                        self.logger.info(msg)
                        last_output_time = time.time()
                sleep(0.2)
        # Ensure all threads will exit regardless of the current
        # state of the main thread
        finally:
            try:
                exc = self.exceptions.get(block=False)
                self.keep_running = self.run_over_exceptions
                # Join all threads to ensure we don't continue
                # without all threads stopping
                for thread in self.threads:
                    thread.join(self.timeout)
                raise exc[0], exc[1], exc[2]
            except Queue.Empty:
                pass
|
web.py | import click
import tensorflow as tf
from flask import Flask, jsonify, request, render_template
from threading import Thread
from PIL import Image
from six.moves import _thread
from luminoth.tools.checkpoint import get_checkpoint_config
from luminoth.utils.config import get_config, override_config_params
from luminoth.utils.predicting import PredictorNetwork
app = Flask(__name__)
def get_image():
    """Extract the uploaded 'image' file from the current Flask request.

    Raises ValueError when the field is absent/empty; PIL raises OSError
    for undecodable data.
    """
    uploaded = request.files.get('image')
    if not uploaded:
        raise ValueError
    return Image.open(uploaded.stream).convert('RGB')
@app.route('/')
def index():
    """Serve the frontend single-page app (templates/index.html)."""
    return render_template('index.html')
@app.route('/api/<model_name>/predict/', methods=['GET', 'POST'])
def predict(model_name):
    """Run object detection on the POSTed image.

    Optional ``total`` query arg caps the number of returned objects;
    non-integer values are silently ignored.
    """
    if request.method == 'GET':
        return jsonify(error='Use POST method to send image.'), 400
    try:
        image_array = get_image()
    except ValueError:
        return jsonify(error='Missing image'), 400
    except OSError:
        return jsonify(error='Incompatible file type'), 400
    limit = request.args.get('total')
    if limit is not None:
        try:
            limit = int(limit)
        except ValueError:
            limit = None
    # Wait for the model to finish loading before serving predictions.
    NETWORK_START_THREAD.join()
    detections = PREDICTOR_NETWORK.predict_image(image_array)
    # Slicing with None returns the full list.
    return jsonify({'objects': detections[:limit]})
def start_network(config):
    """Instantiate the global PredictorNetwork in the background thread."""
    global PREDICTOR_NETWORK
    try:
        PREDICTOR_NETWORK = PredictorNetwork(config)
    except Exception as exc:
        # Model loading failed; interrupt the whole server so it does not
        # keep serving requests it can never answer.
        tf.logging.error(exc)
        _thread.interrupt_main()
@click.command(help='Start basic web application.')
@click.option('config_files', '--config', '-c', multiple=True, help='Config to use.')  # noqa
@click.option('--checkpoint', help='Checkpoint to use.')
@click.option('override_params', '--override', '-o', multiple=True, help='Override model config params.')  # noqa
@click.option('--host', default='127.0.0.1', help='Hostname to listen on. Set this to "0.0.0.0" to have the server available externally.')  # noqa
@click.option('--port', default=5000, help='Port to listen to.')
@click.option('--debug', is_flag=True, help='Set debug level logging.')
def web(config_files, checkpoint, override_params, host, port, debug):
    """Load a model in a background thread and serve predictions via Flask."""
    if debug:
        tf.logging.set_verbosity(tf.logging.DEBUG)
    else:
        tf.logging.set_verbosity(tf.logging.INFO)
    if checkpoint:
        config = get_checkpoint_config(checkpoint)
    elif config_files:
        config = get_config(config_files)
    else:
        # BUG FIX: corrected "not" -> "nor" in the user-facing message.
        click.echo(
            'Neither checkpoint nor config specified, assuming `accurate`.'
        )
        config = get_checkpoint_config('accurate')
    if override_params:
        config = override_config_params(config, override_params)
    # Bounding boxes will be filtered by frontend (using slider), so we set a
    # low threshold.
    if config.model.type == 'fasterrcnn':
        config.model.rcnn.proposals.min_prob_threshold = 0.01
    elif config.model.type == 'ssd':
        config.model.proposals.min_prob_threshold = 0.01
    else:
        raise ValueError(
            "Model type '{}' not supported".format(config.model.type)
        )
    # Initialize the model asynchronously; predict() joins this thread.
    global NETWORK_START_THREAD
    NETWORK_START_THREAD = Thread(target=start_network, args=(config,))
    NETWORK_START_THREAD.start()
    app.run(host=host, port=port, debug=debug)
|
measurePerformance.py | # ########################################################################
# Copyright 2013 Advanced Micro Devices, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ########################################################################
import sys
import argparse
import subprocess
import itertools
import re#gex
import os
from threading import Timer, Thread
import thread, time
from platform import system
from datetime import datetime
import errorHandler
from fftPerformanceTesting import *
from performanceUtility import timeout, log, generate235Radices
IAM = 'FFT'
TIMOUT_VAL = 900 #In seconds (per-client-invocation kill timeout; name is a historical typo for TIMEOUT_VAL, kept for compatibility)
# legal values for the corresponding CLI options
devicevalues = ['gpu', 'cpu']
layoutvalues = ['cp', 'ci']  # cp = complex planar, ci = complex interleaved
placevalues = ['in', 'out']  # in-place / out-of-place transform
precisionvalues = ['single', 'double']
libraryvalues = ['clFFT']
# explicit range string that the special --batchsize value 'pow10' expands to
pow10 = '1-9,10-90:10,100-900:100,1000-9000:1000,10000-90000:10000,100000-900000:100000,1000000-9000000:1000000'
# Command-line interface.  Most options accept comma-delimited lists and/or
# 'A-B[:step]' range strings; see the individual help texts.
parser = argparse.ArgumentParser(description='Measure performance of the clFFT library')
parser.add_argument('--device',
    dest='device', default='gpu',
    help='device(s) to run on; may be a comma-delimited list. choices are ' + str(devicevalues) + '. (default gpu)')
parser.add_argument('-b', '--batchsize',
    dest='batchSize', default='1',
    help='number of FFTs to perform with one invocation of the client. the special value \'max\' may be used to adjust the batch size on a per-transform basis to the maximum problem size possible on the device. may be a range or a comma-delimited list. if a range is entered, you may follow it with \':X\', where X is the stepping of the range (if omitted, it defaults to a stepping of 1). e.g., 1-15 or 12,18 or 7,10-30:10,1050-1054. the special value \'pow10\' expands to \'{}\'. Note that \'max\' and \'pow10\' may not be used in a list; they must be used by themselves; max may only be used with --library clFFT. (default 1)'.format(pow10))
parser.add_argument('-a', '--adaptivemax',
    dest='constProbSize', default='-1',
    # NOTE(review): the '.format(pow10)' below is a no-op (no placeholder in the
    # string) and 'automtically' is a typo in the help text; left untouched.
    help='Max problem size that you want to maintain across the invocations of client with different lengths. This is adaptive and adjusts itself automtically.'.format(pow10))
parser.add_argument('-x', '--lengthx',
    dest='lengthx', default='1',
    help='length(s) of x to test; must be factors of 1, 2, 3, or 5 with clFFT; may be a range or a comma-delimited list. e.g., 16-128 or 1200 or 16,2048-32768 (default 1)')
parser.add_argument('-y', '--lengthy',
    dest='lengthy', default='1',
    help='length(s) of y to test; must be factors of 1, 2, 3, or 5 with clFFT; may be a range or a comma-delimited list. e.g., 16-128 or 1200 or 16,32768 (default 1)')
parser.add_argument('-z', '--lengthz',
    dest='lengthz', default='1',
    help='length(s) of z to test; must be factors of 1, 2, 3, or 5 with clFFT; may be a range or a comma-delimited list. e.g., 16-128 or 1200 or 16,32768 (default 1)')
parser.add_argument('--problemsize',
    dest='problemsize', default=None,
    help='additional problems of a set size. may be used in addition to lengthx/y/z. each indicated problem size will be added to the list of FFTs to perform. should be entered in AxBxC:D format. A, B, and C indicate the sizes of the X, Y, and Z dimensions (respectively). D is the batch size. All values except the length of X are optional. may enter multiple in a comma-delimited list. e.g., 2x2x2:32768 or 256x256:100,512x512:256')
parser.add_argument('-i', '--inputlayout',
    dest='inputlayout', default='ci',
    help='may enter multiple in a comma-delimited list. choices are ' + str(layoutvalues) + '. ci = complex interleaved, cp = complex planar (default ci)')
parser.add_argument('-o', '--outputlayout',
    dest='outputlayout', default='ci',
    help='may enter multiple in a comma-delimited list. choices are ' + str(layoutvalues) + '. ci = complex interleaved, cp = complex planar (default ci)')
parser.add_argument('-p', '--placeness',
    dest='placeness', default='in',
    help='may enter multiple in a comma-delimited list. choices are ' + str(placevalues) + '. in = in place, out = out of place (default in)')
parser.add_argument('-r', '--precision',
    dest='precision', default='single',
    help='may enter multiple in a comma-delimited list. choices are ' + str(precisionvalues) + '. (default single)')
parser.add_argument('--library',
    dest='library', default='clFFT', choices=libraryvalues,
    help='indicates the library to use for testing on this run')
parser.add_argument('--label',
    dest='label', default=None,
    help='a label to be associated with all transforms performed in this run. if LABEL includes any spaces, it must be in \"double quotes\". note that the label is not saved to an .ini file. e.g., --label cayman may indicate that a test was performed on a cayman card or --label \"Windows 32\" may indicate that the test was performed on Windows 32')
parser.add_argument('--createini',
    dest='createIniFilename', default=None,
    help='create an .ini file with the given name that saves the other parameters given at the command line, then quit. e.g., \'measureperformance.py -x 2048 --createini my_favorite_setup.ini\' will create an .ini file that will save the configuration for a 2048-datapoint 1D FFT.')
parser.add_argument('--ini',
    dest='iniFilename', default=None,
    help='use the parameters in the named .ini file instead of the command line parameters.')
parser.add_argument('--tablefile',
    dest='tableOutputFilename', default=None,
    help='save the results to a plaintext table with the file name indicated. this can be used with plotPerformance.py to generate graphs of the data (default: table prints to screen)')
args = parser.parse_args()
# args.label may be None; str() keeps the filename join below from raising
label = str(args.label)
subprocess.call('mkdir perfLog', shell = True)  # best-effort; fails quietly if the dir exists
logfile = os.path.join('perfLog', (label+'-'+'fftMeasurePerfLog.txt'))
def printLog(txt):
    """Echo `txt` to stdout and append it to this run's log file."""
    print txt
    log(logfile, txt)
printLog("=========================MEASURE PERFORMANCE START===========================")
printLog("Process id of Measure Performance:"+str(os.getpid()))
# handle to the currently running client subprocess; shared with checkTimeOutPut()
currCommandProcess = None
printLog('Executing measure performance for label: '+str(label))
#This function is defunct now
@timeout(1, "fileName") # NOTE(review): decorator argument is 1, not the "5 minutes" an old comment here claimed
def checkTimeOutPut2(args):
    """Deprecated: run `args` under the @timeout decorator.

    Superseded by checkTimeOutPut(), which implements the timeout with a
    worker thread instead.  Kept only for reference.
    """
    global currCommandProcess
    #ret = subprocess.check_output(args, stderr=subprocess.STDOUT)
    #return ret
    currCommandProcess = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    printLog("Curr Command Process id = "+str(currCommandProcess.pid))
    ret = currCommandProcess.communicate()
    # empty/None stdout is treated as failure; stderr rides along in the exception
    if(ret[0] == None or ret[0] == ''):
        errCode = currCommandProcess.poll()
        raise subprocess.CalledProcessError(errCode, args, output=ret[1])
    return ret[0]
#Spawns a separate thread to execute the library command and wait for that thread to complete
#This wait is of 900 seconds (15 minutes). If still the thread is alive then we kill the thread
def checkTimeOutPut(args):
    """Run command `args`, killing it after TIMOUT_VAL seconds.

    Returns the command's stdout on success.  Raises
    errorHandler.ApplicationException on timeout and
    subprocess.CalledProcessError when the command produced no stdout.
    Communicates with the worker thread through module globals
    (currCommandProcess, stdo, stde) — not reentrant.
    """
    t = None
    global currCommandProcess
    global stde
    global stdo
    stde = None
    stdo = None
    def executeCommand():
        # Worker: blocks in communicate() until the child exits, then
        # publishes its output through the module globals.
        global currCommandProcess
        global stdo
        global stde
        try:
            stdo, stde = currCommandProcess.communicate()
            printLog('stdout:\n'+str(stdo))
            printLog('stderr:\n'+str(stde))
        except:
            printLog("ERROR: UNKNOWN Exception - +checkWinTimeOutPut()::executeCommand()")

    currCommandProcess = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    thread = Thread(target=executeCommand)
    thread.start()
    thread.join(TIMOUT_VAL) #wait for the thread to complete
    if thread.is_alive():
        # join() timed out: the client is wedged; kill it and bail out.
        printLog('ERROR: Killing the process - terminating thread because it is taking too much of time to execute')
        currCommandProcess.kill()
        printLog('ERROR: Timed out exception')
        raise errorHandler.ApplicationException(__file__, errorHandler.TIME_OUT)
    if stdo == "" or stdo==None:
        errCode = currCommandProcess.poll()
        printLog('ERROR: @@@@@Raising Called processor exception')
        raise subprocess.CalledProcessError(errCode, args, output=stde)
    return stdo
# don't try to create and use an .ini file at the same time (it will open a portal through which demons will emerge)
if args.iniFilename and args.createIniFilename:
    printLog('ERROR: --ini and --createini are mutually exclusive. Please choose only one.')
    quit()

#read in .ini parameters if --ini is used
if args.iniFilename != None:
    if not os.path.isfile(args.iniFilename):
        printLog("No file with the name \'{}\' exists. Please indicate another filename.".format(args.iniFilename))
        quit()

    ini = open(args.iniFilename, 'r')
    iniContents = ini.read()
    ini.close()  # FIX: handle was previously never closed
    iniContents = iniContents.split(';')
    for i in range(0,len(iniContents)):
        # records are consumed from the end; order is irrelevant here
        line = iniContents.pop()
        # FIX: files written by --createini end with ';', so split(';') leaves a
        # trailing empty chunk.  The original fed it through partition(), got an
        # empty parameter name, diagnosed the file as corrupted and quit() --
        # which made --ini unusable with its own --createini output.
        if line.strip() == '':
            continue
        line = line.partition(' ')
        parameter = line[0]
        value = line[2]
        # strip the str()-formatting artifacts (quotes, brackets, spaces)
        value = value.replace('\'','').replace('[','').replace(']','').replace(' ','')

        if parameter == 'batchSize':
            args.batchSize = value
        elif parameter == 'constProbSize':
            args.constProbSize = value
        elif parameter == 'lengthx':
            args.lengthx = value
        elif parameter == 'lengthy':
            args.lengthy = value
        elif parameter == 'lengthz':
            args.lengthz = value
        elif parameter == 'problemsize':
            args.problemsize = value
        elif parameter == 'device':
            args.device = value
        elif parameter == 'inputlayout':
            args.inputlayout = value
        elif parameter == 'outputlayout':
            args.outputlayout = value
        elif parameter == 'placeness':
            args.placeness = value
        elif parameter == 'precision':
            args.precision = value
        else:
            printLog('{} corrupted. Please re-create a .ini file with the --createini flag.'.format(args.iniFilename))
            quit()
#create ini file if requested
if args.createIniFilename != None:
    printLog('Creating Ini files')
    if os.path.isfile(args.createIniFilename):
        printLog('A file with the name \'{}\' already exists. Please delete the file or choose another name.'.format(args.createIniFilename))
        quit()
    printLog('Creating Ini file:'+args.createIniFilename+'\n')
    ini = open(args.createIniFilename, 'w')
    # one 'key value ;' record per parameter; ';' delimits records on re-read
    ini.write('batchSize {} ;'.format(args.batchSize))
    ini.write('constProbSize {} ;'.format(args.constProbSize))
    ini.write('lengthx {} ;'.format(args.lengthx))
    ini.write('lengthy {} ;'.format(args.lengthy))
    ini.write('lengthz {} ;'.format(args.lengthz))
    ini.write('problemsize {} ;'.format(args.problemsize))
    ini.write('device {} ;'.format(args.device))
    ini.write('inputlayout {} ;'.format(args.inputlayout))
    ini.write('outputlayout {} ;'.format(args.outputlayout))
    ini.write('placeness {} ;'.format(args.placeness))
    ini.write('precision {} ;'.format(args.precision))
    ini.close()  # FIX: flush and release the handle explicitly before quitting
    printLog('Created Ini file:'+args.createIniFilename+'\n')
    printLog("=========================MEASURE PERFORMANCE START===========================\n")
    quit()
#turn pow10 into its range list
if args.batchSize.count('pow10'):
    args.batchSize = pow10

#split up comma-delimited lists
args.batchSize = args.batchSize.split(',')
# only the first value is honored for the adaptive max problem size
args.constProbSize = int(args.constProbSize.split(',')[0])
args.device = args.device.split(',')
args.lengthx = args.lengthx.split(',')
args.lengthy = args.lengthy.split(',')
args.lengthz = args.lengthz.split(',')
if args.problemsize:
    args.problemsize = args.problemsize.split(',')
args.inputlayout = args.inputlayout.split(',')
args.outputlayout = args.outputlayout.split(',')
args.placeness = args.placeness.split(',')
args.precision = args.precision.split(',')

printLog('Executing for label: '+str(args.label))
#check parameters for sanity
# batchSize of 'max' must not be in a list (does not get on well with others)
#if args.batchSize.count('max') and len(args.batchSize) > 1:
if ( args.batchSize.count('max') or args.batchSize.count('adapt') )and len(args.batchSize) > 1:
    printLog('ERROR: --batchsize max must not be in a comma delimited list')
    quit()
# in case of an in-place transform, input and output layouts must be the same (otherwise: *boom*)
for n in args.placeness:
    if n == 'in' or n == 'inplace':
        if len(args.inputlayout) > 1 or len(args.outputlayout) > 1 or args.inputlayout[0] != args.outputlayout[0]:
            printLog('ERROR: if transformation is in-place, input and output layouts must match')
            quit()
# check for valid values in precision
for n in args.precision:
    if n != 'single' and n != 'double':
        printLog('ERROR: invalid value for precision')
        quit()
def isPrime(n):
    """Trial-division primality test on abs(n).

    Note: by this test 0 and 1 are reported as "prime" (the divisor loop
    never runs).  findFactors() relies on 1 being accepted, so that
    behavior is deliberately preserved.
    """
    import math
    n = abs(n)
    # Hoisted out of the loop: the original recomputed math.sqrt(n) on
    # every iteration.
    limit = math.sqrt(n)
    i = 2
    while i <= limit:
        if n%i == 0:
            return False
        i += 1
    return True
def findFactors(number):
    """Return the divisors of `number` that isPrime() accepts, ascending.

    Because isPrime(1) is True, the list always starts with 1 for any
    positive input; validateFactors() depends on that.
    """
    prime_factor_list = []
    for candidate in range(1, number+1):
        # Check cheap divisibility before the primality test; the original
        # ran isPrime() on every candidate up to `number` first.
        if number%candidate == 0 and isPrime(candidate):
            prime_factor_list.append(candidate)
    return prime_factor_list
def factor(num):
    """Return the prime factorization of `num` as a list (largest-first).

    e.g. factor(20) -> [5, 2, 2]; factor(32) -> [2, 2, 2, 2, 2].
    factor(1) returns [1] by convention; a prime returns [itself].
    """
    if num == 1:
        return [1]
    i = 2
    limit = num**0.5
    while i <= limit:
        if num % i == 0:
            # FIX: use floor division.  `num / i` only stays integral under
            # Python 2; `//` is identical there and keeps the recursion
            # correct under Python 3 as well.
            ret = factor(num // i)
            ret.append(i)
            return ret
        i += 1
    # no divisor up to sqrt(num): num itself is prime
    return [num]
def validateFactors(flist):
    """Return True when every entry of `flist` is one of 1, 2, 3, 5.

    Mirrors the original short-circuits: a list longer than four entries is
    rejected outright; duplicates of allowed values are (still) accepted.
    """
    allowed = [1, 2, 3, 5]
    if len(flist) > len(allowed):
        return False
    return all(f in allowed for f in flist)
def validate_number_for_1235(num):
    """Return True when `num` is 0 or its prime factorization contains only
    1, 2, 3 and 5 (e.g. 20 -> True, 28 -> False)."""
    if num == 0:
        return True
    allowed = set([1, 2, 3, 5])
    # factor(num) lies within {1,2,3,5} exactly when it is a subset; the
    # original expressed the same check as "union adds nothing".
    return set(factor(num)) <= allowed
def getValidNumbersInRange(rlist):
    """Filter `rlist` down to the numbers whose divisor list (findFactors)
    passes validateFactors, preserving input order."""
    return [candidate for candidate in rlist
            if validateFactors(findFactors(candidate))]
def get_next_num_with_1235_factors(start):
    """Return the smallest integer greater than `start` whose divisor list
    passes validateFactors (i.e. only 1,2,3,5 as prime factors)."""
    candidate = start + 1
    while not validateFactors(findFactors(candidate)):
        candidate += 1
    return candidate
def check_number_for_1235_factors(number, option='length'):
    """Return True when `number` factors into only 1,2,3,5; log an error and
    return False otherwise.

    `option` (new, optional, backward-compatible) names the offending CLI
    flag in the error message.  FIX: the original message contained the
    '{0}' placeholder but never called .format(), so '--{0}' was printed
    literally.
    """
    factors = findFactors(number)
    if not validateFactors(factors):
        printLog("ERROR: --{0} must have only 1,2,3,5 as factors".format(option))
        return False
    return True
def check_for_1235_factors(values, option):
    """Quit the program unless every number in `values` has only 1,2,3,5 as
    prime factors; `option` names the CLI flag in the error message."""
    #print 'values: ', values
    for n in values:
        # an entry may still be a range like '16-128'; validate each endpoint
        for m in n.replace('-',',').split(','):
            if not validate_number_for_1235(int(m)):
                print 'ERROR: --{0} must specify number with only 1,2,3,5 as factors'.format(option)
                quit()
            #print 'Valid number for :',option,':', m
# clFFT only supports transform lengths whose prime factors are 1, 2, 3, 5
if args.library == 'clFFT':
    check_for_1235_factors(args.lengthx, 'lengthx')
    check_for_1235_factors(args.lengthy, 'lengthy')
    check_for_1235_factors(args.lengthz, 'lengthz')

# bail out early if the benchmark client binary is missing
if not os.path.isfile(executable(args.library)):
    printLog("ERROR: Could not find client named {0}".format(executable(args.library)))
    quit()
def get235RadicesNumberInRange(minimum, maximum):
    """Return every 2/3/5-radix number between `minimum` and `maximum`,
    inclusive.

    Both endpoints must themselves appear in the radix list produced by
    generate235Radices() (list.index raises ValueError otherwise).
    """
    if minimum == 0 and maximum == 0:
        return [0]
    radices = generate235Radices(maximum)
    lo = radices.index(minimum)
    hi = radices.index(maximum)
    return radices[lo:hi + 1]
#expand ranges
class Range:
    """Expand range strings into an explicit sorted list of sizes.

    Each entry of `ranges` is either a literal ('max', 'adapt'), a single
    number, or 'A-B[:step]'.  A step of '+N' adds, 'xN' multiplies, and 'l'
    advances to the next number with only 1,2,3,5 prime factors.  The result
    is deduplicated and sorted in `self.expanded`.
    """
    def __init__(self, ranges, defaultStep='+1'):
        self.expanded = []
        for thisRange in ranges:
            if thisRange != 'max' and thisRange != 'adapt' :
                # split off the optional ':step' suffix
                if thisRange.count(':'):
                    self._stepAmount = thisRange.split(':')[1]
                else:
                    self._stepAmount = defaultStep
                thisRange = thisRange.split(':')[0]

                # decide how _step() advances: multiply, add, or next-1235
                if self._stepAmount.count('x'):
                    self._stepper = '_mult'
                    self._stepAmount = self._stepAmount.lstrip('+x')
                    self._stepAmount = int(self._stepAmount)
                elif self._stepAmount.count('l'):
                    self._stepper = '_next_num_with_1235_factor'
                    self._stepAmount = 0
                else:
                    self._stepper = '_add'
                    self._stepAmount = self._stepAmount.lstrip('+x')
                    self._stepAmount = int(self._stepAmount)

                # 'A-B' gives begin/end; a bare number is a one-element range
                if thisRange.count('-'):
                    self.begin = int(thisRange.split('-')[0])
                    self.end = int(thisRange.split('-')[1])
                else:
                    self.begin = int(thisRange.split('-')[0])
                    self.end = int(thisRange.split('-')[0])
                self.current = self.begin

            # _thisRangeExpanded = []
            if thisRange == 'max':
                self.expanded = self.expanded + ['max']
            elif thisRange == 'adapt':
                self.expanded = self.expanded + ['adapt']
            elif self.begin == 0 and self._stepper == '_mult':
                # multiplying from 0 would loop forever; just emit 0
                self.expanded = self.expanded + [0]
            else:
                if self._stepper == '_next_num_with_1235_factor':
                    self.expanded = self.expanded + get235RadicesNumberInRange(self.current, self.end)
                else:
                    while self.current <= self.end:
                        self.expanded = self.expanded + [self.current]
                        self._step()

        # now we want to uniquify and sort the expanded range
        self.expanded = list(set(self.expanded))
        self.expanded.sort()

    # advance current value to next
    def _step(self):
        getattr(self, self._stepper)()

    def _mult(self):
        self.current = self.current * self._stepAmount

    def _add(self):
        self.current = self.current + self._stepAmount

    def _next_num_with_1235_factor(self):
        self.current = get_next_num_with_1235_factors(self.current)
# expand every range string into an explicit sorted list; lengths use the
# 'l' step (next number with only 1,2,3,5 prime factors)
args.batchSize = Range(args.batchSize).expanded
args.lengthx = Range(args.lengthx, 'l').expanded
args.lengthy = Range(args.lengthy, 'l').expanded
args.lengthz = Range(args.lengthz, 'l').expanded

#expand problemsizes ('XxYxZ:batch')
#print "args.problemsize--1-->", args.problemsize
if args.problemsize and args.problemsize[0] != 'None':
    i = 0
    while i < len(args.problemsize):
        # 'AxBxC:D' -> [['A','B','C'], 'D']
        args.problemsize[i] = args.problemsize[i].split(':')
        args.problemsize[i][0] = args.problemsize[i][0].split('x')
        i = i+1
#create the problem size combinations for each run of the client
problem_size_combinations = itertools.product(args.lengthx, args.lengthy, args.lengthz, args.batchSize)
problem_size_combinations = list(itertools.islice(problem_size_combinations, None))
#print "args.problemsize--2-->", args.problemsize
#add manually entered problem sizes to the list of FFTs to crank out
manual_test_combinations = []
if args.problemsize and args.problemsize[0] != 'None':
    for n in args.problemsize:
        x = []
        y = []
        z = []
        batch = []
        # X is required; Y, Z and batch each default to 1 when omitted
        x.append(int(n[0][0]))
        if len(n[0]) >= 2:
            y.append(int(n[0][1]))
        else:
            y.append(1)
        if len(n[0]) >= 3:
            z.append(int(n[0][2]))
        else:
            z.append(1)
        if len(n) > 1:
            batch.append(int(n[1]))
        else:
            batch.append(1)
        combos = itertools.product(x, y, z, batch)
        combos = list(itertools.islice(combos, None))
        for n in combos:
            manual_test_combinations.append(n)
# manually entered problem sizes should not be plotted (for now). they may still be output in a table if requested
problem_size_combinations = problem_size_combinations + manual_test_combinations
#create final list of all transformations (with problem sizes and transform properties)
test_combinations = itertools.product(problem_size_combinations, args.device, args.inputlayout, args.outputlayout, args.placeness, args.precision)
test_combinations = list(itertools.islice(test_combinations, None))
test_combinations = [TestCombination(params[0][0], params[0][1], params[0][2], params[0][3], params[1], params[2], params[3], params[4], params[5], args.label) for params in test_combinations]
#turn each test combination into a command, run the command, and then stash the gflops
result = [] # NOTE(review): never appended to below; rows are written straight to `table`
#open output file and write the header
if args.tableOutputFilename == None:
    args.tableOutputFilename = 'results' + datetime.now().isoformat().replace(':','.') + '.txt'
else:
    if os.path.isfile(args.tableOutputFilename):
        # never clobber an existing results file; uniquify with a timestamp
        oldname = args.tableOutputFilename
        args.tableOutputFilename = args.tableOutputFilename + datetime.now().isoformat().replace(':','.')
        message = 'A file with the name ' + oldname + ' already exists. Changing filename to ' + args.tableOutputFilename
        printLog(message)

printLog('table header---->'+ str(tableHeader))
table = open(args.tableOutputFilename, 'w')
table.write(tableHeader + '\n')
table.flush()

# -1 means "auto": probe the device once for the largest workable batch
if args.constProbSize == -1:
    args.constProbSize = maxBatchSize(1, 1, 1, args.inputlayout[0], args.precision[0], executable(args.library), '--' + args.device[0])
args.constProbSize = int(args.constProbSize)

printLog('Total combinations = '+str(len(test_combinations)))
vi = 0
#test_combinations = test_combinations[825:830]
for params in test_combinations:
vi = vi+1
printLog("")
printLog('preparing command: '+ str(vi))
device = params.device
lengthx = str(params.x)
lengthy = str(params.y)
lengthz = str(params.z)
if params.batchsize == 'max':
batchSize = maxBatchSize(lengthx, lengthy, lengthz, params.inlayout, params.precision, executable(args.library), '--' + device)
elif params.batchsize == 'adapt':
batchSize = str(args.constProbSize/(int(lengthx)*int(lengthy)*int(lengthz)))
else:
batchSize = str(params.batchsize)
if params.inlayout == 'complexinterleaved' or params.inlayout == 'ci':
inputlayout = '1'
elif params.inlayout == 'complexplanar' or params.inlayout == 'cp':
inputlayout = '2'
else:
printLog('ERROR: invalid value for input layout when assembling client command')
if params.outlayout == 'complexinterleaved' or params.outlayout == 'ci':
outputlayout = '1'
elif params.outlayout == 'complexplanar' or params.outlayout == 'cp':
outputlayout = '2'
else:
printLog('ERROR: invalid value for output layout when assembling client command')
if params.placeness == 'inplace' or params.placeness == 'in':
placeness = ''
elif params.placeness == 'outofplace' or params.placeness == 'out':
placeness = '--outPlace'
else:
printLog('ERROR: invalid value for placeness when assembling client command')
if params.precision == 'single':
precision = ''
elif params.precision == 'double':
precision = '--double'
else:
printLog('ERROR: invalid value for precision when assembling client command')
#set up arguments here
if args.library == 'clFFT':
arguments = [executable(args.library),
'--' + device,
'-x', lengthx,
'-y', lengthy,
'-z', lengthz,
'--batchSize', batchSize,
'--inLayout', inputlayout,
'--outLayout', outputlayout,
placeness,
precision,
'-p', '10']
writeline = True
try:
printLog('Executing Command: '+str(arguments))
output = checkTimeOutPut(arguments)
output = output.split(os.linesep);
printLog('Execution Successfull---------------\n')
except errorHandler.ApplicationException as ae:
writeline = False
printLog('ERROR: Command is taking too much of time '+ae.message+'\n'+'Command: \n'+str(arguments))
continue
except subprocess.CalledProcessError as clientCrash:
print 'Command execution failure--->'
if clientCrash.output.count('CLFFT_INVALID_BUFFER_SIZE'):
writeline = False
printLog('Omitting line from table - problem is too large')
else:
writeline = False
printLog('ERROR: client crash. Please report the following error message (with \'CLFFT_*\' error code, if given, and the parameters used to invoke measurePerformance.py) \n'+clientCrash.output+'\n')
printLog('IN ORIGINAL WE CALL QUIT HERE - 1\n')
continue
for x in output:
if x.count('out of memory'):
writeline = False
printLog('ERROR: Omitting line from table - problem is too large')
if writeline:
try:
output = itertools.ifilter( lambda x: x.count('Gflops'), output)
output = list(itertools.islice(output, None))
thisResult = re.search('\d+\.*\d*e*-*\d*$', output[-1])
thisResult = float(thisResult.group(0))
thisResult = (params.x, params.y, params.z, batchSize, params.device, params.inlayout, params.outlayout, params.placeness, params.precision, params.label, thisResult)
outputRow = ''
for x in thisResult:
outputRow = outputRow + str(x) + ','
outputRow = outputRow.rstrip(',')
table.write(outputRow + '\n')
table.flush()
except:
printLog('ERROR: Exception occurs in GFLOP parsing')
else:
if(len(output) > 0):
if output[0].find('nan') or output[0].find('inf'):
printLog( 'WARNING: output from client was funky for this run. skipping table row')
else:
prinLog('ERROR: output from client makes no sense')
printLog(str(output[0]))
printLog('IN ORIGINAL WE CALL QUIT HERE - 2\n')
else:
prinLog('ERROR: output from client makes no sense')
#quit()
printLog("=========================MEASURE PERFORMANCE ENDS===========================\n")
#
#"""
#print a pretty table
#"""
#if args.tableOutputFilename == None:
# args.tableOutputFilename = 'results' + datetime.now().isoformat().replace(':','.') + '.txt'
#else:
# if os.path.isfile(args.tableOutputFilename):
# oldname = args.tableOutputFilename
# args.tableOutputFilename = args.tableOutputFilename + datetime.now().isoformat().replace(':','.')
# message = 'A file with the name ' + oldname + ' already exists. Changing filename to ' + args.tableOutputFilename
# print message
#
#table = open(args.tableOutputFilename, 'w')
#table.write(tableHeader + '\n')
#for x in result:
# row = ''
# for y in x:
# row = row + str(y) + ','
# row = row[:-1] #chomp off the trailing comma
# table.write(row + '\n')
|
ch06_listing_source.py |
import bisect
from collections import defaultdict, deque
import json
import math
import os
import time
import unittest
import uuid
import zlib
import redis
QUIT = False
# module-level placeholders so the excerpted listings (list_item,
# purchase_item) parse standalone; the full listings assign these before use
pipe = inv = item = buyer = seller = inventory = None
# <start id="_1314_14473_8380"/>
def add_update_contact(conn, user, contact):
    """Move `contact` to the front of `user`'s recent-contacts list,
    keeping at most the 100 most recent entries."""
    recent_key = 'recent:' + user
    trans = conn.pipeline(True)      # run all four commands atomically
    trans.lrem(recent_key, contact)  # drop any earlier occurrence
    trans.lpush(recent_key, contact) # re-insert at the head
    trans.ltrim(recent_key, 0, 99)   # cap the list at 100 entries
    trans.execute()
# <end id="_1314_14473_8380"/>
#A Set up the atomic operation
#B Remove the contact from the list if it exists
#C Push the item onto the front of the list
#D Remove anything beyond the 100th item
#E Actually execute everything
#END
# <start id="_1314_14473_8383"/>
def remove_contact(conn, user, contact):
    """Delete every occurrence of `contact` from `user`'s recent-contacts list."""
    recent_key = 'recent:' + user
    conn.lrem(recent_key, contact)
# <end id="_1314_14473_8383"/>
#END
# <start id="_1314_14473_8386"/>
def fetch_autocomplete_list(conn, user, prefix):
    """Return the entries of `user`'s recent-contacts list that start with
    `prefix`, case-insensitively, in list order."""
    wanted = prefix.lower()
    recent = conn.lrange('recent:' + user, 0, -1)
    return [name for name in recent if name.lower().startswith(wanted)]
# <end id="_1314_14473_8386"/>
#A Fetch the autocomplete list
#B Check each candidate
#C We found a match
#D Return all of the matches
#END
# <start id="_1314_14473_8396"/>
valid_characters = '`abcdefghijklmnopqrstuvwxyz{' #A  backtick and '{' bracket a-z in ASCII order

def find_prefix_range(prefix):
    """Return the (start, end) ZSET sentinel members that bracket every
    member beginning with `prefix` ('{' is the character just after 'z')."""
    last = prefix[-1:]
    posn = bisect.bisect_left(valid_characters, last) #B
    predecessor = valid_characters[(posn or 1) - 1]   #C  character just before `last`
    return prefix[:-1] + predecessor + '{', prefix + '{' #D
# <end id="_1314_14473_8396"/>
#A Set up our list of characters that we know about
#B Find the position of prefix character in our list of characters
#C Find the predecessor character
#D Return the range
#END
# <start id="_1314_14473_8399"/>
def autocomplete_on_prefix(conn, guild, prefix):
    """Autocomplete guild-member names that start with `prefix`.

    Inserts two uuid-tagged sentinel members bracketing the prefix range,
    then, under WATCH, reads up to 10 members between them and removes the
    sentinels, retrying if another client touches the ZSET meanwhile.
    """
    start, end = find_prefix_range(prefix) #A
    identifier = str(uuid.uuid4()) #A
    # make the sentinels unique so concurrent autocompletes don't collide
    start += identifier #A
    end += identifier #A
    zset_name = 'members:' + guild

    conn.zadd(zset_name, start, 0, end, 0) #B  (legacy redis-py 2.x member/score arg order)
    pipeline = conn.pipeline(True)
    while 1:
        try:
            pipeline.watch(zset_name)
            sindex = pipeline.zrank(zset_name, start) #C
            eindex = pipeline.zrank(zset_name, end) #C
            erange = min(sindex + 9, eindex - 2) #C  at most 10 results; exclude our own end sentinel
            pipeline.multi()
            pipeline.zrem(zset_name, start, end) #D
            pipeline.zrange(zset_name, sindex, erange) #D
            items = pipeline.execute()[-1] #D
            break
        except redis.exceptions.WatchError: #E
            continue #E

    # '{' only appears in other callers' in-flight sentinels; drop those
    return [item for item in items if '{' not in item] #F
# <end id="_1314_14473_8399"/>
#A Find the start/end range for the prefix
#B Add the start/end range items to the ZSET
#C Find the ranks of our end points
#D Get the values inside our range, and clean up
#E Retry if someone modified our autocomplete zset
#F Remove start/end entries if an autocomplete was in progress
#END
# <start id="_1314_14473_8403"/>
def join_guild(conn, guild, user):
    """Add `user` (score 0) to the guild's member ZSET used for autocomplete."""
    # NOTE(review): legacy redis-py 2.x zadd argument order (member, score)
    member_key = 'members:' + guild
    conn.zadd(member_key, user, 0)
def leave_guild(conn, guild, user):
    """Remove `user` from the guild's autocomplete member ZSET."""
    member_key = 'members:' + guild
    conn.zrem(member_key, user)
# <end id="_1314_14473_8403"/>
#END
# <start id="_1314_14473_8431"/>
def list_item(conn, itemid, sellerid, price):
    """Listing fragment: atomically move `itemid` from the seller's
    inventory set onto the market ZSET at `price`.

    NOTE(review): uses module-level placeholders (pipe, inv, item) so this
    excerpt parses; the elided '#...' code is expected to set them up and
    retry on WatchError — confirm against the full listing before reuse.
    """
    #...
    pipe.watch(inv) #A
    if not pipe.sismember(inv, itemid): #B
        pipe.unwatch() #B
        return None
    pipe.multi() #C
    pipe.zadd("market:", item, price) #C  (legacy redis-py member/score arg order)
    pipe.srem(inv, itemid) #C
    pipe.execute() #C
    return True
    #...
# <end id="_1314_14473_8431"/>
#A Watch for changes to the users's inventory
#B Verify that the user still has the item to be listed
#C Actually list the item
#END
# <start id="_1314_14473_8435"/>
def purchase_item(conn, buyerid, itemid, sellerid, lprice):
    """Listing fragment: buy `itemid` at the listed price `lprice`,
    transferring funds seller<-buyer and the item into the buyer's inventory.

    NOTE(review): mixes module-level placeholders (pipe, buyer, seller,
    item, inventory) with the real parameters, and debits key `buyerid`
    while reading funds from `buyer` — looks inconsistent; confirm against
    the full listing before reuse.
    """
    #...
    pipe.watch("market:", buyer) #A
    price = pipe.zscore("market:", item) #B
    funds = int(pipe.hget(buyer, 'funds')) #B
    if price != lprice or price > funds: #B
        pipe.unwatch() #B
        return None
    pipe.multi() #C
    pipe.hincrby(seller, 'funds', int(price)) #C
    pipe.hincrby(buyerid, 'funds', int(-price)) #C
    pipe.sadd(inventory, itemid) #C
    pipe.zrem("market:", item) #C
    pipe.execute() #C
    return True
    #...
# <end id="_1314_14473_8435"/>
#A Watch for changes to the market and the buyer's account information
#B Check for a sold/repriced item or insufficient funds
#C Transfer funds from the buyer to the seller, and transfer the item to the buyer
#END
# <start id="_1314_14473_8641"/>
def acquire_lock(conn, lockname, acquire_timeout=10):
    """Poll SETNX until we own 'lock:'+lockname or `acquire_timeout` elapses.

    Returns the owner token (a uuid4 string) on success, False on timeout.
    """
    token = str(uuid.uuid4())
    deadline = time.time() + acquire_timeout
    while time.time() < deadline:
        if conn.setnx('lock:' + lockname, token):
            return token
        time.sleep(.001)
    return False
# <end id="_1314_14473_8641"/>
#A A 128-bit random identifier
#B Get the lock
#END
# <start id="_1314_14473_8645"/>
def purchase_item_with_lock(conn, buyerid, itemid, sellerid):
    """Buy `itemid` from `sellerid` for `buyerid`, serialized by the
    'market:' lock instead of WATCH/MULTI retries.

    Returns True on success, None when the item is gone or funds are
    insufficient, False when the lock could not be acquired.
    """
    buyer = "users:%s" % buyerid
    seller = "users:%s" % sellerid
    item = "%s.%s" % (itemid, sellerid)
    inventory = "inventory:%s" % buyerid

    locked = acquire_lock(conn, 'market:') #A
    if not locked:
        return False

    pipe = conn.pipeline(True)
    try:
        pipe.zscore("market:", item) #B
        pipe.hget(buyer, 'funds') #B
        price, funds = pipe.execute() #B
        if price is None or price > funds: #B
            return None #B

        pipe.hincrby(seller, 'funds', int(price)) #C
        pipe.hincrby(buyer, 'funds', int(-price)) #C
        pipe.sadd(inventory, itemid) #C
        pipe.zrem("market:", item) #C
        pipe.execute() #C
        return True
    finally:
        # always give the lock back, even on the early None return
        release_lock(conn, 'market:', locked) #D
# <end id="_1314_14473_8645"/>
#A Get the lock
#B Check for a sold item or insufficient funds
#C Transfer funds from the buyer to the seller, and transfer the item to the buyer
#D Release the lock
#END
# <start id="_1314_14473_8650"/>
def release_lock(conn, lockname, identifier):
    """Release 'lock:'+lockname only if `identifier` still owns it.

    Returns True when the lock was deleted, False when we had already lost
    it (it expired or someone else holds it).
    """
    pipe = conn.pipeline(True)
    lockname = 'lock:' + lockname

    while True:
        try:
            pipe.watch(lockname) #A
            if pipe.get(lockname) == identifier: #A
                pipe.multi() #B
                pipe.delete(lockname) #B
                pipe.execute() #B
                return True #B

            pipe.unwatch()
            break

        except redis.exceptions.WatchError: #C
            pass #C

    return False #D
# <end id="_1314_14473_8650"/>
#A Check and verify that we still have the lock
#B Release the lock
#C Someone else did something with the lock, retry
#D We lost the lock
#END
# <start id="_1314_14473_8790"/>
def acquire_lock_with_timeout(
    conn, lockname, acquire_timeout=10, lock_timeout=10):
    """Acquire 'lock:'+lockname with an expiration so a crashed holder
    cannot wedge the lock forever.

    Returns the owner token (uuid4 string) on success, False when
    `acquire_timeout` elapses first.
    """
    token = str(uuid.uuid4())
    lockname = 'lock:' + lockname
    lock_timeout = int(math.ceil(lock_timeout))  # EXPIRE takes whole seconds

    deadline = time.time() + acquire_timeout
    while time.time() < deadline:
        if conn.setnx(lockname, token):
            # got the lock; arm its expiration
            conn.expire(lockname, lock_timeout)
            return token
        elif conn.ttl(lockname) < 0:
            # key exists without a TTL (holder died mid-acquire); repair it
            conn.expire(lockname, lock_timeout)
        time.sleep(.001)
    return False
# <end id="_1314_14473_8790"/>
#A A 128-bit random identifier
#B Get the lock and set the expiration
#C Check and update the expiration time as necessary
#D Only pass integers to our EXPIRE calls
#END
# <start id="_1314_14473_8986"/>
def acquire_semaphore(conn, semname, limit, timeout=10):
    """Take one of `limit` slots in a basic counting semaphore.

    Returns the holder identifier on success, None when all slots are held.
    NOTE(review): slots are ranked by time.time() scores, so acquisition
    from multiple hosts presumably requires synchronized clocks — confirm
    this is acceptable for the deployment.
    """
    identifier = str(uuid.uuid4()) #A
    now = time.time()

    pipeline = conn.pipeline(True)
    pipeline.zremrangebyscore(semname, '-inf', now - timeout) #B
    pipeline.zadd(semname, identifier, now) #C  (legacy redis-py member/score arg order)
    pipeline.zrank(semname, identifier) #D
    if pipeline.execute()[-1] < limit: #D
        return identifier

    conn.zrem(semname, identifier) #E
    return None
# <end id="_1314_14473_8986"/>
#A A 128-bit random identifier
#B Time out old semaphore holders
#C Try to acquire the semaphore
#D Check to see if we have it
#E We failed to get the semaphore, discard our identifier
#END
# <start id="_1314_14473_8990"/>
def release_semaphore(conn, semname, identifier):
    """Drop our entry from the semaphore ZSET.

    Returns a truthy value if the entry was removed, falsy if it had
    already timed out (someone else may hold our old slot).
    """
    removed = conn.zrem(semname, identifier) #A
    return removed
# <end id="_1314_14473_8990"/>
#A Returns True if the semaphore was properly released, False if it had timed out
#END
# <start id="_1314_14473_9004"/>
def acquire_fair_semaphore(conn, semname, limit, timeout=10):
    """Acquire a counting-semaphore slot using a shared counter for fairness.

    Ranks holders by an INCR-generated counter (the ':owner' zset) instead of
    wall-clock time, so modest clock skew between clients does not cost
    anyone their slot. Returns the identifier on success, None on failure.
    Uses the legacy redis-py 2.x zadd(key, member, score) signature.
    """
    identifier = str(uuid.uuid4())  #A
    czset = semname + ':owner'      # counter-ranked zset used for fairness
    ctr = semname + ':counter'      # shared monotonically increasing counter
    now = time.time()
    pipeline = conn.pipeline(True)
    pipeline.zremrangebyscore(semname, '-inf', now - timeout)  #B time out old holders
    pipeline.zinterstore(czset, {czset: 1, semname: 0})        #B drop their owner entries too
    pipeline.incr(ctr)                                         #C
    counter = pipeline.execute()[-1]                           #C
    pipeline.zadd(semname, identifier, now)                    #D
    pipeline.zadd(czset, identifier, counter)                  #D
    pipeline.zrank(czset, identifier)                          #E
    if pipeline.execute()[-1] < limit:                         #E rank in the counter zset decides
        return identifier                                      #F
    pipeline.zrem(semname, identifier)                         #G lost the race -- clean up
    pipeline.zrem(czset, identifier)                           #G
    pipeline.execute()
    return None
# <end id="_1314_14473_9004"/>
#A A 128-bit random identifier
#B Time out old entries
#C Get the counter
#D Try to acquire the semaphore
#E Check the rank to determine if we got the semaphore
#F We got the semaphore
#G We didn't get the semaphore, clean out the bad data
#END
# <start id="_1314_14473_9014"/>
def release_fair_semaphore(conn, semname, identifier):
    """Release a fair-semaphore slot, removing both zset entries.

    Returns truthy (1) if the semaphore was still held, falsy (0) if it had
    already timed out.
    """
    pipeline = conn.pipeline(True)
    pipeline.zrem(semname, identifier)
    pipeline.zrem(semname + ':owner', identifier)
    return pipeline.execute()[0]  #A result of the first ZREM tells us if we still held it
# <end id="_1314_14473_9014"/>
#A Returns True if the semaphore was properly released, False if it had timed out
#END
# <start id="_1314_14473_9022"/>
def refresh_fair_semaphore(conn, semname, identifier):
    """Refresh our fair-semaphore timestamp; return True if still held.

    With the legacy zadd(key, member, score) API, ZADD returns 1 when the
    member was newly ADDED -- which here means our old entry had expired, so
    we must back out and report the loss.
    """
    if conn.zadd(semname, identifier, time.time()):  #A newly added => we had lost it
        release_fair_semaphore(conn, semname, identifier)  #B undo the accidental re-acquire
        return False  #B
    return True  #C score updated in place; still ours
# <end id="_1314_14473_9022"/>
#A Update our semaphore
#B We lost our semaphore, report back
#C We still have our semaphore
#END
# <start id="_1314_14473_9031"/>
def acquire_semaphore_with_lock(conn, semname, limit, timeout=10):
    """Acquire the fair semaphore under a short lock to remove its race.

    Returns the semaphore identifier on success; returns None implicitly
    when the wrapping lock cannot be acquired within 10ms.
    """
    identifier = acquire_lock(conn, semname, acquire_timeout=.01)
    if identifier:
        try:
            return acquire_fair_semaphore(conn, semname, limit, timeout)
        finally:
            release_lock(conn, semname, identifier)
# <end id="_1314_14473_9031"/>
#END
# <start id="_1314_14473_9056"/>
def send_sold_email_via_queue(conn, seller, item, price, buyer):
    """Enqueue a 'sold item' email notification onto the email work queue.

    The payload is JSON-encoded and RPUSHed onto 'queue:email' for a
    worker (see process_sold_email_queue) to pick up.
    """
    payload = json.dumps({
        'seller_id': seller,
        'item_id': item,
        'price': price,
        'buyer_id': buyer,
        'time': time.time(),
    })
    conn.rpush('queue:email', payload)
# <end id="_1314_14473_9056"/>
#A Prepare the item
#B Push the item onto the queue
#END
# <start id="_1314_14473_9060"/>
def process_sold_email_queue(conn):
    """Worker loop: pop queued 'sold' emails and send them until QUIT is set.

    Relies on module-level QUIT flag and the fetch_data_and_send_sold_email /
    log_error / log_success helpers defined elsewhere in this file.
    """
    while not QUIT:
        packed = conn.blpop(['queue:email'], 30)  #A block up to 30s for a message
        if not packed:  #B
            continue  #B timed out -- loop so QUIT is re-checked
        to_send = json.loads(packed[1])  #C blpop returns (key, value)
        try:
            fetch_data_and_send_sold_email(to_send)  #D
        except EmailSendError as err:
            log_error("Failed to send sold email", err, to_send)
        else:
            log_success("Sent sold email", to_send)
# <end id="_1314_14473_9060"/>
#A Try to get a message to send
#B No message to send, try again
#C Load the packed email information
#D Send the email using our pre-written emailing function
#END
# <start id="_1314_14473_9066"/>
def worker_watch_queue(conn, queue, callbacks):
    """Generic worker: pop [name, args] tasks from one queue and dispatch.

    `callbacks` maps task names to callables; unknown names are logged and
    skipped. Runs until the module-level QUIT flag is set.
    """
    while not QUIT:
        packed = conn.blpop([queue], 30)  #A block up to 30s for an item
        if not packed:  #B
            continue  #B nothing to do -- re-check QUIT
        name, args = json.loads(packed[1])  #C unpack the work item
        if name not in callbacks:  #D
            log_error("Unknown callback %s"%name)  #D
            continue  #D
        callbacks[name](*args)  #E execute the task
# <end id="_1314_14473_9066"/>
#A Try to get an item from the queue
#B There is nothing to work on, try again
#C Unpack the work item
#D The function is unknown, log the error and try again
#E Execute the task
#END
# <start id="_1314_14473_9074"/>
def worker_watch_queues(conn, queues, callbacks):  #A multiple queues => priority order
    """Priority variant of worker_watch_queue.

    BLPOP checks `queues` in list order, so earlier queues act as higher
    priority. Otherwise identical to worker_watch_queue.
    """
    while not QUIT:
        packed = conn.blpop(queues, 30)  #B first non-empty queue wins
        if not packed:
            continue
        name, args = json.loads(packed[1])
        if name not in callbacks:
            log_error("Unknown callback %s"%name)
            continue
        callbacks[name](*args)
# <end id="_1314_14473_9074"/>
#A The first changed line to add priority support
#B The second changed line to add priority support
#END
# <start id="_1314_14473_9094"/>
def execute_later(conn, queue, name, args, delay=0):
    """Schedule task `name(*args)` on `queue`, optionally after `delay` secs.

    Immediate tasks go straight onto 'queue:<queue>'; delayed tasks are
    parked in the 'delayed:' zset scored by their due time (a poller moves
    them over later). Returns the task's unique identifier.
    """
    ident = str(uuid.uuid4())
    payload = json.dumps([ident, queue, name, args])
    if delay > 0:
        # Legacy redis-py 2.x zadd(key, member, score) signature.
        conn.zadd('delayed:', payload, time.time() + delay)
        return ident
    conn.rpush('queue:' + queue, payload)
    return ident
# <end id="_1314_14473_9094"/>
#A Generate a unique identifier
#B Prepare the item for the queue
#C Delay the item
#D Execute the item immediately
#E Return the identifier
#END
# <start id="_1314_14473_9099"/>
def poll_queue(conn):
    """Poller loop: move due items from the 'delayed:' zset onto their queue.

    Locks each item while moving it so concurrent pollers do not enqueue the
    same task twice. Runs until the module-level QUIT flag is set.
    """
    while not QUIT:
        item = conn.zrange('delayed:', 0, 0, withscores=True)  #A earliest-due item
        if not item or item[0][1] > time.time():               #B nothing due yet
            time.sleep(.01)                                    #B
            continue                                           #B
        item = item[0][0]                                      #C
        identifier, queue, function, args = json.loads(item)   #C unpack the destination
        locked = acquire_lock(conn, identifier)                #D
        if not locked:                                         #E another poller has it
            continue                                           #E
        if conn.zrem('delayed:', item):                        #F only the remover enqueues
            conn.rpush('queue:' + queue, item)                 #F
        release_lock(conn, identifier, locked)                 #G
# <end id="_1314_14473_9099"/>
#A Get the first item in the queue
#B No item or the item is still to be execued in the future
#C Unpack the item so that we know where it should go
#D Get the lock for the item
#E We couldn't get the lock, so skip it and try again
#F Move the item to the proper list queue
#G Release the lock
#END
# <start id="_1314_14473_9124"/>
def create_chat(conn, sender, recipients, message, chat_id=None):
    """Create a chat session for sender+recipients and send the first message.

    Returns the chat id (via send_message).
    NOTE(review): mutates the caller's `recipients` list by appending sender.
    Uses the legacy redis-py 2.x zadd signatures (kwargs / positional).
    """
    chat_id = chat_id or str(conn.incr('ids:chat:'))  #A allocate a new chat id
    recipients.append(sender)                         #E sender participates too
    recipientsd = dict((r, 0) for r in recipients)    #E member -> last-seen message id (0)
    pipeline = conn.pipeline(True)
    pipeline.zadd('chat:' + chat_id, **recipientsd)   #B membership zset
    for rec in recipients:                            #C
        pipeline.zadd('seen:' + rec, chat_id, 0)      #C each member's seen zset
    pipeline.execute()
    return send_message(conn, chat_id, sender, message)  #D
# <end id="_1314_14473_9124"/>
#A Get a new chat id
#E Set up a dictionary of users to scores to add to the chat ZSET
#B Create the set with the list of people participating
#C Initialize the seen zsets
#D Send the message
#END
# <start id="_1314_14473_9127"/>
def send_message(conn, chat_id, sender, message):
    """Append a message to a chat's message zset under the chat lock.

    The lock keeps the INCR of the message-id counter and the ZADD atomic
    with respect to other senders. Returns the chat id. Raises if the lock
    cannot be acquired.
    """
    identifier = acquire_lock(conn, 'chat:' + chat_id)
    if not identifier:
        raise Exception("Couldn't get the lock")
    try:
        mid = conn.incr('ids:' + chat_id)  #A next message id for this chat
        ts = time.time()                   #A
        packed = json.dumps({              #A
            'id': mid,                     #A
            'ts': ts,                      #A
            'sender': sender,              #A
            'message': message,            #A
        })                                 #A
        conn.zadd('msgs:' + chat_id, packed, mid)  #B scored by message id
    finally:
        release_lock(conn, 'chat:' + chat_id, identifier)
    return chat_id
# <end id="_1314_14473_9127"/>
#A Prepare the message
#B Send the message to the chat
#END
# <start id="_1314_14473_9132"/>
def fetch_pending_messages(conn, recipient):
    """Fetch all unseen messages for `recipient` across their chats.

    Returns a list of (chat_id, messages) pairs (only chats with new
    messages carry decoded message dicts), updates the recipient's seen
    markers, and garbage-collects messages everyone has seen.
    NOTE: Python 2 semantics -- zip() returns a list here, which the loop
    mutates in place via chat_info[i] = ...
    """
    seen = conn.zrange('seen:' + recipient, 0, -1, withscores=True)  #A chat -> last seen id
    pipeline = conn.pipeline(True)
    for chat_id, seen_id in seen:  #B
        pipeline.zrangebyscore(    #B everything newer than what we've seen
            'msgs:' + chat_id, seen_id+1, 'inf')  #B
    chat_info = zip(seen, pipeline.execute())  #C
    for i, ((chat_id, seen_id), messages) in enumerate(chat_info):
        if not messages:
            continue
        messages[:] = map(json.loads, messages)
        seen_id = messages[-1]['id']  #D newest message id we now hold
        conn.zadd('chat:' + chat_id, recipient, seen_id)  #D record our high-water mark
        min_id = conn.zrange(  #E lowest seen id across all members
            'chat:' + chat_id, 0, 0, withscores=True)  #E
        pipeline.zadd('seen:' + recipient, chat_id, seen_id)  #F
        if min_id:
            pipeline.zremrangebyscore(  #G everyone has seen these -- delete
                'msgs:' + chat_id, 0, min_id[0][1])  #G
        chat_info[i] = (chat_id, messages)
    pipeline.execute()
    return chat_info
# <end id="_1314_14473_9132"/>
#A Get the last message ids received
#B Fetch all new messages
#C Prepare information about the data to be returned
#D Update the 'chat' ZSET with the most recently received message
#E Discover messages that have been seen by all users
#F Update the 'seen' ZSET
#G Clean out messages that have been seen by all users
#END
# <start id="_1314_14473_9135"/>
def join_chat(conn, chat_id, user):
    """Add `user` to an existing chat, marking all prior messages as seen."""
    message_id = int(conn.get('ids:' + chat_id))  #A current newest message id
    pipeline = conn.pipeline(True)
    pipeline.zadd('chat:' + chat_id, user, message_id)  #B add to member list
    pipeline.zadd('seen:' + user, chat_id, message_id)  #C start from "now"
    pipeline.execute()
# <end id="_1314_14473_9135"/>
#A Get the most recent message id for the chat
#B Add the user to the chat member list
#C Add the chat to the users's seen list
#END
# <start id="_1314_14473_9136"/>
def leave_chat(conn, chat_id, user):
    """Remove `user` from a chat; delete or compact the chat as appropriate."""
    pipeline = conn.pipeline(True)
    pipeline.zrem('chat:' + chat_id, user)  #A drop membership
    pipeline.zrem('seen:' + user, chat_id)  #A and their seen marker
    pipeline.zcard('chat:' + chat_id)       #B remaining member count
    if not pipeline.execute()[-1]:
        pipeline.delete('msgs:' + chat_id)  #C last member left -- delete the chat
        pipeline.delete('ids:' + chat_id)   #C
        pipeline.execute()
    else:
        oldest = conn.zrange(  #D lowest seen id among remaining members
            'chat:' + chat_id, 0, 0, withscores=True)  #D
        conn.zremrangebyscore('msgs:' + chat_id, 0, oldest[0][1])  #E compact old messages
# <end id="_1314_14473_9136"/>
#A Remove the user from the chat
#B Find the number of remaining group members
#C Delete the chat
#D Find the oldest message seen by all users
#E Delete old messages from the chat
#END
# <start id="_1314_15044_3669"/>
# day -> country -> hit count, accumulated locally between flushes.
aggregates = defaultdict(lambda: defaultdict(int))  #A

def daily_country_aggregate(conn, line):
    """Line callback: count hits per country per day; flush on sentinel.

    Called with each log line; a falsy `line` signals end-of-file, at which
    point the local aggregates are written to Redis and cleared.
    Uses the legacy redis-py 2.x zadd(**kwargs) signature.
    """
    if line:
        line = line.split()
        ip = line[0]   #B
        day = line[1]  #B
        country = find_city_by_ip_local(ip)[2]  #C helper defined elsewhere in this file
        aggregates[day][country] += 1  #D
        return
    for day, aggregate in aggregates.items():  #E file done -- flush to Redis
        conn.zadd('daily:country:' + day, **aggregate)  #E
        del aggregates[day]  #E safe in py2: items() returned a copy (list)
# <end id="_1314_15044_3669"/>
#A Prepare the local aggregate dictionary
#B Extract the information from our log lines
#C Find the country from the IP address
#D Increment our local aggregate
#E The day file is done, write our aggregate to Redis
#END
# <start id="_1314_14473_9209"/>
def copy_logs_to_redis(conn, path, channel, count=10,
                       limit=2**30, quit_when_done=True):
    """Stream log files from `path` into Redis for `count` worker clients.

    Each file is appended into a Redis string key and announced on a chat
    channel; files are deleted once all `count` clients report them done,
    keeping total memory use under `limit` bytes.
    """
    bytes_in_redis = 0
    waiting = deque()  # (logfile, size) pairs awaiting cleanup, oldest first
    create_chat(conn, 'source', map(str, range(count)), '', channel)  #I notification chat
    count = str(count)  # compared against the string value of the ':done' counter
    for logfile in sorted(os.listdir(path)):  #A
        full_path = os.path.join(path, logfile)
        fsize = os.stat(full_path).st_size
        while bytes_in_redis + fsize > limit:  #B reclaim space before uploading
            cleaned = _clean(conn, channel, waiting, count)  #B
            if cleaned:  #B
                bytes_in_redis -= cleaned  #B
            else:  #B
                time.sleep(.25)  #B
        with open(full_path, 'rb') as inp:  #C upload in 128KB chunks
            block = ' '  #C non-empty sentinel so the loop runs at least once
            while block:  #C
                block = inp.read(2**17)  #C
                conn.append(channel+logfile, block)  #C
        send_message(conn, channel, 'source', logfile)  #D announce availability
        bytes_in_redis += fsize  #E
        waiting.append((logfile, fsize))  #E
    if quit_when_done:  #F
        send_message(conn, channel, 'source', ':done')  #F
    while waiting:  #G drain remaining cleanup work
        cleaned = _clean(conn, channel, waiting, count)  #G
        if cleaned:  #G
            bytes_in_redis -= cleaned  #G
        else:  #G
            time.sleep(.25)  #G
def _clean(conn, channel, waiting, count):  #H
    """Delete the oldest uploaded log if all `count` clients finished it.

    Returns the number of bytes reclaimed (0 if nothing could be cleaned).
    """
    if not waiting:  #H
        return 0  #H
    w0 = waiting[0][0]  #H oldest uploaded file
    if conn.get(channel + w0 + ':done') == count:  #H all clients reported done
        conn.delete(channel + w0, channel + w0 + ':done')  #H
        return waiting.popleft()[1]  #H its size
    return 0  #H
# <end id="_1314_14473_9209"/>
#I Create the chat that will be used to send messages to clients
#A Iterate over all of the logfiles
#B Clean out finished files if we need more room
#C Upload the file to Redis
#D Notify the listeners that the file is ready
#E Update our local information about Redis' memory use
#F We are out of files, so signal that it is done
#G Clean up the files when we are done
#H How we actually perform the cleanup from Redis
#END
# <start id="_1314_14473_9213"/>
def process_logs_from_redis(conn, id, callback):
    """Client loop: consume log files announced on the chat, line by line.

    For each announced file, every line is passed to `callback(conn, line)`,
    then callback(conn, None) signals end-of-file and the ':done' counter is
    incremented so the source can clean up. Returns when the ':done'
    sentinel message arrives.
    """
    while 1:
        fdata = fetch_pending_messages(conn, id)  #A (chat_id, messages) pairs
        for ch, mdata in fdata:
            for message in mdata:
                logfile = message['message']
                if logfile == ':done':  #B source finished -- stop
                    return  #B
                elif not logfile:
                    continue
                block_reader = readblocks  #C
                if logfile.endswith('.gz'):  #C gz files need inline decompression
                    block_reader = readblocks_gz  #C
                for line in readlines(conn, ch+logfile, block_reader):  #D
                    callback(conn, line)  #E
                callback(conn, None)  #F end-of-file signal (flushes aggregates)
                conn.incr(ch + logfile + ':done')  #G report completion
        if not fdata:
            time.sleep(.1)
# <end id="_1314_14473_9213"/>
#A Fetch the list of files
#B No more logfiles
#C Choose a block reader
#D Iterate over the lines
#E Pass each line to the callback
#F Force a flush of our aggregate caches
#G Report that we are finished with the log
#END
# <start id="_1314_14473_9221"/>
def readlines(conn, key, rblocks):
    """Yield complete '\\n'-terminated lines from a block-producing reader.

    `rblocks(conn, key)` must yield string chunks and finally an empty
    string; whatever trails the last newline is yielded (unterminated)
    when that empty chunk arrives.
    """
    buffered = ''
    for chunk in rblocks(conn, key):
        buffered += chunk
        newline_at = buffered.rfind('\n')
        if newline_at >= 0:
            # Emit every complete line, keep the partial tail buffered.
            for line in buffered[:newline_at].split('\n'):
                yield line + '\n'
            buffered = buffered[newline_at + 1:]
        if not chunk:
            # Reader exhausted: flush any unterminated remainder.
            yield buffered
            break
# <end id="_1314_14473_9221"/>
#A Find the rightmost linebreak if any - rfind() returns -1 on failure
#B We found a line break
#C Split on all of the line breaks
#D Yield each line
#E Keep track of the trailing data
#F We are out of data
#END
# <start id="_1314_14473_9225"/>
def readblocks(conn, key, blocksize=2**17):
    """Yield successive `blocksize` chunks of a Redis string, then ''.

    Uses SUBSTR with inclusive end offsets; a short (or empty) chunk marks
    the end of the value, after which a final empty string is yielded so
    downstream consumers (e.g. readlines) see an explicit terminator.
    """
    position = 0
    while True:
        chunk = conn.substr(key, position, position + blocksize - 1)
        yield chunk
        position += len(chunk)
        if len(chunk) != blocksize:
            break
    yield ''
# <end id="_1314_14473_9225"/>
#A Keep going while we got as much as we expected
#B Fetch the block
#C Prepare for the next pass
#END
# <start id="_1314_14473_9229"/>
def readblocks_gz(conn, key):
    """Yield decompressed chunks of a gzip-compressed Redis string.

    Parses the gzip member header by hand (magic, FLG bits FEXTRA/FNAME/
    FCOMMENT/FHCRC per RFC 1952), then feeds the raw deflate stream to a
    zlib decompressor created with -MAX_WBITS (headerless mode).
    """
    inp = ''        # buffered bytes until the full header has been seen
    decoder = None  # becomes the zlib decompressor once the header is parsed
    for block in readblocks(conn, key, 2**17):  #A raw 128KB chunks from Redis
        if not decoder:
            inp += block
            try:
                if inp[:3] != "\x1f\x8b\x08":  #B gzip magic + deflate method
                    raise IOError("invalid gzip data")  #B
                i = 10  #B fixed header is 10 bytes
                flag = ord(inp[3])  #B FLG byte
                if flag & 4:  #B FEXTRA: skip little-endian XLEN + payload
                    i += 2 + ord(inp[i]) + 256*ord(inp[i+1])  #B
                if flag & 8:  #B FNAME: NUL-terminated
                    i = inp.index('\0', i) + 1  #B
                if flag & 16:  #B FCOMMENT: NUL-terminated
                    i = inp.index('\0', i) + 1  #B
                if flag & 2:  #B FHCRC: two bytes
                    i += 2  #B
                if i > len(inp):  #C
                    raise IndexError("not enough data")  #C
            except (IndexError, ValueError):  #C header incomplete -- buffer more
                continue  #C
            else:
                block = inp[i:]  #D remainder after the header is compressed data
                inp = None  #D
                decoder = zlib.decompressobj(-zlib.MAX_WBITS)  #D raw deflate stream
                if not block:
                    continue
        if not block:  #E reader exhausted -- emit whatever the decoder holds
            yield decoder.flush()  #E
            break
        yield decoder.decompress(block)  #F
# <end id="_1314_14473_9229"/>
#A Read the raw data from Redis
#B Parse the header information so that we can get the compressed data
#C We haven't read the full header yet
#D We found the header, prepare the decompressor
#E We are out of data, yield the last chunk
#F Yield a decompressed block
#END
class TestCh06(unittest.TestCase):
    """Integration tests for the chapter 6 recipes.

    Python 2 code; requires a local Redis server and uses (and flushes!)
    database 15. Several tests reference helpers (add_update_contact,
    join_guild, ...) defined earlier in this file.
    """

    def setUp(self):
        import redis
        self.conn = redis.Redis(db=15)

    def tearDown(self):
        # Wipe db 15 so tests stay independent.
        self.conn.flushdb()
        del self.conn
        print
        print

    def test_add_update_contact(self):
        """Recent-contacts list: add, promote, remove, autocomplete."""
        import pprint
        conn = self.conn
        conn.delete('recent:user')
        print "Let's add a few contacts..."
        for i in xrange(10):
            add_update_contact(conn, 'user', 'contact-%i-%i'%(i//3, i))
        print "Current recently contacted contacts"
        contacts = conn.lrange('recent:user', 0, -1)
        pprint.pprint(contacts)
        self.assertTrue(len(contacts) >= 10)
        print
        print "Let's pull one of the older ones up to the front"
        add_update_contact(conn, 'user', 'contact-1-4')
        contacts = conn.lrange('recent:user', 0, 2)
        print "New top-3 contacts:"
        pprint.pprint(contacts)
        self.assertEquals(contacts[0], 'contact-1-4')
        print
        print "Let's remove a contact..."
        print remove_contact(conn, 'user', 'contact-2-6')
        contacts = conn.lrange('recent:user', 0, -1)
        print "New contacts:"
        pprint.pprint(contacts)
        self.assertTrue(len(contacts) >= 9)
        print
        print "And let's finally autocomplete on "
        all = conn.lrange('recent:user', 0, -1)
        contacts = fetch_autocomplete_list(conn, 'user', 'c')
        self.assertTrue(all == contacts)
        equiv = [c for c in all if c.startswith('contact-2-')]
        contacts = fetch_autocomplete_list(conn, 'user', 'contact-2-')
        equiv.sort()
        contacts.sort()
        self.assertEquals(equiv, contacts)
        conn.delete('recent:user')

    def test_address_book_autocomplete(self):
        """Guild member autocomplete via zset prefix ranges."""
        self.conn.delete('members:test')
        print "the start/end range of 'abc' is:", find_prefix_range('abc')
        print
        print "Let's add a few people to the guild"
        for name in ['jeff', 'jenny', 'jack', 'jennifer']:
            join_guild(self.conn, 'test', name)
        print
        print "now let's try to find users with names starting with 'je':"
        r = autocomplete_on_prefix(self.conn, 'test', 'je')
        print r
        self.assertTrue(len(r) == 3)
        print "jeff just left to join a different guild..."
        leave_guild(self.conn, 'test', 'jeff')
        r = autocomplete_on_prefix(self.conn, 'test', 'je')
        print r
        self.assertTrue(len(r) == 2)
        self.conn.delete('members:test')

    def test_distributed_locking(self):
        """Lock acquire/timeout/release lifecycle."""
        self.conn.delete('lock:testlock')
        print "Getting an initial lock..."
        self.assertTrue(acquire_lock_with_timeout(self.conn, 'testlock', 1, 1))
        print "Got it!"
        print "Trying to get it again without releasing the first one..."
        self.assertFalse(acquire_lock_with_timeout(self.conn, 'testlock', .01, 1))
        print "Failed to get it!"
        print
        print "Waiting for the lock to timeout..."
        time.sleep(2)
        print "Getting the lock again..."
        r = acquire_lock_with_timeout(self.conn, 'testlock', 1, 1)
        self.assertTrue(r)
        print "Got it!"
        print "Releasing the lock..."
        self.assertTrue(release_lock(self.conn, 'testlock', r))
        print "Released it..."
        print
        print "Acquiring it again..."
        self.assertTrue(acquire_lock_with_timeout(self.conn, 'testlock', 1, 1))
        print "Got it!"
        self.conn.delete('lock:testlock')

    def test_counting_semaphore(self):
        """Fair semaphore: limit enforcement, timeout, release, re-acquire."""
        self.conn.delete('testsem', 'testsem:owner', 'testsem:counter')
        print "Getting 3 initial semaphores with a limit of 3..."
        for i in xrange(3):
            self.assertTrue(acquire_fair_semaphore(self.conn, 'testsem', 3, 1))
        print "Done!"
        print "Getting one more that should fail..."
        self.assertFalse(acquire_fair_semaphore(self.conn, 'testsem', 3, 1))
        print "Couldn't get it!"
        print
        print "Lets's wait for some of them to time out"
        time.sleep(2)
        print "Can we get one?"
        r = acquire_fair_semaphore(self.conn, 'testsem', 3, 1)
        self.assertTrue(r)
        print "Got one!"
        print "Let's release it..."
        self.assertTrue(release_fair_semaphore(self.conn, 'testsem', r))
        print "Released!"
        print
        print "And let's make sure we can get 3 more!"
        for i in xrange(3):
            self.assertTrue(acquire_fair_semaphore(self.conn, 'testsem', 3, 1))
        print "We got them!"
        self.conn.delete('testsem', 'testsem:owner', 'testsem:counter')

    def test_delayed_tasks(self):
        """execute_later + poll_queue move delayed tasks onto the queue."""
        import threading
        self.conn.delete('queue:tqueue', 'delayed:')
        print "Let's start some regular and delayed tasks..."
        for delay in [0, .5, 0, 1.5]:
            self.assertTrue(execute_later(self.conn, 'tqueue', 'testfn', [], delay))
        r = self.conn.llen('queue:tqueue')
        print "How many non-delayed tasks are there (should be 2)?", r
        self.assertEquals(r, 2)
        print
        print "Let's start up a thread to bring those delayed tasks back..."
        t = threading.Thread(target=poll_queue, args=(self.conn,))
        t.setDaemon(1)
        t.start()
        print "Started."
        print "Let's wait for those tasks to be prepared..."
        time.sleep(2)
        # Signal the poller thread to exit via the module-level flag.
        global QUIT
        QUIT = True
        t.join()
        r = self.conn.llen('queue:tqueue')
        print "Waiting is over, how many tasks do we have (should be 4)?", r
        self.assertEquals(r, 4)
        self.conn.delete('queue:tqueue', 'delayed:')

    def test_multi_recipient_messaging(self):
        """Chat: create, send, and fetch identical pending messages."""
        self.conn.delete('ids:chat:', 'msgs:1', 'ids:1', 'seen:joe', 'seen:jeff', 'seen:jenny')
        print "Let's create a new chat session with some recipients..."
        chat_id = create_chat(self.conn, 'joe', ['jeff', 'jenny'], 'message 1')
        print "Now let's send a few messages..."
        for i in xrange(2, 5):
            send_message(self.conn, chat_id, 'joe', 'message %s'%i)
        print
        print "And let's get the messages that are waiting for jeff and jenny..."
        r1 = fetch_pending_messages(self.conn, 'jeff')
        r2 = fetch_pending_messages(self.conn, 'jenny')
        print "They are the same?", r1==r2
        self.assertEquals(r1, r2)
        print "Those messages are:"
        import pprint
        pprint.pprint(r1)
        self.conn.delete('ids:chat:', 'msgs:1', 'ids:1', 'seen:joe', 'seen:jeff', 'seen:jenny')

    def test_file_distribution(self):
        """End-to-end log distribution: copy to Redis, process, clean up."""
        import gzip, shutil, tempfile, threading
        self.conn.delete('test:temp-1.txt', 'test:temp-2.txt', 'test:temp-3.txt', 'msgs:test:', 'seen:0', 'seen:source', 'ids:test:', 'chat:test:')
        dire = tempfile.mkdtemp()
        try:
            print "Creating some temporary 'log' files..."
            with open(dire + '/temp-1.txt', 'wb') as f:
                f.write('one line\n')
            with open(dire + '/temp-2.txt', 'wb') as f:
                f.write(10000 * 'many lines\n')
            out = gzip.GzipFile(dire + '/temp-3.txt.gz', mode='wb')
            for i in xrange(100000):
                out.write('random line %s\n'%(os.urandom(16).encode('hex'),))
            out.close()
            size = os.stat(dire + '/temp-3.txt.gz').st_size
            print "Done."
            print
            print "Starting up a thread to copy logs to redis..."
            t = threading.Thread(target=copy_logs_to_redis, args=(self.conn, dire, 'test:', 1, size))
            t.setDaemon(1)
            t.start()
            print "Let's pause to let some logs get copied to Redis..."
            time.sleep(.25)
            print
            print "Okay, the logs should be ready. Let's process them!"
            # Mutable cells so the nested callback can update them (py2 has
            # no `nonlocal`).
            index = [0]
            counts = [0, 0, 0]
            def callback(conn, line):
                if line is None:
                    print "Finished with a file %s, linecount: %s"%(index[0], counts[index[0]])
                    index[0] += 1
                elif line or line.endswith('\n'):
                    counts[index[0]] += 1
            print "Files should have 1, 10000, and 100000 lines"
            process_logs_from_redis(self.conn, '0', callback)
            self.assertEquals(counts, [1, 10000, 100000])
            print
            print "Let's wait for the copy thread to finish cleaning up..."
            t.join()
            print "Done cleaning out Redis!"
        finally:
            print "Time to clean up files..."
            shutil.rmtree(dire)
            print "Cleaned out files!"
        self.conn.delete('test:temp-1.txt', 'test:temp-2.txt', 'test:temp-3.txt', 'msgs:test:', 'seen:0', 'seen:source', 'ids:test:', 'chat:test:')
if __name__ == '__main__':
    # Run the integration tests when this file is executed directly.
    unittest.main()
|
monitored_session_test.py | # pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for monitored_session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import glob
import os
import sys
import threading
import time
import traceback
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import debug_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.distribute import collective_all_reduce_strategy
from tensorflow.python.distribute import distribute_coordinator
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import coordinator
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import summary_io
from tensorflow.python.training import training_util
def latest_summaries(base_dir):
  """Parse summary events from latest event file in base_dir."""
  event_files = sorted(glob.glob(os.path.join(base_dir, 'events.*')))
  if not event_files:
    # No event files written yet: nothing to parse.
    return []
  events = summary_io.summary_iterator(event_files[-1])
  return [event for event in events if event.HasField('summary')]
class ScaffoldTest(test.TestCase):
  """Scaffold tests.

  Verifies lazy op creation, graph-default construction, caching via
  collections, explicit overrides, and copy_from_scaffold behavior.
  """

  def test_nothing_created_before_finalize(self):
    # All scaffold ops stay None until finalize() is called.
    with ops.Graph().as_default():
      scaffold = monitored_session.Scaffold()
      self.assertEqual(None, scaffold.init_op)
      self.assertEqual(None, scaffold.init_feed_dict)
      self.assertEqual(None, scaffold.init_fn)
      self.assertEqual(None, scaffold.ready_op)
      self.assertEqual(None, scaffold.ready_for_local_init_op)
      self.assertEqual(None, scaffold.local_init_op)
      self.assertEqual(None, scaffold.saver)

  def test_defaults_empty_graph(self):
    # finalize() builds default ops; running init ops empties the ready ops.
    with ops.Graph().as_default():
      scaffold = monitored_session.Scaffold()
      variables.VariableV1(1, name='my_var')
      variables.VariableV1(
          2, name='my_local_var', collections=[ops.GraphKeys.LOCAL_VARIABLES])
      scaffold.finalize()
      self.assertTrue(isinstance(scaffold.init_op, ops.Operation))
      self.assertEqual(None, scaffold.init_feed_dict)
      self.assertEqual(None, scaffold.init_fn)
      self.assertTrue(isinstance(scaffold.ready_op, ops.Tensor))
      self.assertTrue(isinstance(scaffold.ready_for_local_init_op, ops.Tensor))
      self.assertTrue(isinstance(scaffold.local_init_op, ops.Operation))
      self.assertTrue(isinstance(scaffold.saver, saver_lib.Saver))
      with self.cached_session() as sess:
        self.assertItemsEqual([b'my_var', b'my_local_var'],
                              sess.run(scaffold.ready_op))
        self.assertItemsEqual([b'my_var'],
                              sess.run(scaffold.ready_for_local_init_op))
        sess.run(scaffold.init_op)
        self.assertEqual(0, len(sess.run(scaffold.ready_for_local_init_op)))
        sess.run(scaffold.local_init_op)
        self.assertEqual(0, len(sess.run(scaffold.ready_op)))

  def test_defaults_no_variables(self):
    # Defaults are still created in a graph with no variables at all.
    with ops.Graph().as_default():
      scaffold = monitored_session.Scaffold()
      constant_op.constant(1, name='my_const')
      scaffold.finalize()
      self.assertTrue(isinstance(scaffold.init_op, ops.Operation))
      self.assertEqual(None, scaffold.init_feed_dict)
      self.assertEqual(None, scaffold.init_fn)
      self.assertTrue(isinstance(scaffold.ready_op, ops.Tensor))
      self.assertTrue(isinstance(scaffold.ready_for_local_init_op, ops.Tensor))
      self.assertTrue(isinstance(scaffold.local_init_op, ops.Operation))
      self.assertTrue(isinstance(scaffold.saver, saver_lib.Saver))

  def test_caches_values(self):
    # Two scaffolds finalized in the same graph share ops via collections.
    with ops.Graph().as_default():
      variables.VariableV1([1])
      scaffold1 = monitored_session.Scaffold()
      scaffold1.finalize()
      scaffold2 = monitored_session.Scaffold()
      scaffold2.finalize()
      self.assertEqual(scaffold1.init_op, scaffold2.init_op)
      self.assertEqual(scaffold1.ready_op, scaffold2.ready_op)
      self.assertEqual(scaffold1.ready_for_local_init_op,
                       scaffold2.ready_for_local_init_op)
      self.assertEqual(scaffold1.local_init_op, scaffold2.local_init_op)
      self.assertEqual(scaffold1.saver, scaffold2.saver)

  def test_raise_error_if_more_than_one_cached_item(self):
    # An ambiguous SAVERS collection makes finalize() fail loudly.
    with ops.Graph().as_default():
      variables.VariableV1([1])
      ops.add_to_collection(ops.GraphKeys.SAVERS, saver_lib.Saver())
      ops.add_to_collection(ops.GraphKeys.SAVERS, saver_lib.Saver())
      with self.assertRaisesRegexp(RuntimeError, 'More than one item'):
        monitored_session.Scaffold().finalize()

  def test_uses_passed_values(self):
    # Explicitly supplied ops are kept verbatim by finalize().
    with ops.Graph().as_default():
      variables.VariableV1([1])
      saver = saver_lib.Saver()
      scaffold = monitored_session.Scaffold(
          init_op=2,
          init_feed_dict=3,
          init_fn=lambda scaffold, sess: 4,
          ready_op=5,
          ready_for_local_init_op=6,
          local_init_op=7,
          saver=saver)
      scaffold.finalize()
      self.assertEqual(2, scaffold.init_op)
      self.assertEqual(3, scaffold.init_feed_dict)
      self.assertTrue(callable(scaffold.init_fn))
      self.assertEqual(5, scaffold.ready_op)
      self.assertEqual(6, scaffold.ready_for_local_init_op)
      self.assertEqual(7, scaffold.local_init_op)
      self.assertEqual(saver, scaffold.saver)

  def test_graph_is_finalized(self):
    # finalize() freezes the graph against further op creation.
    with ops.Graph().as_default():
      variables.VariableV1([1])
      monitored_session.Scaffold().finalize()
      with self.assertRaisesRegexp(RuntimeError,
                                   'Graph is finalized and cannot be modified'):
        constant_op.constant([0])

  def test_new_scaffold_from_default_scaffold(self):
    # Copying from a default scaffold: explicit args still win.
    scaffold1 = monitored_session.Scaffold()
    with ops.Graph().as_default():
      variables.VariableV1([1])
      saver = saver_lib.Saver()
      scaffold2 = monitored_session.Scaffold(
          init_op=2,
          init_feed_dict=3,
          init_fn=lambda scaffold, sess: 4,
          ready_op=5,
          ready_for_local_init_op=6,
          local_init_op=7,
          saver=saver,
          copy_from_scaffold=scaffold1)
      scaffold2.finalize()
      self.assertEqual(2, scaffold2.init_op)
      self.assertEqual(3, scaffold2.init_feed_dict)
      self.assertTrue(callable(scaffold2.init_fn))
      self.assertEqual(5, scaffold2.ready_op)
      self.assertEqual(6, scaffold2.ready_for_local_init_op)
      self.assertEqual(7, scaffold2.local_init_op)
      self.assertEqual(saver, scaffold2.saver)

  def test_new_scaffold_from_existing_scaffold(self):
    # Copying from a populated scaffold: explicit args override copies.
    with ops.Graph().as_default():
      variables.VariableV1([1])
      saver = saver_lib.Saver()
      scaffold1 = monitored_session.Scaffold(
          init_op=2,
          init_feed_dict=3,
          init_fn=lambda scaffold, sess: 4,
          ready_op=5,
          ready_for_local_init_op=6,
          local_init_op=7,
          saver=saver)
      scaffold2 = monitored_session.Scaffold(
          init_op=4,
          init_feed_dict=6,
          init_fn=lambda scaffold, sess: 8,
          ready_op=10,
          ready_for_local_init_op=12,
          local_init_op=14,
          saver=saver,
          copy_from_scaffold=scaffold1)
      scaffold2.finalize()
      self.assertEqual(4, scaffold2.init_op)
      self.assertEqual(6, scaffold2.init_feed_dict)
      self.assertTrue(callable(scaffold2.init_fn))
      self.assertEqual(10, scaffold2.ready_op)
      self.assertEqual(12, scaffold2.ready_for_local_init_op)
      self.assertEqual(14, scaffold2.local_init_op)
      self.assertEqual(saver, scaffold2.saver)

  def test_copy_from_scaffold_is_scaffold(self):
    # copy_from_scaffold must be an actual Scaffold instance.
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(
          TypeError, 'copy_from_scaffold is not a Scaffold instance'):
        monitored_session.Scaffold(copy_from_scaffold=1)
def _test_dir(temp_dir, test_name):
"""Create an empty dir to use for tests.
Args:
temp_dir: Tmp directory path.
test_name: Name of the test.
Returns:
Absolute path to the test directory.
"""
test_dir = os.path.join(temp_dir, test_name)
if os.path.isdir(test_dir):
for f in glob.glob('%s/*' % test_dir):
os.remove(f)
else:
os.makedirs(test_dir)
return test_dir
class FakeHook(session_run_hook.SessionRunHook):
  """Session hook that records every callback invocation for test assertions."""

  def __init__(self):
    self.should_stop = False  # when True, after_run() requests a stop
    self.request = None  # value returned from before_run()
    self.call_counter = collections.Counter()  # per-callback invocation counts
    self.last_run_context = None  # most recent run_context passed in
    self.last_run_values = None  # most recent run_values passed in

  def begin(self):
    self.call_counter['begin'] += 1

  def after_create_session(self, session, coord):  # pylint: disable=unused-argument
    self.call_counter['after_create_session'] += 1

  def before_run(self, run_context):
    self.call_counter['before_run'] += 1
    self.last_run_context = run_context
    return self.request

  def after_run(self, run_context, run_values):
    self.call_counter['after_run'] += 1
    self.last_run_values = run_values
    if self.should_stop:
      run_context.request_stop()

  def end(self, session):
    self.call_counter['end'] += 1
class MonitoredTrainingSessionTest(test.TestCase):
"""Tests MonitoredTrainingSession."""
def test_saving_restoring_checkpoint(self):
logdir = _test_dir(self.get_temp_dir(), 'test_saving_restoring_checkpoint')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
with monitored_session.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
# A restart will find the checkpoint and recover automatically.
with monitored_session.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir) as session:
self.assertEqual(2, session.run(gstep))
def test_save_checkpoint_steps(self):
logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_steps')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
with monitored_session.MonitoredTrainingSession(
is_chief=True,
checkpoint_dir=logdir,
save_checkpoint_steps=100,
log_step_count_steps=10) as session:
for _ in range(100):
session.run(new_gstep)
# A restart will find the checkpoint and recover automatically.
with monitored_session.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir) as session:
self.assertEqual(100, session.run(gstep))
def test_save_checkpoint_secs(self):
logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_secs')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
with monitored_session.MonitoredTrainingSession(
is_chief=True,
checkpoint_dir=logdir,
save_checkpoint_secs=0.1,
log_step_count_steps=10) as session:
session.run(new_gstep)
time.sleep(0.2)
for _ in range(10):
session.run(new_gstep)
# A restart will find the checkpoint and recover automatically.
with monitored_session.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir) as session:
self.assertEqual(11, session.run(gstep))
  def test_summaries_steps(self):
    """`save_summaries_steps` writes both user and step-rate summaries."""
    logdir = _test_dir(self.get_temp_dir(), 'test_summaries_steps')
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      new_gstep = state_ops.assign_add(gstep, 1)
      summary.scalar('my_summary_tag', new_gstep * 2)
      with monitored_session.MonitoredTrainingSession(
          is_chief=True,
          checkpoint_dir=logdir,
          save_summaries_steps=100,
          log_step_count_steps=10) as session:
        # 101 runs so the 100-step summary boundary is crossed.
        for _ in range(101):
          session.run(new_gstep)
      summaries = latest_summaries(logdir)
      tags = [s.summary.value[0].tag for s in summaries]
      self.assertIn('my_summary_tag', tags)
      self.assertIn('global_step/sec', tags)
  def test_summaries_secs(self):
    """`save_summaries_secs` writes summaries after the time interval."""
    logdir = _test_dir(self.get_temp_dir(), 'test_summaries_secs')
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      new_gstep = state_ops.assign_add(gstep, 1)
      summary.scalar('my_summary_tag', new_gstep * 2)
      with monitored_session.MonitoredTrainingSession(
          is_chief=True,
          checkpoint_dir=logdir,
          save_summaries_steps=None,
          save_summaries_secs=0.1,
          log_step_count_steps=10) as session:
        session.run(new_gstep)
        # Sleep past the 0.1s interval so subsequent runs emit summaries.
        time.sleep(0.2)
        for _ in range(101):
          session.run(new_gstep)
      summaries = latest_summaries(logdir)
      tags = [s.summary.value[0].tag for s in summaries]
      self.assertIn('my_summary_tag', tags)
      self.assertIn('global_step/sec', tags)
  def test_custom_saving(self):
    """Chief-only hooks run, and `save_checkpoint_secs=0` disables saving."""
    logdir = _test_dir(self.get_temp_dir(), 'test_saving_restoring_checkpoint')
    fake_hook = FakeHook()
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      with monitored_session.MonitoredTrainingSession(
          is_chief=True,
          checkpoint_dir=logdir,
          chief_only_hooks=[fake_hook],
          save_checkpoint_secs=0) as session:
        self.assertEqual(0, session.run(gstep))
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
      # Check whether custom hook called or not
      self.assertEqual(1, fake_hook.call_counter['begin'])
      # A restart will not find the checkpoint, since we didn't save.
      with monitored_session.MonitoredTrainingSession(
          is_chief=True, checkpoint_dir=logdir) as session:
        self.assertEqual(0, session.run(gstep))
class MockExtended(object):
  """Stand-in for a distribution strategy's `extended` object.

  Holds the flags that `MonitoredTrainingSession` consults to decide whether
  this worker should initialize, checkpoint, and save summaries.
  """

  def __init__(self, between_graph, should_init, should_checkpoint,
               should_save_summary):
    # Bind all four flags in one tuple assignment.
    (self.experimental_between_graph, self.experimental_should_init,
     self.should_checkpoint, self.should_save_summary) = (
         between_graph, should_init, should_checkpoint, should_save_summary)
class MockStrategy(object):
  """Minimal mock distribution strategy exposing only `.extended`."""

  def __init__(self,
               between_graph=False,
               should_init=True,
               should_checkpoint=None,
               should_save_summary=None):
    # Delegate flag storage to MockExtended, passing each flag by keyword.
    self.extended = MockExtended(
        between_graph=between_graph,
        should_init=should_init,
        should_checkpoint=should_checkpoint,
        should_save_summary=should_save_summary)
class MonitoredTrainingSessionWithDistributeCoordinatorTest(test.TestCase):
  """Test distribute coordinator controls summary saving and checkpointing."""

  def test_summary_hook_enabled(self):
    """Summaries are written when the strategy sets should_save_summary."""
    context = distribute_coordinator._WorkerContext(
        MockStrategy(should_save_summary=True), None, None, None)
    logdir = _test_dir(self.get_temp_dir(), 'test_summaries_enabled')
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      new_gstep = state_ops.assign_add(gstep, 1)
      summary.scalar('my_summary_tag', new_gstep * 2)
      with context, monitored_session.MonitoredTrainingSession(
          checkpoint_dir=logdir,
          save_summaries_steps=100,
          log_step_count_steps=10) as session:
        for _ in range(101):
          session.run(new_gstep)
      summaries = latest_summaries(logdir)
      tags = [s.summary.value[0].tag for s in summaries]
      self.assertIn('my_summary_tag', tags)
      self.assertIn('global_step/sec', tags)

  def test_summary_hook_disabled(self):
    """No summaries are written when should_save_summary is False."""
    context = distribute_coordinator._WorkerContext(
        MockStrategy(should_save_summary=False), None, None, None)
    logdir = _test_dir(self.get_temp_dir(), 'test_summaries_disabled')
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      new_gstep = state_ops.assign_add(gstep, 1)
      summary.scalar('my_summary_tag', new_gstep * 2)
      with context, monitored_session.MonitoredTrainingSession(
          checkpoint_dir=logdir,
          save_summaries_steps=100,
          log_step_count_steps=10) as session:
        for _ in range(101):
          session.run(new_gstep)
      # No summary is saved.
      summaries = latest_summaries(logdir)
      self.assertEqual(len(summaries), 0)

  def test_checkpoint_hook_enabled(self):
    """Checkpoints are written when should_checkpoint is True."""
    context = distribute_coordinator._WorkerContext(
        MockStrategy(should_checkpoint=True), None, None, None)
    logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_enabled')
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      new_gstep = state_ops.assign_add(gstep, 1)
      with context, monitored_session.MonitoredTrainingSession(
          checkpoint_dir=logdir,
          save_checkpoint_steps=100,
          log_step_count_steps=10) as session:
        for _ in range(100):
          session.run(new_gstep)
      # A restart will find the checkpoint and recover automatically.
      with monitored_session.MonitoredTrainingSession(
          is_chief=True, checkpoint_dir=logdir) as session:
        self.assertEqual(100, session.run(gstep))

  def test_checkpoint_hook_disabled(self):
    """No checkpoint is written when should_checkpoint is False."""
    context = distribute_coordinator._WorkerContext(
        MockStrategy(should_checkpoint=False), None, None, None)
    logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_disabled')
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      new_gstep = state_ops.assign_add(gstep, 1)
      with context, monitored_session.MonitoredTrainingSession(
          checkpoint_dir=logdir,
          save_checkpoint_steps=100,
          log_step_count_steps=10) as session:
        for _ in range(100):
          session.run(new_gstep)
      # No checkpoint is saved.
      checkpoint = checkpoint_management.latest_checkpoint(logdir)
      self.assertIsNone(checkpoint)

  def test_checkpoint_hook_enable_on_non_chief_with_collective_ops(self):
    """Non-chief collective-ops workers checkpoint only to a temp directory."""
    strategy = collective_all_reduce_strategy.CollectiveAllReduceStrategy()
    strategy.extended._is_chief = False
    context = distribute_coordinator._WorkerContext(strategy, None, 'worker', 1)
    logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_disabled')
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      new_gstep = state_ops.assign_add(gstep, 1)
      with context, monitored_session.MonitoredTrainingSession(
          checkpoint_dir=logdir,
          save_checkpoint_steps=100,
          log_step_count_steps=10) as session:
        for _ in range(100):
          session.run(new_gstep)
      # No checkpoint is saved.
      checkpoint = checkpoint_management.latest_checkpoint(logdir)
      self.assertIsNone(checkpoint)
      # But saved to a temporary directory.
      checkpoint = checkpoint_management.latest_checkpoint(
          os.path.join(logdir, 'tmp_worker_1'))
      self.assertIsNotNone(checkpoint)
class StopAtNSession(monitored_session._WrappedSession):
  """A wrapped session that stops at the N-th call to _check_stop."""

  def __init__(self, sess, n):
    super(StopAtNSession, self).__init__(sess)
    # Number of _check_stop calls that still return False before stopping.
    self._count = n

  def _check_stop(self):
    remaining = self._count
    if remaining == 0:
      return True
    self._count = remaining - 1
    return False
class WrappedSessionTest(test.TestCase):
  """_WrappedSession tests."""

  @test_util.run_deprecated_v1
  def test_properties(self):
    """The wrapper exposes the underlying session's graph and target string."""
    with self.cached_session() as sess:
      constant_op.constant(0.0)
      wrapped_sess = monitored_session._WrappedSession(sess)
      # assertEqual replaces the deprecated assertEquals alias, which is
      # removed in Python 3.12.
      self.assertEqual(sess.graph, wrapped_sess.graph)
      self.assertEqual(sess.sess_str, wrapped_sess.sess_str)

  @test_util.run_deprecated_v1
  def test_should_stop_on_close(self):
    """Closing the wrapper makes should_stop() return True."""
    with self.cached_session() as sess:
      wrapped_sess = monitored_session._WrappedSession(sess)
      self.assertFalse(wrapped_sess.should_stop())
      wrapped_sess.close()
      self.assertTrue(wrapped_sess.should_stop())

  @test_util.run_deprecated_v1
  def test_should_stop_uses_check_stop(self):
    """should_stop() delegates to the subclass's _check_stop()."""
    with self.cached_session() as sess:
      wrapped_sess = StopAtNSession(sess, 3)
      self.assertFalse(wrapped_sess.should_stop())
      self.assertFalse(wrapped_sess.should_stop())
      self.assertFalse(wrapped_sess.should_stop())
      self.assertTrue(wrapped_sess.should_stop())

  @test_util.run_deprecated_v1
  def test_should_stop_delegates_to_wrapped_session(self):
    """An outer wrapper consults the inner wrapped session's stop state."""
    with self.cached_session() as sess:
      wrapped_sess0 = StopAtNSession(sess, 4)
      wrapped_sess1 = monitored_session._WrappedSession(wrapped_sess0)
      self.assertFalse(wrapped_sess1.should_stop())
      self.assertFalse(wrapped_sess1.should_stop())
      self.assertFalse(wrapped_sess1.should_stop())
      self.assertFalse(wrapped_sess1.should_stop())
      self.assertTrue(wrapped_sess1.should_stop())

  @test_util.run_deprecated_v1
  def test_close_twice(self):
    """close() is idempotent; should_stop() stays True."""
    with self.cached_session() as sess:
      wrapped_sess = monitored_session._WrappedSession(sess)
      wrapped_sess.close()
      self.assertTrue(wrapped_sess.should_stop())
      wrapped_sess.close()
      self.assertTrue(wrapped_sess.should_stop())

  @test_util.run_deprecated_v1
  def test_run(self):
    """run() is forwarded to the underlying session unchanged."""
    with self.cached_session() as sess:
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      self.assertEqual(42, sess.run(v, feed_dict={c: 42}))
      wrapped_sess = monitored_session._WrappedSession(sess)
      self.assertEqual(51, wrapped_sess.run(v, feed_dict={c: 51}))
def busy_wait_for_coord_stop(coord):
  """Block until `coord.should_stop()` is True, polling every millisecond."""
  while True:
    if coord.should_stop():
      return
    time.sleep(0.001)
class CoordinatedSessionTest(test.TestCase):
  """_CoordinatedSession tests."""

  @test_util.run_deprecated_v1
  def test_properties(self):
    """The coordinated session exposes the wrapped session's properties."""
    with self.cached_session() as sess:
      constant_op.constant(0.0)
      coord = coordinator.Coordinator()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      # assertEqual replaces the deprecated assertEquals alias, which is
      # removed in Python 3.12.
      self.assertEqual(sess.graph, coord_sess.graph)
      self.assertEqual(sess.sess_str, coord_sess.sess_str)

  @test_util.run_deprecated_v1
  def test_run(self):
    """run() is forwarded to the wrapped session."""
    with self.cached_session() as sess:
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      coord = coordinator.Coordinator()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertEqual(42, coord_sess.run(v, feed_dict={c: 42}))

  @test_util.run_deprecated_v1
  def test_should_stop_on_close(self):
    """Closing the session makes should_stop() return True."""
    with self.cached_session() as sess:
      coord = coordinator.Coordinator()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertFalse(coord_sess.should_stop())
      coord_sess.close()
      self.assertTrue(coord_sess.should_stop())

  @test_util.run_deprecated_v1
  def test_should_stop_on_coord_stop(self):
    """A coordinator stop request makes should_stop() return True."""
    with self.cached_session() as sess:
      coord = coordinator.Coordinator()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertFalse(coord_sess.should_stop())
      coord.request_stop()
      self.assertTrue(coord_sess.should_stop())

  @test_util.run_deprecated_v1
  def test_dont_request_stop_on_exception_in_main_thread(self):
    """A run() exception in the main thread does not stop the coordinator."""
    with self.cached_session() as sess:
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      coord = coordinator.Coordinator()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertFalse(coord_sess.should_stop())
      self.assertEqual(0, coord_sess.run(c))
      self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))
      # assertRaisesRegex replaces the deprecated assertRaisesRegexp alias,
      # which is removed in Python 3.12.
      with self.assertRaisesRegex(TypeError, 'None has invalid type'):
        coord_sess.run([None], feed_dict={c: 2})
      self.assertFalse(coord.should_stop())
      self.assertFalse(coord_sess.should_stop())

  @test_util.run_deprecated_v1
  def test_stop_threads_on_close_after_exception(self):
    """close() after a run() exception still joins the registered threads."""
    with self.cached_session() as sess:
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      coord = coordinator.Coordinator()
      threads = [
          threading.Thread(
              target=busy_wait_for_coord_stop, args=(coord,)) for _ in range(3)
      ]
      for t in threads:
        coord.register_thread(t)
        t.start()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertFalse(coord_sess.should_stop())
      for t in threads:
        self.assertTrue(t.is_alive())
      self.assertEqual(0, coord_sess.run(c))
      for t in threads:
        self.assertTrue(t.is_alive())
      self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))
      for t in threads:
        self.assertTrue(t.is_alive())
      with self.assertRaisesRegex(TypeError, 'None has invalid type'):
        coord_sess.run([None], feed_dict={c: 2})
      coord_sess.close()
      for t in threads:
        self.assertFalse(t.is_alive())
      self.assertTrue(coord.should_stop())
      self.assertTrue(coord_sess.should_stop())

  def test_stop_threads_on_close(self):
    """close() requests a coordinator stop and joins the registered threads."""
    with self.cached_session() as sess:
      coord = coordinator.Coordinator()
      threads = [
          threading.Thread(
              target=busy_wait_for_coord_stop, args=(coord,)) for _ in range(3)
      ]
      for t in threads:
        coord.register_thread(t)
        t.start()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      coord_sess.close()
      for t in threads:
        self.assertFalse(t.is_alive())
      self.assertTrue(coord.should_stop())
      self.assertTrue(coord_sess.should_stop())

  @test_util.run_deprecated_v1
  def test_propagates_exception_trace(self):
    """Exceptions keep the original stack trace through the session wrappers."""
    assertion = control_flow_ops.Assert(False, ['This should fail.'])
    with self.cached_session() as sess:
      coord = coordinator.Coordinator(clean_stop_exception_types=())
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      try:
        coord_sess.run([assertion])
        self.fail('No exception was raised by assertion.')
      except errors_impl.InvalidArgumentError:
        # Extract the name of the file where the exception was first raised.
        _, _, exc_traceback = sys.exc_info()
        tb = traceback.extract_tb(exc_traceback)
        exc_source_file = tb[-1][0]
        exc_source_basename = os.path.basename(exc_source_file)
        # If it's monitored_session.py then the original stack trace was not
        # correctly propagated.
        self.assertIn(
            exc_source_basename, ['session.py', 'monitored_session.py'],
            'The exception was raised from an unrecognized file. This unit '
            'test probably needs to be updated. Traceback:\n%s\n' % tb)
        self.assertEqual(
            exc_source_basename, 'session.py',
            'Original stack trace was not propagated by MonitoredSession. '
            'Traceback:\n%s' % tb)
class AbortAtNSession(object):
  """A mock session that aborts at the N-th run call."""

  def __init__(self, sess, n):
    # Delegate target and the number of run() calls left before aborting.
    self._sess = sess
    self._count = n

  def close(self):
    """No-op; there is nothing to release on the mock."""
    pass

  def run(self, *args, **kwargs):
    remaining = self._count
    if remaining == 0:
      raise errors_impl.AbortedError('Aborted at N', None, None)
    self._count = remaining - 1
    return self._sess.run(*args, **kwargs)
class StopCoordinatorWithException(session_run_hook.SessionRunHook):
  """With this hook Coordinator throws an exception after N-runs."""

  def __init__(self, calls_before_stopping, exception_to_raise=None):
    # Guards against spawning more than one side thread across session
    # re-creations (after_create_session may be called multiple times).
    self._started_the_side_thread_already = False
    # Protects _calls_before_stopping, which is decremented on the main
    # thread and read on the side thread.
    self._lock = threading.Lock()
    # Set by the side thread once the exception has been handed to the
    # coordinator; after_run waits on it to make the failure deterministic.
    self._stored_exception_event = threading.Event()
    self._calls_before_stopping = calls_before_stopping
    self._exception_to_raise = (exception_to_raise or errors_impl.AbortedError(
        None, None, 'Aborted at N'))

  def _maybe_stop_with_exception(self, coord):
    """Side-thread body: spin until the countdown hits zero, then fail coord."""
    while True:
      with self._lock:
        if self._calls_before_stopping == 0:
          try:
            # Raise-and-catch so the exception carries a traceback when it
            # is stored in the coordinator.
            raise self._exception_to_raise
          except Exception as e:  # pylint: disable=broad-except
            coord.request_stop(e)
            self._stored_exception_event.set()
            break

  def after_create_session(self, session, coord):
    if self._started_the_side_thread_already:
      return
    separate_thread = threading.Thread(
        target=self._maybe_stop_with_exception, args=(coord,))
    coord.register_thread(separate_thread)
    separate_thread.start()
    self._started_the_side_thread_already = True
    # Coordinator will take care of joining `separate_thread`.

  def after_run(self, run_context, run_values):
    stopping_now = False
    with self._lock:
      self._calls_before_stopping -= 1
      if self._calls_before_stopping == 0:
        stopping_now = True
    if stopping_now:
      # Block until the side thread has actually delivered the exception to
      # the coordinator, so the next session interaction observes it.
      self._stored_exception_event.wait()
class FailTrainingAfterCoordinatorStopped(StopCoordinatorWithException):
  """With this hook training encounters an exception after N-runs."""

  def __init__(self, calls_before_stopping):
    StopCoordinatorWithException.__init__(self, calls_before_stopping)
    # Captured in after_create_session so after_run can query it.
    self._coord = None

  def after_create_session(self, session, coord):
    self._coord = coord
    return StopCoordinatorWithException.after_create_session(
        self, session, coord)

  def after_run(self, run_context, run_values):
    StopCoordinatorWithException.after_run(self, run_context, run_values)
    try:
      # After a `run`, an exception could have been stored inside the
      # coordinator.
      self._coord.raise_requested_exception()
    except errors_impl.AbortedError:
      # In real world, the main thread may or may not know about the exception
      # that stopped the coordinator. Because the coordinator has stopped, the
      # main thread could have gotten stuck as well (for example, the
      # coordinator was supposed to execute `FIFOQueue.enqueue` while the main
      # thread is executing a blocking `FIFOQueue.dequeue`). After it got stuck,
      # the session is going to get garbage collected after some time with:
      raise errors_impl.CancelledError(None, None,
                                       'Session got garbage-collected.')
class CountingSessionCreator(object):
  """A creator that counts the number of created sessions."""

  def __init__(self, session):
    self._session = session
    # We only have one session per test case. We can't re-create it, thus
    # it shouldn't be closed.
    self._session.close = lambda *args: None
    self._num_calls = 0

  @property
  def number_of_sessions_created(self):
    """Number of times create_session() has been invoked."""
    return self._num_calls

  def create_session(self):
    self._num_calls += 1
    return self._session
class RecoverableSessionTest(test.TestCase):
  """_RecoverableSession tests."""

  class _SessionReturner(object):
    """Trivial creator that always hands back the same session."""

    def __init__(self, sess):
      self._sess = sess

    def create_session(self):
      return self._sess

  @test_util.run_deprecated_v1
  def test_properties(self):
    """The recoverable session exposes the wrapped session's properties."""
    with self.cached_session() as sess:
      constant_op.constant(0.0)
      recoverable_sess = monitored_session._RecoverableSession(
          self._SessionReturner(sess))
      # assertEqual replaces the deprecated assertEquals alias, which is
      # removed in Python 3.12.
      self.assertEqual(sess.graph, recoverable_sess.graph)
      self.assertEqual(sess.sess_str, recoverable_sess.sess_str)

  @test_util.run_deprecated_v1
  def test_run(self):
    """run() is forwarded to the wrapped session."""
    with self.cached_session() as sess:
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      recoverable_sess = monitored_session._RecoverableSession(
          self._SessionReturner(sess))
      self.assertEqual(51, recoverable_sess.run(v, feed_dict={c: 51}))

  @test_util.run_deprecated_v1
  def test_recovery(self):
    """Aborted runs trigger transparent session re-creation."""
    with self.cached_session() as sess:

      class StackSessionCreator(object):
        """Pops pre-built aborting sessions off a stack."""

        def __init__(self, sess):
          self.sessions_to_use = [
              AbortAtNSession(sess, x + 1) for x in range(3)
          ]

        def create_session(self):
          return self.sessions_to_use.pop(0)

      c = constant_op.constant(0)
      v = array_ops.identity(c)
      session_creator = StackSessionCreator(sess)
      # List of 3 sessions to use for recovery. The first one aborts
      # after 1 run() call, the second after 2 run calls, the third
      # after 3 run calls.
      self.assertEqual(3, len(session_creator.sessions_to_use))
      # Make the recoverable session use these 3 sessions in sequence by
      # passing a factory that pops from the session_to_use list.
      recoverable_sess = monitored_session._RecoverableSession(session_creator)
      self.assertEqual(
          2, len(session_creator.sessions_to_use))  # One session popped.
      # Using first session.
      self.assertEqual(51, recoverable_sess.run(v, feed_dict={c: 51}))
      self.assertEqual(
          2, len(session_creator.sessions_to_use))  # Still 2 sessions available
      # This will fail and recover by picking up the second session.
      self.assertEqual(42, recoverable_sess.run(v, feed_dict={c: 42}))
      self.assertEqual(
          1, len(session_creator.sessions_to_use))  # Still 1 session available
      self.assertEqual(33, recoverable_sess.run(v, feed_dict={c: 33}))
      self.assertEqual(
          1, len(session_creator.sessions_to_use))  # Still 1 session available
      # This will fail and recover by picking up the last session.
      self.assertEqual(24, recoverable_sess.run(v, feed_dict={c: 24}))
      self.assertEqual(
          0, len(session_creator.sessions_to_use))  # All sessions used.
      self.assertEqual(11, recoverable_sess.run(v, feed_dict={c: 11}))
      self.assertEqual(0, recoverable_sess.run(v, feed_dict={c: 0}))
      # This will fail and throw a real error as the pop() will fail.
      # assertRaisesRegex replaces the deprecated assertRaisesRegexp alias,
      # which is removed in Python 3.12.
      with self.assertRaisesRegex(IndexError, 'pop from empty list'):
        recoverable_sess.run(v, feed_dict={c: -12})

  @test_util.run_deprecated_v1
  def test_recovery_from_coordinator_exception(self):
    """An AbortedError in the coordinator re-creates the session."""
    with self.cached_session() as test_session:
      session_creator = CountingSessionCreator(test_session)
      session = monitored_session.MonitoredSession(
          session_creator,
          [StopCoordinatorWithException(calls_before_stopping=2)])
      self.assertEqual(1, session_creator.number_of_sessions_created)
      self.assertFalse(session.should_stop())
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      # The coordinator will not abort during this call, since it's the call
      # number 0.
      self.assertEqual(51, session.run(v, feed_dict={c: 51}))
      self.assertFalse(session.should_stop())
      # The coordinator will abort during the next call, since it's the call
      # number 1.
      self.assertEqual(42, session.run(v, feed_dict={c: 42}))
      # Even though the coordinator was asked to stop, the underlying session is
      # recreated and is to be continued.
      self.assertFalse(session.should_stop())
      self.assertEqual(2, session_creator.number_of_sessions_created)

  @test_util.run_deprecated_v1
  def test_recovery_from_non_preemption_in_coordinator(self):
    """A non-preemption error in the coordinator stops training for good."""
    with self.cached_session() as test_session:
      session_creator = CountingSessionCreator(test_session)
      hook = StopCoordinatorWithException(
          calls_before_stopping=2,
          exception_to_raise=errors_impl.UnknownError(
              None, None, 'Some fatal exception inside the coordinator.'))
      session = monitored_session.MonitoredSession(session_creator, [hook])
      self.assertEqual(1, session_creator.number_of_sessions_created)
      self.assertFalse(session.should_stop())
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      # The coordinator will not abort during this call, since it's the call
      # number 0.
      self.assertEqual(51, session.run(v, feed_dict={c: 51}))
      self.assertFalse(session.should_stop())
      # The coordinator will abort during the next call, since it's the call
      # number 1.
      self.assertEqual(42, session.run(v, feed_dict={c: 42}))
      # The coordinator was asked to stop due to non-redeemable error. Training
      # should stop and the session should not be recreated.
      self.assertTrue(session.should_stop())
      self.assertEqual(1, session_creator.number_of_sessions_created)
      with self.assertRaises(errors_impl.UnknownError):
        session.close()

  @test_util.run_deprecated_v1
  def test_recovery_from_session_getting_stuck(self):
    """A CancelledError after coordinator stop still triggers recovery."""
    with self.cached_session() as test_session:
      session_creator = CountingSessionCreator(test_session)
      session = monitored_session.MonitoredSession(
          session_creator,
          [FailTrainingAfterCoordinatorStopped(calls_before_stopping=2)])
      self.assertEqual(1, session_creator.number_of_sessions_created)
      self.assertFalse(session.should_stop())
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      # Training will not fail, since it's the call number 0.
      self.assertEqual(51, session.run(v, feed_dict={c: 51}))
      self.assertFalse(session.should_stop())
      # Training will fail during the next call, since it's the call
      # number 1.
      self.assertEqual(42, session.run(v, feed_dict={c: 42}))
      # Even though the coordinator stopped and training failed, the
      # underlying session is recreated and training is to be continued.
      self.assertFalse(session.should_stop())
      self.assertEqual(2, session_creator.number_of_sessions_created)

  @test_util.run_deprecated_v1
  def test_step_fn_recovery_from_coordinator_exception_when_run_hooks(self):
    """run_step_fn with run_with_hooks recovers from coordinator aborts."""
    with self.cached_session() as test_session:
      session_creator = CountingSessionCreator(test_session)
      session = monitored_session.MonitoredSession(
          session_creator,
          [StopCoordinatorWithException(calls_before_stopping=2)])
      self.assertEqual(1, session_creator.number_of_sessions_created)
      self.assertFalse(session.should_stop())
      c = constant_op.constant(0)
      v = array_ops.identity(c)

      def feed_step_fn(value):
        def step_fn(step_context):
          return step_context.run_with_hooks(fetches=v, feed_dict={c: value})
        return step_fn

      # The coordinator will not abort during this call, since it's the call
      # number 0.
      self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))
      self.assertFalse(session.should_stop())
      # The coordinator will abort during the next call, since it's the call
      # number 1.
      self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))
      # Even though the coordinator was asked to stop, the underlying session is
      # recreated and is to be continued.
      self.assertFalse(session.should_stop())
      self.assertEqual(2, session_creator.number_of_sessions_created)

  @test_util.run_deprecated_v1
  def test_recovery_from_non_preemption_in_coordinator_when_run_hooks(self):
    """run_step_fn stops for good on a non-preemption coordinator error."""
    with self.cached_session() as test_session:
      session_creator = CountingSessionCreator(test_session)
      hook = StopCoordinatorWithException(
          calls_before_stopping=2,
          exception_to_raise=errors_impl.UnknownError(
              None, None, 'Some fatal exception inside the coordinator.'))
      session = monitored_session.MonitoredSession(session_creator, [hook])
      self.assertEqual(1, session_creator.number_of_sessions_created)
      self.assertFalse(session.should_stop())
      c = constant_op.constant(0)
      v = array_ops.identity(c)

      def feed_step_fn(value):
        def step_fn(step_context):
          return step_context.run_with_hooks(fetches=v, feed_dict={c: value})
        return step_fn

      # The coordinator will not abort during this call, since it's the call
      # number 0.
      self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))
      self.assertFalse(session.should_stop())
      # The coordinator will abort during the next call, since it's the call
      # number 1.
      self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))
      # The coordinator was asked to stop due to non-redeemable error. Training
      # should stop and the session should not be recreated.
      self.assertTrue(session.should_stop())
      self.assertEqual(1, session_creator.number_of_sessions_created)
      with self.assertRaises(errors_impl.UnknownError):
        session.close()

  @test_util.run_deprecated_v1
  def test_recovery_from_session_getting_stuck_when_run_hooks(self):
    """run_step_fn recovers when training fails after coordinator stop."""
    with self.cached_session() as test_session:
      session_creator = CountingSessionCreator(test_session)
      session = monitored_session.MonitoredSession(
          session_creator,
          [FailTrainingAfterCoordinatorStopped(calls_before_stopping=2)])
      self.assertEqual(1, session_creator.number_of_sessions_created)
      self.assertFalse(session.should_stop())
      c = constant_op.constant(0)
      v = array_ops.identity(c)

      def feed_step_fn(value):
        def step_fn(step_context):
          return step_context.run_with_hooks(fetches=v, feed_dict={c: value})
        return step_fn

      # Training will not fail, since it's the call number 0.
      self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))
      self.assertFalse(session.should_stop())
      # Training will fail during the next call, since it's the call
      # number 1.
      self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))
      # Even though the coordinator stopped and training failed, the
      # underlying session is recreated and training is to be continued.
      self.assertFalse(session.should_stop())
      self.assertEqual(2, session_creator.number_of_sessions_created)

  def create_raw_session_with_failing_coordinator(self, session_creator, hook):
    """Return MonitoredSession that triggers coordinator failures."""
    session = monitored_session.MonitoredSession(session_creator, [hook])
    # We would like to test a situation where during fetches through the
    # raw session, the coordinator fails with an exception. To do that, we
    # are going to use (raw_session + StopCoordinatorWithException) hook
    # combination that is stored in
    # `MonitoredSession._RecoverableSession._CoordinatedSession._sess`
    # at this point:
    session._tf_sess = lambda: session._sess._sess._sess
    # `run()` on such a session is equivalent to `run()` on the raw session
    # with separate coordinator threads independently stopping with an
    # exception.
    return session

  @test_util.run_deprecated_v1
  def test_step_fn_recovery_from_coordinator_exception_with_raw_session(self):
    """Raw-session fetches inside step_fn still recover from aborts."""
    with self.cached_session() as test_session:
      session_creator = CountingSessionCreator(test_session)
      session = self.create_raw_session_with_failing_coordinator(
          session_creator,
          StopCoordinatorWithException(calls_before_stopping=2))
      self.assertEqual(1, session_creator.number_of_sessions_created)
      self.assertFalse(session.should_stop())
      c = constant_op.constant(0)
      v = array_ops.identity(c)

      def feed_step_fn(value):
        def step_fn(step_context):
          return step_context.session.run(fetches=v, feed_dict={c: value})
        return step_fn

      # The coordinator will not abort during this call, since it's the call
      # number 0.
      self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))
      self.assertFalse(session.should_stop())
      # The coordinator will abort during the next call, since it's the call
      # number 1.
      self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))
      # Even though the coordinator was asked to stop, the underlying session is
      # recreated and is to be continued.
      self.assertFalse(session.should_stop())
      self.assertEqual(2, session_creator.number_of_sessions_created)

  @test_util.run_deprecated_v1
  def test_recovery_from_non_preemption_in_coordinator_with_raw_session(self):
    """Raw-session variant: a fatal coordinator error ends training."""
    with self.cached_session() as test_session:
      session_creator = CountingSessionCreator(test_session)
      session = self.create_raw_session_with_failing_coordinator(
          session_creator,
          StopCoordinatorWithException(
              calls_before_stopping=2,
              exception_to_raise=errors_impl.UnknownError(
                  None, None, 'Some fatal exception inside the coordinator.')))
      self.assertEqual(1, session_creator.number_of_sessions_created)
      self.assertFalse(session.should_stop())
      c = constant_op.constant(0)
      v = array_ops.identity(c)

      def feed_step_fn(value):
        def step_fn(step_context):
          return step_context.run_with_hooks(fetches=v, feed_dict={c: value})
        return step_fn

      # The coordinator will not abort during this call, since it's the call
      # number 0.
      self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))
      self.assertFalse(session.should_stop())
      # The coordinator will abort during the next call, since it's the call
      # number 1.
      self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))
      # The coordinator was asked to stop due to non-redeemable error. Training
      # should stop and the session should not be recreated.
      self.assertTrue(session.should_stop())
      self.assertEqual(1, session_creator.number_of_sessions_created)
      with self.assertRaises(errors_impl.UnknownError):
        session.close()

  @test_util.run_deprecated_v1
  def test_recovery_from_session_getting_stuck_with_raw_session(self):
    """Raw-session variant: stuck training recovers with a new session."""
    with self.cached_session() as test_session:
      session_creator = CountingSessionCreator(test_session)
      session = self.create_raw_session_with_failing_coordinator(
          session_creator,
          FailTrainingAfterCoordinatorStopped(calls_before_stopping=2))
      self.assertEqual(1, session_creator.number_of_sessions_created)
      self.assertFalse(session.should_stop())
      c = constant_op.constant(0)
      v = array_ops.identity(c)

      def feed_step_fn(value):
        def step_fn(step_context):
          return step_context.run_with_hooks(fetches=v, feed_dict={c: value})
        return step_fn

      # Training will not fail, since it's the call number 0.
      self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))
      self.assertFalse(session.should_stop())
      # Training will fail during the next call, since it's the call
      # number 1.
      self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))
      # Even though the coordinator stopped and training failed, the
      # underlying session is recreated and training is to be continued.
      self.assertFalse(session.should_stop())
      self.assertEqual(2, session_creator.number_of_sessions_created)
class FakeSession(monitored_session._WrappedSession):
  """Wrapped session that records the keyword arguments of each run() call."""

  def __init__(self, sess):
    super(FakeSession, self).__init__(sess)
    # Keyword arguments captured by the most recent run() call.
    self.args_called = {}

  def run(self, fetches, **kwargs):
    # Call run only with fetches since we directly pass other arguments.
    self.args_called = dict(kwargs)
    return super(FakeSession, self).run(fetches)
class HookedSessionTest(test.TestCase):
"""Tests of _HookedSession."""
  def testRunPassesAllArguments(self):
    """_HookedSession.run forwards feed_dict, options and run_metadata."""
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_run = FakeSession(sess)
      mon_sess = monitored_session._HookedSession(sess=mock_run, hooks=[])
      a_tensor = constant_op.constant([0], name='a_tensor')
      self.evaluate(variables.global_variables_initializer())
      output = mon_sess.run(fetches=a_tensor,
                            feed_dict='a_feed',
                            options='an_option',
                            run_metadata='a_metadata')
      self.assertEqual(output, [0])
      # FakeSession captured the keyword arguments rather than passing them
      # to the real session, so sentinel strings are safe here.
      self.assertEqual(mock_run.args_called, {
          'feed_dict': 'a_feed',
          'options': 'an_option',
          'run_metadata': 'a_metadata'
      })
def testCallsHooksBeginEnd(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
self.evaluate(variables.global_variables_initializer())
mon_sess.run(a_tensor)
for hook in [mock_hook, mock_hook2]:
self.assertEqual(
hook.last_run_values,
session_run_hook.SessionRunValues(
results=None,
options=config_pb2.RunOptions(),
run_metadata=config_pb2.RunMetadata()))
self.assertEqual(hook.last_run_context.original_args,
session_run_hook.SessionRunArgs(a_tensor))
self.assertEqual(hook.last_run_context.session, sess)
self.assertEqual(hook.call_counter['begin'], 0)
self.assertEqual(hook.call_counter['after_create_session'], 0)
self.assertEqual(hook.call_counter['before_run'], 1)
self.assertEqual(hook.call_counter['after_run'], 1)
def testShouldStop(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
constant_op.constant([0], name='a_tensor')
self.evaluate(variables.global_variables_initializer())
mon_sess.run(fetches='a_tensor')
self.assertFalse(mon_sess.should_stop())
mock_hook.should_stop = True
mon_sess.run(fetches='a_tensor')
self.assertTrue(mon_sess.should_stop())
def testFetchesHookRequests(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
another_tensor = constant_op.constant([5], name='another_tensor')
third_tensor = constant_op.constant([10], name='third_tensor')
mock_hook.request = session_run_hook.SessionRunArgs([another_tensor])
mock_hook2.request = session_run_hook.SessionRunArgs([third_tensor])
self.evaluate(variables.global_variables_initializer())
output = mon_sess.run(fetches=a_tensor)
self.assertEqual(output, [0])
self.assertEqual(mock_hook.last_run_values.results, [5])
self.assertEqual(mock_hook2.last_run_values.results, [10])
def testOnlyHooksHaveFeeds(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
b_tensor = constant_op.constant([0], name='b_tensor')
add_tensor = a_tensor + b_tensor
mock_hook.request = session_run_hook.SessionRunArgs(
None, feed_dict={a_tensor: [5]})
mock_hook2.request = session_run_hook.SessionRunArgs(
None, feed_dict={b_tensor: [10]})
self.evaluate(variables.global_variables_initializer())
self.assertEqual(mon_sess.run(fetches=add_tensor), [15])
def testBothHooksAndUserHaveFeeds(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
b_tensor = constant_op.constant([0], name='b_tensor')
c_tensor = constant_op.constant([0], name='c_tensor')
add_tensor = a_tensor + b_tensor + c_tensor
mock_hook.request = session_run_hook.SessionRunArgs(
None, feed_dict={a_tensor: [5]})
mock_hook2.request = session_run_hook.SessionRunArgs(
None, feed_dict={b_tensor: [10]})
self.evaluate(variables.global_variables_initializer())
feed_dict = {c_tensor: [20]}
self.assertEqual(
mon_sess.run(fetches=add_tensor, feed_dict=feed_dict), [35])
# User feed_dict should not be changed
self.assertEqual(len(feed_dict), 1)
def testHooksFeedConflicts(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
b_tensor = constant_op.constant([0], name='b_tensor')
add_tensor = a_tensor + b_tensor
mock_hook.request = session_run_hook.SessionRunArgs(
None, feed_dict={a_tensor: [5]})
mock_hook2.request = session_run_hook.SessionRunArgs(
None, feed_dict={a_tensor: [10]})
self.evaluate(variables.global_variables_initializer())
with self.assertRaisesRegexp(RuntimeError, 'Same tensor is fed'):
mon_sess.run(fetches=add_tensor)
def testHooksAndUserFeedConflicts(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
b_tensor = constant_op.constant([0], name='b_tensor')
add_tensor = a_tensor + b_tensor
mock_hook.request = session_run_hook.SessionRunArgs(
None, feed_dict={a_tensor: [5]})
mock_hook2.request = session_run_hook.SessionRunArgs(
None, feed_dict={b_tensor: [10]})
self.evaluate(variables.global_variables_initializer())
with self.assertRaisesRegexp(RuntimeError, 'Same tensor is fed'):
mon_sess.run(fetches=add_tensor, feed_dict={b_tensor: [10]})
class RaiseOnceAtCountN(session_run_hook.SessionRunHook):
  """Session hook that raises a given exception once, on the N-th step."""

  def __init__(self, n, ex):
    self.n = n            # countdown of before_run calls until the raise
    self.ex = ex          # exception instance to raise
    self.raised = False   # set once the exception has been raised

  def before_run(self, run_context):
    """Decrement the countdown and raise `ex` the first time it hits zero."""
    self.n -= 1
    should_raise = (self.n == 0) and not self.raised
    if should_raise:
      self.raised = True
      raise self.ex
    return None
class RunOptionsMetadataHook(session_run_hook.SessionRunHook):
  """A hook that observes & optionally modifies RunOptions and RunMetadata."""

  def __init__(self, trace_level, timeout_in_ms, output_partition_graphs,
               debug_tensor_watch, report_tensor_allocations_upon_oom):
    # Settings injected into the RunOptions of every run() call.
    self._trace_level = trace_level
    self._timeout_in_ms = timeout_in_ms
    self._output_partition_graphs = output_partition_graphs
    self._debug_tensor_watch = debug_tensor_watch
    self._report_tensor_allocations_upon_oom = (
        report_tensor_allocations_upon_oom)
    # Observed options/metadata, appended once per run() call.
    self.run_options_list = []
    self.run_metadata_list = []

  def before_run(self, run_context):
    """Request a run with the configured RunOptions (no extra fetches)."""
    run_opts = config_pb2.RunOptions(
        trace_level=self._trace_level,
        timeout_in_ms=self._timeout_in_ms,
        output_partition_graphs=self._output_partition_graphs,
        report_tensor_allocations_upon_oom=self
        ._report_tensor_allocations_upon_oom)
    run_opts.debug_options.debug_tensor_watch_opts.extend(
        [self._debug_tensor_watch])
    return session_run_hook.SessionRunArgs(None, None, options=run_opts)

  def after_run(self, run_context, run_values):
    """Record the options and metadata the session actually used."""
    self.run_options_list.append(run_values.options)
    self.run_metadata_list.append(run_values.run_metadata)
class MonitoredSessionTest(test.TestCase):
"""MonitoredSession tests."""
def test_defaults(self):
with ops.Graph().as_default():
a_var = variables.VariableV1(0)
with monitored_session.MonitoredSession() as session:
self.assertEqual(0, session.run(a_var))
def test_last_step(self):
logdir = _test_dir(self.get_temp_dir(), 'test_last_step')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
# Run till step 3 and save.
hooks = [basic_session_run_hooks.StopAtStepHook(last_step=3)]
with monitored_session.MonitoredSession(hooks=hooks) as session:
self.assertEqual(0, session.run(gstep))
self.assertFalse(session.should_stop())
self.assertEqual(1, session.run(do_step))
self.assertFalse(session.should_stop())
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
self.assertEqual(3, session.run(do_step))
self.assertTrue(session.should_stop())
save_path = saver_lib._get_saver_or_default().save(
session._coordinated_creator.tf_sess,
os.path.join(logdir, 'step-3'))
# Run till step 5 and save.
def load_ckpt(scaffold, sess):
scaffold.saver.restore(sess, save_path)
session_creator = monitored_session.ChiefSessionCreator(
monitored_session.Scaffold(init_fn=load_ckpt))
hooks = [basic_session_run_hooks.StopAtStepHook(last_step=5)]
with monitored_session.MonitoredSession(
hooks=hooks, session_creator=session_creator) as session:
self.assertEqual(3, session.run(gstep))
self.assertFalse(session.should_stop())
self.assertEqual(4, session.run(do_step))
self.assertFalse(session.should_stop())
self.assertEqual(5, session.run(do_step))
self.assertTrue(session.should_stop())
def test_num_steps(self):
logdir = _test_dir(self.get_temp_dir(), 'test_num_steps')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
# Do 3 steps and save.
hooks = [basic_session_run_hooks.StopAtStepHook(num_steps=3)]
with monitored_session.MonitoredSession(hooks=hooks) as session:
session.run(do_step)
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertTrue(session.should_stop())
save_path = saver_lib._get_saver_or_default().save(
session._coordinated_creator.tf_sess,
os.path.join(logdir, 'step-3'))
# Restore and do 4 steps.
def load_ckpt(scaffold, sess):
scaffold.saver.restore(sess, save_path)
session_creator = monitored_session.ChiefSessionCreator(
scaffold=monitored_session.Scaffold(init_fn=load_ckpt))
hooks = [basic_session_run_hooks.StopAtStepHook(num_steps=4)]
with monitored_session.MonitoredSession(
hooks=hooks, session_creator=session_creator) as session:
self.assertEqual(4, session.run(do_step))
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertTrue(session.should_stop())
# This set of tests, verifies the supervised session behavior when exceptions
# are raised next to the innermost session run() call.
@test_util.run_deprecated_v1
def test_recovery(self):
logdir = _test_dir(self.get_temp_dir(), 'test_recovery')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
scaffold = monitored_session.Scaffold()
# Use a hook to save the model every 100 steps. It also saves it at
# the end.
hooks = [
basic_session_run_hooks.CheckpointSaverHook(
logdir, save_steps=1, scaffold=scaffold)
]
with monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
scaffold, checkpoint_dir=logdir),
hooks=hooks) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
# A restart will find the checkpoint and recover automatically.
with monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
scaffold, checkpoint_dir=logdir)) as session:
self.assertEqual(2, session.run(gstep))
# A restart will find the checkpoint and recover automatically.
with monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
scaffold,
checkpoint_filename_with_path=checkpoint_management.
latest_checkpoint(logdir))) as session:
self.assertEqual(2, session.run(gstep))
def test_retry_initialization_on_aborted_error(self):
# Tests that we silently retry on abort during initialization.
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
self.init_raised_aborted_error = False
def _init_fn(scaffold, session):
_, _ = scaffold, session
if not self.init_raised_aborted_error:
self.init_raised_aborted_error = True
raise errors_impl.AbortedError(None, None, 'Abort')
with monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
scaffold=monitored_session.Scaffold(
init_fn=_init_fn))) as session:
self.assertFalse(session.should_stop())
self.assertEqual(0, session.run(gstep))
self.assertTrue(self.init_raised_aborted_error)
def _retry_test(self, ex):
# Tests that we silently retry on error. Note that this does not test
# recovery as we do not use a CheckpointSaver in this test.
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(4, ex)
with monitored_session.MonitoredSession(hooks=[hook]) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Here at step 3, the hook triggers and raises AbortedError. The
# MonitoredSession automatically retries and restart from a freshly
# initialized session, so the step is back to 0 and running do_step
# moves it to 1.
self.assertEqual(1, session.run(do_step))
self.assertFalse(session.should_stop())
self.assertTrue(hook.raised)
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
def test_retry_on_aborted_error(self):
self._retry_test(errors_impl.AbortedError(None, None, 'Abort'))
def test_retry_on_unavailable_error(self):
self._retry_test(errors_impl.UnavailableError(None, None, 'Unavailable'))
def test_recover_and_retry_on_aborted_error(self):
# Tests that we silently retry and recover on abort. This test uses
# a CheckpointSaver to have something to recover from.
logdir = _test_dir(self.get_temp_dir(),
'test_recover_and_retry_on_aborted_error')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
scaffold = monitored_session.Scaffold()
abort_hook = RaiseOnceAtCountN(
4, errors_impl.AbortedError(None, None, 'Abort'))
# Save after each step.
ckpt_hook = basic_session_run_hooks.CheckpointSaverHook(
logdir, save_steps=1, scaffold=scaffold)
hooks = [abort_hook, ckpt_hook]
with monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
scaffold, checkpoint_dir=logdir),
hooks=hooks) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Here at step 3, the hook triggers and raises AbortedError. The
# MonitoredSession automatically restores and retries.
self.assertEqual(3, session.run(do_step))
self.assertTrue(abort_hook.raised)
self.assertFalse(session.should_stop())
self.assertEqual(4, session.run(do_step))
self.assertFalse(session.should_stop())
def test_exit_cleanly_on_out_of_range_exception(self):
# Tests that we stop cleanly when OutOfRange is raised.
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(2, errors_impl.OutOfRangeError(None, None,
'EOI'))
session = monitored_session.MonitoredSession(hooks=[hook])
# session should cleanly exit from the context.
with session:
self.assertEqual(0, session.run(gstep))
self.assertFalse(session.should_stop())
# Here at step 1, the hook triggers and raises OutOfRange. The
# session should go into should_stop() mode. It should raise the
# exception. So next step should not be executed.
session.run(do_step)
self.assertTrue(False)
self.assertTrue(session.should_stop())
def test_exit_cleanly_on_stop_iteration_exception(self):
# Tests that we stop cleanly when OutOfRange is raised.
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(2, StopIteration)
session = monitored_session.MonitoredSession(hooks=[hook])
# session should cleanly exit from the context.
with session:
self.assertEqual(0, session.run(gstep))
self.assertFalse(session.should_stop())
# Here at step 1, the hook triggers and raises StopIteration. The
# session should go into should_stop() mode. It should raise the
# exception. So next step should not be executed.
session.run(do_step)
self.assertTrue(False)
self.assertTrue(session.should_stop())
def test_regular_exception_pass_through_run(self):
# Tests that regular exceptions just pass through a "with
# MonitoredSession" block and set the session in stop mode.
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(4, RuntimeError('regular exception'))
session = monitored_session.MonitoredSession(hooks=[hook])
with self.assertRaisesRegexp(RuntimeError, 'regular exception'):
with session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# This triggers the hook and raises the exception
session.run(do_step)
# We should not hit this
self.assertFalse(True)
self.assertTrue(hook.raised)
self.assertTrue(session.should_stop())
def test_regular_exception_reported_to_coord_pass_through_run(self):
# Tests that regular exceptions reported to the coordinator from a thread
# passes through a "run()" call within a "with MonitoredSession" block and
# set the session in stop mode.
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
session = monitored_session.MonitoredSession()
run_performed_without_error = False
with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):
with session:
self.assertEqual(0, session.run(gstep))
# Report an exception through the coordinator.
try:
raise RuntimeError('a thread wants to stop')
except RuntimeError as e:
session._coordinated_creator.coord.request_stop(e)
# Call run() which should perform normally.
self.assertEqual(0, session.run(gstep))
run_performed_without_error = True
self.assertTrue(run_performed_without_error)
def test_regular_exception_reported_to_coord_pass_through_return(self):
# Tests that regular exceptions reported to the coordinator from a thread
# passes through returning from a "with MonitoredSession" block and
# set the session in stop mode.
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
session = monitored_session.MonitoredSession()
with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):
with session:
self.assertEqual(0, session.run(gstep))
# Report an exception through the coordinator.
try:
raise RuntimeError('a thread wants to stop')
except RuntimeError as e:
session._coordinated_creator.coord.request_stop(e)
self.assertTrue(session.should_stop())
# This set of tests, verifies the session behavior when exceptions are raised
# from code inside a "with MonitoredSession:" context.
def test_stop_cleanly_when_no_exception_in_with_body(self):
# Tests that regular exceptions pass through
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
session = monitored_session.MonitoredSession()
with session:
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Should have closed.
self.assertTrue(session.should_stop())
self.assertTrue(session._is_closed())
def test_raises_regular_exceptions_in_with_body(self):
# Tests that regular exceptions in "with body" are seen outside.
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
session = monitored_session.MonitoredSession()
# We should see that exception.
with self.assertRaisesRegexp(RuntimeError, 'regular exception'):
with session:
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Will be visible outside the "with body".
raise RuntimeError('regular exception')
# Should have closed.
self.assertTrue(session.should_stop())
self.assertTrue(session._is_closed())
def test_graph(self):
with ops.Graph().as_default() as g:
with monitored_session.MonitoredSession() as session:
self.assertEqual(g, session.graph)
def test_graph_finalized_during_run_unfinalized_after_exit(self):
with ops.Graph().as_default() as g:
a_var = variables.VariableV1(0)
with monitored_session.MonitoredSession() as session:
self.assertEqual(0, session.run(a_var))
self.assertTrue(g.finalized)
self.assertFalse(g.finalized)
def test_keep_finalized_graph_as_finalized(self):
with ops.Graph().as_default() as g:
a_var = variables.VariableV1(0)
monitored_session.Scaffold().finalize()
with monitored_session.MonitoredSession() as session:
self.assertEqual(0, session.run(a_var))
self.assertTrue(g.finalized)
self.assertTrue(g.finalized)
def test_merge_run_options_from_hooks(self):
"""Test for rewriting RunOptions and observing RunMetadata with hooks."""
with ops.Graph().as_default():
my_const = constant_op.constant(42, name='my_const')
_ = constant_op.constant(24, name='my_const_2')
watch_a = debug_pb2.DebugTensorWatch(
node_name='my_const',
output_slot=0,
debug_ops=['DebugIdentity'],
debug_urls=[])
hook_a = RunOptionsMetadataHook(2, 30000, False, watch_a, False)
watch_b = debug_pb2.DebugTensorWatch(
node_name='my_const_2',
output_slot=0,
debug_ops=['DebugIdentity'],
debug_urls=[])
hook_b = RunOptionsMetadataHook(3, 60000, True, watch_b, True)
with monitored_session.MonitoredSession(
hooks=[hook_a, hook_b]) as session:
self.assertEqual(42, session.run(my_const))
# trace_level=3 should have overridden trace_level=2;
# timeout_in_ms=60000 should have overridden 30000;
# output_partition_graphs=True should have overridden False.
# The two debug tensor watches should have been merged.
self.assertEqual([
config_pb2.RunOptions(
trace_level=3,
timeout_in_ms=60000,
output_partition_graphs=True,
debug_options=debug_pb2.DebugOptions(
debug_tensor_watch_opts=[watch_a, watch_b]),
report_tensor_allocations_upon_oom=True),
], hook_b.run_options_list)
self.assertEqual(1, len(hook_b.run_metadata_list))
self.assertTrue(
isinstance(hook_b.run_metadata_list[0], config_pb2.RunMetadata))
self.assertGreater(len(hook_b.run_metadata_list[0].partition_graphs), 0)
def test_merge_caller_and_hook_run_options(self):
"""Test that RunOptions from caller and hooks can be merged properly."""
with ops.Graph().as_default():
my_const = constant_op.constant(42, name='my_const')
_ = constant_op.constant(24, name='my_const_2')
hook_watch = debug_pb2.DebugTensorWatch(
node_name='my_const_2',
output_slot=0,
debug_ops=['DebugIdentity'],
debug_urls=[])
hook = RunOptionsMetadataHook(2, 60000, False, hook_watch, False)
with monitored_session.MonitoredSession(hooks=[hook]) as session:
caller_watch = debug_pb2.DebugTensorWatch(
node_name='my_const',
output_slot=0,
debug_ops=['DebugIdentity'],
debug_urls=[])
caller_options = config_pb2.RunOptions(
trace_level=3,
timeout_in_ms=30000,
output_partition_graphs=True,
report_tensor_allocations_upon_oom=True)
caller_options.debug_options.debug_tensor_watch_opts.extend(
[caller_watch])
self.assertEqual(42, session.run(my_const, options=caller_options))
# trace_level=3 from the caller should override 2 from the hook.
# timeout_in_ms=60000 from the hook should override from the caller.
# output_partition_graph=True from the caller should override False
# from the hook.
# The two debug watches from the caller and the hook should be merged,
# in that order.
self.assertEqual([
config_pb2.RunOptions(
trace_level=3,
timeout_in_ms=60000,
output_partition_graphs=True,
debug_options=debug_pb2.DebugOptions(
debug_tensor_watch_opts=[caller_watch, hook_watch]),
report_tensor_allocations_upon_oom=True),
], hook.run_options_list)
self.assertEqual(1, len(hook.run_metadata_list))
self.assertTrue(
isinstance(hook.run_metadata_list[0], config_pb2.RunMetadata))
self.assertGreater(len(hook.run_metadata_list[0].partition_graphs), 0)
@test_util.run_deprecated_v1
def test_with_statement_and_close(self):
# Test case for https://github.com/tensorflow/tensorflow/issues/12224
# where close() inside the with should have a better error message.
with self.assertRaisesRegexp(RuntimeError, 'Session is already closed'):
with monitored_session.MonitoredSession() as session:
session.close()
def test_step_fn_example(self):
with ops.Graph().as_default():
c = array_ops.placeholder(dtypes.float32)
v = array_ops.identity(c)
def step_fn(step_context):
value = step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2})
return value
with monitored_session.MonitoredSession() as session:
self.assertNear(3.2, session.run_step_fn(step_fn), 0.1)
def test_step_function_stops(self):
with ops.Graph().as_default():
def step_fn(step_context):
step_context.request_stop()
with monitored_session.MonitoredSession() as session:
self.assertEqual(None, session.run_step_fn(step_fn))
self.assertTrue(session.should_stop())
def test_step_request_stop_without_a_with_block(self):
with ops.Graph().as_default():
was_stop_iteration_raised = False
def step_fn(step_context):
step_context.request_stop()
session = monitored_session.MonitoredSession()
try:
self.assertEqual(None, session.run_step_fn(step_fn))
except StopIteration:
was_stop_iteration_raised = True
self.assertTrue(was_stop_iteration_raised)
self.assertFalse(session.should_stop())
def test_step_request_stop_in_a_loop(self):
with ops.Graph().as_default():
def step_fn(step_context):
step_context.request_stop()
with monitored_session.MonitoredSession() as session:
while not session.should_stop():
_ = session.run_step_fn(step_fn)
self.fail('An exception should be raised on the line above.')
def test_step_request_stop_with_returning_a_type(self):
with ops.Graph().as_default():
def step_fn(step_context):
del step_context
return 'a type'
with monitored_session.MonitoredSession() as session:
self.assertEqual('a type', session.run_step_fn(step_fn))
def test_step_with_extra_arguments(self):
with ops.Graph().as_default():
def step_fn(step_context, extra_foo):
del step_context, extra_foo
with monitored_session.MonitoredSession() as session:
with self.assertRaisesRegexp(
ValueError,
'`step_fn` may either have one `step_context` argument'):
self.assertEqual(None, session.run_step_fn(step_fn))
def test_step_fn_belongs_to_a_class(self):
with ops.Graph().as_default():
c = array_ops.placeholder(dtypes.float32)
v = array_ops.identity(c)
class Model(object):
def step_fn(self, step_context):
return step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2})
with monitored_session.MonitoredSession() as session:
model = Model()
self.assertNear(3.2, session.run_step_fn(model.step_fn), 0.1)
def test_step_fn_belongs_to_a_class_and_has_extra_methods(self):
with ops.Graph().as_default():
class Model(object):
def step_fn(self, step_context, extra_foo):
del step_context, extra_foo
with monitored_session.MonitoredSession() as session:
with self.assertRaisesRegexp(
ValueError,
'`step_fn` may either have one `step_context` argument'):
model = Model()
self.assertEqual(None, session.run_step_fn(model.step_fn))
def test_step_fn_with_hooks(self):
with ops.Graph().as_default():
var = resource_variable_ops.ResourceVariable(0.0)
# This test higlights the interaction of hooks with
# `Monitoredsession.run_step_fn`. The order of execution of operations
# below is:
# 0. stage_0
# 1. stage_1_0 or stage_1_1 in an undefined order
# 2. stage_2
stage_0 = state_ops.assign_add(var, 0.3)
stage_1_0 = state_ops.assign_add(var, 0.7)
# The order of `stage_1_0` and `stage_1_1` is undefined by
# `MonitoredSession`, but we should be able to assert when both of them
# are complete. To obtain a consistent result of adding two different
# constants to `var`, we rely on a control dependency and
# `ResourceVariable`. Otherwise, it is possible that one of the
# additions overwites the result of the other addition.
with ops.control_dependencies([stage_1_0]):
stage_1_1 = state_ops.assign_add(var, 0.5)
stage_2 = state_ops.assign_add(var, 1.1)
class Hook(session_run_hook.SessionRunHook):
def __init__(self, testing):
self._testing = testing
def before_run(self, run_context):
return session_run_hook.SessionRunArgs(fetches=stage_1_0)
def after_run(self, run_context, run_values):
self._testing.assertNear(0.3 + 0.5 + 0.7,
run_context.session.run(var), 0.1)
self._testing.assertNear(0.3 + 0.5 + 0.7 + 1.1,
run_context.session.run(stage_2), 0.1)
def step_fn(step_context):
self.assertNear(0.3, step_context.session.run(stage_0), 0.1)
return step_context.run_with_hooks(fetches=stage_1_1)
with monitored_session.MonitoredSession(hooks=[Hook(self)]) as session:
self.assertEqual(0.3 + 0.5 + 0.7, session.run_step_fn(step_fn))
def test_step_fn_has_the_same_hooks_behavior_without_recovery(self):
with ops.Graph().as_default():
var = resource_variable_ops.ResourceVariable(0.0)
stage_0 = state_ops.assign_add(var, 0.3)
stage_1_0 = state_ops.assign_add(var, 0.7)
with ops.control_dependencies([stage_1_0]):
stage_1_1 = state_ops.assign_add(var, 0.5)
stage_2 = state_ops.assign_add(var, 1.1)
class Hook(session_run_hook.SessionRunHook):
def __init__(self, testing):
self._testing = testing
def before_run(self, run_context):
return session_run_hook.SessionRunArgs(fetches=stage_1_0)
def after_run(self, run_context, run_values):
self._testing.assertNear(0.3 + 0.5 + 0.7,
run_context.session.run(var), 0.1)
self._testing.assertNear(0.3 + 0.5 + 0.7 + 1.1,
run_context.session.run(stage_2), 0.1)
def step_fn(step_context):
self.assertNear(0.3, step_context.session.run(stage_0), 0.1)
return step_context.run_with_hooks(fetches=stage_1_1)
with monitored_session.SingularMonitoredSession(
hooks=[Hook(self)]) as session:
self.assertEqual(0.3 + 0.5 + 0.7, session.run_step_fn(step_fn))
def test_step_fn_with_hooks_and_request_stop(self):
with ops.Graph().as_default():
trace_the_hook = {'before_run': False, 'after_run': False}
class Hook(session_run_hook.SessionRunHook):
def before_run(self, run_context):
trace_the_hook['before_run'] = True
def after_run(self, run_context, run_values):
trace_the_hook['after_run'] = True
def step_fn(step_context):
step_context.request_stop()
with monitored_session.MonitoredSession(hooks=[Hook()]) as session:
self.assertEqual(None, session.run_step_fn(step_fn))
self.assertTrue(session.should_stop())
# `step_context.request_stop()` in a step_fn interrupts the flow of
# running the hooks.
self.assertFalse(trace_the_hook['before_run'])
self.assertFalse(trace_the_hook['after_run'])
def test_recovers_from_an_exception_in_step_fn(self):
trace_the_exception = {'run_already': False}
with ops.Graph().as_default():
c = array_ops.placeholder(dtypes.float32)
v = array_ops.identity(c)
def step_fn(step_context):
if not trace_the_exception['run_already']:
trace_the_exception['run_already'] = True
raise errors_impl.AbortedError(None, None, 'Abort')
return step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2})
with monitored_session.MonitoredSession() as session:
self.assertNear(3.2, session.run_step_fn(step_fn), 0.1)
self.assertTrue(trace_the_exception['run_already'])
def test_recovers_from_an_exception_in_step_fn_after_hooks(self):
trace_the_exception = {'run_already': False, 'side_effect_counter': 0}
with ops.Graph().as_default():
c = array_ops.placeholder(dtypes.float32)
v = array_ops.identity(c)
graph_state = variables.VariableV1(0.0)
graph_side_effect = state_ops.assign_add(graph_state, 0.31)
def step_fn(step_context):
trace_the_exception['side_effect_counter'] += 1
step_context.session.run(graph_side_effect)
value = step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2})
if not trace_the_exception['run_already']:
trace_the_exception['run_already'] = True
raise errors_impl.AbortedError(None, None, 'Abort')
return value
with self.cached_session() as test_session:
with monitored_session.MonitoredSession(
CountingSessionCreator(test_session)) as session:
session.run(variables.global_variables_initializer())
self.assertNear(3.2, session.run_step_fn(step_fn), 0.1)
self.assertTrue(trace_the_exception['run_already'])
# Make sure the rest of the body of the step_fn is re-executed upon
# AbortedError:
self.assertEqual(2, trace_the_exception['side_effect_counter'])
self.assertNear(0.62, session.run(graph_state), 0.1)
def test_step_fn_doesnt_recover_when_it_wasnt_asked_to(self):
    """SingularMonitoredSession propagates AbortedError instead of retrying."""
    trace_the_exception = {'run_already': False}
    with ops.Graph().as_default():
        c = array_ops.placeholder(dtypes.float32)
        v = array_ops.identity(c)

        def step_fn(step_context):
            if not trace_the_exception['run_already']:
                trace_the_exception['run_already'] = True
                raise errors_impl.AbortedError(None, None, 'Abort')
            value = step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2})
            return value

        with monitored_session.SingularMonitoredSession() as session:
            with self.assertRaisesRegexp(errors_impl.AbortedError, 'Abort'):
                self.assertNear(3.2, session.run_step_fn(step_fn), 0.1)
                self.fail()  # unreachable: run_step_fn must raise above

        self.assertTrue(trace_the_exception['run_already'])
def test_step_fn_exception_from_before_run(self):
    """AbortedError raised from a hook's before_run also triggers a retry."""
    trace_the_exception = {'run_already': False, 'side_effect_counter': 0}
    with ops.Graph().as_default():
        c = array_ops.placeholder(dtypes.float32)
        v = array_ops.identity(c)
        vv = constant_op.constant(3.2)
        graph_state = variables.VariableV1(0.0)
        graph_side_effect = state_ops.assign_add(graph_state, 0.31)

        class Hook(session_run_hook.SessionRunHook):

            def __init__(self, testing):
                self._testing = testing

            def before_run(self, run_context):
                # Abort the very first run; subsequent runs fetch vv.
                if not trace_the_exception['run_already']:
                    trace_the_exception['run_already'] = True
                    raise errors_impl.AbortedError(None, None, 'Abort')
                return session_run_hook.SessionRunArgs(fetches=vv)

            def after_run(self, run_context, run_values):
                self._testing.assertNear(3.2, run_values.results, 0.1)

        def step_fn(step_context):
            trace_the_exception['side_effect_counter'] += 1
            step_context.session.run(graph_side_effect)
            return step_context.run_with_hooks(fetches=v, feed_dict={c: 1.3})

        with self.cached_session() as test_session:
            with monitored_session.MonitoredSession(
                    CountingSessionCreator(test_session),
                    hooks=[Hook(self)]) as session:
                test_session.run(variables.global_variables_initializer())
                self.assertNear(1.3, session.run_step_fn(step_fn), 0.1)
                # step_fn ran twice, so the side effect accumulated to 0.62.
                self.assertEqual(2, trace_the_exception['side_effect_counter'])
                self.assertNear(0.62, session.run(graph_state), 0.1)
class SingularMonitoredSessionTest(test.TestCase):
    """Tests SingularMonitoredSession."""

    def test_handles_initialization(self):
        """Variables are initialized automatically on session creation."""
        with ops.Graph().as_default():
            a_var = variables.VariableV1(0)
            with monitored_session.SingularMonitoredSession() as session:
                # If it's not initialized, following statement raises an error.
                self.assertEqual(0, session.run(a_var))

    def test_do_not_handle_aborted_error(self):
        """Unlike MonitoredSession, AbortedError is NOT swallowed/retried."""
        with ops.Graph().as_default():
            gstep = training_util.get_or_create_global_step()

            class _RaiseAbortedHook(session_run_hook.SessionRunHook):

                def before_run(self, run_context):
                    raise errors_impl.AbortedError(None, None, 'Abort')

            # The error surfaces from run() ...
            with monitored_session.SingularMonitoredSession(
                    hooks=[_RaiseAbortedHook()]) as session:
                with self.assertRaises(errors_impl.AbortedError):
                    self.assertEqual(0, session.run(gstep))

            # ... and also escapes the `with` block itself.
            with self.assertRaises(errors_impl.AbortedError):
                with monitored_session.SingularMonitoredSession(
                        hooks=[_RaiseAbortedHook()]) as session:
                    self.assertEqual(0, session.run(gstep))

    def test_exit_cleanly_on_out_of_range_exception(self):
        # Tests that we stop cleanly when OutOfRange is raised.
        with ops.Graph().as_default():
            gstep = training_util.get_or_create_global_step()
            do_step = state_ops.assign_add(gstep, 1)
            hook = RaiseOnceAtCountN(2, errors_impl.OutOfRangeError(None, None,
                                                                    'EOI'))
            session = monitored_session.SingularMonitoredSession(hooks=[hook])
            # session should cleanly exit from the context.
            with session:
                self.assertEqual(0, session.run(gstep))
                self.assertFalse(session.should_stop())
                # Here at step 1, the hook triggers and raises OutOfRange. The
                # session should go into should_stop() mode. It should raise the
                # exception. So next step should not be executed.
                session.run(do_step)
                self.assertTrue(False)  # unreachable per the comment above

            self.assertTrue(session.should_stop())

    def test_regular_exception_reported_to_coord_pass_through_run(self):
        # Tests that regular exceptions reported to the coordinator from a thread
        # passes through a "run()" call within a "with MonitoredSession" block and
        # set the session in stop mode.
        with ops.Graph().as_default():
            gstep = training_util.get_or_create_global_step()
            session = monitored_session.SingularMonitoredSession()
            run_performed_without_error = False
            with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):
                with session:
                    self.assertEqual(0, session.run(gstep))
                    # Report an exception through the coordinator.
                    try:
                        raise RuntimeError('a thread wants to stop')
                    except RuntimeError as e:
                        session._coordinated_creator.coord.request_stop(e)
                    # Call run() which should perform normally.
                    self.assertEqual(0, session.run(gstep))
                    run_performed_without_error = True

            self.assertTrue(run_performed_without_error)

    def test_stop_cleanly_when_no_exception_in_with_body(self):
        # Tests that regular exceptions pass through
        with ops.Graph().as_default():
            gstep = training_util.get_or_create_global_step()
            do_step = state_ops.assign_add(gstep, 1)
            session = monitored_session.SingularMonitoredSession()
            with session:
                self.assertEqual(1, session.run(do_step))
                self.assertEqual(2, session.run(do_step))
                self.assertFalse(session.should_stop())

            # Should have closed.
            self.assertTrue(session.should_stop())
            self.assertEqual(None, session.raw_session())

    def test_graph(self):
        """session.graph exposes the graph the session was built in."""
        with ops.Graph().as_default() as g:
            with monitored_session.SingularMonitoredSession() as session:
                self.assertEqual(g, session.graph)

    def test_raw_session(self):
        """raw_session() returns the underlying tf Session object."""
        with ops.Graph().as_default():
            with monitored_session.SingularMonitoredSession() as session:
                self.assertTrue(isinstance(session.raw_session(), session_lib.Session))
if __name__ == '__main__':
test.main()
|
dataclient.py | """
This file implements a threaded stream controller to abstract a data stream
back to the ray clientserver.
"""
import logging
import queue
import threading
import grpc
from typing import Any
from typing import Dict
import ray.core.generated.ray_client_pb2 as ray_client_pb2
import ray.core.generated.ray_client_pb2_grpc as ray_client_pb2_grpc
logger = logging.getLogger(__name__)
# The maximum field value for request_id -- which is also the maximum
# number of simultaneous in-flight requests.
INT32_MAX = (2**31) - 1
class DataClient:
    """Threaded request/response datapath over a Ray Client gRPC channel.

    A background daemon thread owns the bidirectional Datapath stream:
    requests are fed from ``request_queue`` and responses are parked in
    ``ready_data`` keyed by request id, where ``_blocking_send`` picks
    them up under the condition variable ``cv``.
    """

    def __init__(self, channel: "grpc._channel.Channel", client_id: str):
        """Initializes a thread-safe datapath over a Ray Client gRPC channel.

        Args:
            channel: connected gRPC channel
            client_id: the generated ID representing this client
        """
        self.channel = channel
        self.request_queue = queue.Queue()
        self.data_thread = self._start_datathread()
        # Responses received but not yet consumed, keyed by req_id;
        # guarded by self.cv.
        self.ready_data: Dict[int, Any] = {}
        self.cv = threading.Condition()
        self._req_id = 0
        self._client_id = client_id
        self.data_thread.start()

    def _next_id(self) -> int:
        """Return the next request id, wrapping within [1, INT32_MAX]."""
        self._req_id += 1
        if self._req_id > INT32_MAX:
            self._req_id = 1
        # Responses that aren't tracked (like opportunistic releases)
        # have req_id=0, so make sure we never mint such an id.
        assert self._req_id != 0
        return self._req_id

    def _start_datathread(self) -> threading.Thread:
        # Daemon thread so a forgotten close() cannot hang interpreter exit.
        return threading.Thread(target=self._data_main, args=(), daemon=True)

    def _data_main(self) -> None:
        """Reader loop: pumps responses off the Datapath stream."""
        stub = ray_client_pb2_grpc.RayletDataStreamerStub(self.channel)
        # iter(queue.get, None): the None sentinel put by close() ends the
        # request iterator and therefore the stream.
        resp_stream = stub.Datapath(
            iter(self.request_queue.get, None),
            metadata=(("client_id", self._client_id), ))
        try:
            for response in resp_stream:
                if response.req_id == 0:
                    # This is not being waited for.
                    logger.debug(f"Got unawaited response {response}")
                    continue
                with self.cv:
                    self.ready_data[response.req_id] = response
                    self.cv.notify_all()
        except grpc.RpcError as e:
            if grpc.StatusCode.CANCELLED == e.code():
                # Gracefully shutting down
                logger.info("Cancelling data channel")
            else:
                logger.error(
                    f"Got Error from data channel -- shutting down: {e}")
                raise e

    def close(self) -> None:
        """Terminate the stream (None sentinel) and join the reader thread."""
        if self.request_queue is not None:
            self.request_queue.put(None)
        if self.data_thread is not None:
            self.data_thread.join()

    def _blocking_send(self, req: ray_client_pb2.DataRequest
                       ) -> ray_client_pb2.DataResponse:
        """Send a request and block until its matching response arrives."""
        req_id = self._next_id()
        req.req_id = req_id
        self.request_queue.put(req)
        data = None
        with self.cv:
            # NOTE(review): waits indefinitely; assumes _data_main either
            # delivers a response for req_id or raises -- confirm upstream.
            self.cv.wait_for(lambda: req_id in self.ready_data)
            data = self.ready_data[req_id]
            del self.ready_data[req_id]
        return data

    def GetObject(self, request: ray_client_pb2.GetRequest,
                  context=None) -> ray_client_pb2.GetResponse:
        """Fetch an object from the server (blocking)."""
        datareq = ray_client_pb2.DataRequest(get=request, )
        resp = self._blocking_send(datareq)
        return resp.get

    def PutObject(self, request: ray_client_pb2.PutRequest,
                  context=None) -> ray_client_pb2.PutResponse:
        """Store an object on the server (blocking)."""
        datareq = ray_client_pb2.DataRequest(put=request, )
        resp = self._blocking_send(datareq)
        return resp.put

    def ReleaseObject(self,
                      request: ray_client_pb2.ReleaseRequest,
                      context=None) -> None:
        """Fire-and-forget release: no req_id is assigned (presumably stays
        0 in the proto), so no response is awaited."""
        datareq = ray_client_pb2.DataRequest(release=request, )
        self.request_queue.put(datareq)
|
remote_control.py | #!/usr/bin/env python3
import RPi.GPIO as GPIO
import time, threading
from datetime import datetime
from sqlalchemy import create_engine, MetaData, Table, Column, Integer, DateTime, String, ForeignKey
from sqlalchemy.sql import select, func
#zaehler = 1
time.sleep(1)
CONN = create_engine('sqlite:////home/ampel2go/ampel2go_community/102_user_display_and_settings_module/db.sqlite3')
META_DATA = MetaData(bind=CONN)
MAIN_OCCUPANCY = Table(
'main_occupancy', META_DATA,
Column('id', Integer, primary_key=True),
Column('capacity', Integer),
Column('date', DateTime),
Column('person_count', Integer),
Column('direction', Integer),
)
class IRRemote:
    """Decode IR remote pulses captured via GPIO edge timestamps.

    pWidth() is meant to be registered as the GPIO edge callback; it records
    pulse widths into pList and spawns pulse_checker() on a thread to detect
    the end of a code, which is decoded and handed to the user callback.
    """

    def __init__(self, callback=None):
        # True while a pulse_checker thread is watching for end-of-signal.
        self.decoding = False
        # Captured pulse widths (seconds between consecutive edges).
        self.pList = []
        # Timestamp of the previous edge.
        self.timer = time.time()
        if callback == 'DECODE':
            self.callback = self.print_ir_code
        else:
            self.callback = callback
        self.checkTime = 150  # time in milliseconds
        self.verbose = False
        self.repeatCodeOn = True
        self.lastIRCode = 0
        self.maxPulseListLength = 70

    def pWidth(self, pin):
        """pWidth, function to record the width of the highs and lows
        of the IR remote signal and start the function to look for the
        end of the IR remote signal"""
        self.pList.append(time.time() - self.timer)
        self.timer = time.time()
        if self.decoding == False:
            self.decoding = True
            # Watch for end-of-signal on a separate thread so this GPIO
            # callback returns quickly.
            check_loop = threading.Thread(name='self.pulse_checker', target=self.pulse_checker)
            check_loop.start()
        return

    def pulse_checker(self):
        """pulse_checker, function to look for the end of the IR remote
        signal and activate the signal decode function followed by
        the callback function.

        End of signal is determined by 1 of 2 ways
        1 - if the length of the pulse list is larger than self.maxPulseListLength
            - used for initial button press codes
        2 - if the length of time receiving the pulse is great than self.checkTime
            - used for repeat codes"""
        timer = time.time()
        while True:
            check = (time.time() - timer) * 1000
            if check > self.checkTime:
                # print(check, len(self.pList))
                break
            if len(self.pList) > self.maxPulseListLength:
                # print(check, len(self.pList))
                break
            time.sleep(0.001)
        if len(self.pList) > self.maxPulseListLength:
            decode = self.decode_pulse(self.pList)
            self.lastIRCode = decode
        # if the length of self.pList is less than 10
        # assume repeat code found
        elif len(self.pList) < 10:
            if self.repeatCodeOn == True:
                decode = self.lastIRCode
            else:
                decode = 0
            self.lastIRCode = decode
        else:
            decode = 0
            self.lastIRCode = decode
        # Reset capture state before invoking the user callback.
        self.pList = []
        self.decoding = False
        if self.callback != None:
            self.callback(decode)
        return

    def decode_pulse(self, pList):
        """decode_pulse, function to decode the high and low
        timespans captured by the pWidth function into a binary
        number"""
        bitList = []
        sIndex = -1
        # convert the timespans in seconds to milli-seconds
        # look for the start of the IR remote signal
        for p in range(0, len(pList)):
            try:
                pList[p] = float(pList[p]) * 1000
                if self.verbose == True:
                    print(pList[p])
                if pList[p] < 11:
                    if sIndex == -1:
                        sIndex = p
            except:
                # Non-numeric entries are skipped when locating the start.
                pass
        # if no acceptable start is found return -1
        if sIndex == -1:
            return -1
        if sIndex + 1 >= len(pList):
            return -1
        # Validate the header pair (approx 4-11 ms low, 2-6 ms high).
        # print(sIndex, pList[sIndex], pList[sIndex+1])
        if (pList[sIndex] < 4 or pList[sIndex] > 11):
            return -1
        if (pList[sIndex + 1] < 2 or pList[sIndex + 1] > 6):
            return -1
        """ pulses are made up of 2 parts, a fixed length low (approx 0.5-0.6ms)
        and a variable length high. The length of the high determines whether or
        not a 0,1 or control pulse/bit is being sent. Highes of length approx 0.5-0.6ms
        indicate a 0, and length of approx 1.6-1.7 ms indicate a 1"""
        for i in range(sIndex + 2, len(pList), 2):
            if i + 1 < len(pList):
                if pList[i + 1] < 0.9:
                    bitList.append(0)
                elif pList[i + 1] < 2.5:
                    bitList.append(1)
                elif (pList[i + 1] > 2.5 and pList[i + 1] < 45):
                    # print('end of data found')
                    break
            else:
                break
        # if self.verbose == True:
        #     print(bitList)
        # convert the list of 1s and 0s into a
        # binary number
        pulse = 0
        bitShift = 0
        for b in bitList:
            pulse = (pulse << bitShift) + b
            bitShift = 1
        return pulse

    def set_callback(self, callback=None):
        """set_callback, function to allow the user to set
        or change the callback function used at any time"""
        self.callback = callback
        return

    def remove_callback(self):
        """remove_callback, function to allow the user to remove
        the callback function used at any time"""
        self.callback = None
        return

    def print_ir_code(self, code):
        """print_ir_code, function to display IR code received"""
        # print(hex(code))
        return

    def set_verbose(self, verbose=True):
        """set_verbose, function to turn verbose mode
        on or off. Used to print out pulse width list
        and bit list"""
        self.verbose = verbose
        return

    def set_repeat(self, repeat=True):
        """set_repeat, function to enable and disable
        the IR repeat code functionality"""
        self.repeatCodeOn = repeat
        return
zaehler = 1
def writeDB(x):
    """Apply a mapped remote-control command to the occupancy table.

    Reads the latest row (highest id) from main_occupancy, adjusts
    capacity / person count / direction according to ``x`` and inserts a
    new timestamped row.

    :param x: command number 1-9 as produced by remote_callback.
    """
    # BUG FIX: `zaehler` was assigned as a function-local for x in (2, 3),
    # so the module-level step size was never actually updated. Declare it
    # global so the assignments take effect.
    global zaehler
    selection = select(
        [MAIN_OCCUPANCY.c.id, MAIN_OCCUPANCY.c.capacity, MAIN_OCCUPANCY.c.person_count, MAIN_OCCUPANCY.c.direction, func.max(MAIN_OCCUPANCY.c.id)])
    # func.max(id) collapses the query to the most recent snapshot row.
    for i in CONN.execute(selection):
        capacity = i['capacity']
        latest_person_count = i['person_count']
        direction = i['direction']
    if x == 1:
        print(1)
    if x == 2:
        zaehler = 1  # step size "1er"
    if x == 3:
        zaehler = 10  # step size "10er"
    if x == 5:
        capacity = capacity + 1
    if x == 6:
        capacity = capacity - 1
    if x == 7:
        latest_person_count = latest_person_count - 1
    if x == 8:
        latest_person_count = latest_person_count + 1
    if x == 9:
        direction = direction * -1
    # Debounce: IR remotes emit repeat codes while the button is held.
    time.sleep(0.5)
    now = datetime.now()
    now = now.replace(microsecond=0)
    insert = MAIN_OCCUPANCY.insert().values(capacity=capacity, date=now, person_count=latest_person_count, direction=direction)
    CONN.execute(insert)
if __name__ == "__main__":

    def remote_callback(code):
        """Map raw IR codes from the remote to writeDB() command numbers."""
        # print(hex(code))
        if code == 0xff629d:
            writeDB(1)  # Power
        elif code == 0xff22dd:
            # 'A'
            writeDB(2)
        elif code == 0xff02fd:
            # 'B'
            writeDB(3)
        elif code == 0xffc23d:
            # 'C'
            writeDB(4)
        elif code == 0xff9867:
            # 'Up Arrow'
            writeDB(5)
        elif code == 0xff38c7:
            # 'Down Arrow'
            writeDB(6)
        elif code == 0xff30cf:
            # 'Left Arrow'
            writeDB(7)
        elif code == 0xff7a85:
            # 'Right Arrow'
            writeDB(8)
        elif code == 0xff18e7:
            # 'Select'
            writeDB(9)
        else:
            print('.')  # unknown code
        return

    ir = IRRemote('DECODE')
    GPIO.setwarnings(False)
    GPIO.setmode(GPIO.BCM)  # uses numbering outside circles
    GPIO.setup(16, GPIO.IN)  # set pin 16 to input
    # Fire ir.pWidth on both rising and falling edges to capture widths.
    GPIO.add_event_detect(16, GPIO.BOTH, callback=ir.pWidth)
    # ir.set_verbose()
    print('Starting IR remote sensing using DECODE function')
    time.sleep(5)
    print('Setting up callback')
    ir.set_verbose(False)
    ir.set_callback(remote_callback)
    ir.set_repeat(True)
    try:
        while True:
            time.sleep(1)
    except:
        # Broad except is deliberate: also catches KeyboardInterrupt so the
        # GPIO pin is always cleaned up on exit.
        print('Removing callback and cleaning up GPIO')
        ir.remove_callback()
        GPIO.cleanup(16)
test_http.py | import asyncio
import contextlib
import os
import sys
import threading
import time
from collections import ChainMap
from http.server import BaseHTTPRequestHandler, HTTPServer
import pytest
import fsspec.asyn
import fsspec.utils
fsspec.utils.setup_logging(logger_name="fsspec.http")
requests = pytest.importorskip("requests")
port = 9898
data = b"\n".join([b"some test data"] * 1000)
realfile = "http://localhost:%i/index/realfile" % port
index = b'<a href="%s">Link</a>' % realfile.encode()
listing = open(
os.path.join(os.path.dirname(__file__), "data", "listing.html"), "rb"
).read()
win = os.name == "nt"
class HTTPTestHandler(BaseHTTPRequestHandler):
    """Minimal HTTP handler backing the test server.

    Serves a fixed set of static files plus any files uploaded during a
    test (``dynamic_files``); request headers such as ``give_length`` /
    ``give_range`` / ``head_ok`` toggle which response headers are exposed.
    """

    static_files = {
        "/index/realfile": data,
        "/index/otherfile": data,
        "/index": index,
        "/data/20020401": listing,
    }
    dynamic_files = {}

    # Lookups consult dynamic (uploaded) files first; writes land there too.
    files = ChainMap(dynamic_files, static_files)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _respond(self, code=200, headers=None, data=b""):
        """Send a complete response with the given status, headers and body."""
        headers = headers or {}
        headers.update({"User-Agent": "test"})
        self.send_response(code)
        for k, v in headers.items():
            self.send_header(k, str(v))
        self.end_headers()
        if data:
            self.wfile.write(data)

    def do_GET(self):
        file_path = self.path.rstrip("/")
        file_data = self.files.get(file_path)
        if file_data is None:
            return self._respond(404)
        if "Range" in self.headers:
            ran = self.headers["Range"]
            b, ran = ran.split("=")
            start, end = ran.split("-")
            if start:
                file_data = file_data[int(start) : (int(end) + 1) if end else None]
            else:
                # suffix only
                file_data = file_data[-int(end) :]
        if "give_length" in self.headers:
            response_headers = {"Content-Length": len(file_data)}
            self._respond(200, response_headers, file_data)
        elif "give_range" in self.headers:
            self._respond(
                200,
                {"Content-Range": "0-%i/%i" % (len(file_data) - 1, len(file_data))},
                file_data,
            )
        else:
            self._respond(200, data=file_data)

    def do_POST(self):
        length = self.headers.get("Content-Length")
        file_path = self.path.rstrip("/")
        if length is None:
            assert self.headers.get("Transfer-Encoding") == "chunked"
            self.files[file_path] = b"".join(self.read_chunks())
        else:
            # BUG FIX: header values are strings; rfile.read() requires an
            # int, so the original `self.rfile.read(length)` raised TypeError.
            self.files[file_path] = self.rfile.read(int(length))
        self._respond(200)

    do_PUT = do_POST

    def read_chunks(self):
        """Yield the payloads of a chunked transfer-encoded request body."""
        length = -1
        while length != 0:
            line = self.rfile.readline().strip()
            if len(line) == 0:
                length = 0
            else:
                length = int(line, 16)  # chunk size is hex-encoded
            yield self.rfile.read(length)
            self.rfile.readline()  # consume the CRLF after the chunk

    def do_HEAD(self):
        if "head_not_auth" in self.headers:
            return self._respond(
                403, {"Content-Length": 123}, b"not authorized for HEAD request"
            )
        elif "head_ok" not in self.headers:
            return self._respond(405)
        file_path = self.path.rstrip("/")
        file_data = self.files.get(file_path)
        if file_data is None:
            return self._respond(404)
        if "give_length" in self.headers:
            response_headers = {"Content-Length": len(file_data)}
            if "zero_length" in self.headers:
                response_headers["Content-Length"] = 0
            self._respond(200, response_headers)
        elif "give_range" in self.headers:
            self._respond(
                200, {"Content-Range": "0-%i/%i" % (len(file_data) - 1, len(file_data))}
            )
        elif "give_etag" in self.headers:
            self._respond(200, {"ETag": "xxx"})
        else:
            self._respond(200)  # OK response, but no useful info
@contextlib.contextmanager
def serve():
    """Run the test HTTP server on a background daemon thread.

    Yields the base URL; shuts the server down and joins the thread on exit.
    """
    httpd = HTTPServer(("", port), HTTPTestHandler)
    worker = threading.Thread(target=httpd.serve_forever, daemon=True)
    worker.start()
    try:
        yield "http://localhost:%i" % port
    finally:
        httpd.socket.close()
        httpd.shutdown()
        worker.join()
@pytest.fixture(scope="module")
def server():
    """Module-scoped test HTTP server; yields its base URL."""
    with serve() as s:
        yield s
@pytest.fixture
def reset_files():
    """Clear any files uploaded by the test once it completes."""
    yield
    # Reset the newly added files after the
    # test is completed.
    HTTPTestHandler.dynamic_files.clear()
def test_list(server):
    """Globbing under /index/ yields exactly the one linked file."""
    fs = fsspec.filesystem("http")
    hits = fs.glob(server + "/index/*")
    assert hits == [server + "/index/realfile"]
def test_list_invalid_args(server):
    """Unknown filesystem constructor kwargs are rejected with TypeError."""
    with pytest.raises(TypeError):
        fs = fsspec.filesystem("http", use_foobar=True)
        fs.glob(server + "/index/*")
def test_list_cache(server):
    """Globbing behaves identically with the listings cache enabled."""
    fs = fsspec.filesystem("http", use_listings_cache=True)
    hits = fs.glob(server + "/index/*")
    assert hits == [server + "/index/realfile"]
def test_list_cache_with_expiry_time_cached(server):
    """The first glob populates the dircache; a repeat glob still matches."""
    fs = fsspec.filesystem("http", use_listings_cache=True, listings_expiry_time=30)
    # The directory cache starts out empty ...
    assert not fs.dircache
    # ... and is filled automatically by the first listing query.
    hits = fs.glob(server + "/index/*")
    assert hits == [server + "/index/realfile"]
    assert len(fs.dircache) == 1
    # A second query (within the TTL) returns the same result.
    hits = fs.glob(server + "/index/*")
    assert hits == [server + "/index/realfile"]
def test_list_cache_with_expiry_time_purged(server):
    """Cache entries disappear after the TTL and can be repopulated."""
    h = fsspec.filesystem("http", use_listings_cache=True, listings_expiry_time=0.3)
    # First, the directory cache is not initialized.
    assert not h.dircache
    # By querying the filesystem with "use_listings_cache=True",
    # the cache will automatically get populated.
    out = h.glob(server + "/index/*")
    assert out == [server + "/index/realfile"]
    assert len(h.dircache) == 1
    # Verify cache content.
    assert server + "/index/" in h.dircache
    assert len(h.dircache.get(server + "/index/")) == 1
    # Wait beyond the TTL / cache expiry time.
    time.sleep(0.31)
    # Verify that the cache item should have been purged.
    cached_items = h.dircache.get(server + "/index/")
    assert cached_items is None
    # Verify that after clearing the item from the cache,
    # it can get populated again.
    out = h.glob(server + "/index/*")
    assert out == [server + "/index/realfile"]
    cached_items = h.dircache.get(server + "/index/")
    assert len(cached_items) == 1
def test_list_cache_reuse(server):
    """Listings caches are shared between instances with equal expiry times."""
    h = fsspec.filesystem("http", use_listings_cache=True, listings_expiry_time=5)
    # First, the directory cache is not initialized.
    assert not h.dircache
    # By querying the filesystem with "use_listings_cache=True",
    # the cache will automatically get populated.
    out = h.glob(server + "/index/*")
    assert out == [server + "/index/realfile"]
    # Verify cache content.
    assert len(h.dircache) == 1
    # Verify another instance without caching enabled does not have cache content.
    h = fsspec.filesystem("http", use_listings_cache=False)
    assert not h.dircache
    # Verify that yet another new instance, with caching enabled,
    # will see the same cache content again.
    h = fsspec.filesystem("http", use_listings_cache=True, listings_expiry_time=5)
    assert len(h.dircache) == 1
    # However, yet another instance with a different expiry time will also not have
    # any valid cache content.
    h = fsspec.filesystem("http", use_listings_cache=True, listings_expiry_time=666)
    assert len(h.dircache) == 0
def test_ls_raises_filenotfound(server):
    """Listing a missing path raises FileNotFoundError."""
    fs = fsspec.filesystem("http")
    with pytest.raises(FileNotFoundError):
        fs.ls(server + "/not-a-key")
def test_list_cache_with_max_paths(server):
    """A bounded (max_paths) listings cache still produces correct globs."""
    fs = fsspec.filesystem("http", use_listings_cache=True, max_paths=5)
    hits = fs.glob(server + "/index/*")
    assert hits == [server + "/index/realfile"]
def test_list_cache_with_skip_instance_cache(server):
    """skip_instance_cache does not affect listing results."""
    fs = fsspec.filesystem("http", use_listings_cache=True, skip_instance_cache=True)
    hits = fs.glob(server + "/index/*")
    assert hits == [server + "/index/realfile"]
def test_isdir(server):
    """Only the listing page counts as a directory; files and 404s do not."""
    fs = fsspec.filesystem("http")
    assert fs.isdir(server + "/index/")
    assert not fs.isdir(server + "/index/realfile")
    assert not fs.isdir(server + "doesnotevenexist")
def test_policy_arg(server):
    """The size_policy="get" constructor option still allows globbing."""
    fs = fsspec.filesystem("http", size_policy="get")
    hits = fs.glob(server + "/index/*")
    assert hits == [server + "/index/realfile"]
def test_exists(server):
    """Missing URLs report non-existence and cat() raises on them."""
    fs = fsspec.filesystem("http")
    assert not fs.exists(server + "/notafile")
    with pytest.raises(FileNotFoundError):
        fs.cat(server + "/notafile")
def test_read(server):
    """Full and partial reads return the served bytes."""
    fs = fsspec.filesystem("http")
    url = server + "/index/realfile"
    with fs.open(url, "rb") as f:
        assert f.read() == data
    # block_size=0 selects the streaming file implementation.
    with fs.open(url, "rb", block_size=0) as f:
        assert f.read() == data
    # Partial read followed by read-to-end reassembles the full payload.
    with fs.open(url, "rb") as f:
        assert f.read(100) + f.read() == data
def test_file_pickle(server):
    """Open HTTP file objects survive a pickle round-trip."""
    import pickle

    # via HTTPFile
    fs = fsspec.filesystem("http", headers={"give_length": "true", "head_ok": "true"})
    url = server + "/index/realfile"
    with fs.open(url, "rb") as f:
        pickled = pickle.dumps(f)
        assert f.read() == data
    with pickle.loads(pickled) as f:
        assert f.read() == data

    # via HTTPStreamFile
    fs = fsspec.filesystem("http")
    with fs.open(url, "rb") as f:
        pickled = pickle.dumps(f)
        assert f.read() == data
    with pickle.loads(pickled) as f:
        assert f.read() == data
def test_methods(server):
    """exists() and cat() both work on a real file."""
    fs = fsspec.filesystem("http")
    url = server + "/index/realfile"
    assert fs.exists(url)
    assert fs.cat(url) == data
@pytest.mark.parametrize(
    "headers",
    [
        {},
        {"give_length": "true"},
        {"give_length": "true", "head_ok": "true"},
        {"give_range": "true"},
        {"give_length": "true", "head_not_auth": "true"},
        {"give_range": "true", "head_not_auth": "true"},
    ],
)
def test_random_access(server, headers):
    """Seeking works whenever the server exposes length/range information."""
    h = fsspec.filesystem("http", headers=headers)
    url = server + "/index/realfile"
    with h.open(url, "rb") as f:
        if headers:
            # Size is only known when length/range headers are available.
            assert f.size == len(data)
        assert f.read(5) == data[:5]
        if headers:
            f.seek(5, 1)
            assert f.read(5) == data[10:15]
        else:
            # Streaming file: relative seek is unsupported.
            with pytest.raises(ValueError):
                f.seek(5, 1)
def test_mapper_url(server):
    """get_mapper works from an instance and from the top-level helper alike."""
    fs = fsspec.filesystem("http")
    mapper = fs.get_mapper(server + "/index/")
    assert mapper.root.startswith("http:")
    assert list(mapper)

    mapper2 = fsspec.get_mapper(server + "/index/")
    assert mapper2.root.startswith("http:")
    assert list(mapper) == list(mapper2)
def test_content_length_zero(server):
    """A bogus Content-Length of 0 must not truncate the download."""
    fs = fsspec.filesystem(
        "http", headers={"give_length": "true", "zero_length": "true"}
    )
    url = server + "/index/realfile"
    with fs.open(url, "rb") as f:
        assert f.read() == data
def test_download(server, tmpdir):
    """get() writes the remote bytes to a local file."""
    fs = fsspec.filesystem("http", headers={"give_length": "true", "head_ok": "true "})
    url = server + "/index/realfile"
    target = os.path.join(tmpdir, "afile")
    fs.get(url, target)
    assert open(target, "rb").read() == data
def test_multi_download(server, tmpdir):
    """get() accepts parallel lists of remote URLs and local targets."""
    fs = fsspec.filesystem("http", headers={"give_length": "true", "head_ok": "true "})
    urla = server + "/index/realfile"
    urlb = server + "/index/otherfile"
    fna = os.path.join(tmpdir, "afile")
    fnb = os.path.join(tmpdir, "bfile")
    fs.get([urla, urlb], [fna, fnb])
    assert open(fna, "rb").read() == data
    assert open(fnb, "rb").read() == data
def test_ls(server):
    """ls() on the listing page finds all 11 entries, each typed 'file'."""
    fs = fsspec.filesystem("http")
    entries = fs.ls(server + "/data/20020401/", detail=False)
    nc = server + "/data/20020401/GRACEDADM_CLSM0125US_7D.A20020401.030.nc4"
    assert nc in entries
    assert len(entries) == 11
    assert all(u["type"] == "file" for u in fs.ls(server + "/data/20020401/"))
    assert fs.glob(server + "/data/20020401/*.nc4") == [nc]
def test_mcat(server):
    """cat() with a list of URLs returns a url->bytes mapping."""
    fs = fsspec.filesystem("http", headers={"give_length": "true", "head_ok": "true "})
    urla = server + "/index/realfile"
    urlb = server + "/index/otherfile"
    assert fs.cat([urla, urlb]) == {urla: data, urlb: data}
def test_cat_file_range(server):
    """cat() honours absolute and negative start/end offsets."""
    fs = fsspec.filesystem("http", headers={"give_length": "true", "head_ok": "true "})
    url = server + "/index/realfile"
    assert fs.cat(url, start=1, end=10) == data[1:10]
    assert fs.cat(url, start=1) == data[1:]
    assert fs.cat(url, start=-10) == data[-10:]
    assert fs.cat(url, start=-10, end=-2) == data[-10:-2]
    assert fs.cat(url, end=-10) == data[:-10]
def test_mcat_cache(server):
    """Multi-cat works through the simplecache wrapper filesystem."""
    urla = server + "/index/realfile"
    urlb = server + "/index/otherfile"
    cached = fsspec.filesystem("simplecache", target_protocol="http")
    assert cached.cat([urla, urlb]) == {urla: data, urlb: data}
def test_mcat_expand(server):
    """cat() expands glob patterns before fetching."""
    fs = fsspec.filesystem("http", headers={"give_length": "true", "head_ok": "true "})
    assert fs.cat(server + "/index/*") == {server + "/index/realfile": data}
def test_info(server):
    """info() surfaces the ETag header reported by the server."""
    fs = fsspec.filesystem("http", headers={"give_etag": "true", "head_ok": "true"})
    details = fs.info(server + "/index/realfile")
    assert details["ETag"] == "xxx"
@pytest.mark.parametrize("method", ["POST", "PUT"])
def test_put_file(server, tmp_path, method, reset_files):
    """Upload a file via POST or PUT, then download it back and compare."""
    src_file = tmp_path / "file_1"
    src_file.write_bytes(data)
    dwl_file = tmp_path / "down_1"
    fs = fsspec.filesystem("http", headers={"head_ok": "true", "give_length": "true"})
    # The target must not exist before the upload.
    with pytest.raises(FileNotFoundError):
        fs.info(server + "/hey")
    fs.put_file(src_file, server + "/hey", method=method)
    assert fs.info(server + "/hey")["size"] == len(data)
    fs.get_file(server + "/hey", dwl_file)
    assert dwl_file.read_bytes() == data
@pytest.mark.xfail(
    condition=sys.flags.optimize > 1, reason="no docstrings when optimised"
)
def test_docstring():
    """The generated pipe() method carries a docstring."""
    fs = fsspec.filesystem("http")
    # most methods have empty docstrings and draw from base class, but this one
    # is generated
    assert fs.pipe.__doc__
def test_async_other_thread(server):
    """Drive the async filesystem from an event loop in a helper thread."""
    import threading

    # NOTE(review): asyncio.get_event_loop() is deprecated for implicit loop
    # creation on newer Pythons; asyncio.new_event_loop() may be preferable.
    loop = asyncio.get_event_loop()
    th = threading.Thread(target=loop.run_forever)
    th.daemon = True
    th.start()
    fs = fsspec.filesystem("http", asynchronous=True, loop=loop)
    # Session setup must happen on the loop's own thread.
    asyncio.run_coroutine_threadsafe(fs.set_session(), loop=loop).result()
    url = server + "/index/realfile"
    cor = fs._cat([url])
    fut = asyncio.run_coroutine_threadsafe(cor, loop=loop)
    assert fut.result() == {url: data}
    loop.call_soon_threadsafe(loop.stop)
@pytest.mark.skipif(sys.version_info < (3, 7), reason="no asyncio.run in py36")
def test_async_this_thread(server):
    """Use the async filesystem from the current thread's event loop."""
    async def _():
        fs = fsspec.filesystem("http", asynchronous=True)
        session = await fs.set_session()  # creates client
        url = server + "/index/realfile"
        # Sync wrappers must refuse to run from inside the event loop.
        with pytest.raises((NotImplementedError, RuntimeError)):
            fs.cat([url])
        out = await fs._cat([url])
        del fs
        assert out == {url: data}
        await session.close()

    asyncio.run(_())
def _inner_pass(fs, q, fn):
    """Child-process worker: fetch *fn* with a fresh filesystem, report via *q*."""
    # pass the FS instance, but don't use it; in new process, the instance
    # cache should be skipped to make a new instance
    import traceback

    try:
        fs = fsspec.filesystem("http")
        q.put(fs.cat(fn))
    except Exception:
        # Ship the traceback back so the parent test can display it.
        q.put(traceback.format_exc())
@pytest.mark.parametrize("method", ["spawn", "forkserver"])
def test_processes(server, method):
    """An HTTP filesystem pickled into a child process still works."""
    import multiprocessing as mp

    if win and method != "spawn":
        pytest.skip("Windows can only spawn")
    ctx = mp.get_context(method)
    fn = server + "/index/realfile"
    fs = fsspec.filesystem("http")
    q = ctx.Queue()
    p = ctx.Process(target=_inner_pass, args=(fs, q, fn))
    p.start()
    out = q.get()
    # Child and parent must read identical bytes.
    assert out == fs.cat(fn)
    p.join()
|
compression.py | """
Module for compression and decompression investigations.
This module is the main contribution of my master thesis.
"""
import bz2
from datetime import datetime
import gzip
import logging
import lzma
from pathlib import Path
import shutil
import threading
import zlib
import cv2
import numpy as np
class CompressionError(RuntimeError):
    """Raised when compression or decompression of an image fails."""
class Compressor():
"""Main class to interface compression module."""
def __init__(self,
             res_dir,
             img_dir,
             img_ext="exr",
             algo=None,
             settings=None,
             ext_logger=None):
    """Set up directories, logging and the compression algorithm.

    :param res_dir: results directory; a "raw" subdirectory is used for
        the compressed payloads
    :param img_dir: directory containing the input images
    :param img_ext: image file extension without the leading dot
    :param algo: compression algorithm name (defaults to "lzma")
    :param settings: algorithm settings dict (defaults to {"level": 9})
    :param ext_logger: external logger to use; if None one is created
    """
    if ext_logger is not None:
        self.logger = ext_logger
    else:
        self.logger = self._create_logger()
    self.res_dir = self._check_dir(res_dir)
    self.image_dir = self._check_dir(img_dir)
    self.raw_dir = self._check_dir(res_dir / "raw")
    self.img_extension = "." + img_ext
    # id -> loaded image array (None when unloaded)
    self.imgs = {}
    # id -> path of the optional .xyz prior file (or None)
    self.xyzs = {}
    # Resolution (shape) of the first loaded image; set by load_image.
    self._res = None
    if algo is None:
        algo = "lzma"
    if settings is None:
        settings = {"level": 9}
    self.select_algo(algo, settings)
    self.algo = algo
    self.logger.debug(f"Compressing with algorithm {self.algo}")
    self.logger.debug(f"Compressing with settings {self._settings}")
    # Worker threads spawned by comp_decomp_series.
    self._threads = []
def get_frame_ids(self):
"""Extract list of frame ids from file names of Inst(rument) images."""
scene_name = "Inst"
image_names = scene_name + "*" + self.img_extension
file_names = self.image_dir.glob(image_names)
ids = []
for file_name in file_names:
file_name = str(file_name.name).strip(self.img_extension)
file_name = file_name.strip(scene_name)
ids.append(file_name.strip("_"))
self.logger.debug(f"Found {len(ids)} frame ids")
return ids
def load_image(self, img_id):
    """
    Load a single image into memory.

    :type img_id: str
    :param img_id: id of the image to load
    :raises CompressionError: if the image's shape differs from the
        reference resolution taken from the first loaded image
    """
    self.logger.debug(f"Load image {img_id}")
    img_path = self.image_dir / ("Inst_" + img_id + self.img_extension)
    # IMREAD_UNCHANGED preserves depth/channels (e.g. float EXR data).
    # NOTE(review): cv2.imread returns None on failure; img.shape below
    # would then raise AttributeError -- confirm inputs always exist.
    img = cv2.imread(str(img_path), cv2.IMREAD_UNCHANGED)
    self.imgs[img_id] = img
    if self.imgs:
        id0 = list(self.imgs.keys())[0]
        if self.imgs[id0] is not None:
            # Reference resolution comes from the first loaded image.
            self._res = self.imgs[id0].shape
    if not img.shape == self._res:
        self.logger.debug(f"Images must have size {self._res}!")
        raise CompressionError(f"Images must have size {self._res}!")
    # Register the optional .xyz prior file sitting next to the image.
    xyz_file = ("Inst_" + img_id + self.img_extension + ".xyz")
    xyz_path = self.image_dir / xyz_file
    if xyz_path.is_file():
        self.xyzs[img_id] = xyz_path
    else:
        self.xyzs[img_id] = None
def load_images(self, img_ids=None):
"""Load composition images using ids."""
if img_ids is None:
self.img_ids = self.get_frame_ids()
else:
self.img_ids = img_ids
for img_id in self.img_ids:
self.load_image(img_id)
self.logger.debug(f"Loaded {len(self.imgs.keys())} images")
def unload_image(self, img_id):
"""Unload image with given img_id, keeps ID."""
self.imgs[img_id] = None
def unload_images(self):
"""Unloads images to free memory, keeps IDs."""
self.imgs = {}
    def comp_decomp_series(self, max_threads=7):
        """
        Compresses and decompresses multiple images using :py:func:comp_decomp

        Work is spread over worker threads; when the pool is full the
        frame is processed in the calling thread instead so no frame is
        ever skipped. All workers are joined before images are unloaded.

        :type max_threads: int
        :param max_threads: Upper bound used to throttle worker threads.
        """
        method = self.comp_decomp
        self.logger.debug(f"{method} img series with {max_threads} threads")
        self.img_ids = self.get_frame_ids()
        for img_id in self.img_ids:
            # Reap finished workers before deciding where to run this frame.
            for thr in self._threads:
                if not thr.is_alive():
                    self._threads.pop(self._threads.index(thr))
            if len(self._threads) < max_threads - 1:
                # Allow up to 2 additional threads
                thr = threading.Thread(target=method, args=(None,img_id))
                thr.start()
                self._threads.append(thr)
            else:
                # If too many, also compress in main thread to not drop a frame
                method(None,img_id)
        # Wait for all outstanding workers before releasing image memory.
        for thr in self._threads:
            thr.join()
        self.unload_images()
    def comp_decomp(self, img=None, img_id=None):
        """
        Default function that applies compression and decompression.

        :param img: Image to be compressed and decompressed.
        :param img_id: Optional frame id. When given, the round-tripped
            image is written to ``res_dir`` as a PNG and any recorded
            ``.xyz`` prior file is copied next to it.
        """
        compressed_img = self.compress(img, img_id)
        decompressed_img = self.decompress(compressed_img)
        if img_id is not None:
            self.logger.debug(f"Save image {img_id}")
            filename = self.res_dir / (str(img_id) + ".png")
            # Persist losslessly at maximum PNG compression.
            params = (cv2.IMWRITE_PNG_COMPRESSION, 9)
            cv2.imwrite(str(filename), decompressed_img, params)
            if self.xyzs[img_id] is not None:
                xyz_file = str(filename) + ".xyz"
                shutil.copyfile(self.xyzs[img_id], xyz_file)
                self.logger.debug(f"Save prior file {xyz_file}")
def compress(self, img=None, img_id=None):
"""
Compresses images using predefined algorithm or file format.
:param img: Image to be compressed.
:returns: A compressed image.
"""
if img is None and img_id is not None:
self.load_image(img_id)
img = self.imgs[img_id]
img_cmp = self._comp_met(img, self._settings)
if img_id is not None:
filename_raw = self.raw_dir / (str(img_id) + "." + self.algo)
with open(str(self.raw_dir / filename_raw), "wb") as file:
file.write(img_cmp)
self.unload_image(img_id)
return img_cmp
def decompress(self, img):
"""
Decompresses images using predefined algorithm or file format.
:returns: Decompressed image.
"""
img_dcmp = self._decomp_met(img)
return img_dcmp
def select_algo(self, algo, settings):
"""
Select compression and decompression algorithm or file format.
:param algo: string to describe algorithm or file format to use for
image compression.
:param settings: dictionary to describe settings for the compression
algorithm. Default is {"level": 9}, i.e. highest compression.
"""
algo = algo.lower()
##### Compression algorithms #####
if algo == "bz2":
comp = self._decorate_builtin_compress(bz2.compress)
settings["compresslevel"] = settings["level"]
decomp = self._decorate_builtin_decompress(bz2.decompress)
elif algo == "gzip":
comp = self._decorate_builtin_compress(gzip.compress)
settings["compresslevel"] = settings["level"]
decomp = self._decorate_builtin_decompress(gzip.decompress)
elif algo == "lzma":
comp = self._decorate_builtin_compress(lzma.compress)
settings["preset"] = settings["level"]
decomp = self._decorate_builtin_decompress(lzma.decompress)
elif algo == "zlib":
comp = self._decorate_builtin_compress(zlib.compress)
decomp = self._decorate_builtin_decompress(zlib.decompress)
##### File formats #####
elif algo == "jpeg" or algo == "jpg":
comp = self._decorate_cv_compress(cv2.imencode)
settings["ext"] = ".jpg"
params = (cv2.IMWRITE_JPEG_QUALITY, settings["level"] * 10)
if "progressive" in settings:
if isinstance(settings["progressive"], bool):
params += (cv2.IMWRITE_JPEG_PROGRESSIVE,
settings["progressive"])
else:
raise CompressionError("JPEG progressive requires bool")
if "optimize" in settings:
if isinstance(settings["optimize"], bool):
params += (cv2.IMWRITE_JPEG_OPTIMIZE, settings["optimize"])
else:
raise CompressionError("JPEG optimize requires bool input")
if "rst_interval" in settings:
if isinstance(settings["rst_interval"], int):
params += (cv2.IMWRITE_JPEG_RST_INTERVAL,
settings["rst_interval"])
else:
raise CompressionError("JPEG rst_interval requires int")
if "luma_quality" in settings:
if isinstance(settings["luma_quality"], int):
params += (cv2.IMWRITE_JPEG_LUMA_QUALITY,
settings["luma_quality"])
else:
raise CompressionError("JPEG luma_quality requires int")
if "chroma_quality" in settings:
if isinstance(settings["chroma_quality"], int):
params += (cv2.IMWRITE_JPEG_CHROMA_QUALITY,
settings["chroma_quality"])
else:
raise CompressionError("JPEG chroma_quality requires int")
settings["params"] = params
decomp = self._decorate_cv_decompress(cv2.imdecode)
elif algo == "jpeg2000" or algo == "jp2":
comp = self._decorate_cv_compress(cv2.imencode)
settings["ext"] = ".jp2"
level = int(settings["level"] * 100) # Ranges from 0 to 1000
params = (cv2.IMWRITE_JPEG2000_COMPRESSION_X1000, level)
settings["params"] = params
decomp = self._decorate_cv_decompress(cv2.imdecode)
elif algo == "png":
comp = self._decorate_cv_compress(cv2.imencode)
settings["ext"] = ".png"
params = (cv2.IMWRITE_PNG_COMPRESSION, settings["level"])
if "strategy" in settings:
if isinstance(settings["strategy"], int):
params += (cv2.IMWRITE_PNG_STRATEGY, settings["strategy"])
else:
raise CompressionError("PNG strategy requires int")
if "bilevel" in settings:
if isinstance(settings["bilevel"], bool):
params += (cv2.IMWRITE_PNG_BILEVEL, settings["bilevel"])
else:
raise CompressionError("PNG bilevel requires bool")
settings["params"] = params
decomp = self._decorate_cv_decompress(cv2.imdecode)
elif algo == "tiff":
# According to: http://libtiff.org/support.html
comp = self._decorate_cv_compress(cv2.imencode)
settings["ext"] = ".tiff"
params = ()
if "scheme" in settings:
# Valid values
# 1: None
# 2: CCITT 1D
# 3: CCITT Group 3
# 4: CCITT Group 4
# 5: LZW
# 7: JPEG
# Also some more experimental ones exist
if isinstance(settings["scheme"], int):
params += (cv2.IMWRITE_TIFF_COMPRESSION,
settings["scheme"])
else:
raise CompressionError("TIFF scheme requires int")
if "resunit" in settings:
if isinstance(settings["resunit"], int):
params += (cv2.IMWRITE_TIFF_RESUNIT, settings["resunit"])
else:
raise CompressionError("TIFF resunit requries int")
if "xdpi" in settings:
if isinstance(settings["xdpi"], int):
params += (cv2.IMWRITE_TIFF_XDPI, settings["xdpi"])
else:
raise CompressionError("TIFF xdpi requires int")
if "ydpi" in settings:
if isinstance(settings["ydpi"], int):
params += (cv2.IMWRITE_TIFF_XDPI, settings["ydpi"])
else:
raise CompressionError("TIFF ydpi requires int")
settings["params"] = params
decomp = self._decorate_cv_decompress(cv2.imdecode)
elif algo == "exr":
comp = self._decorate_cv_compress(cv2.imencode)
settings["ext"] = ".exr"
params = ()
if "type" in settings:
if isinstance(settings["type"], int):
params += (cv2.IMWRITE_EXR_TYPE, settings["type"])
else:
raise CompressionError("EXR type requires int")
settings["params"] = params
decomp = self._decorate_cv_decompress(cv2.imdecode)
else:
raise CompressionError("Unknown compression algorithm.")
self._comp_met = comp
self._decomp_met = decomp
self._settings = settings
@staticmethod
def _decorate_builtin_compress(func):
def compress(img, settings):
if img.dtype == np.float32 and np.max(img) <= 1.:
img_temp = img * 255
img = img_temp.astype(np.uint8)
elif img.dtype == np.uint16:
img_temp = img / 255
img = img_temp.astype(np.uint8)
elif img.dtype == np.uint8:
pass
else:
raise RuntimeError("Invalid compression input")
img_cmp = func(img, **settings)
return img_cmp
return compress
def _decorate_builtin_decompress(self, func):
def decompress(img):
img_dcmp = func(img)
img_dcmp = np.frombuffer(img_dcmp, dtype=np.uint8)
img_dcmp = img_dcmp.reshape(self._res)
return img_dcmp
return decompress
@staticmethod
def _decorate_cv_compress(func):
def compress(img, settings):
if img.dtype == np.float32 and np.max(img) <= 1.:
img_temp = img * 255
img = img_temp.astype(np.uint8)
elif img.dtype == np.uint16:
img_temp = img / 255
img = img_temp.astype(np.uint8)
elif img.dtype == np.uint8:
pass
else:
raise RuntimeError("Invalid compression input")
#if settings["ext"] == ".jpg":
# img_temp = img / 255
# img = img_temp.astype(np.uint8)
_, img_cmp = func(settings["ext"], img, settings["params"])
img_cmp = np.array(img_cmp).tobytes()
return img_cmp
return compress
def _decorate_cv_decompress(self, func):
def decompress(img):
img = np.frombuffer(img, dtype=np.uint8)
img_dcmp = func(img, cv2.IMREAD_UNCHANGED)
return img_dcmp
return decompress
@staticmethod
def _create_logger():
"""
Creates local logger in case no external logger was provided.
"""
now = datetime.now().strftime("%Y-%m-%dT%H%M%S%z")
filename = (now + "_compression.log")
log_dir = Path(__file__).resolve().parent.parent.parent
log_dir = log_dir / "data" / "logs"
if not log_dir.is_dir:
Path.mkdir(log_dir)
log_file = log_dir / filename
logger = logging.getLogger("compression")
logger.setLevel(logging.DEBUG)
logger_formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(funcName)s - %(message)s")
file_handler = logging.FileHandler(str(log_file))
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(logger_formatter)
logger.addHandler(file_handler)
logger.debug("\n\n############## NEW COMPRESSION LOG ##############\n")
return logger
def _check_dir(self, directory, create=True):
"""
Resolves directory and creates it, if it doesn't existing.
:type directory: Path or str
:param directory: Directory to be created if not existing
:type create: bool
:param create: Set to false if directory should not be created and
instead an exception shall be raise
"""
self.logger.debug(f"Checking if directory {directory} exists...")
if isinstance(directory, str):
directory = Path(directory)
dir_resolved = directory.resolve()
if not dir_resolved.exists():
if create:
self.logger.debug(f"{directory} doesn't exist. Creating it...")
Path.mkdir(dir_resolved)
self.logger.debug("Finished!")
else:
raise RuntimeError(f"Directory {directory} does not exist!")
else:
self.logger.debug("Exists!")
return dir_resolved
|
execution_manager.py | import subprocess
import threading
import os
from utils import logger_utils
from utils.managers.manager import Manager
from utils.managers.project_manager import ProjectManager
class ExecutionManager(Manager):
    """Runs generated project code in a background daemon thread."""

    __LOGGER = logger_utils.get_logger(__name__)

    @classmethod
    def execute_program(cls, project, main_file):
        """
        Launch the generated main file of *project* with the python
        interpreter in a daemon thread.

        :raises FileNotFoundError: if the generated source file is missing.
        """
        main_path = "{}{}\\out\\{}\\{}.py".format(ProjectManager.PATH, project, project, main_file)
        if os.path.exists(main_path):
            # Bug fix: the original used target=call_subprocess(...), which
            # invoked the subprocess synchronously and handed its return
            # value (None) to Thread, so the daemon thread did nothing.
            app_daemon = threading.Thread(target=ExecutionManager.call_subprocess,
                                          args=(main_path, "python"),
                                          name="{} daemon".format(project))
            app_daemon.daemon = True
            app_daemon.start()
        else:
            ExecutionManager.__LOGGER.error("Cannot find generated source code")
            raise FileNotFoundError("Cannot find generated source code")

    @classmethod
    def call_subprocess(cls, path, language):
        """Spawn the *language* interpreter on *path* without waiting."""
        subprocess.Popen([language, path])
|
client.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2009 Christopher Lenz
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
from datetime import datetime
import os
import os.path
import shutil
import time
import tempfile
import threading
import unittest
from couchdb import client, http, util
from couchdb.tests import testutil
class ServerTestCase(testutil.TempDatabaseMixin, unittest.TestCase):
    """Integration tests for ``couchdb.client.Server`` against a live server."""
    def test_init_with_resource(self):
        sess = http.Session()
        res = http.Resource(client.DEFAULT_BASE_URL, sess)
        serv = client.Server(url=res)
        serv.config()
    def test_init_with_session(self):
        sess = http.Session()
        serv = client.Server(client.DEFAULT_BASE_URL, session=sess)
        serv.config()
        self.assertTrue(serv.resource.session is sess)
    def test_exists(self):
        # Server truthiness reflects whether the server is reachable.
        self.assertTrue(client.Server(client.DEFAULT_BASE_URL))
        self.assertFalse(client.Server('http://localhost:9999'))
    def test_repr(self):
        repr(self.server)
    def test_server_vars(self):
        version = self.server.version()
        self.assertTrue(isinstance(version, util.strbase))
        config = self.server.config()
        self.assertTrue(isinstance(config, dict))
        tasks = self.server.tasks()
        self.assertTrue(isinstance(tasks, list))
    def test_server_stats(self):
        stats = self.server.stats()
        self.assertTrue(isinstance(stats, dict))
        stats = self.server.stats('httpd/requests')
        self.assertTrue(isinstance(stats, dict))
        self.assertTrue(len(stats) == 1 and len(stats['httpd']) == 1)
    def test_get_db_missing(self):
        self.assertRaises(http.ResourceNotFound,
                          lambda: self.server['couchdb-python/missing'])
    def test_create_db_conflict(self):
        name, db = self.temp_db()
        self.assertRaises(http.PreconditionFailed, self.server.create,
                          name)
    def test_delete_db(self):
        name, db = self.temp_db()
        assert name in self.server
        self.del_db(name)
        assert name not in self.server
    def test_delete_db_missing(self):
        self.assertRaises(http.ResourceNotFound, self.server.delete,
                          'couchdb-python/missing')
    def test_replicate(self):
        # Round-trip replication: a -> b, modify in b, then b -> a.
        aname, a = self.temp_db()
        bname, b = self.temp_db()
        id, rev = a.save({'test': 'a'})
        result = self.server.replicate(aname, bname)
        self.assertEqual(result['ok'], True)
        self.assertEqual(b[id]['test'], 'a')
        doc = b[id]
        doc['test'] = 'b'
        b.update([doc])
        self.server.replicate(bname, aname)
        self.assertEqual(a[id]['test'], 'b')
        self.assertEqual(b[id]['test'], 'b')
    def test_replicate_continuous(self):
        aname, a = self.temp_db()
        bname, b = self.temp_db()
        result = self.server.replicate(aname, bname, continuous=True)
        self.assertEqual(result['ok'], True)
        # '_local_id' only appears in the reply from CouchDB >= 0.10.
        version = tuple(int(i) for i in self.server.version().split('.')[:2])
        if version >= (0, 10):
            self.assertTrue('_local_id' in result)
    def test_iter(self):
        aname, a = self.temp_db()
        bname, b = self.temp_db()
        dbs = list(self.server)
        self.assertTrue(aname in dbs)
        self.assertTrue(bname in dbs)
    def test_len(self):
        self.temp_db()
        self.temp_db()
        self.assertTrue(len(self.server) >= 2)
    def test_uuids(self):
        ls = self.server.uuids()
        assert type(ls) == list
        ls = self.server.uuids(count=10)
        assert type(ls) == list and len(ls) == 10
    def test_235_unicode_server(self):
        # Regression test: a unicode base URL must work end to end.
        url = client.DEFAULT_BASE_URL
        if not isinstance(url, util.utype):
            url = url.decode('utf-8')
        server = client.Server(url)
        dbname = 'couchdb-python/test-235-unicode-server'
        db = server.create(dbname)
        try:
            db.update([{'foo': u'\ua000'}])
        finally:
            server.delete(dbname)
    def test_basic_auth(self):
        url = "http://root:password@localhost:5984/"
        server = client.Server(url)
        dbname = 'couchdb-python/test_basic_auth'
        self.assertRaises(http.Unauthorized, server.create, dbname)
    def test_user_management(self):
        url = client.DEFAULT_BASE_URL
        if not isinstance(url, util.utype):
            url = url.decode('utf-8')
        server = client.Server(url)
        try:
            server.add_user('foo', 'secret', roles=['hero'])
            token = server.login('foo', 'secret')
            self.assertTrue(server.verify_token(token))
            self.assertTrue(server.logout(token))
        finally:
            server.remove_user('foo')
class DatabaseTestCase(testutil.TempDatabaseMixin, unittest.TestCase):
    """Integration tests for ``couchdb.client.Database`` against a live server.

    Fix: ``test_add_index`` called the nonexistent ``self.failed()``; the
    correct unittest API is ``self.fail()``.
    """
    def test_save_new(self):
        doc = {'foo': 'bar'}
        id, rev = self.db.save(doc)
        self.assertTrue(id is not None)
        self.assertTrue(rev is not None)
        self.assertEqual((id, rev), (doc['_id'], doc['_rev']))
        doc = self.db.get(id)
        self.assertEqual(doc['foo'], 'bar')
    def test_save_new_with_id(self):
        doc = {'_id': 'foo'}
        id, rev = self.db.save(doc)
        self.assertTrue(doc['_id'] == id == 'foo')
        self.assertEqual(doc['_rev'], rev)
    def test_save_existing(self):
        doc = {}
        id_rev_old = self.db.save(doc)
        doc['foo'] = True
        id_rev_new = self.db.save(doc)
        self.assertTrue(doc['_rev'] == id_rev_new[1])
        self.assertTrue(id_rev_old[1] != id_rev_new[1])
    def test_save_new_batch(self):
        doc = {'_id': 'foo'}
        id, rev = self.db.save(doc, batch='ok')
        self.assertTrue(rev is None)
        self.assertTrue('_rev' not in doc)
    def test_save_existing_batch(self):
        doc = {'_id': 'foo'}
        self.db.save(doc)
        id_rev_old = self.db.save(doc)
        id_rev_new = self.db.save(doc, batch='ok')
        self.assertTrue(id_rev_new[1] is None)
        self.assertEqual(id_rev_old[1], doc['_rev'])
    def test_exists(self):
        self.assertTrue(self.db)
        self.assertFalse(client.Database('couchdb-python/missing'))
    def test_name(self):
        # Access name assigned during creation.
        name, db = self.temp_db()
        self.assertTrue(db.name == name)
        # Access lazily loaded name,
        self.assertTrue(client.Database(db.resource.url).name == name)
    def test_commit(self):
        self.assertTrue(self.db.commit()['ok'] == True)
    def test_create_large_doc(self):
        self.db['foo'] = {'data': '0123456789' * 110 * 1024}  # 10 MB
        self.assertEqual('foo', self.db['foo']['_id'])
    def test_doc_id_quoting(self):
        self.db['foo/bar'] = {'foo': 'bar'}
        self.assertEqual('bar', self.db['foo/bar']['foo'])
        del self.db['foo/bar']
        self.assertEqual(None, self.db.get('foo/bar'))
    def test_unicode(self):
        self.db[u'føø'] = {u'bår': u'Iñtërnâtiônàlizætiøn', 'baz': 'ASCII'}
        self.assertEqual(u'Iñtërnâtiônàlizætiøn', self.db[u'føø'][u'bår'])
        self.assertEqual(u'ASCII', self.db[u'føø'][u'baz'])
    def test_disallow_nan(self):
        try:
            self.db['foo'] = {'number': float('nan')}
            self.fail('Expected ValueError')
        except ValueError:
            pass
    def test_disallow_none_id(self):
        deldoc = lambda: self.db.delete({'_id': None, '_rev': None})
        self.assertRaises(ValueError, deldoc)
    def test_doc_revs(self):
        doc = {'bar': 42}
        self.db['foo'] = doc
        old_rev = doc['_rev']
        doc['bar'] = 43
        self.db['foo'] = doc
        new_rev = doc['_rev']
        new_doc = self.db.get('foo')
        self.assertEqual(new_rev, new_doc['_rev'])
        new_doc = self.db.get('foo', rev=new_rev)
        self.assertEqual(new_rev, new_doc['_rev'])
        old_doc = self.db.get('foo', rev=old_rev)
        self.assertEqual(old_rev, old_doc['_rev'])
        revs = [i for i in self.db.revisions('foo')]
        self.assertEqual(revs[0]['_rev'], new_rev)
        self.assertEqual(revs[1]['_rev'], old_rev)
        gen = self.db.revisions('crap')
        self.assertRaises(StopIteration, lambda: next(gen))
        self.assertTrue(self.db.compact())
        while self.db.info()['compact_running']:
            pass
        # 0.10 responds with 404, 0.9 responds with 500, same content
        doc = 'fail'
        try:
            doc = self.db.get('foo', rev=old_rev)
        except http.ServerError:
            doc = None
        assert doc is None
    def test_attachment_crud(self):
        doc = {'bar': 42}
        self.db['foo'] = doc
        old_rev = doc['_rev']
        self.db.put_attachment(doc, 'Foo bar', 'foo.txt', 'text/plain')
        self.assertNotEqual(old_rev, doc['_rev'])
        doc = self.db['foo']
        attachment = doc['_attachments']['foo.txt']
        self.assertEqual(len('Foo bar'), attachment['length'])
        self.assertEqual('text/plain', attachment['content_type'])
        self.assertEqual(b'Foo bar',
                         self.db.get_attachment(doc, 'foo.txt').read())
        self.assertEqual(b'Foo bar',
                         self.db.get_attachment('foo', 'foo.txt').read())
        old_rev = doc['_rev']
        self.db.delete_attachment(doc, 'foo.txt')
        self.assertNotEqual(old_rev, doc['_rev'])
        self.assertEqual(None, self.db['foo'].get('_attachments'))
    def test_attachment_crud_with_files(self):
        doc = {'bar': 42}
        self.db['foo'] = doc
        old_rev = doc['_rev']
        fileobj = util.StringIO(b'Foo bar baz')
        self.db.put_attachment(doc, fileobj, 'foo.txt')
        self.assertNotEqual(old_rev, doc['_rev'])
        doc = self.db['foo']
        attachment = doc['_attachments']['foo.txt']
        self.assertEqual(len('Foo bar baz'), attachment['length'])
        self.assertEqual('text/plain', attachment['content_type'])
        self.assertEqual(b'Foo bar baz',
                         self.db.get_attachment(doc, 'foo.txt').read())
        self.assertEqual(b'Foo bar baz',
                         self.db.get_attachment('foo', 'foo.txt').read())
        old_rev = doc['_rev']
        self.db.delete_attachment(doc, 'foo.txt')
        self.assertNotEqual(old_rev, doc['_rev'])
        self.assertEqual(None, self.db['foo'].get('_attachments'))
    def test_empty_attachment(self):
        doc = {}
        self.db['foo'] = doc
        old_rev = doc['_rev']
        self.db.put_attachment(doc, '', 'empty.txt')
        self.assertNotEqual(old_rev, doc['_rev'])
        doc = self.db['foo']
        attachment = doc['_attachments']['empty.txt']
        self.assertEqual(0, attachment['length'])
    def test_default_attachment(self):
        doc = {}
        self.db['foo'] = doc
        self.assertTrue(self.db.get_attachment(doc, 'missing.txt') is None)
        sentinel = object()
        self.assertTrue(self.db.get_attachment(doc, 'missing.txt', sentinel) is sentinel)
    def test_attachment_from_fs(self):
        tmpdir = tempfile.mkdtemp()
        tmpfile = os.path.join(tmpdir, 'test.txt')
        f = open(tmpfile, 'w')
        f.write('Hello!')
        f.close()
        doc = {}
        self.db['foo'] = doc
        with open(tmpfile) as f:
            self.db.put_attachment(doc, f)
        doc = self.db.get('foo')
        self.assertTrue(doc['_attachments']['test.txt']['content_type'] == 'text/plain')
        shutil.rmtree(tmpdir)
    def test_attachment_no_filename(self):
        doc = {}
        self.db['foo'] = doc
        self.assertRaises(ValueError, self.db.put_attachment, doc, '')
    def test_json_attachment(self):
        doc = {}
        self.db['foo'] = doc
        self.db.put_attachment(doc, '{}', 'test.json', 'application/json')
        self.assertEqual(self.db.get_attachment(doc, 'test.json').read(), b'{}')
    def test_include_docs(self):
        doc = {'foo': 42, 'bar': 40}
        self.db['foo'] = doc
        rows = list(self.db.query(
            'function(doc) { emit(doc._id, null); }',
            include_docs=True
        ))
        self.assertEqual(1, len(rows))
        self.assertEqual(doc, rows[0].doc)
    def test_query_multi_get(self):
        for i in range(1, 6):
            self.db.save({'i': i})
        res = list(self.db.query('function(doc) { emit(doc.i, null); }',
                                 keys=list(range(1, 6, 2))))
        self.assertEqual(3, len(res))
        for idx, i in enumerate(range(1, 6, 2)):
            self.assertEqual(i, res[idx].key)
    def test_find(self):
        # Mango queries require CouchDB >= 2.x.
        if self.server.version_info()[0] < 2:
            return
        docs = [
            dict(type='Person', name='John Doe'),
            dict(type='Person', name='Mary Jane'),
            dict(type='City', name='Gotham City')
        ]
        self.db.update(docs)
        # the sort needs an index over `name`, the selector selects by `type`
        idx = self.db.index()
        idx['foo', 'bar'] = [{'type': 'asc'}, {'name': 'asc'}]
        res = list(self.db.find(
            {
                'selector': {
                    'type': 'Person'
                },
                'fields': ['name'],
                # we need to specify the complete index here
                'sort': [{'type': 'asc'}, {'name': 'asc'}]
            }
        ))
        self.assertEqual(2, len(res))
        expect = ['John Doe', 'Mary Jane']
        for i, doc in enumerate(res):
            self.assertEqual(set(['name']), doc.keys())
            self.assertEqual(expect[i], doc['name'])
    def test_explain(self):
        if self.server.version_info()[0] < 2:
            return
        mango = {'selector': {'type': 'Person'}, 'fields': ['name']}
        res = self.db.explain(mango)
        self.assertEqual(['name'], res['fields'])
        self.assertEqual({'type': {'$eq': 'Person'}}, res['selector'])
        self.assertEqual(0, res['skip'])
        self.assertEqual(self.db.name, res['dbname'])
    def test_index(self):
        if self.server.version_info()[0] < 2:
            return
        res = list(self.db.index())
        self.assertEqual(1, len(res))
        self.assertEqual({'ddoc': None, 'def': {'fields': [{'_id': 'asc'}]},
                          'name': '_all_docs', 'type': 'special'},
                         res[0])
    def test_add_index(self):
        if self.server.version_info()[0] < 2:
            return
        idx = self.db.index()
        idx['foo', 'bar'] = [{'type': 'asc'}]
        idxs = list(idx)
        self.assertEqual(2, len(idxs))
        for i in idxs:
            if i['ddoc'] is not None:  # special `_all_docs` index
                self.assertEqual({'ddoc': '_design/foo',
                                  'def': {'fields': [{'type': 'asc'}]},
                                  'name': 'bar',
                                  'type': 'json'},
                                 i)
                return
        # Bug fix: was `self.failed()`, which is not a unittest method and
        # raised AttributeError instead of reporting the test failure.
        self.fail()
    def test_remove_index(self):
        if self.server.version_info()[0] < 2:
            return
        idx = self.db.index()
        idx['foo', 'bar'] = [{'type': 'asc'}]
        res = list(idx)
        self.assertEqual(2, len(res))
        del idx['foo', 'bar']
        res = list(idx)
        self.assertEqual(1, len(res))
    def test_bulk_update_conflict(self):
        docs = [
            dict(type='Person', name='John Doe'),
            dict(type='Person', name='Mary Jane'),
            dict(type='City', name='Gotham City')
        ]
        self.db.update(docs)
        # update the first doc to provoke a conflict in the next bulk update
        doc = docs[0].copy()
        self.db[doc['_id']] = doc
        results = self.db.update(docs)
        self.assertEqual(False, results[0][0])
        assert isinstance(results[0][2], http.ResourceConflict)
    def test_bulk_update_all_or_nothing(self):
        docs = [
            dict(type='Person', name='John Doe'),
            dict(type='Person', name='Mary Jane'),
            dict(type='City', name='Gotham City')
        ]
        self.db.update(docs)
        # update the first doc to provoke a conflict in the next bulk update
        doc = docs[0].copy()
        doc['name'] = 'Jane Doe'
        self.db[doc['_id']] = doc
        results = self.db.update(docs, all_or_nothing=True)
        self.assertEqual(True, results[0][0])
        doc = self.db.get(doc['_id'], conflicts=True)
        assert '_conflicts' in doc
        revs = self.db.get(doc['_id'], open_revs='all')
        assert len(revs) == 2
    def test_bulk_update_bad_doc(self):
        self.assertRaises(TypeError, self.db.update, [object()])
    def test_copy_doc(self):
        self.db['foo'] = {'status': 'testing'}
        result = self.db.copy('foo', 'bar')
        self.assertEqual(result, self.db['bar'].rev)
    def test_copy_doc_conflict(self):
        self.db['bar'] = {'status': 'idle'}
        self.db['foo'] = {'status': 'testing'}
        self.assertRaises(http.ResourceConflict, self.db.copy, 'foo', 'bar')
    def test_copy_doc_overwrite(self):
        self.db['bar'] = {'status': 'idle'}
        self.db['foo'] = {'status': 'testing'}
        result = self.db.copy('foo', self.db['bar'])
        doc = self.db['bar']
        self.assertEqual(result, doc.rev)
        self.assertEqual('testing', doc['status'])
    def test_copy_doc_srcobj(self):
        self.db['foo'] = {'status': 'testing'}
        self.db.copy(self.db['foo'], 'bar')
        self.assertEqual('testing', self.db['bar']['status'])
    def test_copy_doc_destobj_norev(self):
        self.db['foo'] = {'status': 'testing'}
        self.db.copy('foo', {'_id': 'bar'})
        self.assertEqual('testing', self.db['bar']['status'])
    def test_copy_doc_src_dictlike(self):
        class DictLike(object):
            def __init__(self, doc):
                self.doc = doc
            def items(self):
                return self.doc.items()
        self.db['foo'] = {'status': 'testing'}
        self.db.copy(DictLike(self.db['foo']), 'bar')
        self.assertEqual('testing', self.db['bar']['status'])
    def test_copy_doc_dest_dictlike(self):
        class DictLike(object):
            def __init__(self, doc):
                self.doc = doc
            def items(self):
                return self.doc.items()
        self.db['foo'] = {'status': 'testing'}
        self.db['bar'] = {}
        self.db.copy('foo', DictLike(self.db['bar']))
        self.assertEqual('testing', self.db['bar']['status'])
    def test_copy_doc_src_baddoc(self):
        self.assertRaises(TypeError, self.db.copy, object(), 'bar')
    def test_copy_doc_dest_baddoc(self):
        self.assertRaises(TypeError, self.db.copy, 'foo', object())
    def test_changes(self):
        self.db['foo'] = {'bar': True}
        self.assertEqual(self.db.changes(since=0)['last_seq'], 1)
        first = next(self.db.changes(feed='continuous'))
        self.assertEqual(first['seq'], 1)
        self.assertEqual(first['id'], 'foo')
    def test_changes_releases_conn(self):
        # Consume an entire changes feed to read the whole response, then check
        # that the HTTP connection made it to the pool.
        list(self.db.changes(feed='continuous', timeout=0))
        scheme, netloc = util.urlsplit(client.DEFAULT_BASE_URL)[:2]
        self.assertTrue(self.db.resource.session.connection_pool.conns[(scheme, netloc)])
    def test_changes_releases_conn_when_lastseq(self):
        # Consume a changes feed, stopping at the 'last_seq' item, i.e. don't
        # let the generator run any further, then check the connection made it
        # to the pool.
        for obj in self.db.changes(feed='continuous', timeout=0):
            if 'last_seq' in obj:
                break
        scheme, netloc = util.urlsplit(client.DEFAULT_BASE_URL)[:2]
        self.assertTrue(self.db.resource.session.connection_pool.conns[(scheme, netloc)])
    def test_changes_conn_usable(self):
        # Consume a changes feed to get a used connection in the pool.
        list(self.db.changes(feed='continuous', timeout=0))
        # Try using the connection again to make sure the connection was left
        # in a good state from the previous request.
        self.assertTrue(self.db.info()['doc_count'] == 0)
    def test_changes_conn_usable_selector(self):
        if self.server.version_info()[0] < 2:
            return
        # Consume a changes feed to get a used connection in the pool.
        list(self.db.changes(feed='continuous',
                             filter='_selector',
                             timeout=0,
                             _selector={'selector': {}}))
        # Try using the connection again to make sure the connection was left
        # in a good state from the previous request.
        self.assertTrue(self.db.info()['doc_count'] == 0)
    def test_changes_usable_selector(self):
        if self.server.version_info()[0] < 2:
            return
        # Consume a changes feed to get a used connection in the pool.
        list(self.db.changes(filter='_selector',
                             _selector={'selector': {}}))
        # Try using the connection again to make sure the connection was left
        # in a good state from the previous request.
        self.assertTrue(self.db.info()['doc_count'] == 0)
    def test_changes_heartbeat(self):
        def wakeup():
            time.sleep(.3)
            self.db.save({})
        threading.Thread(target=wakeup).start()
        for change in self.db.changes(feed='continuous', heartbeat=100):
            break
    def test_purge(self):
        doc = {'a': 'b'}
        self.db['foo'] = doc
        self.assertEqual(self.db.purge([doc])['purge_seq'], 1)
    def test_json_encoding_error(self):
        doc = {'now': datetime.now()}
        self.assertRaises(TypeError, self.db.save, doc)
    def test_security(self):
        security = self.db.security
        self.assertEqual(security, {})
        security['members'] = {'names': ['test'], 'roles': []}
        self.db.security = security
class ViewTestCase(testutil.TempDatabaseMixin, unittest.TestCase):
    """Integration tests for CouchDB views (temporary, permanent, design docs)."""
    def test_row_object(self):
        # A missing key still yields a Row, with the error recorded on it.
        row = list(self.db.view('_all_docs', keys=['blah']))[0]
        self.assertEqual(row.id, None)
        self.assertEqual(row.key, 'blah')
        self.assertEqual(row.value, None)
        self.assertEqual(row.error, 'not_found')
        self.db.save({'_id': 'xyz', 'foo': 'bar'})
        row = list(self.db.view('_all_docs', keys=['xyz']))[0]
        self.assertEqual(row.id, 'xyz')
        self.assertEqual(row.key, 'xyz')
        self.assertEqual(list(row.value.keys()), ['rev'])
        self.assertEqual(row.error, None)
    def test_view_multi_get(self):
        for i in range(1, 6):
            self.db.save({'i': i})
        self.db['_design/test'] = {
            'language': 'javascript',
            'views': {
                'multi_key': {'map': 'function(doc) { emit(doc.i, null); }'}
            }
        }
        res = list(self.db.view('test/multi_key', keys=list(range(1, 6, 2))))
        self.assertEqual(3, len(res))
        for idx, i in enumerate(range(1, 6, 2)):
            self.assertEqual(i, res[idx].key)
    def test_ddoc_info(self):
        self.db['_design/test'] = {
            'language': 'javascript',
            'views': {
                'test': {'map': 'function(doc) { emit(doc.type, null); }'}
            }
        }
        info = self.db.info('test')
        self.assertEqual(info['view_index']['compact_running'], False)
    def test_view_compaction(self):
        for i in range(1, 6):
            self.db.save({'i': i})
        self.db['_design/test'] = {
            'language': 'javascript',
            'views': {
                'multi_key': {'map': 'function(doc) { emit(doc.i, null); }'}
            }
        }
        self.db.view('test/multi_key')
        self.assertTrue(self.db.compact('test'))
    def test_view_cleanup(self):
        # Build an index, replace the design doc's views, then clean up the
        # now-orphaned index files.
        for i in range(1, 6):
            self.db.save({'i': i})
        self.db['_design/test'] = {
            'language': 'javascript',
            'views': {
                'multi_key': {'map': 'function(doc) { emit(doc.i, null); }'}
            }
        }
        self.db.view('test/multi_key')
        ddoc = self.db['_design/test']
        ddoc['views'] = {
            'ids': {'map': 'function(doc) { emit(doc._id, null); }'}
        }
        self.db.update([ddoc])
        self.db.view('test/ids')
        self.assertTrue(self.db.cleanup())
    def test_view_function_objects(self):
        # Requires the python query server to be configured on the server.
        if 'python' not in self.server.config()['query_servers']:
            return
        for i in range(1, 4):
            self.db.save({'i': i, 'j':2*i})
        def map_fun(doc):
            yield doc['i'], doc['j']
        res = list(self.db.query(map_fun, language='python'))
        self.assertEqual(3, len(res))
        for idx, i in enumerate(range(1,4)):
            self.assertEqual(i, res[idx].key)
            self.assertEqual(2*i, res[idx].value)
        def reduce_fun(keys, values):
            return sum(values)
        res = list(self.db.query(map_fun, reduce_fun, 'python'))
        self.assertEqual(1, len(res))
        self.assertEqual(12, res[0].value)
    def test_init_with_resource(self):
        self.db['foo'] = {}
        view = client.PermanentView(self.db.resource('_all_docs').url, '_all_docs')
        self.assertEqual(len(list(view())), 1)
    def test_iter_view(self):
        self.db['foo'] = {}
        view = client.PermanentView(self.db.resource('_all_docs').url, '_all_docs')
        self.assertEqual(len(list(view)), 1)
    def test_update_seq(self):
        self.db['foo'] = {}
        rows = self.db.view('_all_docs', update_seq=True)
        self.assertEqual(rows.update_seq, 1)
    def test_tmpview_repr(self):
        mapfunc = "function(doc) {emit(null, null);}"
        view = client.TemporaryView(self.db.resource('_temp_view'), mapfunc)
        self.assertTrue('TemporaryView' in repr(view))
        self.assertTrue(mapfunc in repr(view))
    def test_wrapper_iter(self):
        class Wrapper(object):
            def __init__(self, doc):
                pass
        self.db['foo'] = {}
        self.assertTrue(isinstance(list(self.db.view('_all_docs', wrapper=Wrapper))[0], Wrapper))
    def test_wrapper_rows(self):
        class Wrapper(object):
            def __init__(self, doc):
                pass
        self.db['foo'] = {}
        self.assertTrue(isinstance(self.db.view('_all_docs', wrapper=Wrapper).rows[0], Wrapper))
    def test_properties(self):
        for attr in ['rows', 'total_rows', 'offset']:
            self.assertTrue(getattr(self.db.view('_all_docs'), attr) is not None)
    def test_rowrepr(self):
        self.db['foo'] = {}
        rows = list(self.db.query("function(doc) {emit(null, 1);}"))
        self.assertTrue('Row' in repr(rows[0]))
        self.assertTrue('id' in repr(rows[0]))
        rows = list(self.db.query("function(doc) {emit(null, 1);}", "function(keys, values, combine) {return sum(values);}"))
        self.assertTrue('Row' in repr(rows[0]))
        self.assertTrue('id' not in repr(rows[0]))
class ShowListTestCase(testutil.TempDatabaseMixin, unittest.TestCase):
    """Tests for design-document _show and _list functions."""

    # Show function: body is "<doc id>:<'r' query param or '<default>'>".
    show_func = """
        function(doc, req) {
            return {"body": req.id + ":" + (req.query.r || "<default>")};
        }
        """

    # List function: one CSV line per row id, with an optional 'id' header.
    list_func = """
        function(head, req) {
            start({headers: {'Content-Type': 'text/csv'}});
            if (req.query.include_header) {
                send('id' + '\\r\\n');
            }
            var row;
            while (row = getRow()) {
                send(row.id + '\\r\\n');
            }
        }
        """

    design_doc = {'_id': '_design/foo',
                  'shows': {'bar': show_func},
                  'views': {'by_id': {'map': "function(doc) {emit(doc._id, null)}"},
                            'by_name': {'map': "function(doc) {emit(doc.name, null)}"}},
                  'lists': {'list': list_func}}

    def setUp(self):
        super(ShowListTestCase, self).setUp()
        # Workaround for possible bug in CouchDB. Adding a timestamp avoids a
        # 409 Conflict error when pushing the same design doc that existed in a
        # now deleted database.
        design_doc = dict(self.design_doc)
        design_doc['timestamp'] = time.time()
        self.db.save(design_doc)
        self.db.update([{'_id': '1', 'name': 'one'}, {'_id': '2', 'name': 'two'}])

    def test_show_urls(self):
        # Both the full _design path and the short "ddoc/name" form resolve.
        self.assertEqual(self.db.show('_design/foo/_show/bar')[1].read(), b'null:<default>')
        self.assertEqual(self.db.show('foo/bar')[1].read(), b'null:<default>')

    def test_show_docid(self):
        # Without a doc id the show sees null; with one it sees that id.
        self.assertEqual(self.db.show('foo/bar')[1].read(), b'null:<default>')
        self.assertEqual(self.db.show('foo/bar', '1')[1].read(), b'1:<default>')
        self.assertEqual(self.db.show('foo/bar', '2')[1].read(), b'2:<default>')

    def test_show_params(self):
        # Extra keyword args are forwarded as query parameters.
        self.assertEqual(self.db.show('foo/bar', r='abc')[1].read(), b'null:abc')

    def test_list(self):
        # The list renders view rows as CSV, optionally preceded by a header.
        self.assertEqual(self.db.list('foo/list', 'foo/by_id')[1].read(), b'1\r\n2\r\n')
        self.assertEqual(self.db.list('foo/list', 'foo/by_id', include_header='true')[1].read(), b'id\r\n1\r\n2\r\n')

    def test_list_keys(self):
        # keys=[...] restricts list output to the given view keys.
        self.assertEqual(self.db.list('foo/list', 'foo/by_id', keys=['1'])[1].read(), b'1\r\n')

    def test_list_view_params(self):
        # View query params (startkey/endkey/descending) are honoured by lists.
        self.assertEqual(self.db.list('foo/list', 'foo/by_name', startkey='o', endkey='p')[1].read(), b'1\r\n')
        self.assertEqual(self.db.list('foo/list', 'foo/by_name', descending=True)[1].read(), b'2\r\n1\r\n')
class UpdateHandlerTestCase(testutil.TempDatabaseMixin, unittest.TestCase):
    """Tests for design-document _update handler functions."""

    # Update handler: creates a doc when given an unknown id, answers
    # "empty doc" with no doc/id at all, and renames an existing doc.
    update_func = """
        function(doc, req) {
            if (!doc) {
                if (req.id) {
                    return [{_id : req.id}, "new doc"]
                }
                return [null, "empty doc"];
            }
            doc.name = "hello";
            return [doc, "hello doc"];
        }
        """

    design_doc = {'_id': '_design/foo',
                  'language': 'javascript',
                  'updates': {'bar': update_func}}

    def setUp(self):
        super(UpdateHandlerTestCase, self).setUp()
        # Workaround for possible bug in CouchDB. Adding a timestamp avoids a
        # 409 Conflict error when pushing the same design doc that existed in a
        # now deleted database.
        design_doc = dict(self.design_doc)
        design_doc['timestamp'] = time.time()
        self.db.save(design_doc)
        self.db.update([{'_id': 'existed', 'name': 'bar'}])

    def test_empty_doc(self):
        # No doc id: the handler's null branch responds "empty doc".
        self.assertEqual(self.db.update_doc('foo/bar')[1].read(), b'empty doc')

    def test_new_doc(self):
        # Unknown doc id: the handler creates the doc and responds "new doc".
        self.assertEqual(self.db.update_doc('foo/bar', 'new')[1].read(), b'new doc')

    def test_update_doc(self):
        # Existing doc id: the handler mutates it and responds "hello doc".
        self.assertEqual(self.db.update_doc('foo/bar', 'existed')[1].read(), b'hello doc')
class ViewIterationTestCase(testutil.TempDatabaseMixin, unittest.TestCase):
    """Tests for Database.iterview() batching, limits and key options."""

    # Number of documents saved in setUp; keys are num // 2, so keys repeat.
    num_docs = 100

    def docfromnum(self, num):
        # Build the stored document for index `num` (two docs share each key).
        return {'_id': util.utype(num), 'num': int(num / 2)}

    def docfromrow(self, row):
        # Reconstruct a comparable document dict from a view row.
        return {'_id': row['id'], 'num': row['key']}

    def setUp(self):
        super(ViewIterationTestCase, self).setUp()
        design_doc = {'_id': '_design/test',
                      'views': {'nums': {'map': 'function(doc) {emit(doc.num, null);}'},
                                'nulls': {'map': 'function(doc) {emit(null, null);}'}}}
        self.db.save(design_doc)
        self.db.update([self.docfromnum(num) for num in range(self.num_docs)])

    def test_allrows(self):
        # All rows come back in key order regardless of the batch size.
        rows = list(self.db.iterview('test/nums', 10))
        self.assertEqual(len(rows), self.num_docs)
        self.assertEqual([self.docfromrow(row) for row in rows],
                         [self.docfromnum(num) for num in range(self.num_docs)])

    def test_batchsizes(self):
        # Check silly _batch values.
        self.assertRaises(ValueError, lambda: next(self.db.iterview('test/nums', 0)))
        self.assertRaises(ValueError, lambda: next(self.db.iterview('test/nums', -1)))
        # Test various _batch sizes that are likely to cause trouble.
        self.assertEqual(len(list(self.db.iterview('test/nums', 1))), self.num_docs)
        self.assertEqual(len(list(self.db.iterview('test/nums', int(self.num_docs / 2)))), self.num_docs)
        self.assertEqual(len(list(self.db.iterview('test/nums', self.num_docs * 2))), self.num_docs)
        self.assertEqual(len(list(self.db.iterview('test/nums', self.num_docs - 1))), self.num_docs)
        self.assertEqual(len(list(self.db.iterview('test/nums', self.num_docs))), self.num_docs)
        self.assertEqual(len(list(self.db.iterview('test/nums', self.num_docs + 1))), self.num_docs)

    def test_batchsizes_with_skip(self):
        # Skipping half the docs still returns the remaining half.
        self.assertEqual(
            len(list(self.db.iterview('test/nums', self.num_docs // 10, skip=self.num_docs // 2))),
            self.num_docs // 2)

    def test_limit(self):
        # limit=0 doesn't make sense for iterview.
        self.assertRaises(ValueError, lambda: next(self.db.iterview('test/nums', 10, limit=0)))
        # Test various limit sizes that are likely to cause trouble.
        for limit in [1, int(self.num_docs / 4), self.num_docs - 1, self.num_docs,
                      self.num_docs + 1]:
            self.assertEqual([self.docfromrow(doc) for doc in self.db.iterview('test/nums', 10, limit=limit)],
                             [self.docfromnum(x) for x in range(min(limit, self.num_docs))])
        # Test limit same as batch size, in case of weird edge cases.
        limit = int(self.num_docs / 4)
        self.assertEqual([self.docfromrow(doc) for doc in self.db.iterview('test/nums', limit, limit=limit)],
                         [self.docfromnum(x) for x in range(limit)])

    def test_descending(self):
        # descending=True reverses key order; limit applies after reversal.
        self.assertEqual([self.docfromrow(doc) for doc in self.db.iterview('test/nums', 10, descending=True)],
                         [self.docfromnum(x) for x in range(self.num_docs - 1, -1, -1)])
        self.assertEqual([self.docfromrow(doc) for doc in self.db.iterview('test/nums', 10, limit=int(self.num_docs / 4), descending=True)],
                         [self.docfromnum(x) for x in range(self.num_docs - 1, int(self.num_docs * 3 / 4) - 1, -1)])

    def test_startkey(self):
        # startkey positions iteration within the (possibly descending) range.
        self.assertEqual([self.docfromrow(doc) for doc in self.db.iterview('test/nums', 10, startkey=int(self.num_docs / 2) - 1)],
                         [self.docfromnum(x) for x in range(self.num_docs - 2, self.num_docs)])
        self.assertEqual([self.docfromrow(doc) for doc in self.db.iterview('test/nums', 10, startkey=1, descending=True)],
                         [self.docfromnum(x) for x in range(3, -1, -1)])

    def test_nullkeys(self):
        # Views emitting null keys still iterate every document.
        self.assertEqual(len(list(self.db.iterview('test/nulls', 10))), self.num_docs)
def suite():
    """Aggregate all client test cases plus the module doctests into one suite."""
    cases = (ServerTestCase, DatabaseTestCase, ViewTestCase,
             ShowListTestCase, UpdateHandlerTestCase, ViewIterationTestCase)
    combined = unittest.TestSuite()
    for case in cases:
        combined.addTest(unittest.makeSuite(case, 'test'))
    combined.addTest(testutil.doctest_suite(client))
    return combined


if __name__ == '__main__':
    unittest.main(defaultTest='suite')
|
interactive_session.py | import code
import logging
from threading import Thread
from .helpers import setup_logging
# Set up logging
setup_logging()
logger = logging.getLogger(__name__)
class InteractiveSession:
    """Interactive control shell wrapped around a MitM handler.

    Starts a ``code.InteractiveConsole`` in a background thread (sharing this
    module's globals/locals so the helper methods below are reachable from the
    shell) while the constructing thread keeps pumping
    ``mitm_handler.handle_incoming_data()`` until the console exits (CTRL+D).
    """

    # Handler driving the actual scan/advertise/connect operations; set in __init__.
    mitm_handler = None

    def __init__(self, mitm_handler):
        # NOTE(review): __init__ blocks until the shell is closed — construction
        # doubles as the session's main loop.
        self.mitm_handler = mitm_handler
        # Create InteractiveConsole shell that inherits environment and starts in a new Thread
        # To correctly end the InteractiveConsole use CTRL+D
        logger.info("[!] Starting InteractiveConsole shell...")
        variables = {**globals(), **locals()}
        shell = code.InteractiveConsole(variables)
        shell_thread = Thread(target=shell.interact)
        shell_thread.start()
        # Start loop for mitm_handler to continuously check incoming data while the shell_thread is alive.
        while shell_thread.is_alive():
            self.mitm_handler.handle_incoming_data()

    def scan_enable(self):
        # Delegate: start scanning on the handler.
        self.mitm_handler.scan_enable()

    def scan_disable(self):
        # Delegate: stop scanning on the handler.
        self.mitm_handler.scan_disable()

    def advertise_enable(self):
        # Delegate: start advertising on the handler.
        self.mitm_handler.advertise_enable()

    def advertise_disable(self):
        # Delegate: stop advertising on the handler.
        self.mitm_handler.advertise_disable()

    def connect(self, bd_addr):
        # bd_addr: Bluetooth device address of the target — format assumed
        # to be whatever mitm_handler.connect expects; TODO confirm.
        self.mitm_handler.connect(bd_addr)

    def disconnect(self):
        # Delegate: drop the current connection.
        self.mitm_handler.disconnect()

    def connect_and_imitate(self, imitated_bd_addr, spoofed_bd_addr):
        # Connect to the device being imitated, then advertise as it using
        # the spoofed address.
        self.mitm_handler.connect(imitated_bd_addr)
        self.mitm_handler.imitate_advertise_enable(imitated_bd_addr, spoofed_bd_addr)

    def imitate_advertise_enable(self, imitated_bd_addr, spoofed_bd_addr):
        # Delegate: advertise as imitated_bd_addr while spoofing spoofed_bd_addr.
        self.mitm_handler.imitate_advertise_enable(imitated_bd_addr, spoofed_bd_addr)
|
startcollection.py | # coding:utf-8
import threading
import datetime
import time
from terminal.gethostapdfield import HOSTAPD
class T1:
def thread2(self):
count = 0
while True:
count += 1
print 'swm'
if count == 10:
break
return 'hello'
class T2:
def thread1(self):
count = 0
while True:
print 'nuonuo'
count +=1
if count ==20:
break
return 'swm'
def start():
t1 = T1()
t2 = T2()
for i in range(0, 3):
print datetime.datetime.now(), i
thread1 = threading.ThreadW(target=t1.thread2)
thread2 = threading.Thread(target=t2.thread1)
thread1.setDaemon(1)
thread2.setDaemon(1)
a = thread1.start()
b = thread2.start()
thread2.join()
thread1.join()
print a
print b
print threading.activeCount()
print datetime.datetime.now(), 'stop the program'
return
if __name__ == '__main__':
    # Python multitasking demo.
    # mobi = HOSTAPD()
    # One thread collecting mobile-phone information:
    # thread1 = threading.Thread(target=mobi.startcollect)
    # One thread collecting network information:
    # thread2 = threading.Thread(target=t2.start)
    # Start the threads:
    # thread1.start()
    # thread1.start()
    # thread2.start()
    # thread1.join()
    # thread2.join()
    # time.sleep(5)
    print datetime.datetime.now(), 'start collect'
    start()
Slider.py | import sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import serial
import threading
class ServoSlider(QWidget):
    """Qt widget with a 0-180 degree slider that drives a servo over serial.

    Slider changes are written to the serial device as "<angle>\\n"; a
    background thread echoes whatever the device sends back to stdout.
    """

    def __init__(self, parent = None):
        super(ServoSlider, self).__init__(parent)
        layout = QVBoxLayout()
        # Horizontal slider spanning the servo's 0-180 degree range.
        self.sl = QSlider(Qt.Horizontal)
        self.sl.setMinimum(0)
        self.sl.setMaximum(180)
        self.sl.setValue(90)  # start centred
        self.sl.setTickPosition(QSlider.TicksBelow)
        self.sl.setTickInterval(5)
        layout.addWidget(self.sl)
        self.sl.valueChanged.connect(self.value_change)
        self.setLayout(layout)
        self.setWindowTitle("Servo Controller")
        # NOTE(review): hard-coded macOS device path, default baud rate.
        self.ser = serial.Serial('/dev/tty.usbmodem1411')
        # Reader thread keeps printing incoming serial data forever
        # (non-daemon, so it keeps the process alive after the window closes).
        thread = threading.Thread(target=self.read_from_serial, args=(self.ser, ))
        thread.start()

    def value_change(self):
        # Send the new slider position to the servo, newline-terminated.
        # NOTE(review): pyserial on Python 3 requires bytes; writing str works
        # on Python 2 only — confirm the target interpreter.
        size = self.sl.value()
        self.ser.write(str(size) + "\n")

    def read_from_serial(self, ser):
        # Blocking read loop; echoes each line from the device to stdout.
        while True:
            print(ser.readline())
def main():
    """Create the Qt application, show the slider window, run the event loop."""
    app = QApplication(sys.argv)
    window = ServoSlider()
    window.show()
    sys.exit(app.exec_())


if __name__ == '__main__':
    main()
datasets.py | # Dataset utils and dataloaders
import glob
import logging
import math
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from utils.general import xyxy2xywh, xywh2xyxy
from utils.torch_utils import torch_distributed_zero_first
# Parameters
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng'] # acceptable image suffixes
vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes
logger = logging.getLogger(__name__)
# Get orientation exif tag
# Get orientation exif tag: resolve the numeric EXIF tag id for 'Orientation'
# once at import time; the resulting module-level `orientation` is used by
# exif_size() below.
for orientation in ExifTags.TAGS.keys():
    if ExifTags.TAGS[orientation] == 'Orientation':
        break
def get_hash(files):
    """Return a single cheap 'hash' for a list of files: the total size in
    bytes of those that exist on disk (missing paths contribute nothing)."""
    existing = (f for f in files if os.path.isfile(f))
    return sum(map(os.path.getsize, existing))
def exif_size(img):
    """Return PIL image size (width, height), corrected for EXIF orientation.

    Falls back to the raw ``img.size`` when the image has no EXIF data or the
    orientation tag cannot be read.

    Fix: the original bare ``except:`` also swallowed SystemExit and
    KeyboardInterrupt; narrowed to ``except Exception`` while keeping the
    best-effort fallback behaviour.
    """
    s = img.size  # (width, height)
    try:
        # `orientation` is the module-level EXIF tag id resolved at import time.
        rotation = dict(img._getexif().items())[orientation]
        if rotation == 6:  # rotation 270
            s = (s[1], s[0])
        elif rotation == 8:  # rotation 90
            s = (s[1], s[0])
    except Exception:
        # No EXIF data / no orientation tag: keep the raw size.
        pass
    return s
def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
                      rank=-1, world_size=1, workers=8, image_weights=False):
    """Build a LoadImagesAndLabels dataset and its (possibly DDP-sharded) dataloader.

    Returns:
        (dataloader, dataset) tuple.
    """
    # Make sure only the first process in DDP process the dataset first, and the following others can use the cache
    with torch_distributed_zero_first(rank):
        dataset = LoadImagesAndLabels(path, imgsz, batch_size,
                                      augment=augment,  # augment images
                                      hyp=hyp,  # augmentation hyperparameters
                                      rect=rect,  # rectangular training
                                      cache_images=cache,
                                      single_cls=opt.single_cls,
                                      stride=int(stride),
                                      pad=pad,
                                      rank=rank,
                                      image_weights=image_weights)

    batch_size = min(batch_size, len(dataset))
    # Worker count is bounded by CPUs per process, the batch size, and the requested cap.
    nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers])  # number of workers
    # DistributedSampler shards the dataset across ranks when running under DDP.
    sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
    loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader
    # Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader()
    dataloader = loader(dataset,
                        batch_size=batch_size,
                        num_workers=nw,
                        sampler=sampler,
                        pin_memory=True,
                        collate_fn=LoadImagesAndLabels.collate_fn)
    return dataloader, dataset
class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
    """ Dataloader that reuses workers

    Uses same syntax as vanilla DataLoader
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Swap in a never-ending batch sampler; object.__setattr__ bypasses
        # DataLoader's guard that forbids reassigning batch_sampler after init.
        object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
        # One persistent iterator: worker processes are created once and
        # reused across epochs instead of being respawned.
        self.iterator = super().__iter__()

    def __len__(self):
        # Length of one pass over the underlying (non-repeating) sampler.
        return len(self.batch_sampler.sampler)

    def __iter__(self):
        # Yield exactly one epoch's worth of batches from the persistent iterator.
        for i in range(len(self)):
            yield next(self.iterator)
class _RepeatSampler(object):
    """ Sampler that repeats forever

    Args:
        sampler (Sampler): sampler to cycle through indefinitely
    """

    def __init__(self, sampler):
        self.sampler = sampler

    def __iter__(self):
        # Re-create the underlying iterator on every pass so each cycle picks
        # up the sampler's current ordering (e.g. after a reshuffle).
        while True:
            for item in self.sampler:
                yield item
class LoadImages:  # for inference
    """Iterator over image files and video frames for inference.

    Accepts a glob pattern, a directory, or a single file. Images are yielded
    one per __next__; videos are yielded frame by frame.
    """

    def __init__(self, path, img_size=640):
        p = str(Path(path))  # os-agnostic
        p = os.path.abspath(p)  # absolute path
        if '*' in p:
            files = sorted(glob.glob(p, recursive=True))  # glob
        elif os.path.isdir(p):
            files = sorted(glob.glob(os.path.join(p, '*.*')))  # dir
        elif os.path.isfile(p):
            files = [p]  # files
        else:
            raise Exception('ERROR: %s does not exist' % p)

        # Partition into images and videos by file extension.
        images = [x for x in files if x.split('.')[-1].lower() in img_formats]
        videos = [x for x in files if x.split('.')[-1].lower() in vid_formats]
        ni, nv = len(images), len(videos)

        self.img_size = img_size
        self.files = images + videos
        self.nf = ni + nv  # number of files
        self.video_flag = [False] * ni + [True] * nv
        self.mode = 'images'
        if any(videos):
            self.new_video(videos[0])  # new video
        else:
            self.cap = None
        assert self.nf > 0, 'No images or videos found in %s. Supported formats are:\nimages: %s\nvideos: %s' % \
                            (p, img_formats, vid_formats)

    def __iter__(self):
        self.count = 0
        return self

    def __next__(self):
        # Returns (path, letterboxed CHW RGB image, original BGR image, capture or None).
        if self.count == self.nf:
            raise StopIteration
        path = self.files[self.count]

        if self.video_flag[self.count]:
            # Read video
            self.mode = 'video'
            ret_val, img0 = self.cap.read()
            if not ret_val:
                # Current video exhausted: release it and advance to the next file.
                self.count += 1
                self.cap.release()
                if self.count == self.nf:  # last video
                    raise StopIteration
                else:
                    path = self.files[self.count]
                    self.new_video(path)
                    ret_val, img0 = self.cap.read()

            self.frame += 1
            print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nf, self.frame, self.nframes, path), end='')

        else:
            # Read image
            self.count += 1
            img0 = cv2.imread(path)  # BGR
            assert img0 is not None, 'Image Not Found ' + path
            print('image %g/%g %s: ' % (self.count, self.nf, path), end='')

        # Padded resize
        img = letterbox(img0, new_shape=self.img_size)[0]

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        return path, img, img0, self.cap

    def new_video(self, path):
        # Open a new video file and reset the per-video frame counter.
        self.frame = 0
        self.cap = cv2.VideoCapture(path)
        self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))

    def __len__(self):
        return self.nf  # number of files
class LoadWebcam:  # for inference
    """Stream frames from a local webcam or an IP camera for inference."""

    def __init__(self, pipe='0', img_size=640):
        self.img_size = img_size

        if pipe.isnumeric():
            pipe = eval(pipe)  # local camera
        # pipe = 'rtsp://192.168.1.64/1'  # IP camera
        # pipe = 'rtsp://username:password@192.168.1.64/1'  # IP camera with login
        # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg'  # IP golf camera

        self.pipe = pipe
        self.cap = cv2.VideoCapture(pipe)  # video capture object
        self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3)  # set buffer size

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        # Returns ('webcam.jpg', letterboxed CHW RGB frame, original frame, None).
        self.count += 1
        if cv2.waitKey(1) == ord('q'):  # q to quit
            self.cap.release()
            cv2.destroyAllWindows()
            raise StopIteration

        # Read frame
        if self.pipe == 0:  # local camera
            ret_val, img0 = self.cap.read()
            img0 = cv2.flip(img0, 1)  # flip left-right
        else:  # IP camera
            # Grab continuously but only decode every 30th frame to keep up
            # with the stream.
            n = 0
            while True:
                n += 1
                self.cap.grab()
                if n % 30 == 0:  # skip frames
                    ret_val, img0 = self.cap.retrieve()
                    if ret_val:
                        break

        # Print
        assert ret_val, 'Camera Error %s' % self.pipe
        img_path = 'webcam.jpg'
        print('webcam %g: ' % self.count, end='')

        # Padded resize
        img = letterbox(img0, new_shape=self.img_size)[0]

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        return img_path, img, img0, None

    def __len__(self):
        return 0
class LoadStreams:  # multiple IP or RTSP cameras
    """Read several video streams concurrently, one daemon thread per source."""

    def __init__(self, sources='streams.txt', img_size=640):
        self.mode = 'images'
        self.img_size = img_size

        # `sources` is either a file listing one stream URL per line, or a single URL.
        if os.path.isfile(sources):
            with open(sources, 'r') as f:
                sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
        else:
            sources = [sources]

        n = len(sources)
        self.imgs = [None] * n  # latest frame per stream, updated by the reader threads
        self.sources = sources
        for i, s in enumerate(sources):
            # Start the thread to read frames from the video stream
            print('%g/%g: %s... ' % (i + 1, n, s), end='')
            cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s)
            assert cap.isOpened(), 'Failed to open %s' % s
            w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fps = cap.get(cv2.CAP_PROP_FPS) % 100
            _, self.imgs[i] = cap.read()  # guarantee first frame
            thread = Thread(target=self.update, args=([i, cap]), daemon=True)
            print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
            thread.start()
        print('')  # newline

        # check for common shapes
        s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0)  # inference shapes
        self.rect = np.unique(s, axis=0).shape[0] == 1  # rect inference if all shapes equal
        if not self.rect:
            print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')

    def update(self, index, cap):
        # Read next stream frame in a daemon thread
        n = 0
        while cap.isOpened():
            n += 1
            # _, self.imgs[index] = cap.read()
            cap.grab()
            if n == 4:  # read every 4th frame
                _, self.imgs[index] = cap.retrieve()
                n = 0
            time.sleep(0.01)  # wait time

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        # Returns (sources, stacked letterboxed batch, list of raw frames, None).
        self.count += 1
        img0 = self.imgs.copy()
        if cv2.waitKey(1) == ord('q'):  # q to quit
            cv2.destroyAllWindows()
            raise StopIteration

        # Letterbox
        img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]

        # Stack
        img = np.stack(img, 0)

        # Convert
        img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB, to bsx3x416x416
        img = np.ascontiguousarray(img)

        return self.sources, img, img0, None

    def __len__(self):
        return 0  # 1E12 frames = 32 streams at 30 FPS for 30 years
def img2label_paths(img_paths):
    """Map image paths to their label-file paths.

    Swaps the first '/images/' path component for '/labels/' and the image
    extension for '.txt'.
    """
    img_dir = os.sep + 'images' + os.sep
    lbl_dir = os.sep + 'labels' + os.sep
    label_paths = []
    for p in img_paths:
        swapped = p.replace(img_dir, lbl_dir, 1)
        ext = '.' + swapped.split('.')[-1]
        label_paths.append(swapped.replace(ext, '.txt'))
    return label_paths
class LoadImagesAndLabels(Dataset):  # for training/testing
    """Dataset of images plus YOLO-format labels with mosaic/HSV/flip augmentation.

    Label metadata is cached to a `.cache` file next to the label directory and
    rebuilt whenever the file-list hash changes.
    """

    def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
                 cache_images=False, single_cls=False, stride=32, pad=0.0, rank=-1):
        self.img_size = img_size
        self.augment = augment
        self.hyp = hyp
        self.image_weights = image_weights
        self.rect = False if image_weights else rect
        self.mosaic = self.augment and not self.rect  # load 4 images at a time into a mosaic (only during training)
        self.mosaic_border = [-img_size // 2, -img_size // 2]
        self.stride = stride

        # Collect image paths from directories, list files, or a mixture of both.
        try:
            f = []  # image files
            for p in path if isinstance(path, list) else [path]:
                p = Path(p)  # os-agnostic
                if p.is_dir():  # dir
                    f += glob.glob(str(p / '**' / '*.*'), recursive=True)
                elif p.is_file():  # file
                    with open(p, 'r') as t:
                        t = t.read().strip().splitlines()
                        parent = str(p.parent) + os.sep
                        f += [x.replace('./', parent) if x.startswith('./') else x for x in t]  # local to global path
                else:
                    raise Exception('%s does not exist' % p)
            self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats])
            assert self.img_files, 'No images found'
        except Exception as e:
            raise Exception('Error loading data from %s: %s\nSee %s' % (path, e, help_url))

        # Check cache
        self.label_files = img2label_paths(self.img_files)  # labels
        cache_path = Path(self.label_files[0]).parent.with_suffix('.cache')  # cached labels
        if cache_path.is_file():
            cache = torch.load(cache_path)  # load
            if cache['hash'] != get_hash(self.label_files + self.img_files) or 'results' not in cache:  # changed
                cache = self.cache_labels(cache_path)  # re-cache
        else:
            cache = self.cache_labels(cache_path)  # cache

        # Display cache
        [nf, nm, ne, nc, n] = cache.pop('results')  # found, missing, empty, corrupted, total
        desc = f"Scanning '{cache_path}' for images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
        tqdm(None, desc=desc, total=n, initial=n)
        assert nf > 0 or not augment, f'No labels found in {cache_path}. Can not train without labels. See {help_url}'

        # Read cache
        cache.pop('hash')  # remove hash
        labels, shapes = zip(*cache.values())
        self.labels = list(labels)
        self.shapes = np.array(shapes, dtype=np.float64)
        self.img_files = list(cache.keys())  # update
        self.label_files = img2label_paths(cache.keys())  # update
        if single_cls:
            # Collapse every class id to 0 for single-class training.
            for x in self.labels:
                x[:, 0] = 0

        n = len(shapes)  # number of images
        bi = np.floor(np.arange(n) / batch_size).astype(np.int)  # batch index
        nb = bi[-1] + 1  # number of batches
        self.batch = bi  # batch index of image
        self.n = n
        self.indices = range(n)

        # Rectangular Training
        if self.rect:
            # Sort by aspect ratio
            s = self.shapes  # wh
            ar = s[:, 1] / s[:, 0]  # aspect ratio
            irect = ar.argsort()
            self.img_files = [self.img_files[i] for i in irect]
            self.label_files = [self.label_files[i] for i in irect]
            self.labels = [self.labels[i] for i in irect]
            self.shapes = s[irect]  # wh
            ar = ar[irect]

            # Set training image shapes: one shared shape per batch, sized to
            # the most extreme aspect ratio in that batch.
            shapes = [[1, 1]] * nb
            for i in range(nb):
                ari = ar[bi == i]
                mini, maxi = ari.min(), ari.max()
                if maxi < 1:
                    shapes[i] = [maxi, 1]
                elif mini > 1:
                    shapes[i] = [1, 1 / mini]

            self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride

        # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
        self.imgs = [None] * n
        if cache_images:
            gb = 0  # Gigabytes of cached images
            self.img_hw0, self.img_hw = [None] * n, [None] * n
            results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n)))  # 8 threads
            pbar = tqdm(enumerate(results), total=n)
            for i, x in pbar:
                self.imgs[i], self.img_hw0[i], self.img_hw[i] = x  # img, hw_original, hw_resized = load_image(self, i)
                gb += self.imgs[i].nbytes
                pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)

    def cache_labels(self, path=Path('./labels.cache')):
        # Cache dataset labels, check images and read shapes
        x = {}  # dict
        nm, nf, ne, nc = 0, 0, 0, 0  # number missing, found, empty, duplicate
        pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
        for i, (im_file, lb_file) in enumerate(pbar):
            try:
                # verify images
                im = Image.open(im_file)
                im.verify()  # PIL verify
                shape = exif_size(im)  # image size
                assert (shape[0] > 9) & (shape[1] > 9), 'image size <10 pixels'

                # verify labels
                if os.path.isfile(lb_file):
                    nf += 1  # label found
                    with open(lb_file, 'r') as f:
                        l = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32)  # labels
                    if len(l):
                        assert l.shape[1] == 5, 'labels require 5 columns each'
                        assert (l >= 0).all(), 'negative labels'
                        assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels'
                        assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels'
                    else:
                        ne += 1  # label empty
                        l = np.zeros((0, 5), dtype=np.float32)
                else:
                    nm += 1  # label missing
                    l = np.zeros((0, 5), dtype=np.float32)
                x[im_file] = [l, shape]
            except Exception as e:
                # Corrupted image or label files are counted and skipped.
                nc += 1
                print('WARNING: Ignoring corrupted image and/or label %s: %s' % (im_file, e))

            pbar.desc = f"Scanning '{path.parent / path.stem}' for images and labels... " \
                        f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted"

        if nf == 0:
            print(f'WARNING: No labels found in {path}. See {help_url}')

        x['hash'] = get_hash(self.label_files + self.img_files)
        x['results'] = [nf, nm, ne, nc, i + 1]
        torch.save(x, path)  # save for next time
        logging.info(f"New cache created: {path}")
        return x

    def __len__(self):
        return len(self.img_files)

    # def __iter__(self):
    #     self.count = -1
    #     print('ran dataset iter')
    #     #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
    #     return self

    def __getitem__(self, index):
        # Returns (image tensor CHW RGB, labels (n, 6), file path, shapes-for-rescale or None).
        index = self.indices[index]  # linear, shuffled, or image_weights

        hyp = self.hyp
        mosaic = self.mosaic and random.random() < hyp['mosaic']
        if mosaic:
            # Load mosaic
            img, labels = load_mosaic(self, index)
            shapes = None

            # MixUp https://arxiv.org/pdf/1710.09412.pdf
            if random.random() < hyp['mixup']:
                img2, labels2 = load_mosaic(self, random.randint(0, self.n - 1))
                r = np.random.beta(8.0, 8.0)  # mixup ratio, alpha=beta=8.0
                img = (img * r + img2 * (1 - r)).astype(np.uint8)
                labels = np.concatenate((labels, labels2), 0)

        else:
            # Load image
            img, (h0, w0), (h, w) = load_image(self, index)

            # Letterbox
            shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size  # final letterboxed shape
            img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
            shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling

            # Load labels
            labels = []
            x = self.labels[index]
            if x.size > 0:
                # Normalized xywh to pixel xyxy format
                labels = x.copy()
                labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0]  # pad width
                labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1]  # pad height
                labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
                labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]

        if self.augment:
            # Augment imagespace
            if not mosaic:
                # (the mosaic path applies random_perspective inside load_mosaic)
                img, labels = random_perspective(img, labels,
                                                 degrees=hyp['degrees'],
                                                 translate=hyp['translate'],
                                                 scale=hyp['scale'],
                                                 shear=hyp['shear'],
                                                 perspective=hyp['perspective'])

            # Augment colorspace
            augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])

            # Apply cutouts
            # if random.random() < 0.9:
            #     labels = cutout(img, labels)

        nL = len(labels)  # number of labels
        if nL:
            labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])  # convert xyxy to xywh
            labels[:, [2, 4]] /= img.shape[0]  # normalized height 0-1
            labels[:, [1, 3]] /= img.shape[1]  # normalized width 0-1

        if self.augment:
            # flip up-down
            if random.random() < hyp['flipud']:
                img = np.flipud(img)
                if nL:
                    labels[:, 2] = 1 - labels[:, 2]

            # flip left-right
            if random.random() < hyp['fliplr']:
                img = np.fliplr(img)
                if nL:
                    labels[:, 1] = 1 - labels[:, 1]

        # Column 0 is reserved for the image index filled in by collate_fn.
        labels_out = torch.zeros((nL, 6))
        if nL:
            labels_out[:, 1:] = torch.from_numpy(labels)

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        return torch.from_numpy(img), labels_out, self.img_files[index], shapes

    @staticmethod
    def collate_fn(batch):
        # Stack a list of __getitem__ outputs into batch tensors.
        img, label, path, shapes = zip(*batch)  # transposed
        for i, l in enumerate(label):
            l[:, 0] = i  # add target image index for build_targets()
        return torch.stack(img, 0), torch.cat(label, 0), path, shapes
# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, index):
    # loads 1 image from dataset, returns img, original hw, resized hw
    img = self.imgs[index]
    if img is None:  # not cached
        path = self.img_files[index]
        img = cv2.imread(path)  # BGR
        assert img is not None, 'Image Not Found ' + path
        h0, w0 = img.shape[:2]  # orig hw
        r = self.img_size / max(h0, w0)  # resize image to img_size
        if r != 1:  # always resize down, only resize up if training with augmentation
            # INTER_AREA gives better quality when shrinking; LINEAR otherwise.
            interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
            img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
        return img, (h0, w0), img.shape[:2]  # img, hw_original, hw_resized
    else:
        # Cached path: shapes were recorded when the cache was filled.
        return self.imgs[index], self.img_hw0[index], self.img_hw[index]  # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
    """Apply random HSV colour jitter to `img` in place (BGR uint8 expected)."""
    r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1  # random gains
    hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
    dtype = img.dtype  # uint8

    # Per-channel lookup tables; hue wraps modulo 180 (OpenCV's 8-bit hue range).
    x = np.arange(0, 256, dtype=np.int16)
    lut_hue = ((x * r[0]) % 180).astype(dtype)
    lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
    lut_val = np.clip(x * r[2], 0, 255).astype(dtype)

    img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
    cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)  # no return needed

    # Histogram equalization
    # if random.random() < 0.2:
    #     for i in range(3):
    #         img[:, :, i] = cv2.equalizeHist(img[:, :, i])
def load_mosaic(self, index):
    """Combine 4 images into one 2s x 2s mosaic around a random centre and
    merge their labels (pixel xyxy), then apply random_perspective."""
    # loads images in a mosaic
    labels4 = []
    s = self.img_size
    yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border]  # mosaic center x, y
    indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(3)]  # 3 additional image indices
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)

        # place img in img4: each tile is clipped so it fits its quadrant;
        # (a) coords index the big canvas, (b) coords index the source image.
        if i == 0:  # top left
            img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
            x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)
        elif i == 1:  # top right
            x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
            x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
        elif i == 2:  # bottom left
            x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
        elif i == 3:  # bottom right
            x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)

        img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
        padw = x1a - x1b
        padh = y1a - y1b

        # Labels
        x = self.labels[index]
        labels = x.copy()
        if x.size > 0:  # Normalized xywh to pixel xyxy format
            labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
            labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
            labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
            labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
        labels4.append(labels)

    # Concat/clip labels
    if len(labels4):
        labels4 = np.concatenate(labels4, 0)
        np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:])  # use with random_perspective
        # img4, labels4 = replicate(img4, labels4)  # replicate

    # Augment
    img4, labels4 = random_perspective(img4, labels4,
                                       degrees=self.hyp['degrees'],
                                       translate=self.hyp['translate'],
                                       scale=self.hyp['scale'],
                                       shear=self.hyp['shear'],
                                       perspective=self.hyp['perspective'],
                                       border=self.mosaic_border)  # border to remove

    return img4, labels4
def replicate(img, labels):
    """Duplicate the smaller half of the labelled boxes at random positions.

    img: HxWxC array (modified in place); labels: (n, 5) array of
    [cls, x1, y1, x2, y2] in pixel coordinates. Returns (img, labels) with one
    appended label row per copied box.
    """
    img_h, img_w = img.shape[:2]
    boxes = labels[:, 1:].astype(int)
    x1, y1, x2, y2 = boxes.T
    side = ((x2 - x1) + (y2 - y1)) / 2  # mean side length per box (pixels)
    # Replicate the 50% smallest boxes; small objects benefit most.
    for idx in side.argsort()[:round(side.size * 0.5)]:
        bx1, by1, bx2, by2 = boxes[idx]
        box_h, box_w = by2 - by1, bx2 - bx1
        # NOTE: draw order (y first, then x) is kept for RNG reproducibility.
        yc = int(random.uniform(0, img_h - box_h))
        xc = int(random.uniform(0, img_w - box_w))
        dst_x1, dst_y1, dst_x2, dst_y2 = xc, yc, xc + box_w, yc + box_h
        img[dst_y1:dst_y2, dst_x1:dst_x2] = img[by1:by2, bx1:bx2]  # paste the crop
        labels = np.append(labels, [[labels[idx, 0], dst_x1, dst_y1, dst_x2, dst_y2]], axis=0)
    return img, labels
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
    # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
    """Resize *img* to fit *new_shape* while preserving aspect ratio, padding with *color*.

    auto:      pad only up to the nearest 32-px multiple (minimum rectangle).
    scaleFill: stretch to new_shape exactly, no padding (distorts aspect ratio).
    scaleup:   allow upscaling; when False, images are only ever shrunk.
    Returns (img, (w_ratio, h_ratio), (dw, dh)) where dw/dh are per-side paddings.
    """
    shape = img.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)
    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better test mAP)
        r = min(r, 1.0)
    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, 32), np.mod(dh, 32)  # wh padding
    elif scaleFill:  # stretch
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios
    dw /= 2  # divide padding into 2 sides
    dh /= 2
    if shape[::-1] != new_unpad:  # resize
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    # The +/-0.1 rounding splits an odd total padding as (n, n+1).
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return img, ratio, (dw, dh)
def random_perspective(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)):
    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
    # targets = [cls, xyxy]
    """Apply a random perspective/affine transform to *img* and its *targets*.

    The transform composes Center -> Perspective -> Rotation/Scale -> Shear ->
    Translation; negative *border* values crop (used to trim the mosaic border).
    Boxes are warped, re-axis-aligned, clipped, and filtered with box_candidates.
    Returns (img, targets).
    """
    height = img.shape[0] + border[0] * 2  # shape(h,w,c)
    width = img.shape[1] + border[1] * 2

    # Center: move the image center to the origin so rotation/shear are centered
    C = np.eye(3)
    C[0, 2] = -img.shape[1] / 2  # x translation (pixels)
    C[1, 2] = -img.shape[0] / 2  # y translation (pixels)

    # Perspective
    P = np.eye(3)
    P[2, 0] = random.uniform(-perspective, perspective)  # x perspective (about y)
    P[2, 1] = random.uniform(-perspective, perspective)  # y perspective (about x)

    # Rotation and Scale
    R = np.eye(3)
    a = random.uniform(-degrees, degrees)
    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations
    s = random.uniform(1 - scale, 1 + scale)
    # s = 2 ** random.uniform(-scale, scale)
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)

    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)
    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)

    # Translation
    T = np.eye(3)
    T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width  # x translation (pixels)
    T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height  # y translation (pixels)

    # Combined rotation matrix
    M = T @ S @ R @ P @ C  # order of operations (right to left) is IMPORTANT
    if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any():  # image changed
        if perspective:
            img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
        else:  # affine
            img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))

    # Visualize
    # import matplotlib.pyplot as plt
    # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
    # ax[0].imshow(img[:, :, ::-1])  # base
    # ax[1].imshow(img2[:, :, ::-1])  # warped

    # Transform label coordinates
    n = len(targets)
    if n:
        # warp points: all 4 corners of every box, in homogeneous coordinates
        xy = np.ones((n * 4, 3))
        xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2)  # x1y1, x2y2, x1y2, x2y1
        xy = xy @ M.T  # transform
        if perspective:
            xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8)  # rescale
        else:  # affine
            xy = xy[:, :2].reshape(n, 8)

        # create new boxes: axis-aligned hull of the 4 warped corners
        x = xy[:, [0, 2, 4, 6]]
        y = xy[:, [1, 3, 5, 7]]
        xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T

        # # apply angle-based reduction of bounding boxes
        # radians = a * math.pi / 180
        # reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
        # x = (xy[:, 2] + xy[:, 0]) / 2
        # y = (xy[:, 3] + xy[:, 1]) / 2
        # w = (xy[:, 2] - xy[:, 0]) * reduction
        # h = (xy[:, 3] - xy[:, 1]) * reduction
        # xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T

        # clip boxes
        xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
        xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)

        # filter candidates (pre-warp boxes are scaled by s for a fair area comparison)
        i = box_candidates(box1=targets[:, 1:5].T * s, box2=xy.T)
        targets = targets[i]
        targets[:, 1:5] = xy[i]

    return img, targets
def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1):  # box1(4,n), box2(4,n)
    """Return a boolean mask over boxes that survive augmentation.

    box1: (4, n) boxes before augmentation; box2: (4, n) boxes after; both x1y1x2y2.
    A candidate must be wider and taller than wh_thr pixels, keep more than
    area_thr of its original area, and have an aspect ratio below ar_thr.
    """
    eps = 1e-16  # guards divisions against zero-sized boxes
    before_w = box1[2] - box1[0]
    before_h = box1[3] - box1[1]
    after_w = box2[2] - box2[0]
    after_h = box2[3] - box2[1]
    aspect = np.maximum(after_w / (after_h + eps), after_h / (after_w + eps))
    big_enough = (after_w > wh_thr) & (after_h > wh_thr)
    area_kept = after_w * after_h / (before_w * before_h + eps) > area_thr
    return big_enough & area_kept & (aspect < ar_thr)
def cutout(image, labels):
    # Applies image cutout augmentation https://arxiv.org/abs/1708.04552
    """Paint random rectangles over *image* in place and prune obscured labels.

    image: HxWxC array; labels: (n, 5) rows of [cls, x1, y1, x2, y2] in pixels.
    Returns the labels that are less than 60% covered by any sufficiently
    large mask.
    """
    h, w = image.shape[:2]

    def bbox_ioa(box1, box2):
        # Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
        box2 = box2.transpose()
        # Get the coordinates of bounding boxes
        b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
        b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
        # Intersection area
        inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
                     (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
        # box2 area
        box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
        # Intersection over box2 area
        return inter_area / box2_area

    # create random masks: one large, a few medium, many tiny
    scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16  # image size fraction
    for s in scales:
        mask_h = random.randint(1, int(h * s))
        mask_w = random.randint(1, int(w * s))
        # box (clamped to image bounds)
        xmin = max(0, random.randint(0, w) - mask_w // 2)
        ymin = max(0, random.randint(0, h) - mask_h // 2)
        xmax = min(w, xmin + mask_w)
        ymax = min(h, ymin + mask_h)
        # apply random color mask
        image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
        # return unobscured labels; only masks larger than 3% of the image can prune
        if len(labels) and s > 0.03:
            box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
            ioa = bbox_ioa(box, labels[:, 1:5])  # intersection over area
            labels = labels[ioa < 0.60]  # remove >60% obscured labels
    return labels
def create_folder(path='./new'):
    """Create an empty directory at *path*, deleting any existing one first."""
    if os.path.exists(path):
        shutil.rmtree(path)  # wipe previous contents so the folder starts empty
    os.makedirs(path)
def flatten_recursive(path='../coco128'):
    """Copy every file under *path* into a fresh sibling '<path>_flat' directory.

    Subdirectory structure is discarded; later files with a duplicate basename
    overwrite earlier copies (shutil.copyfile overwrites the destination).
    """
    flat_dir = Path(path + '_flat')
    create_folder(flat_dir)  # start from an empty output folder
    pattern = str(Path(path)) + '/**/*.*'
    for src in tqdm(glob.glob(pattern, recursive=True)):
        shutil.copyfile(src, flat_dir / Path(src).name)
def extract_boxes(path='../coco128/'):  # from utils.datasets import *; extract_boxes('../coco128')
    """Convert a detection dataset into a classification dataset.

    Crops every labelled box out of every image found under *path* and writes
    it to 'path/classifier/<class>/' as a jpg, one directory per class. Any
    existing 'classifier' directory is removed first.
    """
    path = Path(path)  # images dir
    shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None  # remove existing
    files = list(path.rglob('*.*'))
    n = len(files)  # number of files
    for im_file in tqdm(files, total=n):
        if im_file.suffix[1:] in img_formats:
            # image (BGR -> RGB channel flip)
            im = cv2.imread(str(im_file))[..., ::-1]
            h, w = im.shape[:2]
            # labels: one row per object, [cls, x, y, w, h] normalized xywh
            lb_file = Path(img2label_paths([str(im_file)])[0])
            if lb_file.exists():
                with open(lb_file, 'r') as f:
                    lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32)  # labels
                for j, x in enumerate(lb):
                    c = int(x[0])  # class
                    # out_file (renamed from 'f', which shadowed the file handle above)
                    out_file = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg'  # new filename
                    if not out_file.parent.is_dir():
                        out_file.parent.mkdir(parents=True)
                    b = x[1:] * [w, h, w, h]  # box, de-normalized to pixels
                    # b[2:] = b[2:].max()  # rectangle to square
                    b[2:] = b[2:] * 1.2 + 3  # pad
                    # np.int was removed in NumPy 1.24; the builtin int is the supported spelling
                    b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)
                    b[[0, 2]] = np.clip(b[[0, 2]], 0, w)  # clip boxes outside of image
                    b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
                    assert cv2.imwrite(str(out_file), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {out_file}'
def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0)):  # from utils.datasets import *; autosplit('../coco128')
    """Randomly assign every image under *path* to train/val/test split files.

    Writes autosplit_train.txt / autosplit_val.txt / autosplit_test.txt inside
    *path*, each listing the image paths drawn into that split with the
    probabilities given by *weights* (train, val, test).
    """
    root = Path(path)  # images dir
    files = list(root.rglob('*.*'))
    n = len(files)  # number of files
    # One split index (0/1/2) per file, weighted by the requested proportions.
    choices = random.choices([0, 1, 2], weights=weights, k=n)
    split_files = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt']  # 3 txt files
    # Remove any previous split files so we append to fresh ones.
    for name in split_files:
        if (root / name).exists():
            (root / name).unlink()
    for split_idx, img in tqdm(zip(choices, files), total=n):
        if img.suffix[1:] in img_formats:
            with open(root / split_files[split_idx], 'a') as f:
                f.write(str(img) + '\n')  # add image to txt file
|
frontendcomm.py | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
In addition to the remote_call mechanism implemented in CommBase:
- Implements _wait_reply, so blocking calls can be made.
"""
import pickle
import socket
import sys
import threading
import time
from jupyter_client.localinterfaces import localhost
from tornado import ioloop
import zmq
from IPython.core.getipython import get_ipython
from spyder_kernels.comms.commbase import CommBase, CommError
from spyder_kernels.py3compat import TimeoutError, PY2
def get_free_port():
    """Return a TCP port number that is currently free on the local machine."""
    sock = socket.socket()
    # Eight zero bytes = struct linger {onoff=0, linger=0}: lingering disabled,
    # so close() returns immediately with default semantics.
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, b'\0' * 8)
    sock.bind((localhost(), 0))  # port 0 asks the OS for any free port
    free_port = sock.getsockname()[1]
    sock.close()
    return free_port
def frontend_request(blocking=True, timeout=None):
    """Send a request to the frontend.

    When *blocking* is True, waits (up to *timeout*) for the reply and returns
    it. Raises CommError if the frontend comm is not open.
    """
    kernel = get_ipython().kernel
    if not kernel.frontend_comm.is_open():
        raise CommError("Can't make a request to a closed comm")
    # The reply comes from the last frontend to have sent us a message.
    return kernel.frontend_call(
        blocking=blocking,
        broadcast=False,
        timeout=timeout)
class FrontendComm(CommBase):
    """Mixin to implement the spyder_shell_api.

    Owns a dedicated zmq ROUTER socket (plus a polling thread) so comm
    messages can be handled even while the kernel's main thread is busy.
    """

    def __init__(self, kernel):
        """Register the comm target and, when running under IPKernelApp,
        open the side-channel socket and its polling thread."""
        super(FrontendComm, self).__init__()
        # Comms
        self.kernel = kernel
        self.kernel.comm_manager.register_target(
            self._comm_name, self._comm_open)
        self.comm_port = None
        self.register_call_handler('_send_comm_config',
                                   self._send_comm_config)
        # self.kernel.parent is IPKernelApp unless we are in tests
        if self.kernel.parent:
            # Create a new socket
            self.context = zmq.Context()
            self.comm_socket = self.context.socket(zmq.ROUTER)
            self.comm_socket.linger = 1000
            self.comm_port = get_free_port()
            # _bind_socket may pick a different port; keep the real one.
            self.comm_port = self.kernel.parent._bind_socket(
                self.comm_socket, self.comm_port)
            if hasattr(zmq, 'ROUTER_HANDOVER'):
                # Set router-handover to workaround zeromq reconnect problems
                # in certain rare circumstances.
                # See ipython/ipykernel#270 and zeromq/libzmq#2892
                self.comm_socket.router_handover = 1
            self.comm_thread_close = threading.Event()
            self.comm_socket_thread = threading.Thread(target=self.poll_thread)
            self.comm_socket_thread.start()
            # Patch parent.close . This function only exists in Python 3.
            if not PY2:
                parent_close = self.kernel.parent.close

                def close():
                    """Close comm_socket_thread."""
                    self.close_thread()
                    parent_close()
                self.kernel.parent.close = close

    def close_thread(self):
        """Close comm."""
        self.comm_thread_close.set()
        self.comm_socket.close()
        self.context.term()
        self.comm_socket_thread.join()

    def poll_thread(self):
        """Receive messages from comm socket."""
        if not PY2:
            # Create an event loop for the handlers.
            ioloop.IOLoop().initialize()
        while not self.comm_thread_close.is_set():
            self.poll_one()

    def poll_one(self):
        """Receive one message from comm socket."""
        out_stream = None
        if self.kernel.shell_streams:
            # If the message handler needs to send a reply,
            # use the regular shell stream.
            out_stream = self.kernel.shell_streams[0]
        try:
            ident, msg = self.kernel.session.recv(self.comm_socket, 0)
        except zmq.error.ContextTerminated:
            # Socket/context was torn down (close_thread); just stop.
            return
        except Exception:
            self.kernel.log.warning("Invalid Message:", exc_info=True)
            return
        msg_type = msg['header']['msg_type']
        if msg_type == 'shutdown_request':
            self.comm_thread_close.set()
            return
        # Dispatch through the kernel's regular shell handlers.
        handler = self.kernel.shell_handlers.get(msg_type, None)
        if handler is None:
            self.kernel.log.warning("Unknown message type: %r", msg_type)
        else:
            try:
                handler(out_stream, ident, msg)
            except Exception:
                self.kernel.log.error("Exception in message handler:",
                                      exc_info=True)
                return
        sys.stdout.flush()
        sys.stderr.flush()
        # Flush to ensure reply is sent
        if out_stream:
            out_stream.flush(zmq.POLLOUT)

    def remote_call(self, comm_id=None, blocking=False, callback=None,
                    timeout=None):
        """Get a handler for remote calls."""
        return super(FrontendComm, self).remote_call(
            blocking=blocking,
            comm_id=comm_id,
            callback=callback,
            timeout=timeout)

    def wait_until(self, condition, timeout=None):
        """Wait until condition is met. Returns False if timeout."""
        if condition():
            return True
        t_start = time.time()
        while not condition():
            if timeout is not None and time.time() > t_start + timeout:
                return False
            if threading.current_thread() is self.comm_socket_thread:
                # Wait for a reply on the comm channel: keep pumping messages
                # ourselves, since we ARE the polling thread.
                self.poll_one()
            else:
                # Wait 10ms for a reply
                time.sleep(0.01)
        return True

    # --- Private --------
    def _wait_reply(self, call_id, call_name, timeout, retry=True):
        """Wait until the frontend replies to a request."""
        def reply_received():
            """The reply is there!"""
            return call_id in self._reply_inbox
        # NOTE(review): wait_until is called WITHOUT the timeout argument, so
        # it blocks until the reply arrives and can never return False; the
        # retry/TimeoutError path below looks unreachable — confirm intent.
        if not self.wait_until(reply_received):
            if retry:
                self._wait_reply(call_id, call_name, timeout, False)
                return
            raise TimeoutError(
                "Timeout while waiting for '{}' reply.".format(
                    call_name))

    def _comm_open(self, comm, msg):
        """
        A new comm is open!

        Records the opener's comm_id, registers the comm, adopts the
        frontend's pickle protocol and sends back our comm config.
        """
        self.calling_comm_id = comm.comm_id
        self._register_comm(comm)
        self._set_pickle_protocol(msg['content']['data']['pickle_protocol'])
        self._send_comm_config()

    def on_outgoing_call(self, call_dict):
        """A message is about to be sent; attach our side-channel port."""
        call_dict["comm_port"] = self.comm_port
        return super(FrontendComm, self).on_outgoing_call(call_dict)

    def _send_comm_config(self):
        """Send the comm config to the frontend."""
        self.remote_call()._set_comm_port(self.comm_port)
        self.remote_call()._set_pickle_protocol(pickle.HIGHEST_PROTOCOL)

    def _comm_close(self, msg):
        """Close comm."""
        comm_id = msg['content']['comm_id']
        comm = self._comms[comm_id]['comm']
        # Pretend it is already closed to avoid problems when closing
        comm._closed = True
        del self._comms[comm_id]

    def _async_error(self, error_wrapper):
        """
        Send an async error back to the frontend to be displayed.
        """
        self.remote_call()._async_error(error_wrapper)

    def _register_comm(self, comm):
        """
        Remove side effect ipykernel has.
        """
        def handle_msg(msg):
            """Handle a comm_msg message"""
            if comm._msg_callback:
                comm._msg_callback(msg)
        comm.handle_msg = handle_msg
        super(FrontendComm, self)._register_comm(comm)
|
runserver.py | #!/usr/bin/env python
# Copyright 2013 Brett Slatkin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs the dpxdt API server, optionally with local queue workers."""
import logging
import os
import sys
import threading
# Local Libraries
import gflags
FLAGS = gflags.FLAGS
# Local modules
from dpxdt import runworker
from dpxdt import server
# --- Command-line flags for the dpxdt server entry point -------------------
gflags.DEFINE_bool(
    'local_queue_workers', False,
    'When true, run queue worker threads locally in the same process '
    'as the server.')
gflags.DEFINE_bool(
    'reload_code', False,
    'Reload code on every request. Should only be used in local development.')
gflags.DEFINE_bool(
    'ignore_auth', False,
    'Ignore any need for authentication for API and frontend accesses. You '
    'should only do this for local development!')
gflags.DEFINE_integer('port', 5000, 'Port to run the HTTP server on.')
gflags.DEFINE_string('host', '0.0.0.0', 'Host argument for the server.')
def main(argv):
    # Python 2 module: parse flags, configure logging, optionally start local
    # queue workers, then run the (blocking) dpxdt API server.
    try:
        argv = FLAGS(argv)
    except gflags.FlagsError, e:
        print '%s\nUsage: %s ARGS\n%s' % (e, sys.argv[0], FLAGS)
        sys.exit(1)
    logging.basicConfig(
        format='%(levelname)s %(filename)s:%(lineno)s] %(message)s')
    # NOTE(review): FLAGS.verbose and FLAGS.verbose_queries are not defined in
    # this module; presumably registered by an imported module — confirm.
    if FLAGS.verbose:
        logging.getLogger().setLevel(logging.DEBUG)
    else:
        logging.getLogger().setLevel(logging.INFO)
    if FLAGS.verbose_queries:
        logging.getLogger('sqlalchemy.engine').setLevel(logging.DEBUG)
    if FLAGS.local_queue_workers:
        coordinator = runworker.run_workers()

        # If the babysitter thread dies, the whole process goes down.
        def worker_babysitter():
            try:
                coordinator.wait_one()
            finally:
                os._exit(1)
        babysitter_thread = threading.Thread(target=worker_babysitter)
        babysitter_thread.setDaemon(True)
        babysitter_thread.start()
    if FLAGS.ignore_auth:
        server.app.config['IGNORE_AUTH'] = True
    server.app.run(debug=FLAGS.reload_code, host=FLAGS.host, port=FLAGS.port)


if __name__ == '__main__':
    main(sys.argv)
|
crawler.py | import sys
import socket
import time
import multiprocessing
def sendmsg(msgstr, client):
    """Frame *msgstr* and send it over *client* (a connected socket-like object).

    Wire layout: 4-byte little-endian length (payload length + 8) repeated
    twice, then the 4-byte message-type code 689, then the UTF-8 payload.
    """
    payload = msgstr.encode("utf-8")
    frame_len = (len(payload) + 8).to_bytes(4, 'little')
    msg_type = (689).to_bytes(4, 'little')  # protocol message-type code
    client.send(frame_len + frame_len + msg_type)
    # send() may transmit only part of the buffer; loop until all bytes go out.
    remaining = payload
    while remaining:
        n_sent = client.send(remaining)
        remaining = remaining[n_sent:]
def start(roomid, client):
    """Log into room *roomid* over *client* and print chat frames forever.

    Each frame: a 12-byte header (two copies of the little-endian payload
    length + 8, then a 4-byte type field) followed by the payload.
    """
    sendmsg('type@=loginreq/username@=wnjstudio/password@=douyu/roomid@={}/\0'.format(roomid),client)
    sendmsg('type@=joingroup/rid@={}/gid@=-9999/\0'.format(roomid),client)
    print('---------------connected to 4340108---------------')
    while True:
        data = client.recv(12)
        if not data:  # connection closed by the peer
            break
        length = int.from_bytes(data[:4],'little')-8  # payload length
        # l2 = int.from_bytes(data[4:8],'little')
        # if length!=l2:
        #     continue
        message = client.recv(length)
        if not message:
            break
        # recv() may return fewer bytes than requested; if the frame is not
        # NUL-terminated yet, read once more for the remainder.
        # NOTE(review): one extra recv() still does not guarantee the full
        # frame arrived — confirm whether further short reads are possible.
        if not message.endswith(b'\x00'):
            leftlen = length-len(message)
            message += client.recv(leftlen)
        print(message.decode('utf-8','ignore'))
        # print(message)
        sys.stdout.flush()
def keeplive(client):
    """Send a keepalive frame (with the current unix timestamp) every 30s, forever."""
    while True:
        sendmsg('type@=keeplive/tick@='+str(int(time.time()))+'/\0',client)
        time.sleep(30)
if __name__ == '__main__':
    # Resolve the barrage server and open one TCP connection; room id comes
    # from the first command-line argument.
    client = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
    host = socket.gethostbyname("openbarrage.douyutv.com")
    port = 8601
    client.connect((host,port))
    # NOTE(review): the connected socket is inherited by two child processes
    # (reader + keepalive writer) which use it concurrently — confirm the
    # server tolerates interleaved sends from both.
    pget = multiprocessing.Process(target=start,args=(sys.argv[1],client,))
    plive = multiprocessing.Process(target=keeplive,args=(client,))
    pget.start()
    plive.start()
|
preview.py | from io import BytesIO
import picamera
import threading
from PIL import Image, ImageDraw
from time import sleep
from display import show
class Preview:
    """Live camera preview on a small screen with on-demand still capture.

    A background thread continuously captures low-resolution frames and shows
    them via display.show(). Calling snap(path) makes the next loop iteration
    take a full-resolution still, save it to *path*, and display a bordered
    review image for *review_time* seconds.
    """

    # Class-level defaults; the mutable/interesting ones are replaced per
    # instance in __init__.
    running = False
    camera_preview_thread = None
    camera = None
    lock = None
    snap_path = None
    resolution = (480, 320)
    screen = (480, 320)
    review_time = 3

    def __init__(self, resolution, review_time=3, camera=None):
        """resolution: (w, h) for full stills; review_time: seconds a snap is shown;
        camera: optional pre-configured PiCamera (one is created otherwise)."""
        if camera is None:
            camera = picamera.PiCamera()
            camera.start_preview()
        self.resolution = resolution
        self.review_time = review_time
        self.lock = threading.Lock()
        self.camera = camera

    def camera_preview(self):
        """Preview loop run on the background thread; exits when running is False."""
        # (A dead local `show_cmd` ffmpeg command string was removed here; it
        # was never used — frames go through display.show() instead.)
        stream = BytesIO()
        while self.running:
            # Atomically take (and clear) any pending snapshot request.
            with self.lock:
                path = self.snap_path
                self.snap_path = None
            if path is None:
                # Plain preview frame at screen resolution.
                self.camera.resolution = (480, 320)
                self.camera.capture(stream, format='png')
                stream.truncate()
                show(stream)
            else:
                # Full-resolution still: write it to disk, then show a
                # white-bordered review image for review_time seconds.
                self.camera.resolution = self.resolution
                self.camera.capture(stream, format='jpeg')
                stream.seek(0)
                with open(path, 'wb') as f:
                    f.write(stream.getbuffer())
                stream.seek(0)
                image = Image.open(stream)
                image = image.resize((480, 320), Image.NEAREST)
                draw = ImageDraw.Draw(image)
                draw.rectangle((0, 0, image.size[0] - 1, image.size[1] - 1), outline=(255, 255, 255))
                show(image)
                sleep(self.review_time)

    def start(self):
        """Start the preview thread (daemon, so it never blocks interpreter exit)."""
        self.running = True
        self.camera_preview_thread = threading.Thread(target=self.camera_preview)
        self.camera_preview_thread.daemon = True
        self.camera_preview_thread.start()

    def stop(self):
        """Stop the preview loop and wait for the thread to finish."""
        self.running = False
        self.camera_preview_thread.join()

    def snap(self, path):
        """Request a full-resolution capture saved to *path* on the next loop pass."""
        with self.lock:
            self.snap_path = path
|
multiprocessing3_queue.py | # View more python learning tutorial on my Youtube and Youku channel!!!
# Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg
# Youku video tutorial: http://i.youku.com/pythontutorial
import multiprocessing as mp
def job(q):
    """Compute sum(i + i^2 + i^3) for i in [0, 1000) and put the result on *q*.

    *q* only needs a .put() method (e.g. a multiprocessing.Queue), which
    carries the result back to the parent process.
    """
    total = sum(i + i ** 2 + i ** 3 for i in range(1000))
    q.put(total)  # queue
if __name__ == '__main__':
    # The queue carries each worker's result back to the parent; it must be
    # passed explicitly into every Process.
    q = mp.Queue()
    p1 = mp.Process(target=job, args=(q,))
    p2 = mp.Process(target=job, args=(q,))
    p1.start()
    p2.start()
    p1.join()
    p2.join()
    # One result per worker; order of the two get() calls does not matter
    # since both workers compute the same value.
    res1 = q.get()
    res2 = q.get()
    print(res1 + res2)
|
run_demos.py | import importlib
import json
import os
import subprocess
import sys
import threading
import time
import requests
LAUNCH_PERIOD = 60  # seconds a demo may stay down before it is considered failed
GRADIO_DEMO_DIR = "../../demo"
sys.path.insert(0, GRADIO_DEMO_DIR)
# demos.json holds (demo_name, port) pairs describing which port each demo runs on.
with open("demos.json") as demos_file:
    demo_port_sets = json.load(demos_file)
def launch_demo(demo_folder):
    """Run the demo's run.py in *demo_folder*, blocking until it exits.

    NOTE(review): shell=True with an interpolated path is fine for trusted,
    repo-local folder names, but must never receive untrusted input.
    """
    subprocess.call(f"cd {demo_folder} && python run.py", shell=True)
# Patch each demo's run.py so it launches on its assigned port, then start it
# on a background thread.
for demo_name, port in demo_port_sets:
    demo_folder = os.path.join(GRADIO_DEMO_DIR, demo_name)
    demo_file = os.path.join(demo_folder, "run.py")
    with open(demo_file, "r") as file:
        filedata = file.read()
    filedata = filedata.replace(f"iface.launch()", f"iface.launch(server_port={port})")
    with open(demo_file, "w") as file:
        file.write(filedata)
    demo_thread = threading.Thread(target=launch_demo, args=(demo_folder,))
    demo_thread.start()

start_time = time.time()
# Health-check loop: poll each demo forever. A down demo is tolerated during
# the first LAUNCH_PERIOD seconds (still starting up); afterwards it is fatal.
while True:
    for demo_name, _ in demo_port_sets:
        r = requests.head(f"http://localhost:80/demo/{demo_name}/")
        if r.status_code != 200:
            print(demo_name, "down")
            if time.time() - start_time > LAUNCH_PERIOD:
                raise ValueError(f"{demo_name} is down.")
        else:
            print(demo_name, "up")
    time.sleep(10)
|
cell_continuous_handler.py | from .cell_clear_selection import CellClearSelection
from ipme.utils.constants import COLORS, BORDER_COLORS, PLOT_HEIGHT, PLOT_WIDTH, SIZING_MODE, RUG_DIST_RATIO, RUG_SIZE
from ipme.utils.stats import kde
from ipme.utils.functions import find_inds_before_after, find_indices
from bokeh.models import BoxSelectTool, HoverTool
from bokeh.models import ColumnDataSource
from bokeh import events
from bokeh.plotting import figure
import numpy as np
import threading
from functools import partial
class CellContinuousHandler:
    """Static helpers that build and update the bokeh figure and
    ColumnDataSource objects of a continuous-variable cell, for both the
    interactive and the static display modes."""

    def __init__(self):
        pass

    @staticmethod
    def initialize_glyphs_interactive(variableCell, space):
        """Draw kde/reconstructed/selection lines and sample rugs; wire up
        BoxSelect and hover tools."""
        so = variableCell.plot[space].line('x', 'y', line_color = COLORS[0], line_width = 2, source = variableCell.source[space])
        re = variableCell.plot[space].line('x', 'y', line_color = COLORS[1], line_width = 2, source = variableCell.reconstructed[space])
        variableCell.plot[space].line('x', 'y', line_color = COLORS[2], line_width = 2, source = variableCell.selection[space])
        # Dash glyphs render the per-sample rug marks below the kde curve.
        variableCell.plot[space].dash('x', 'y', size='size', angle = 90.0, angle_units = 'deg', line_color = COLORS[0], source = variableCell.samples[space])
        variableCell.plot[space].dash('x', 'y', size='size', angle = 90.0, angle_units = 'deg', line_color = COLORS[1], source = variableCell.sel_samples[space])
        ##Add BoxSelectTool (horizontal-only selection on the kde line)
        variableCell.plot[space].add_tools(BoxSelectTool(dimensions = 'width', renderers = [so]))
        ##Tooltips
        TOOLTIPS = [("x", "@x"), ("y","@y"),]
        hover = HoverTool( tooltips = TOOLTIPS, renderers = [so,re], mode = 'mouse')
        variableCell.plot[space].tools.append(hover)

    @staticmethod
    def initialize_glyphs_static(variableCell, space):
        """Draw only the kde line and its sample rug (no selection glyphs), with hover."""
        so = variableCell.plot[space].line('x', 'y', line_color = COLORS[0], line_width = 2, source = variableCell.source[space])
        variableCell.plot[space].dash('x', 'y', size='size', angle = 90.0, angle_units = 'deg', line_color = COLORS[0], source = variableCell.samples[space])
        ##Tooltips
        TOOLTIPS = [("x", "@x"), ("y","@y"),]
        hover = HoverTool( tooltips = TOOLTIPS, renderers = [so], mode = 'mouse')
        variableCell.plot[space].tools.append(hover)
    @staticmethod
    def initialize_fig(variableCell, space):
        """Create and style the base bokeh figure for *space* (shared by both modes)."""
        variableCell.plot[space] = figure( x_range = variableCell.x_range[space], tools = "wheel_zoom,reset,box_zoom", toolbar_location = 'right',
                                           plot_width = PLOT_WIDTH, plot_height = PLOT_HEIGHT, sizing_mode = SIZING_MODE)
        variableCell.plot[space].border_fill_color = BORDER_COLORS[0]
        variableCell.plot[space].xaxis.axis_label = ""
        variableCell.plot[space].yaxis.visible = False
        variableCell.plot[space].toolbar.logo = None
        variableCell.plot[space].xaxis[0].ticker.desired_num_ticks = 3

    @staticmethod
    def initialize_fig_interactive(variableCell, space):
        """Base figure plus selection events and the sample-indices callback."""
        CellContinuousHandler.initialize_fig(variableCell, space)
        ##Events: tap clears the selection, box geometry creates one
        variableCell.plot[space].on_event(events.Tap, partial(CellContinuousHandler.clear_selection_callback, variableCell, space))
        variableCell.plot[space].on_event(events.SelectionGeometry, partial(CellContinuousHandler.selectionbox_callback, variableCell, space))
        ##on_change
        variableCell.ic.sample_inds_update[space].on_change('data', partial(variableCell.sample_inds_callback, space))

    @staticmethod
    def initialize_fig_static(variableCell, space):
        """Base figure plus only the sample-indices callback (no selection UI)."""
        CellContinuousHandler.initialize_fig(variableCell, space)
        ##on_change
        variableCell.ic.sample_inds_update[space].on_change('data', partial(variableCell.sample_inds_callback, space))
    @staticmethod
    def initialize_cds(variableCell, space):
        """Create the kde source and the sample-rug ColumnDataSources."""
        samples = variableCell.get_data_for_cur_idx_dims_values(space)
        variableCell.source[space] = ColumnDataSource(data = kde(samples))
        max_v = variableCell.source[space].data['y'].max()
        # Rug marks sit just below y=0, offset proportionally to the kde peak.
        variableCell.samples[space] = ColumnDataSource(data = dict( x = samples, y = np.asarray([-max_v/RUG_DIST_RATIO]*len(samples)), size = np.asarray([RUG_SIZE]*len(samples))))

    @staticmethod
    def initialize_cds_interactive(variableCell, space):
        """Base cds plus empty selection/reconstruction sources and a shared
        per-(space, variable) x-range source."""
        CellContinuousHandler.initialize_cds(variableCell, space)
        variableCell.sel_samples[space] = ColumnDataSource(data = dict(x = np.array([]), y = np.array([]), size = np.array([])))
        variableCell.selection[space] = ColumnDataSource(data = dict(x = np.array([]), y = np.array([])))
        variableCell.reconstructed[space] = ColumnDataSource(data = dict(x = np.array([]), y = np.array([])))
        variableCell.clear_selection[space] = ColumnDataSource(data = dict(x = [], y = [], isIn = []))
        variableCell.ic.var_x_range[(space, variableCell.name)] = ColumnDataSource(data = dict(xmin = np.array([]), xmax = np.array([])))

    @staticmethod
    def initialize_cds_static(variableCell, space):
        """Static mode needs only the base kde/rug sources."""
        CellContinuousHandler.initialize_cds(variableCell, space)
    #########TEST###########
    @staticmethod
    def update_cds_interactive(variableCell, space):
        """
        Updates interaction-related ColumnDataSources (cds).
        """
        sel_var_idx_dims_values = variableCell.ic.get_sel_var_idx_dims_values()
        sel_space = variableCell.ic.get_sel_space()
        var_x_range = variableCell.ic.get_var_x_range()
        global_update = variableCell.ic.get_global_update()
        if global_update:
            # Redraw this cell's selection only when the global selection
            # belongs to this variable, space, and index-dimension values.
            if variableCell.name in sel_var_idx_dims_values and space == sel_space and variableCell.cur_idx_dims_values == sel_var_idx_dims_values[variableCell.name]:
                variableCell.update_selection_cds(space, var_x_range[(space, variableCell.name)].data['xmin'][0], var_x_range[(space, variableCell.name)].data['xmax'][0])
            else:
                variableCell.selection[space].data = dict( x = np.array([]), y = np.array([]))
        variableCell.update_reconstructed_cds(space)
        CellClearSelection.update_clear_selection_cds(variableCell, space)

    @staticmethod
    def update_cds_static(variableCell, space):
        """
        Update source & samples cds in the static mode
        """
        samples = variableCell.get_data_for_cur_idx_dims_values(space)
        inds = variableCell.ic.get_sample_inds(space)
        if len(inds):
            # Restrict the kde and rug to the currently-selected sample subset.
            sel_sample = samples[inds]
            variableCell.source[space].data = kde(sel_sample)
            max_v = variableCell.get_max_prob(space)
            variableCell.samples[space].data = dict( x = sel_sample, y = np.asarray([-max_v/RUG_DIST_RATIO]*len(sel_sample)), size = np.asarray([RUG_SIZE]*len(sel_sample)))
        else:
            # No selection: show the full sample set.
            variableCell.source[space].data = kde(samples)
            max_v = variableCell.get_max_prob(space)
            variableCell.samples[space].data = dict( x = samples, y = np.asarray([-max_v/RUG_DIST_RATIO]*len(samples)), size = np.asarray([RUG_SIZE]*len(samples)))
    ## ONLY FOR INTERACTIVE CASE
    @staticmethod
    def selectionbox_callback(variableCell, space, event):
        """
        Callback called when selection box is drawn.
        """
        xmin = event.geometry['x0']
        xmax = event.geometry['x1']
        variableCell.ic.increase_selection_interactions()
        variableCell.ic.set_selection(variableCell.name, space, (xmin, xmax), variableCell.cur_idx_dims_values)
        # Propagate the new selection to every space, one worker thread each.
        for sp in variableCell.spaces:
            samples = variableCell.samples[sp].data['x']
            variableCell.ic.add_space_threads(threading.Thread(target = partial(CellContinuousHandler._selectionbox_space_thread, variableCell, sp, samples, xmin, xmax), daemon = True))
            # CellContinuousHandler._selectionbox_space_thread(variableCell, sp, samples, xmin, xmax)
        variableCell.ic.space_threads_join()

    @staticmethod
    def _selectionbox_space_thread(variableCell, space, samples, xmin, xmax):
        """Worker for one space: redraw the selection overlay and recompute
        the indices of samples falling inside [xmin, xmax]."""
        x_range = variableCell.ic.get_var_x_range(space, variableCell.name)
        xmin_list = x_range['xmin']
        xmax_list = x_range['xmax']
        if len(xmin_list):
            variableCell.update_selection_cds(space, xmin_list[0], xmax_list[0])
        else:
            variableCell.selection[space].data = dict(x = np.array([]), y = np.array([]))
        inds = find_indices(samples, lambda e: xmin <= e <= xmax, xmin, xmax)
        variableCell.ic.set_sel_var_inds(space, variableCell.name, inds)
        variableCell.compute_intersection_of_samples(space)
        variableCell.ic.selection_threads_join(space)
    @staticmethod
    def update_source_cds_interactive(variableCell, space):
        """
        Updates source ColumnDataSource (cds).
        """
        samples = variableCell.get_data_for_cur_idx_dims_values(space)
        variableCell.source[space].data = kde(samples)
        max_v = variableCell.get_max_prob(space)
        variableCell.samples[space].data = dict( x = samples, y = np.asarray([-max_v/RUG_DIST_RATIO]*len(samples)), size = np.asarray([RUG_SIZE]*len(samples)))

    @staticmethod
    def update_selection_cds_interactive(variableCell, space, xmin, xmax):
        """
        Updates selection ColumnDataSource (cds).

        Builds the highlighted segment of the kde curve over [xmin, xmax],
        interpolating endpoints and closing the polygon on the x-axis.
        """
        # Get kde points within [xmin,xmax]
        data = {}
        data['x'] = np.array([])
        data['y'] = np.array([])
        kde_indices = find_indices(variableCell.source[space].data['x'], lambda e: xmin <= e <= xmax, xmin, xmax)
        if len(kde_indices) == 0:
            # No kde point falls inside the range: clear the selection glyph.
            variableCell.selection[space].data = dict( x = np.array([]), y = np.array([]))
            return
        data['x'] = variableCell.source[space].data['x'][kde_indices]
        data['y'] = variableCell.source[space].data['y'][kde_indices]
        # Add interpolated points at xmin, xmax
        xmin_inds = find_inds_before_after(variableCell.source[space].data['x'], xmin)
        if -1 not in xmin_inds:
            # Linear interpolation of the kde curve at x = xmin.
            xmin_l = variableCell.source[space].data['x'][xmin_inds[0]]
            xmin_h = variableCell.source[space].data['x'][xmin_inds[1]]
            ymin_l = variableCell.source[space].data['y'][xmin_inds[0]]
            ymin_h = variableCell.source[space].data['y'][xmin_inds[1]]
            ymin = ((ymin_h-ymin_l)/(xmin_h-xmin_l))*(xmin-xmin_l) + ymin_l
            data['x'] = np.insert(data['x'], 0, xmin)
            data['y'] = np.insert(data['y'], 0, ymin)
        xmax_inds = find_inds_before_after(variableCell.source[space].data['x'], xmax)
        if -1 not in xmax_inds:
            # Linear interpolation of the kde curve at x = xmax.
            xmax_l = variableCell.source[space].data['x'][xmax_inds[0]]
            xmax_h = variableCell.source[space].data['x'][xmax_inds[1]]
            ymax_l = variableCell.source[space].data['y'][xmax_inds[0]]
            ymax_h = variableCell.source[space].data['y'][xmax_inds[1]]
            ymax = ((ymax_h-ymax_l)/(xmax_h-xmax_l))*(xmax-xmax_l) + ymax_l
            data['x'] = np.append(data['x'], xmax)
            data['y'] = np.append(data['y'], ymax)
        # Append and prepend zeros so the selection polygon closes on the x-axis.
        data['y'] = np.insert(data['y'], 0, 0)
        data['y'] = np.append(data['y'], 0)
        data['x'] = np.insert(data['x'], 0, data['x'][0])
        data['x'] = np.append(data['x'], data['x'][-1])
        variableCell.selection[space].data = data
@staticmethod
def update_reconstructed_cds_interactive(variableCell, space):
"""
Updates reconstructed ColumnDataSource (cds).
"""
samples = variableCell.samples[space].data['x']
inds = variableCell.ic.get_sample_inds(space)
if len(inds):
sel_sample = samples[inds]
variableCell.reconstructed[space].data = kde(sel_sample)
max_v = variableCell.get_max_prob(space)
variableCell.sel_samples[space].data = dict( x = sel_sample, y = np.asarray([-max_v/RUG_DIST_RATIO]*len(sel_sample)), size = np.asarray([RUG_SIZE]*len(sel_sample)))
else:
variableCell.reconstructed[space].data = dict( x = np.array([]), y = np.array([]))
variableCell.sel_samples[space].data = dict( x = np.array([]), y = np.array([]), size = np.array([]))
max_v = variableCell.get_max_prob(space)
if max_v!=-1:
variableCell.samples[space].data['y'] = np.asarray([-max_v/RUG_DIST_RATIO]*len(variableCell.samples[space].data['x']))
@staticmethod
def clear_selection_callback(variableCell, space, event):
"""
Callback called when clear selection glyph is clicked.
"""
isIn = variableCell.clear_selection[space].data['isIn']
if 1 in isIn:
variableCell.ic.set_var_x_range(space, variableCell.name, dict(xmin = np.array([]), xmax = np.array([])))
variableCell.ic.delete_sel_var_idx_dims_values(variableCell.name)
for sp in variableCell.spaces:
variableCell.ic.add_space_threads(threading.Thread(target = partial(CellContinuousHandler._clear_selection_cds_update, variableCell, sp), daemon = True))
variableCell.ic.space_threads_join()
@staticmethod
def _clear_selection_cds_update(variableCell, space):
x_range = variableCell.ic.get_var_x_range(space, variableCell.name)
xmin_list = x_range['xmin']
xmax_list = x_range['xmax']
if len(xmin_list):
variableCell.update_selection_cds(space, xmin_list[0], xmax_list[0])
else:
variableCell.selection[space].data = dict(x = np.array([]), y = np.array([]))
variableCell.ic.delete_sel_var_inds(space, variableCell.name)
variableCell.compute_intersection_of_samples(space)
variableCell.ic.selection_threads_join(space)
|
utilities.py | #!/bin/env python
# -*- coding: UTF-8 -*-
#
# Disclaimer:
# Functions get_sys_info, netcdf_and_hdf5_versions and show_versions are from:
# xarray/util/print_versions.py
#
import os
import sys
import warnings
import urllib
import json
import collections
import copy
from functools import reduce
from packaging import version
import importlib
import locale
import platform
import struct
import subprocess
import contextlib
import xarray as xr
import pandas as pd
import numpy as np
from scipy import interpolate
import pickle
import pkg_resources
import shutil
import threading
import time
from argopy.options import OPTIONS, set_options
from argopy.stores import httpstore
from argopy.errors import (
FtpPathError,
InvalidFetcher,
InvalidFetcherAccessPoint,
InvalidOption
)
# `collections.abc` exists on Python 3; fall back to `collections` on
# very old interpreters. (This block was previously duplicated verbatim.)
try:
    collectionsAbc = collections.abc
except AttributeError:
    collectionsAbc = collections

# Path to argopy's packaged assets (pickled lookup dictionaries):
path2pkl = pkg_resources.resource_filename("argopy", "assets/")
def clear_cache(fs=None):
    """ Delete argopy cache folder content

    Empties the configured cache directory (files, links and sub-folders)
    while keeping the folder itself, then clears the optional file-system
    instance cache.
    """
    cachedir = OPTIONS["cachedir"]
    if os.path.exists(cachedir):
        # Note: shutil.rmtree(cachedir) would remove the folder itself,
        # so each entry is removed individually instead.
        for entry in os.listdir(cachedir):
            target = os.path.join(cachedir, entry)
            try:
                if os.path.isfile(target) or os.path.islink(target):
                    os.unlink(target)
                elif os.path.isdir(target):
                    shutil.rmtree(target)
            except Exception as e:
                print("Failed to delete %s. Reason: %s" % (target, e))
    if fs:
        fs.clear_cache()
def load_dict(ptype):
    """ Load one of the packaged reference dictionaries.

    `ptype` must be 'profilers' or 'institutions'; anything else raises
    a ValueError.
    """
    pickles = {
        "profilers": "dict_profilers.pickle",
        "institutions": "dict_institutions.pickle",
    }
    if ptype not in pickles:
        raise ValueError("Invalid dictionary pickle file")
    with open(os.path.join(path2pkl, pickles[ptype]), "rb") as f:
        return pickle.load(f)
def mapp_dict(Adictionnary, Avalue):
    """ Return the value mapped to Avalue, or 'Unknown' when the key is absent. """
    return Adictionnary.get(Avalue, "Unknown")
def list_available_data_src():
    """ List all available data sources

    Each fetcher module is imported defensively: a source that fails to
    import is skipped with a warning instead of breaking argopy.
    """
    sources = {}
    # (key, sub-module name, human readable label) for every candidate fetcher:
    candidates = [
        ("erddap", "erddap_data", "ERDDAP data fetcher"),
        ("localftp", "localftp_data", "local FTP data fetcher"),
        ("argovis", "argovis_data", "ArgoVis data fetcher"),
    ]
    for key, modname, label in candidates:
        try:
            sources[key] = importlib.import_module(
                ".data_fetchers.%s" % modname, package="argopy"
            )
        except Exception:
            warnings.warn(
                "An error occurred while loading the %s, "
                "it will not be available !\n%s\n%s"
                % (label, sys.exc_info()[0], sys.exc_info()[1])
            )
    # return dict(sorted(sources.items()))
    return sources
def list_available_index_src():
    """ List all available index sources

    Each index fetcher module is imported defensively: a source that fails
    to import is skipped with a warning instead of breaking argopy.
    """
    AVAILABLE_SOURCES = {}
    # (key, sub-module name, human readable label) for every candidate fetcher:
    candidates = [
        ("erddap", "erddap_index", "ERDDAP index fetcher"),
        ("localftp", "localftp_index", "local FTP index fetcher"),
    ]
    for key, modname, label in candidates:
        try:
            AVAILABLE_SOURCES[key] = importlib.import_module(
                ".data_fetchers.%s" % modname, package="argopy"
            )
        except Exception:
            warnings.warn(
                "An error occurred while loading the %s, "
                "it will not be available !\n%s\n%s"
                % (label, sys.exc_info()[0], sys.exc_info()[1])
            )
    return AVAILABLE_SOURCES
def list_standard_variables():
    """ List of variables for standard users

    The list is the metadata variables, the three measured parameters
    (PRES/TEMP/PSAL) with their QC/adjusted companions, and the time
    variables -- in the conventional argopy order.
    """
    params = ["PRES", "TEMP", "PSAL"]
    variables = [
        "DATA_MODE",
        "LATITUDE",
        "LONGITUDE",
        "POSITION_QC",
        "DIRECTION",
        "PLATFORM_NUMBER",
        "CYCLE_NUMBER",
    ]
    variables += params
    variables += ["%s_QC" % p for p in params]
    variables += ["%s_ADJUSTED" % p for p in params]
    variables += ["%s_ADJUSTED_QC" % p for p in params]
    variables += ["%s_ADJUSTED_ERROR" % p for p in params]
    variables += [
        "JULD",
        "JULD_QC",
        "TIME",
        "TIME_QC",
        "CONFIG_MISSION_NUMBER",
    ]
    return variables
def list_multiprofile_file_variables():
    """ List of variables in a netcdf multiprofile file.

    This is for files created by GDAC under <DAC>/<WMO>/<WMO>_prof.nc

    The names are kept in alphabetical order.
    """
    return """
        CONFIG_MISSION_NUMBER
        CYCLE_NUMBER
        DATA_CENTRE
        DATA_MODE
        DATA_STATE_INDICATOR
        DATA_TYPE
        DATE_CREATION
        DATE_UPDATE
        DC_REFERENCE
        DIRECTION
        FIRMWARE_VERSION
        FLOAT_SERIAL_NO
        FORMAT_VERSION
        HANDBOOK_VERSION
        HISTORY_ACTION
        HISTORY_DATE
        HISTORY_INSTITUTION
        HISTORY_PARAMETER
        HISTORY_PREVIOUS_VALUE
        HISTORY_QCTEST
        HISTORY_REFERENCE
        HISTORY_SOFTWARE
        HISTORY_SOFTWARE_RELEASE
        HISTORY_START_PRES
        HISTORY_STEP
        HISTORY_STOP_PRES
        JULD
        JULD_LOCATION
        JULD_QC
        LATITUDE
        LONGITUDE
        PARAMETER
        PI_NAME
        PLATFORM_NUMBER
        PLATFORM_TYPE
        POSITIONING_SYSTEM
        POSITION_QC
        PRES
        PRES_ADJUSTED
        PRES_ADJUSTED_ERROR
        PRES_ADJUSTED_QC
        PRES_QC
        PROFILE_PRES_QC
        PROFILE_PSAL_QC
        PROFILE_TEMP_QC
        PROJECT_NAME
        PSAL
        PSAL_ADJUSTED
        PSAL_ADJUSTED_ERROR
        PSAL_ADJUSTED_QC
        PSAL_QC
        REFERENCE_DATE_TIME
        SCIENTIFIC_CALIB_COEFFICIENT
        SCIENTIFIC_CALIB_COMMENT
        SCIENTIFIC_CALIB_DATE
        SCIENTIFIC_CALIB_EQUATION
        STATION_PARAMETERS
        TEMP
        TEMP_ADJUSTED
        TEMP_ADJUSTED_ERROR
        TEMP_ADJUSTED_QC
        TEMP_QC
        VERTICAL_SAMPLING_SCHEME
        WMO_INST_TYPE
    """.split()
def check_localftp(path, errors: str = "ignore"):
    """ Check if the path has the expected GDAC ftp structure

    Check if the path is structured like:
    .
    └── dac
        ├── aoml
        ├── ...
        ├── coriolis
        ├── ...
        ├── meds
        └── nmdis

    Parameters
    ----------
    path: str
        Path name to check
    errors: str
        "ignore" (default), "raise" or "warn"

    Returns
    -------
    checked: boolean
        True if at least one DAC folder is found under path/dac/<dac_name>
        False otherwise

    Raises
    ------
    FtpPathError
        When ``errors='raise'`` and the path is not GDAC compliant.
    """
    dacs = [
        "aoml",
        "bodc",
        "coriolis",
        "csio",
        "csiro",
        "incois",
        "jma",
        "kma",
        "kordi",
        "meds",
        "nmdis",
    ]

    def _has_dac_folder(root):
        # True when `root` exists and contains at least one known DAC folder
        return os.path.isdir(root) and np.any(
            [os.path.isdir(os.path.join(root, dac)) for dac in dacs]
        )

    # Case 1: GDAC compliant structure <path>/dac/<dac_name>
    if os.path.isdir(path) and _has_dac_folder(os.path.join(path, "dac")):
        return True

    if errors not in ("raise", "warn"):
        return False

    # Legacy structure <path>/<dac_name> (no 'dac' folder),
    # which was possible up to v0.1.3:
    legacy = _has_dac_folder(path)
    if errors == "raise":
        if legacy:
            raise FtpPathError(
                "This path is no longer GDAC compliant for argopy.\n"
                "Please make sure you point toward a path with a 'dac' folder:\n%s"
                % path
            )
        raise FtpPathError("This path is not GDAC compliant:\n%s" % path)

    # errors == "warn": report the problem but fail softly
    if legacy:
        warnings.warn(
            "This path is no longer GDAC compliant for argopy. This will raise an error in the future.\n"
            "Please make sure you point toward a path with a 'dac' folder:\n%s"
            % path
        )
    else:
        warnings.warn("This path is not GDAC compliant:\n%s" % path)
    return False
def get_sys_info():
    "Returns system information as a list of (key, value) tuples"
    blob = []
    # get full commit hash
    # Only attempted when run from a source checkout (cwd holds .git/ and argopy/)
    commit = None
    if os.path.isdir(".git") and os.path.isdir("argopy"):
        try:
            pipe = subprocess.Popen(
                'git log --format="%H" -n 1'.split(" "),
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
            so, serr = pipe.communicate()
        except Exception:
            # git not available or not runnable: leave commit as None
            pass
        else:
            if pipe.returncode == 0:
                commit = so
                try:
                    # UnicodeDecodeError is a subclass of ValueError
                    commit = so.decode("utf-8")
                except ValueError:
                    pass
                commit = commit.strip().strip('"')
    blob.append(("commit", commit))
    try:
        (sysname, nodename, release, version_, machine, processor) = platform.uname()
        blob.extend(
            [
                ("python", sys.version),
                ("python-bits", struct.calcsize("P") * 8),
                ("OS", "%s" % (sysname)),
                ("OS-release", "%s" % (release)),
                ("machine", "%s" % (machine)),
                ("processor", "%s" % (processor)),
                ("byteorder", "%s" % sys.byteorder),
                ("LC_ALL", "%s" % os.environ.get("LC_ALL", "None")),
                ("LANG", "%s" % os.environ.get("LANG", "None")),
                ("LOCALE", "%s.%s" % locale.getlocale()),
            ]
        )
    except Exception:
        # Best effort: platform/locale lookups may fail on exotic systems
        pass
    return blob
def netcdf_and_hdf5_versions():
    """ Return [('libhdf5', version), ('libnetcdf', version)].

    Versions are read from netCDF4 when available, falling back to h5py for
    libhdf5 only; missing libraries are reported as None.
    """
    hdf5_ver = None
    netcdf_ver = None
    try:
        import netCDF4
        hdf5_ver = netCDF4.__hdf5libversion__
        netcdf_ver = netCDF4.__netcdf4libversion__
    except ImportError:
        try:
            import h5py
            hdf5_ver = h5py.version.hdf5_version
        except ImportError:
            pass
    return [("libhdf5", hdf5_ver), ("libnetcdf", netcdf_ver)]
def show_versions(file=sys.stdout):  # noqa: C901
    """ Print the versions of argopy and its dependencies

    Parameters
    ----------
    file : file-like, optional
        print to the given file-like object. Defaults to sys.stdout.
    """
    sys_info = get_sys_info()
    try:
        sys_info.extend(netcdf_and_hdf5_versions())
    except Exception as e:
        print(f"Error collecting netcdf / hdf5 version: {e}")

    # Every dependency is reported through its `__version__` attribute:
    modnames = [
        # In REQUIREMENTS:
        "argopy",
        "xarray",
        "scipy",
        "sklearn",
        "netCDF4",
        "dask",
        "toolz",
        "erddapy",
        "fsspec",
        "gsw",
        "aiohttp",
        # Optional:
        "bottleneck",
        "cartopy",
        "cftime",
        "conda",
        "distributed",
        "IPython",
        "iris",
        "matplotlib",
        "nc_time_axis",
        "numpy",
        "pandas",
        "packaging",
        "pip",
        "PseudoNetCDF",
        "pytest",
        "seaborn",
        "setuptools",
        "sphinx",
        "zarr",
        "tqdm",
        "ipykernel",
        "ipywidgets",
    ]
    deps = [(m, lambda mod: mod.__version__) for m in modnames]

    deps_blob = list()
    for modname, ver_f in deps:
        try:
            # Reuse an already-imported module when possible:
            if modname in sys.modules:
                mod = sys.modules[modname]
            else:
                mod = importlib.import_module(modname)
        except Exception:
            deps_blob.append((modname, None))
        else:
            try:
                deps_blob.append((modname, ver_f(mod)))
            except Exception:
                # Module present but exposes no version attribute
                deps_blob.append((modname, "installed"))

    print("\nINSTALLED VERSIONS", file=file)
    print("------------------", file=file)
    for k, stat in sys_info:
        print(f"{k}: {stat}", file=file)
    print("", file=file)
    for k, stat in deps_blob:
        print(f"{k}: {stat}", file=file)
def show_options(file=sys.stdout):  # noqa: C901
    """ Print options of argopy

    Parameters
    ----------
    file : file-like, optional
        print to the given file-like object. Defaults to sys.stdout.
    """
    print("\nARGOPY OPTIONS", file=file)
    print("--------------", file=file)
    # Deep-copied so printing can never touch the live options mapping
    for key, val in sorted(copy.deepcopy(OPTIONS).items()):
        print(f"{key}: {val}", file=file)
def isconnected(host="https://www.ifremer.fr"):
    """ check if we have a live internet connection

    Parameters
    ----------
    host: str
        URL to use, 'https://www.ifremer.fr' by default. A plain path (no
        'http'/'ftp' in it) is checked for existence on the local filesystem
        instead.

    Returns
    -------
    bool
    """
    if "http" in host or "ftp" in host:
        # `import urllib` alone does not bind the `request` submodule; import
        # it explicitly instead of relying on a transitive import elsewhere.
        import urllib.request
        try:
            # Close the response to avoid leaking the connection
            with urllib.request.urlopen(host, timeout=1):  # Python 3.x
                pass
            return True
        except Exception:
            return False
    else:
        return os.path.exists(host)
def isAPIconnected(src="erddap", data=True):
    """ Check if a source API is alive or not

    The API is connected when it has a live URL or valid folder path.

    Parameters
    ----------
    src: str
        The data or index source name, 'erddap' default
    data: bool
        If True check the data fetcher (default), if False, check the index fetcher

    Returns
    -------
    bool
    """
    list_src = list_available_data_src() if data else list_available_index_src()
    if src not in list_src or not getattr(list_src[src], "api_server_check", None):
        raise InvalidFetcher
    if "localftp" in src:
        # This is a special case because the source here is a local folder
        return check_localftp(OPTIONS["local_ftp"])
    return isconnected(list_src[src].api_server_check)
def erddap_ds_exists(ds: str = "ArgoFloats", erddap: str = 'https://www.ifremer.fr/erddap') -> bool:
    """ Check if a dataset exists on a remote erddap server

    Parameters
    ----------
    ds: str
        Name of the erddap dataset to check (default: 'ArgoFloats')
    erddap: str
        Url of the erddap server (default: 'https://www.ifremer.fr/erddap')

    Returns
    -------
    bool
    """
    uri = erddap + "/info/index.json"
    with httpstore(timeout=OPTIONS['api_timeout']).open(uri) as of:
        catalog = json.load(of)
    # Dataset ids are in the last column of the index table:
    known = [row[-1] for row in catalog["table"]["rows"]]
    return ds in known
def badge(label="label", message="message", color="green", insert=False):
    """ Return or insert shield.io badge image

    Use the shields.io service to create a badge image

    https://img.shields.io/static/v1?label=<LABEL>&message=<MESSAGE>&color=<COLOR>

    Parameters
    ----------
    label: str
        Left side badge text
    message: str
        Right side badge text
    color: str
        Right side background color
    insert: bool
        Return url to badge image (False, default) or directly insert the image with HTML (True)

    Returns
    -------
    str or IPython.display.Image
    """
    # `import urllib` alone does not bind the `parse` submodule; import it
    # explicitly instead of relying on a transitive import elsewhere.
    import urllib.parse

    url = (
        "https://img.shields.io/static/v1?style=flat-square&label={}&message={}&color={}"
    ).format
    img = url(urllib.parse.quote(label), urllib.parse.quote(message), color)
    if not insert:
        return img
    # Import lazily so that returning the bare URL does not require IPython:
    from IPython.display import Image
    return Image(url=img)
def fetch_status(stdout: str = "html", insert: bool = True):
    """ Fetch and report web API status

    Parameters
    ----------
    stdout: str
        Format of the results, default is 'html'. Otherwise a simple string.
    insert: bool
        Print or display results directly in stdout format.

    Returns
    -------
    IPython.display.HTML or str
    """
    # Probe every registered data source and record a boolean + short label:
    results = {}
    list_src = list_available_data_src()
    for api, mod in list_src.items():
        if getattr(mod, "api_server_check", None):
            # status = isconnected(mod.api_server_check)
            status = isAPIconnected(api)
            if api=='localftp' and OPTIONS['local_ftp'] == '-':
                # 'localftp' is configured through an option; '-' presumably
                # means it was never set -- hence the dedicated message
                message = "ok" if status else "path undefined !"
            else:
                # message = "up" if status else "down"
                message = "ok" if status else "offline"
            results[api] = {"value": status, "message": message}
    if "IPython" in sys.modules and stdout == "html":
        # Rich output: one badge image (or colored text, when the shields.io
        # service cannot be reached) per source
        cols = []
        for api in sorted(results.keys()):
            color = "green" if results[api]["value"] else "orange"
            if isconnected():
                # img = badge("src='%s'" % api, message=results[api]['message'], color=color, insert=False)
                # img = badge(label="argopy src", message="%s is %s" %
                # (api, results[api]['message']), color=color, insert=False)
                img = badge(
                    label="src %s is" % api,
                    message="%s" % results[api]["message"],
                    color=color,
                    insert=False,
                )
                html = ('<td><img src="{}"></td>').format(img)
            else:
                # html = "<th>src %s is:</th><td>%s</td>" % (api, results[api]['message'])
                html = (
                    "<th><div>src %s is:</div></th><td><div style='color:%s;'>%s</div></td>"
                    % (api, color, results[api]["message"])
                )
            cols.append(html)
        this_HTML = ("<table><tr>{}</tr></table>").format("".join(cols))
        if insert:
            from IPython.display import HTML, display
            return display(HTML(this_HTML))
        else:
            return this_HTML
    else:
        # Plain-text fallback (no IPython, or stdout != 'html')
        rows = []
        for api in sorted(results.keys()):
            # rows.append("argopy src %s: %s" % (api, results[api]['message']))
            rows.append("src %s is: %s" % (api, results[api]["message"]))
        txt = "\n".join(rows)
        if insert:
            print(txt)
        else:
            return txt
class monitor_status:
    """ Monitor data source status with a refresh rate

    Displays a self-refreshing ipywidgets HTML widget showing the output of
    :func:`fetch_status`, updated from a background thread.
    """

    def __init__(self, refresh=1):
        """
        Parameters
        ----------
        refresh: int or float
            Refresh period between two status fetches, in seconds.
        """
        import ipywidgets as widgets
        self.refresh_rate = refresh
        self.text = widgets.HTML(
            value=fetch_status(stdout="html", insert=False),
            placeholder="",
            description="",
        )
        self.start()

    def work(self):
        """ Endless refresh loop (runs in the background thread). """
        while True:
            time.sleep(self.refresh_rate)
            self.text.value = fetch_status(stdout="html", insert=False)

    def start(self):
        """ Display the widget and launch the refresh thread. """
        from IPython.display import display
        # daemon=True: the endless refresh loop must never prevent the
        # interpreter from exiting. Keep a reference for inspection/debugging.
        self.thread = threading.Thread(target=self.work, daemon=True)
        display(self.text)
        self.thread.start()
# def open_etopo1(box, res="l"):
# """ Download ETOPO for a box
#
# Parameters
# ----------
# box: [xmin, xmax, ymin, ymax]
#
# Returns
# -------
# xarray.Dataset
# """
# # This function is in utilities to anticipate usage outside of plotting, eg interpolation, grounding detection
# resx, resy = 0.1, 0.1
# if res == "h":
# resx, resy = 0.016, 0.016
#
# uri = (
# "https://gis.ngdc.noaa.gov/mapviewer-support/wcs-proxy/wcs.groovy?filename=etopo1.nc"
# "&request=getcoverage&version=1.0.0&service=wcs&coverage=etopo1&CRS=EPSG:4326&format=netcdf"
# "&resx={}&resy={}"
# "&bbox={}"
# ).format
# thisurl = uri(
# resx, resy, ",".join([str(b) for b in [box[0], box[2], box[1], box[3]]])
# )
# ds = httpstore(cache=True).open_dataset(thisurl)
# da = ds["Band1"].rename("topo")
# for a in ds.attrs:
# da.attrs[a] = ds.attrs[a]
# da.attrs["Data source"] = "https://maps.ngdc.noaa.gov/viewers/wcs-client/"
# da.attrs["URI"] = thisurl
# return da
#
# From xarrayutils : https://github.com/jbusecke/xarrayutils/blob/master/xarrayutils/vertical_coordinates.py
# Direct integration of those 2 functions to minimize dependencies and possibility of tuning them to our needs
#
def linear_interpolation_remap(
    z, data, z_regridded, z_dim=None, z_regridded_dim="regridded", output_dim="remapped"
):
    """ Remap `data` from its native vertical levels `z` onto `z_regridded`.

    Adapted from xarrayutils (see file header note). Applies a per-profile 1D
    linear interpolation through ``xr.apply_ufunc`` (vectorized, dask-aware).
    Dataset input is not supported yet; `data` must be a DataArray.
    """
    # interpolation called in xarray ufunc
    def _regular_interp(x, y, target_values):
        # remove all nans from input x and y
        idx = np.logical_or(np.isnan(x), np.isnan(y))
        x = x[~idx]
        y = y[~idx]
        # Need at least 5 points in the profile to interpolate, otherwise, return NaNs
        if len(y) < 5:
            interpolated = np.empty(len(target_values))
            interpolated[:] = np.nan
        else:
            # replace nans in target_values with out of bound Values (just in case)
            target_values = np.where(
                ~np.isnan(target_values), target_values, np.nanmax(x) + 1
            )
            # Interpolate with fill value parameter to extend min pressure toward 0
            interpolated = interpolate.interp1d(
                x, y, bounds_error=False, fill_value=(y[0], y[-1])
            )(target_values)
        return interpolated
    # infer dim from input
    if z_dim is None:
        if len(z.dims) != 1:
            raise RuntimeError("if z_dim is not specified, x must be a 1D array.")
        dim = z.dims[0]
    else:
        dim = z_dim
    # if dataset is passed drop all data_vars that dont contain dim
    if isinstance(data, xr.Dataset):
        raise ValueError("Dataset input is not supported yet")
        # TODO: for a dataset input just apply the function for each appropriate array
    # Newer xarray moved `output_sizes` into `dask_gufunc_kwargs`
    # NOTE(review): threshold used here is 0.15.0 -- confirm against the
    # xarray release that actually introduced dask_gufunc_kwargs
    if version.parse(xr.__version__) > version.parse("0.15.0"):
        kwargs = dict(
            input_core_dims=[[dim], [dim], [z_regridded_dim]],
            output_core_dims=[[output_dim]],
            vectorize=True,
            dask="parallelized",
            output_dtypes=[data.dtype],
            dask_gufunc_kwargs={'output_sizes': {output_dim: len(z_regridded[z_regridded_dim])}},
        )
    else:
        kwargs = dict(
            input_core_dims=[[dim], [dim], [z_regridded_dim]],
            output_core_dims=[[output_dim]],
            vectorize=True,
            dask="parallelized",
            output_dtypes=[data.dtype],
            output_sizes={output_dim: len(z_regridded[z_regridded_dim])},
        )
    remapped = xr.apply_ufunc(_regular_interp, z, data, z_regridded, **kwargs)
    # Carry the target grid coordinate over to the remapped output dimension:
    remapped.coords[output_dim] = z_regridded.rename(
        {z_regridded_dim: output_dim}
    ).coords[output_dim]
    return remapped
class Chunker:
    """ To chunk fetcher requests

    Splits a single access-point request ('box' or 'wmo') into a list of
    smaller requests, to be fetched one by one.
    """

    # Default maximum chunk size for all possible request parameters
    default_chunksize = {
        "box": {
            "lon": 20,  # degree
            "lat": 20,  # degree
            "dpt": 500,  # meters/db
            "time": 3 * 30,  # days
        },
        "wmo": {"wmo": 5, "cyc": 100},  # nb of floats, nb of cycles
    }

    def __init__(self, request: dict, chunks: str = "auto", chunksize: dict = None):
        """ Create a request Chunker

        Allow to easily split an access point request into chunks

        Parameters
        ----------
        request: dict
            Access point request to be chunked. One of the following:

            - {'box': [lon_min, lon_max, lat_min, lat_max, dpt_min, dpt_max, time_min, time_max]}
            - {'box': [lon_min, lon_max, lat_min, lat_max, dpt_min, dpt_max]}
            - {'wmo': [wmo1, wmo2, ...], 'cyc': [0,1, ...]}
        chunks: 'auto' or dict
            Dictionary with request access point as keys and number of chunks to create as values.

            Eg: {'wmo':10} will create a maximum of 10 chunks along WMOs.
        chunksize: dict, optional
            Dictionary with request access point as keys and chunk size as values (used as maximum values in
            'auto' chunking).

            Eg: {'wmo': 5} will create chunks with as many as 5 WMOs each.

        Raises
        ------
        InvalidFetcherAccessPoint
            If the request holds neither a 'box' nor a 'wmo' access point.
        ValueError
            If `chunksize` is not a mapping, or `chunks` is neither 'auto'
            nor a mapping.
        """
        # `chunksize={}` used to be the default value; use None to avoid a
        # mutable default argument (passing {} still behaves identically).
        if chunksize is None:
            chunksize = {}

        self.request = request

        # Select the chunking strategy from the access point:
        if "box" in self.request:
            is_box(self.request["box"])
            if len(self.request["box"]) == 8:
                self.this_chunker = self._chunker_box4d
            elif len(self.request["box"]) == 6:
                self.this_chunker = self._chunker_box3d
        elif "wmo" in self.request:
            self.this_chunker = self._chunker_wmo
        else:
            raise InvalidFetcherAccessPoint(
                "'%s' not valid access point" % ",".join(self.request.keys())
            )

        # Maximum chunk sizes: user values merged over the defaults
        default = self.default_chunksize[[k for k in self.request.keys()][0]]
        if len(chunksize) == 0:  # chunksize = {}
            chunksize = default
        if not isinstance(chunksize, collections.abc.Mapping):
            raise ValueError("chunksize must be mappable")
        else:  # merge with default:
            chunksize = {**default, **chunksize}
        self.chunksize = collections.OrderedDict(sorted(chunksize.items()))

        # Number of chunks along each request dimension
        # ('auto' = derive from chunksize in the _chunker_* methods):
        default = {k: "auto" for k in self.chunksize.keys()}
        if chunks == "auto":  # auto for all
            chunks = default
        elif len(chunks) == 0:  # chunks = {}, i.e. chunk=1 for all
            chunks = {k: 1 for k in self.request}
        if not isinstance(chunks, collections.abc.Mapping):
            raise ValueError("chunks must be 'auto' or mappable")
        chunks = {**default, **chunks}
        self.chunks = collections.OrderedDict(sorted(chunks.items()))

    def _split(self, lst, n=1):
        """Yield successive n-sized chunks from lst"""
        for i in range(0, len(lst), n):
            yield lst[i: i + n]

    def _split_list_bychunknb(self, lst, n=1):
        """Split list in n-imposed chunks of similar size

        The last chunk may contain more or less element than the others,
        depending on the size of the list.
        """
        res = []
        siz = int(np.floor_divide(len(lst), n))
        for i in self._split(lst, siz):
            res.append(i)
        if len(res) > n:
            # Too many chunks: merge the tail into the last allowed chunk
            res[n - 1::] = [reduce(lambda i, j: i + j, res[n - 1::])]
        return res

    def _split_list_bychunksize(self, lst, max_size=1):
        """Split list in chunks of imposed size

        The last chunk may contain more or less element than the others,
        depending on the size of the list.
        """
        res = []
        for i in self._split(lst, max_size):
            res.append(i)
        return res

    def _split_box(self, large_box, n=1, d="x"):  # noqa: C901
        """Split a box domain in one direction in n-imposed equal chunks """
        # Map the direction to the pair of box indices holding its bounds:
        if d == "x":
            i_left, i_right = 0, 1
        if d == "y":
            i_left, i_right = 2, 3
        if d == "z":
            i_left, i_right = 4, 5
        if d == "t":
            i_left, i_right = 6, 7
        if n == 1:
            return [large_box]
        boxes = []
        if d in ["x", "y", "z"]:
            n += 1  # Required because we split in linspace
            bins = np.linspace(large_box[i_left], large_box[i_right], n)
            for ii, left in enumerate(bins):
                if ii < len(bins) - 1:
                    right = bins[ii + 1]
                    this_box = large_box.copy()
                    this_box[i_left] = left
                    this_box[i_right] = right
                    boxes.append(this_box)
        elif "t" in d:
            # Time bounds are strings; split the period into n even sub-periods
            dates = pd.to_datetime(large_box[i_left: i_right + 1])
            date_bounds = [
                d.strftime("%Y%m%d%H%M%S")
                for d in pd.date_range(dates[0], dates[1], periods=n + 1)
            ]
            for i1, i2 in zip(np.arange(0, n), np.arange(1, n + 1)):
                left, right = date_bounds[i1], date_bounds[i2]
                this_box = large_box.copy()
                this_box[i_left] = left
                this_box[i_right] = right
                boxes.append(this_box)
        return boxes

    def _split_this_4Dbox(self, box, nx=1, ny=1, nz=1, nt=1):
        """Cartesian product of the per-axis splits of a 4D box"""
        box_list = []
        split_x = self._split_box(box, n=nx, d="x")
        for bx in split_x:
            split_y = self._split_box(bx, n=ny, d="y")
            for bxy in split_y:
                split_z = self._split_box(bxy, n=nz, d="z")
                for bxyz in split_z:
                    split_t = self._split_box(bxyz, n=nt, d="t")
                    for bxyzt in split_t:
                        box_list.append(bxyzt)
        return box_list

    def _split_this_3Dbox(self, box, nx=1, ny=1, nz=1):
        """Cartesian product of the per-axis splits of a 3D box"""
        box_list = []
        split_x = self._split_box(box, n=nx, d="x")
        for bx in split_x:
            split_y = self._split_box(bx, n=ny, d="y")
            for bxy in split_y:
                split_z = self._split_box(bxy, n=nz, d="z")
                for bxyz in split_z:
                    box_list.append(bxyz)
        return box_list

    def _chunker_box4d(self, request, chunks, chunks_maxsize):  # noqa: C901
        """Resolve 'auto' chunk counts and split a 4D (lon/lat/dpt/time) box request"""
        BOX = request["box"]
        n_chunks = chunks
        for axis, n in n_chunks.items():
            if n == "auto":
                if axis == "lon":
                    Lx = BOX[1] - BOX[0]
                    if Lx > chunks_maxsize["lon"]:  # Max box size in longitude
                        n_chunks["lon"] = int(
                            np.ceil(np.divide(Lx, chunks_maxsize["lon"]))
                        )
                    else:
                        n_chunks["lon"] = 1
                if axis == "lat":
                    Ly = BOX[3] - BOX[2]
                    if Ly > chunks_maxsize["lat"]:  # Max box size in latitude
                        n_chunks["lat"] = int(
                            np.ceil(np.divide(Ly, chunks_maxsize["lat"]))
                        )
                    else:
                        n_chunks["lat"] = 1
                if axis == "dpt":
                    Lz = BOX[5] - BOX[4]
                    if Lz > chunks_maxsize["dpt"]:  # Max box size in depth
                        n_chunks["dpt"] = int(
                            np.ceil(np.divide(Lz, chunks_maxsize["dpt"]))
                        )
                    else:
                        n_chunks["dpt"] = 1
                if axis == "time":
                    Lt = np.timedelta64(
                        pd.to_datetime(BOX[7]) - pd.to_datetime(BOX[6]), "D"
                    )
                    MaxLen = np.timedelta64(chunks_maxsize["time"], "D")
                    if Lt > MaxLen:  # Max box size in time
                        n_chunks["time"] = int(np.ceil(np.divide(Lt, MaxLen)))
                    else:
                        n_chunks["time"] = 1
        boxes = self._split_this_4Dbox(
            BOX,
            nx=n_chunks["lon"],
            ny=n_chunks["lat"],
            nz=n_chunks["dpt"],
            nt=n_chunks["time"],
        )
        return {"chunks": sorted(n_chunks), "values": boxes}

    def _chunker_box3d(self, request, chunks, chunks_maxsize):
        """Resolve 'auto' chunk counts and split a 3D (lon/lat/dpt) box request

        NOTE(review): this uses floor_divide where the 4D version uses ceil --
        kept as-is, possibly intentional.
        """
        BOX = request["box"]
        n_chunks = chunks
        for axis, n in n_chunks.items():
            if n == "auto":
                if axis == "lon":
                    Lx = BOX[1] - BOX[0]
                    if Lx > chunks_maxsize["lon"]:  # Max box size in longitude
                        n_chunks["lon"] = int(
                            np.floor_divide(Lx, chunks_maxsize["lon"])
                        )
                    else:
                        n_chunks["lon"] = 1
                if axis == "lat":
                    Ly = BOX[3] - BOX[2]
                    if Ly > chunks_maxsize["lat"]:  # Max box size in latitude
                        n_chunks["lat"] = int(
                            np.floor_divide(Ly, chunks_maxsize["lat"])
                        )
                    else:
                        n_chunks["lat"] = 1
                if axis == "dpt":
                    Lz = BOX[5] - BOX[4]
                    if Lz > chunks_maxsize["dpt"]:  # Max box size in depth
                        n_chunks["dpt"] = int(
                            np.floor_divide(Lz, chunks_maxsize["dpt"])
                        )
                    else:
                        n_chunks["dpt"] = 1
                # if axis == 'time':
                #     Lt = np.timedelta64(pd.to_datetime(BOX[5]) - pd.to_datetime(BOX[4]), 'D')
                #     MaxLen = np.timedelta64(chunks_maxsize['time'], 'D')
                #     if Lt > MaxLen:  # Max box size in time
                #         n_chunks['time'] = int(np.floor_divide(Lt, MaxLen))
                #     else:
                #         n_chunks['time'] = 1
        boxes = self._split_this_3Dbox(
            BOX, nx=n_chunks["lon"], ny=n_chunks["lat"], nz=n_chunks["dpt"]
        )
        return {"chunks": sorted(n_chunks), "values": boxes}

    def _chunker_wmo(self, request, chunks, chunks_maxsize):
        """Split a WMO list request into groups of floats"""
        WMO = request["wmo"]
        n_chunks = chunks
        if n_chunks["wmo"] == "auto":
            wmo_grps = self._split_list_bychunksize(WMO, max_size=chunks_maxsize["wmo"])
        else:
            n = np.min([n_chunks["wmo"], len(WMO)])
            wmo_grps = self._split_list_bychunknb(WMO, n=n)
        n_chunks["wmo"] = len(wmo_grps)
        return {"chunks": sorted(n_chunks), "values": wmo_grps}

    def fit_transform(self):
        """ Chunk a fetcher request

        Returns
        -------
        list
            List of sub-requests (boxes, or WMO groups). Side effect: 'auto'
            entries of ``self.chunks`` are resolved to actual chunk counts.
        """
        self._results = self.this_chunker(self.request, self.chunks, self.chunksize)
        # self.chunks = self._results['chunks']
        return self._results["values"]
def format_oneline(s, max_width=65):
    """ Return a string formatted for a line print

    Strings longer than `max_width` are shortened by replacing their middle
    with ' ... ' so that the result is exactly `max_width` characters long.
    """
    if len(s) <= max_width:
        return s
    padding = " ... "
    half, extra = divmod(max_width - len(padding), 2)
    # When the available width is odd, the head keeps the extra character
    return s[: half + extra] + padding + s[-half:]
def is_indexbox(box: list, errors="raise"):
    """ Check if this array matches a 2d or 3d index box definition

    box = [lon_min, lon_max, lat_min, lat_max]
    or:
    box = [lon_min, lon_max, lat_min, lat_max, datim_min, datim_max]

    Parameters
    ----------
    box: list
    errors: 'raise'

    Returns
    -------
    bool
    """
    def _fail(msg):
        # Same error policy as is_box: raise or fail silently with False
        if errors == "raise":
            raise ValueError("%s: %s" % (box, msg))
        return False

    # Formats:
    if not isinstance(box, list):
        return _fail("index box must be a list")
    if len(box) not in [4, 6]:
        return _fail("index box must be a list with 4 or 6 elements")

    # Insert pressure bounds and use full box validator:
    tmp_box = box.copy()
    tmp_box.insert(4, 0.)
    tmp_box.insert(5, 10000.)
    return is_box(tmp_box, errors=errors)
def is_box(box: list, errors="raise"):
    """ Check if this array matches a 3d or 4d data box definition

    box = [lon_min, lon_max, lat_min, lat_max, pres_min, pres_max]
    or:
    box = [lon_min, lon_max, lat_min, lat_max, pres_min, pres_max, datim_min, datim_max]

    Parameters
    ----------
    box: list
    errors: 'raise'

    Returns
    -------
    bool
    """
    def _fail(msg):
        # First failing check wins: raise with its message, or return False
        if errors == "raise":
            raise ValueError("%s: %s" % (box, msg))
        return False

    def _numeric(v):
        return isinstance(v, int) or isinstance(v, (np.floating, float))

    def _dateconvertible(d):
        try:
            pd.to_datetime(d)
            return True
        except Exception:
            return False

    # Formats:
    if not isinstance(box, list):
        return _fail("box must be a list")
    if len(box) not in [6, 8]:
        return _fail("box must be a list with 6 or 8 elements")
    # Types:
    if not _numeric(box[0]):
        return _fail("lon_min must be numeric")
    if not _numeric(box[1]):
        return _fail("lon_max must be numeric")
    if not _numeric(box[2]):
        return _fail("lat_min must be numeric")
    if not _numeric(box[3]):
        return _fail("lat_max must be numeric")
    if not _numeric(box[4]):
        return _fail("pres_min must be numeric")
    if not _numeric(box[5]):
        return _fail("pres_max must be numeric")
    if len(box) == 8:
        if not (isinstance(box[-2], str) and _dateconvertible(box[-2])):
            return _fail("datetim_min must be a string convertible to a Pandas datetime")
        if not (isinstance(box[-1], str) and _dateconvertible(box[-1])):
            return _fail("datetim_max must be a string convertible to a Pandas datetime")
    # Ranges:
    if not (box[0] >= -180.0 and box[0] <= 360.0):
        return _fail("lon_min must be in [-180;180] or [0;360]")
    if not (box[1] >= -180.0 and box[1] <= 360.0):
        return _fail("lon_max must be in [-180;180] or [0;360]")
    if not (box[2] >= -90.0 and box[2] <= 90):
        return _fail("lat_min must be in [-90;90]")
    if not (box[3] >= -90.0 and box[3] <= 90.0):
        return _fail("lat_max must be in [-90;90]")
    if not (box[4] >= 0 and box[4] <= 10000):
        return _fail("pres_min must be in [0;10000]")
    if not (box[5] >= 0 and box[5] <= 10000):
        return _fail("pres_max must be in [0;10000]")
    # Orders:
    if not box[0] < box[1]:
        return _fail("lon_max must be larger than lon_min")
    if not box[2] < box[3]:
        return _fail("lat_max must be larger than lat_min")
    if not box[4] < box[5]:
        return _fail("pres_max must be larger than pres_min")
    if len(box) == 8 and not pd.to_datetime(box[-2]) < pd.to_datetime(box[-1]):
        return _fail("datetim_max must come after datetim_min")
    return True
def is_list_of_strings(lst):
    """Return True if ``lst`` is a list whose elements are all strings."""
    if not isinstance(lst, list):
        return False
    return all(isinstance(item, str) for item in lst)
def is_list_of_dicts(lst):
    """Return True if every element of ``lst`` is a dict (vacuously True when empty)."""
    for item in lst:
        if not isinstance(item, dict):
            return False
    return True
def is_list_of_datasets(lst):
    """Return True if every element of ``lst`` is an ``xarray.Dataset`` (vacuously True when empty)."""
    for item in lst:
        if not isinstance(item, xr.Dataset):
            return False
    return True
def is_list_equal(lst1, lst2):
    """Return True if the two sequences have equal length and pairwise-equal elements."""
    if len(lst1) != len(lst2):
        return False
    matches = sum(1 for a, b in zip(lst1, lst2) if a == b)
    return matches == len(lst1)
def check_wmo(lst):
    """Validate a WMO option and return it as a list of integers.

    Parameters
    ----------
    lst: int, list(int), array(int)
        WMO must be an integer or an iterable with elements that can be
        casted as integers.

    Returns
    -------
    list(int)

    Raises
    ------
    ValueError
        If the input is not a valid WMO (delegated to :func:`is_wmo`).
    """
    is_wmo(lst, errors="raise")
    # Normalise the input to a plain Python list:
    if isinstance(lst, list):
        wmos = lst
    elif isinstance(lst, np.ndarray):
        wmos = list(lst)
    else:
        wmos = [lst]
    # Then cast every element as a positive integer:
    return [abs(int(wmo)) for wmo in wmos]
def is_wmo(lst, errors="raise"):  # noqa: C901
    """Check if a WMO is valid.

    A valid WMO is a single 5 or 7 digit positive number, or a list/array
    of such numbers.

    Parameters
    ----------
    lst: int, list(int), array(int)
        WMO must be a single or a list of 5/7 digit positive numbers
    errors: 'raise'
        Possibly raises a ValueError exception, otherwise fails silently.

    Returns
    -------
    bool
        True if wmo is indeed a list of integers
    """
    # Make sure we deal with a list
    if not isinstance(lst, list):
        if isinstance(lst, np.ndarray):
            lst = list(lst)
        else:
            lst = [lst]

    # Error message:
    msg = "WMO must be a single or a list of 5/7 digit positive numbers"

    # Then try to cast list elements as integers, return True if ok
    result = True
    try:
        for x in lst:
            if not str(x).isdigit():
                result = False
            if (len(str(x)) != 5) and (len(str(x)) != 7):
                result = False
            if int(x) <= 0:
                result = False
    except Exception:
        result = False

    # Bug fix: the previous implementation raised unconditionally whenever
    # errors == "raise", even when every WMO was perfectly valid.
    # Only raise when validation actually failed.
    if not result and errors == "raise":
        raise ValueError(msg)
    return result
# def docstring(value):
# """Replace one function docstring
#
# To be used as a decorator
# """
# def _doc(func):
# func.__doc__ = value
# return func
# return _doc
def warnUnless(ok, txt):
    """Decorator factory that raises a warning unless ``ok`` is True.

    This function must be used as a decorator.

    Parameters
    ----------
    ok: bool
        When False, every call of the decorated function emits a warning.
    txt: str
        Text appended to the function name in the warning message.
    """
    if ok:
        # Nothing to warn about: return the identity decorator.
        return lambda f: f

    def decorator(fct):
        def wrapped(*args, **kwargs):
            warnings.warn("%s %s" % (fct.__name__, txt))
            return fct(*args, **kwargs)
        return wrapped
    return decorator
@contextlib.contextmanager
def modified_environ(*remove, **update):
    """
    Temporarily updates the ``os.environ`` dictionary in-place.

    The ``os.environ`` dictionary is updated in-place so that the modification
    is sure to work in all situations.

    :param remove: Environment variables to remove.
    :param update: Dictionary of environment variables and values to add/update.
    """
    # Source: https://github.com/laurent-laporte-pro/stackoverflow-q2059482
    env = os.environ
    update = update or {}
    remove = remove or []

    # Variables that exist now and will be clobbered (updated or removed):
    touched = (set(update) | set(remove)) & set(env)
    # Snapshot of those variables, to restore on exit.
    restore = {key: env[key] for key in touched}
    # Variables that did not exist before and must be deleted on exit.
    added = frozenset(key for key in update if key not in env)

    try:
        env.update(update)
        for key in remove:
            env.pop(key, None)
        yield
    finally:
        env.update(restore)
        for key in added:
            env.pop(key)
def toYearFraction(this_date: pd._libs.tslibs.timestamps.Timestamp = None):
    """ Compute decimal year, robust to leap years, precision to the second

    Compute the fraction of the year a given timestamp corresponds to.
    The "fraction of the year" goes:

    - from 0 on 01-01T00:00:00.000 of the year
    - to 1 on the 01-01T00:00:00.000 of the following year

    1 second corresponds to the number of days in the year times 86400.
    The fraction of the year is rounded to 10-digits in order to have a "second" precision.
    See discussion here: https://github.com/euroargodev/argodmqc_owc/issues/35

    Parameters
    ----------
    this_date: pd._libs.tslibs.timestamps.Timestamp, optional
        Timestamp to convert; defaults to the current UTC time.

    Returns
    -------
    float
    """
    # Bug fix: the previous signature used ``pd.to_datetime('now', utc=True)``
    # as the default value, which is evaluated ONCE at import time, freezing
    # "now" forever.  Use a None sentinel and resolve it at call time.
    if this_date is None:
        this_date = pd.to_datetime('now', utc=True)

    # Anchor the start of the year in the same timezone flavour as the input,
    # so aware and naive timestamps are never mixed in the subtraction below:
    if this_date.tzinfo is not None and this_date.tzname() == "UTC":
        startOfThisYear = pd.to_datetime("%i-01-01T00:00:00.000" % this_date.year, utc=True)
    else:
        startOfThisYear = pd.to_datetime("%i-01-01T00:00:00.000" % this_date.year)

    # Year length in seconds (DateOffset handles leap years correctly):
    yearDuration_sec = (startOfThisYear + pd.offsets.DateOffset(years=1) - startOfThisYear).total_seconds()
    yearElapsed_sec = (this_date - startOfThisYear).total_seconds()

    fraction = yearElapsed_sec / yearDuration_sec
    fraction = np.round(fraction, 10)  # "second" precision, see docstring
    return this_date.year + fraction
def YearFraction_to_datetime(yf: float):
    """Compute a datetime from a decimal year.

    Inverse of the toYearFraction() function.

    Parameters
    ----------
    yf: float

    Returns
    -------
    pd._libs.tslibs.timestamps.Timestamp
    """
    year = np.int32(yf)
    # Keep only the fractional part, at "second" precision (10 digits):
    fraction = np.round(yf - year, 10)
    year_start = pd.to_datetime("%i-01-01T00:00:00" % year)
    one_year_sec = ((year_start + pd.offsets.DateOffset(years=1)) - year_start).total_seconds()
    elapsed = pd.Timedelta(fraction * one_year_sec, unit='s')
    return pd.to_datetime(year_start + elapsed, unit='s')
def wrap_longitude(grid_long):
    """Allow longitudes (0-360) to wrap beyond the 360 mark, for mapping purposes.

    Makes sure that, if the longitude is near the boundary (0 or 360), the
    values are wrapped beyond 360 so they appear nicely on a map.
    This is a refactor between get_region_data and get_region_hist_locations
    to avoid duplicate code.
    source: https://github.com/euroargodev/argodmqc_owc/blob/e174f4538fdae1534c9740491398972b1ffec3ca/pyowc/utilities.py#L80

    Parameters
    ----------
    grid_long: array of longitude values
        NOTE: modified in place.

    Returns
    -------
    array of longitude values that can extend past 360
    """
    # Shift negative longitudes into [0, 360):
    negatives = np.argwhere(grid_long < 0)
    grid_long[negatives] = grid_long[negatives] + 360
    # If some data sit close to the upper boundary (>= 320), wrap the data
    # close to the lower boundary (<= 40) past 360 so both plot contiguously:
    if len(np.argwhere(grid_long >= 320)) != 0:
        low = np.argwhere(grid_long <= 40)
        grid_long[low] = 360 + grid_long[low]
    return grid_long
def wmo2box(wmo_id: int):
    """Convert a WMO square box number into a latitude/longitude box.

    See:
    https://en.wikipedia.org/wiki/World_Meteorological_Organization_squares
    https://commons.wikimedia.org/wiki/File:WMO-squares-global.gif

    Parameters
    ----------
    wmo_id: int
        WMO square number, must be between 1000 and 7817

    Returns
    -------
    box: list(int)
        [lon_min, lon_max, lat_min, lat_max] bounds to the WMO square number
    """
    if wmo_id < 1000 or wmo_id > 7817:
        raise ValueError("Invalid WMO square number, must be between 1000 and 7817.")
    digits = str(wmo_id)

    # "global quadrant" numbers where 1=NE, 3=SE, 5=SW, 7=NW
    quadrant = int(digits[0])
    if quadrant not in (1, 3, 5, 7):
        raise ValueError("Invalid WMO square number, 1st digit must be 1, 3, 5 or 7.")

    # 'minimum' latitude square boundary, nearest to the Equator:
    lat_digit = int(digits[1])
    # 'minimum' longitude square boundary, nearest to the Prime Meridian:
    lon_digits = int(digits[2:4])

    size = 10  # WMO squares are 10x10 degrees

    if quadrant in (1, 3):  # eastern hemisphere
        lon_min, lon_max = lon_digits * size, lon_digits * size + size
    else:                   # western hemisphere (5, 7)
        lon_min, lon_max = -lon_digits * size - size, -lon_digits * size

    if quadrant in (1, 7):  # northern hemisphere
        lat_min, lat_max = lat_digit * size, lat_digit * size + size
    else:                   # southern hemisphere (3, 5)
        lat_min, lat_max = -lat_digit * size - size, -lat_digit * size

    return [lon_min, lon_max, lat_min, lat_max]
def groupby_remap(z, data, z_regridded, z_dim=None, z_regridded_dim="regridded", output_dim="remapped", select='deep', right=False):
    """Remap ``data`` from the vertical axis ``z`` onto the bins defined by ``z_regridded``.

    Source samples are assigned to target bins with ``numpy.digitize``; each
    bin is then reduced to a single value according to ``select``:

    - positional choices ('shallow', 'deep', 'middle', 'random') pick one
      sample from the bin;
    - statistical choices ('mean', 'min', 'max', 'median') aggregate the bin.

    Parameters
    ----------
    z:
        Source vertical coordinate (must be 1D when ``z_dim`` is omitted).
    data:
        Values to remap.  Dataset input is not supported yet.
    z_regridded:
        Target bin values along dimension ``z_regridded_dim``.
        NOTE(review): presumably monotonic bin edges, as required by
        numpy.digitize — confirm with callers.
    z_dim: str, optional
        Name of the vertical dimension; inferred when ``z`` is 1D.
    select: str, default 'deep'
        Reduction strategy, see above.
    right: bool, default False
        Bin-edge inclusion side, passed through to numpy.digitize.

    Returns
    -------
    xarray object with ``output_dim`` replacing the vertical dimension.
    """
    # sub-sampling called in xarray ufunc
    def _subsample_bins(x, y, target_values):
        # remove all nans from input x and y
        idx = np.logical_or(np.isnan(x), np.isnan(y))
        x = x[~idx]
        y = y[~idx]

        ifound = np.digitize(x, target_values, right=right)  # ``bins[i-1] <= x < bins[i]``
        ifound -= 1  # Because digitize returns a 1-based indexing, we need to remove 1
        y_binned = np.ones_like(target_values) * np.nan

        for ib, this_ibin in enumerate(np.unique(ifound)):
            ix = np.where(ifound == this_ibin)
            iselect = ix[-1]

            # Map to y value at specific x index in the bin:
            if select == 'shallow':
                iselect = iselect[0]  # min/shallow
                mapped_value = y[iselect]
            elif select == 'deep':
                iselect = iselect[-1]  # max/deep
                mapped_value = y[iselect]
            elif select == 'middle':
                iselect = iselect[np.where(x[iselect] >= np.median(x[iselect]))[0][0]]  # median/middle
                mapped_value = y[iselect]
            elif select == 'random':
                iselect = iselect[np.random.randint(len(iselect))]
                mapped_value = y[iselect]

            # or Map to y statistics in the bin:
            elif select == 'mean':
                mapped_value = np.nanmean(y[iselect])
            elif select == 'min':
                mapped_value = np.nanmin(y[iselect])
            elif select == 'max':
                mapped_value = np.nanmax(y[iselect])
            elif select == 'median':
                mapped_value = np.median(y[iselect])

            else:
                raise InvalidOption("`select` option has invalid value (%s)" % select)

            y_binned[this_ibin] = mapped_value

        return y_binned

    # infer dim from input
    if z_dim is None:
        if len(z.dims) != 1:
            raise RuntimeError("if z_dim is not specified, x must be a 1D array.")
        dim = z.dims[0]
    else:
        dim = z_dim

    # if dataset is passed drop all data_vars that dont contain dim
    if isinstance(data, xr.Dataset):
        raise ValueError("Dataset input is not supported yet")
        # TODO: for a dataset input just apply the function for each appropriate array

    # xarray >= 0.15.0 moved output_sizes into dask_gufunc_kwargs:
    if version.parse(xr.__version__) > version.parse("0.15.0"):
        kwargs = dict(
            input_core_dims=[[dim], [dim], [z_regridded_dim]],
            output_core_dims=[[output_dim]],
            vectorize=True,
            dask="parallelized",
            output_dtypes=[data.dtype],
            dask_gufunc_kwargs={'output_sizes': {output_dim: len(z_regridded[z_regridded_dim])}},
        )
    else:
        kwargs = dict(
            input_core_dims=[[dim], [dim], [z_regridded_dim]],
            output_core_dims=[[output_dim]],
            vectorize=True,
            dask="parallelized",
            output_dtypes=[data.dtype],
            output_sizes={output_dim: len(z_regridded[z_regridded_dim])},
        )
    remapped = xr.apply_ufunc(_subsample_bins, z, data, z_regridded, **kwargs)
    # Attach the target coordinate under the new output dimension name:
    remapped.coords[output_dim] = z_regridded.rename({z_regridded_dim: output_dim}).coords[output_dim]
    return remapped
class TopoFetcher():
    """ Fetch topographic data through an ERDDAP server for an ocean rectangle

    Example:
        >>> from argopy import TopoFetcher
        >>> box = [-75, -45, 20, 30]  # Lon_min, lon_max, lat_min, lat_max
        >>> ds = TopoFetcher(box).to_xarray()
        >>> ds = TopoFetcher(box, ds='gebco', stride=[10, 10], cache=True).to_xarray()
    """

    class ERDDAP():
        """Minimal container describing an ERDDAP endpoint and request."""

        def __init__(self, server: str, protocol: str = 'tabledap'):
            self.server = server        # Base URL of the ERDDAP server
            self.protocol = protocol    # 'tabledap' or 'griddap'
            self.response = 'nc'        # Download format (netCDF)
            self.dataset_id = ''        # Set by the owner (see _init_erddap)
            self.constraints = ''       # URL-encoded subset constraints

    def __init__(
        self,
        box: list,
        ds: str = "gebco",
        cache: bool = False,
        cachedir: str = "",
        api_timeout: int = 0,
        stride: list = [1, 1],
        **kwargs,
    ):
        """ Instantiate an ERDDAP topo data fetcher

        Parameters
        ----------
        ds: str (optional), default: 'gebco'
            Dataset to load:
            - 'gebco' will load the GEBCO_2020 Grid, a continuous terrain model for oceans and land at 15 arc-second intervals
        stride: list, default [1, 1]
            Strides along longitude and latitude. This allows to change the output resolution
        cache: bool (optional)
            Cache data or not (default: False)
        cachedir: str (optional)
            Path to cache folder
        api_timeout: int (optional)
            Erddap request time out in seconds. Set to OPTIONS['api_timeout'] by default.
        """
        # NOTE(review): ``stride`` uses a mutable default argument; harmless
        # as long as it is never mutated in place — confirm.
        timeout = OPTIONS["api_timeout"] if api_timeout == 0 else api_timeout
        self.fs = httpstore(cache=cache, cachedir=cachedir, timeout=timeout, size_policy='head')
        self.definition = "Erddap topographic data fetcher"
        self.BOX = box
        self.stride = stride
        # NOTE(review): only 'gebco' is handled here; any other ``ds`` leaves
        # self.server/self.dataset_id unset and _init_erddap() will fail.
        if ds == "gebco":
            self.definition = "NOAA erddap gebco data fetcher for a space region"
            self.server = 'https://coastwatch.pfeg.noaa.gov/erddap'
            self.server_name = 'NOAA'
            self.dataset_id = 'gebco'

        self._init_erddap()

    def _init_erddap(self):
        # Init erddap: topo data are gridded, hence the 'griddap' protocol.
        self.erddap = self.ERDDAP(server=self.server, protocol="griddap")
        self.erddap.response = (
            "nc"
        )

        # Map our short dataset name to the server-side dataset id:
        if self.dataset_id == "gebco":
            self.erddap.dataset_id = "GEBCO_2020"
        else:
            raise ValueError(
                "Invalid database short name for %s erddap" % self.server_name
            )
        return self

    def _cname(self) -> str:
        """ Fetcher one line string definition helper """
        cname = "?"

        if hasattr(self, "BOX"):
            BOX = self.BOX
            cname = ("[x=%0.2f/%0.2f; y=%0.2f/%0.2f]") % (
                BOX[0],
                BOX[1],
                BOX[2],
                BOX[3],
            )
        return cname

    def __repr__(self):
        summary = ["<topofetcher.erddap>"]
        summary.append("Name: %s" % self.definition)
        summary.append("API: %s" % self.server)
        summary.append("Domain: %s" % format_oneline(self.cname()))
        return "\n".join(summary)

    def cname(self):
        """ Return a unique string defining the constraints """
        return self._cname()

    @property
    def cachepath(self):
        """ Return path to cached file(s) for this request

        Returns
        -------
        list(str)
        """
        return [self.fs.cachepath(uri) for uri in self.uri]

    def define_constraints(self):
        """ Define request constraints """
        # Eg: https://coastwatch.pfeg.noaa.gov/erddap/griddap/GEBCO_2020.nc?elevation%5B(34):5:(42)%5D%5B(-21):7:(-12)%5D
        # %5B / %5D are the URL-encoded [ ] brackets of the griddap syntax:
        # latitude range/stride first, then longitude range/stride.
        self.erddap.constraints = "%s(%0.2f):%i:(%0.2f)%s%s(%0.2f):%i:(%0.2f)%s" % (
            "%5B", self.BOX[2], self.stride[1], self.BOX[3], "%5D",
            "%5B", self.BOX[0], self.stride[0], self.BOX[1], "%5D")
        return None

    # @property
    # def _minimal_vlist(self):
    #     """ Return the minimal list of variables to retrieve """
    #     vlist = list()
    #     vlist.append("latitude")
    #     vlist.append("longitude")
    #     vlist.append("elevation")
    #     return vlist

    def get_url(self):
        """ Return the URL to download data requested

        Returns
        -------
        str
        """
        # First part of the URL:
        protocol = self.erddap.protocol
        dataset_id = self.erddap.dataset_id
        response = self.erddap.response
        url = f"{self.erddap.server}/{protocol}/{dataset_id}.{response}?"

        # Add variables to retrieve:
        variables = ["elevation"]
        variables = ",".join(variables)
        url += f"{variables}"

        # Add constraints:
        self.define_constraints()  # Define constraint to select this box of data (affect self.erddap.constraints)
        url += f"{self.erddap.constraints}"

        return url

    @property
    def uri(self):
        """ List of files to load for a request

        Returns
        -------
        list(str)
        """
        return [self.get_url()]

    def to_xarray(self, errors: str = 'ignore'):
        """ Load Topographic data and return a xarray.DataSet """
        # Download data (uri always holds a single URL, see the property above)
        if len(self.uri) == 1:
            ds = self.fs.open_dataset(self.uri[0])
        return ds

    def load(self, errors: str = 'ignore'):
        """ Load Topographic data and return a xarray.DataSet """
        return self.to_xarray(errors=errors)
|
test_concurrency.py | import random
import threading
import time
from sqlalchemy import Column
from sqlalchemy import exc
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import clear_mappers
from sqlalchemy.orm import exc as orm_exc
from sqlalchemy.orm import relationship
from sqlalchemy.orm import Session
from sqlalchemy.testing import fixtures
class ConcurrentUseDeclMappingTest(fixtures.TestBase):
    # Regression test: declaring a mapped class (make_b) on a declarative Base
    # while another thread queries a class that references it (query_a) must
    # not leave the mapper registry in a broken state.

    def teardown(self):
        # Drop all mappers so each of the 50 iterations starts clean.
        clear_mappers()

    @classmethod
    def make_a(cls, Base):
        class A(Base):
            __tablename__ = "a"
            id = Column(Integer, primary_key=True)
            data = Column(String)
            # Forward reference to "B", which is declared concurrently by make_b().
            bs = relationship("B")

        # need a strong ref so that the class is not gc'ed
        cls.A = A

    @classmethod
    def query_a(cls, Base, result):
        s = Session()
        # Randomize the interleaving with the make_b() thread.
        time.sleep(random.random() / 100)
        A = cls.A
        try:
            s.query(A).join(A.bs)
        except orm_exc.UnmappedClassError as oe:
            # this is the failure mode, where B is being handled by
            # declarative and is in the registry but not mapped yet.
            result[0] = oe
        except exc.InvalidRequestError as err:
            # if make_b() starts too slowly, we can reach here, because
            # B isn't in the registry yet. We can't guard against this
            # case in the library because a class can refer to a name that
            # doesn't exist and that has to raise.
            result[0] = True
        else:
            # no conflict
            result[0] = True

    @classmethod
    def make_b(cls, Base):
        class B(Base):
            __tablename__ = "b"
            id = Column(Integer, primary_key=True)

            @declared_attr
            def data(cls):
                # Slow down attribute configuration to widen the race window.
                time.sleep(0.001)
                return Column(String)

            a_id = Column(ForeignKey("a.id"))

        cls.B = B

    def test_concurrent_create(self):
        # Repeat to give the race a realistic chance of occurring.
        for i in range(50):
            Base = declarative_base()
            clear_mappers()
            self.make_a(Base)
            result = [False]
            threads = [
                threading.Thread(target=self.make_b, args=(Base,)),
                threading.Thread(target=self.query_a, args=(Base, result)),
            ]
            for t in threads:
                t.start()
            for t in threads:
                t.join()
            # Only UnmappedClassError is a genuine failure (see query_a).
            if isinstance(result[0], orm_exc.UnmappedClassError):
                raise result[0]
|
main.py | #!venv/Scripts/python.exe
import discord
from discord import permissions
from discord import webhook
from discord import channel
from discord.ext.commands.core import check
from dotenv import load_dotenv
import mysql.connector
from discord.ext import commands
import os
from discord.ext.commands import errors
import random as ra
from datetime import datetime
import re
import asyncio
import copypasta
import opencv
from Network import get_html
from threading import Thread
from strings import get_string
from strings import reload_lang
import youtube_dl
#region init
insulti = []  # In-memory cache of insults, (re)loaded from the DB by rigenera_insulti()
langs = {}  # NOTE(review): appears unused in this file — confirm before removing
url_pattern = r'(http|https)://.*'
youtube_url = r'(http|https)://(www.youtube.com|youtu.be)/.*'
emoji_patterns = r'^<a:[a-zA-Z0-9_-]+:[0-9]+>$'
load_dotenv()  # Pull secrets from .env into the process environment
DATABASE_PASSWORD = os.environ.get('DB_PASS')
bot = commands.Bot(command_prefix='$')
TOKEN = os.environ.get('TOKEN')
creator_id = os.environ.get("CREATORE")  # Discord user id of the bot owner
bot.remove_command('help')  # The default help command is intentionally dropped
#endregion
# region Funzioni
def use_database(command, fetch=False, commit=False, params=None):
    """Open a short-lived MySQL connection and execute ``command``.

    Parameters
    ----------
    command: str
        SQL statement to execute.
    fetch: bool
        When True, return ``cursor.fetchall()`` results.
    commit: bool
        When True, commit the transaction before closing.
    params: tuple, optional
        Bind parameters for ``command``.  Prefer passing user-supplied
        values here instead of interpolating them into ``command``
        (prevents SQL injection).  Omitted for backward compatibility.

    Returns
    -------
    list or None
        Fetched rows when ``fetch`` is True, otherwise None.
    """
    result = None
    # NOTE(review): host/user/database are hard-coded; consider moving them
    # to environment variables alongside the password.
    conn = mysql.connector.connect(
        host='remotemysql.com',
        user='4IMMhUUnvb',
        password=DATABASE_PASSWORD,
        database='4IMMhUUnvb')
    c = conn.cursor()
    if params is None:
        c.execute(command)
    else:
        c.execute(command, params)
    if fetch:
        result = c.fetchall()
    if commit:
        conn.commit()
    conn.close()
    return result
def rigenera_insulti():
    """Reload the global ``insulti`` cache from the database.

    Bug fix: the previous version only appended, so every call after the
    first duplicated the entire list.  The cache is now cleared first
    (in place, so other references to the list stay valid).
    """
    global insulti
    rows = use_database('SELECT * FROM insulti', fetch=True)
    insulti.clear()
    for row in rows:
        insulti.append(row[1])


# Prime the cache at import time.
rigenera_insulti()
def get_name(ctx):
    """Return the author's guild nickname, falling back to the account name."""
    if ctx.author.nick:
        return ctx.author.nick
    return ctx.author.name
async def check_admin(ctx):
    """Abort the command (insult + exception) unless the author is an admin or the bot creator."""
    author = ctx.message.author
    if author.guild_permissions.administrator or author.id == int(creator_id):
        return
    await ctx.send(get_string(ctx, 'admin_error') + genera_insulto())
    raise Exception("Comando admin da persone non admin!")
async def check_creator(ctx):
    """Abort the command (insult + exception) unless the author is the bot creator."""
    if ctx.message.author.id == int(creator_id):
        return
    await ctx.send(get_string(ctx, 'creator_error') + genera_insulto())
    raise Exception("Comando creatore da persone non creatore!")
async def send_webhook(ctx, message, user, avatar):
    """Send ``message`` through a channel webhook impersonating ``user``/``avatar``.

    Reuses the webhook named after the channel when it exists, otherwise
    creates one.  Bug fix: the previous version picked an arbitrary webhook
    (first element of an unordered set) instead of the one whose name
    actually matches the channel.
    """
    hooks = await ctx.channel.webhooks()
    whk = next((h for h in hooks if h.name == ctx.channel.name), None)
    if whk is None:
        whk = await ctx.channel.create_webhook(name=ctx.channel.name)
    await whk.send(content=message, username=user, avatar_url=avatar)
def genera_insulto():
    """Return a random insult from the cached ``insulti`` list."""
    last_index = len(insulti) - 1
    return insulti[ra.randint(0, last_index)]
# Mapping from trigger substrings to canned replies, scanned in insertion
# order by switch_messaggi().  Values may be a plain string, a list of
# messages, or a ('func', code) tuple handled by the message dispatcher.
risposte_dic = {
    'hellothere': 'General Kenobi!',
    'gigi': ('func','msgg = "IL MIO ACERRIMO NEMICO"'),
    'nigga': 'Un po\' razzista ma ok',
    'negro': 'Un po\' razzista ma ok',
    'pepsiman': ['Pepsi Man!🍾', 'https://www.player.it/wp-content/uploads/2018/12/Pepsiman-il-videogioco.jpg', 'https://youtu.be/z54MpfR3XE4'],
    'grazie':'Prego',
    'flymetothemoon':'🚀🌑🌠',
    'mussolini':['VIVA IL DVCE!✋', 'https://youtu.be/i4J4xSzpSuA'],
    ':nonni:':[':Nonni:', '^\n|', 'Epic Nonni fail'],
    'rasputin':['https://youtu.be/WhPvJOnHotE', copypasta.RASPUTIN, copypasta.RASPUTIN2],
    '🍷':copypasta.WINE,
    'easports':copypasta.EA,
    'obama':copypasta.OBAMA,
    'ahegao':copypasta.AHEGAO,
    'bitcoin':copypasta.BITCOIN,
    'bruh':copypasta.BRUH,
    'ciao':'https://tenor.com/view/culo-jodete-fuck-you-drunk-gif-10066511',
    # Emoji
    '🔫': '<:pistola:821669164107825174>',
    # Emoji Animate
    ':love:': '<a:love:807947104164118558>',
    ':index:': '<a:index:807948759047733268>',
    ':ncry:': '<a:ncry:807989716011712532>',
    ':dance:': '<a:dance:807989758151360562>',
    ':pepelaugh:': '<a:pepelaugh:807990173282467840>',
    ':pepehype:': '<a:pepehype:807990347099537429>',
    ':pepesimp:': '<a:pepesimp:807990373167267870>',
    ':pepegacredit:': '<a:pepegacredit:807990388160987227>',
    ':ultrayaya:': '<a:ultrayaya:807990399155044373>',
    ':catjamdisco:': '<a:catjamdisco:808006353594482728>',
    ':cringepepepet:': '<a:cringepepepet:808006318359052378>',
    # NOTE(review): value below reuses the 'ultrayaya' emoji name with a
    # different id — confirm this is the intended dogdance emoji.
    ':dogdance:': '<a:ultrayaya:808006262834724866>',
    ':frogroll:': '<a:frogroll:820979762977439744>',
    ':frogspeed:': '<a:frogspeed:835822136731762749>',
    ':frograinbow:': '<a:frograinbow:835822233740902412>',
    ':flushedwiggle:': '<a:flushedwiggle:836159484111486976>',
    ':pepehey:':'<a:pepehey:836159681915256842>',
    ':catmad:':'<a:catmad:836159843751690281>'
}
def switch_messaggi(msg):
    """Return the canned reply whose trigger is contained in ``msg``, or 404.

    Insertion order of ``risposte_dic`` matters: the first matching trigger
    wins.  404 is the sentinel for "no trigger matched".
    """
    # Idiom fix: use the ``in`` operator instead of calling ``__contains__``
    # directly, and iterate the dict itself instead of ``.keys()``.
    for key in risposte_dic:
        if key in msg:
            return risposte_dic[key]
    return 404
#endregion
#region Sezione comandi bot
@bot.event
async def on_ready():
    # Log startup information and advertise the bot presence.
    print('We have logged in as {0.user}'.format(bot))
    for guild in bot.guilds:
        print(f"Bot is being used in {guild.name} (id:{guild.id})")
    activity = discord.Activity(type=discord.ActivityType.watching, name="you")
    await bot.change_presence(activity=activity)
@bot.command()
async def test(ctx : discord.Message):
    # Placeholder command: does nothing; handy to check the bot responds.
    pass
@bot.command(aliases=['p', 'probability'])
async def probabilita(ctx, *, arg):
    # Reply with a random 0-100% "probability" for the given statement.
    # Fix: use the module-level ``random as ra`` import instead of
    # re-importing ``random`` locally on every invocation.
    await ctx.send(arg + get_string(ctx, 'probabilita') + str(ra.randint(0, 100)) + '%')
@bot.command(aliases=['i'])
async def insulta(ctx, *, member: discord.Member):
    # Mention ``member`` with a random insult, signed by the invoking user.
    insulto = genera_insulto().lower()
    firma = get_name(ctx)
    await ctx.send(f'{member.mention} è un {insulto}\n\n> -Messaggio cordialmente inviato da *{firma}*')
@bot.command()
async def warn(ctx, member: discord.Member, *, reason='no reason'):
    # Admin-only: warn a member and persist the infraction in the `fedina` table.
    await check_admin(ctx)
    m = await ctx.send(f'{member.mention}' + get_string(ctx, 'warn') + f'{reason}')
    await ctx.message.add_reaction('<:pepefedora:822422976796295198>')
    data = datetime.now().strftime(r'%Y-%m-%d %H:%M:%S') # Time formatting
    # Strip single quotes so ``reason`` cannot break out of the SQL literal.
    # NOTE(review): weak defence — prefer parameterized queries for the
    # f-string interpolation below (SQL injection risk).
    reason = reason.replace("'", "")
    use_database(f"INSERT INTO fedina VALUES ({member.id}, '{reason}', '{data}')", commit=True)
@bot.command(aliases=['mi', 'show_infractions'])
async def mostra_infrazioni(ctx, *, member: discord.Member = None):
    # List every recorded infraction for ``member`` (defaults to the author).
    if not member:
        member = ctx.author
    infrazioni = use_database(f'SELECT reason, date FROM fedina WHERE user_id = {member.id}', True)
    righe = []
    for i, infrazione in enumerate(infrazioni, 1):
        righe.append(f'> {get_string(ctx, "infrazione")} {i}: `{infrazione[0]}` {get_string(ctx, "in_data")} `{infrazione[1]}`\n')
    msg = ''.join(righe)
    if len(infrazioni) == 0:
        await ctx.send(f"{member.mention} {get_string(ctx, 'mai_infra')}")
    await ctx.send(msg)
@bot.command(aliases=['pf', 'clean_infractions'])
async def pulisci_fedina(ctx, *, member: discord.Member):
    # Creator-only: wipe a member's infraction record.
    await check_creator(ctx)
    use_database(f"DELETE FROM fedina WHERE user_id = {member.id}", commit=True)
    conferma = f'{get_string(ctx, "fed_pen_di")} {member.mention} {get_string(ctx, "pulita_con_succ")}'
    await ctx.send(conferma)
@bot.command(aliases=['sum'])
async def somma(ctx, a : float, b : float):
    # Reply with the sum of two numbers, plus an insult.
    risultato = a + b
    await ctx.send(f'{genera_insulto()}, non sai neanche fare {a} + {b} = {risultato}')
@bot.command(aliases=['divide'])
async def dividi(ctx, a : float, b : float):
    # Reply with the quotient of two numbers, plus an insult.
    # NOTE(review): b == 0 raises ZeroDivisionError before anything is sent,
    # surfaced by the command error handler — confirm this is intended.
    quoziente = a / b
    await ctx.send(f'{genera_insulto()}, non sai neanche fare {a} / {b} = {quoziente}')
@bot.command(aliases=['multiply'])
async def moltiplica(ctx, a : float, b : float):
    # Reply with the product of two numbers, plus an insult.
    prodotto = a * b
    await ctx.send(f'{genera_insulto()}, non sai neanche fare {a} * {b} = {prodotto}')
@bot.command()
async def visualizza_lista_insulti(ctx):
    # Creator-only: show every insult stored in the database, with ids.
    await check_creator(ctx)
    rows = use_database('SELECT * FROM insulti', True)
    # Fix: the accumulator used to be named ``insulti``, shadowing the
    # module-level insult cache of the same name.
    elenco = ''
    for row in rows:
        elenco += '> ' + str(row[1]) + ' id:' + str(row[0]) + '\n'
    em = discord.Embed(title='Lista insulti', description=elenco)
    await ctx.send(embed=em)
@bot.command()
async def cancella_insulto_dalla_lista(ctx, num):
    # Creator-only: delete the insult with id ``num`` and refresh the cache.
    await check_creator(ctx)
    # Security fix: ``num`` came straight from user input and was concatenated
    # into the SQL statement; casting to int rejects any injection payload.
    use_database('DELETE FROM insulti WHERE id = ' + str(int(num)), commit=True)
    await ctx.send('Insulto id: ' + num + ' cancellato se esiste')
    rigenera_insulti()
@bot.command(aliases=['ai'])
async def aggiungi_insulto(ctx, *, arg):
    # Add a new insult to the database after validating its characters.
    # Security fix: ``re.match`` only anchors at the START of the string, so
    # any input with a valid prefix (e.g. "abc'); DROP TABLE ...") passed the
    # check and was interpolated into the SQL statement.  ``re.fullmatch``
    # requires the WHOLE string to be letters, digits and spaces.
    pattern = r'[a-zA-Z0-9 ]+'
    if not re.fullmatch(pattern, arg):
        await ctx.send('Formato insulto non supportato :(, sono accettate solo lettere e numeri')
        return
    use_database(f"INSERT INTO insulti VALUES (null, '{arg}')", commit=True)
    await ctx.send("Insulto aggiunto!")
    rigenera_insulti()
@bot.command()
async def kick(ctx, member : discord.Member, *, reason='no reason'):
    # Kick ``member``, unless the author lacks the permission or the target
    # is an administrator.
    await check_admin(ctx)
    author = ctx.message.author
    if not author.guild_permissions.kick_members:
        await ctx.channel.send(f'{get_string(ctx, "kick_error")} ' + genera_insulto())
    elif member.guild_permissions.administrator:
        await ctx.channel.send(f'{get_string(ctx, "kick_amm")}')
    else:
        await member.kick(reason=reason)
@bot.command()
async def ban(ctx, member : discord.Member, *, reason='no reason'):
    # Ban ``member``, unless the author lacks the permission or the target
    # is an administrator.
    await check_admin(ctx)
    author = ctx.message.author
    if not author.guild_permissions.ban_members:
        await ctx.channel.send(f'{get_string(ctx, "ban_error")} ' + genera_insulto())
    elif member.guild_permissions.administrator:
        await ctx.channel.send(get_string(ctx, "ban_amm"))
    else:
        await member.ban(reason=reason)
@bot.command()
async def clean(ctx, arg):
    # Bulk-delete messages: ``arg`` is either a message count or a member.
    # Bug fixes: (1) a numeric ``arg`` never actually purged anything — the
    # old code only validated the count inside the try block and fell through
    # to the "cost" message; (2) the bare ``except:`` is narrowed to
    # ValueError so real errors are no longer swallowed.
    await check_admin(ctx)

    def check_member(msg, target):
        # purge() filter: keep only messages authored by ``target``.
        return msg.author == target

    try:
        count = int(arg)
    except ValueError:
        # Not a number: interpret ``arg`` as a member and purge their messages.
        converter = commands.MemberConverter()
        member = await converter.convert(ctx, arg)
        await ctx.channel.purge(check=lambda msg: check_member(msg, member))
    else:
        # Deleting more than 5000 messages is reserved to the bot creator.
        if count > 5000 and ctx.message.author.id != int(creator_id):
            await ctx.channel.send(get_string(ctx, 'canc_errore'))
            return
        await ctx.channel.purge(limit=count)
    m = await ctx.channel.send(f'{get_string(ctx, "costo")} {ra.randint(10, 200)}$')
    await m.add_reaction('🧹')
    await asyncio.sleep(4)
    await m.delete()
@bot.command(aliases=['dice'])
async def dado(ctx):
    # Roll a six-sided die with a little suspense.
    await ctx.channel.send(get_string(ctx, 'dado'))
    await asyncio.sleep(2)
    esito = ra.randint(1, 6)
    await ctx.channel.send(esito)
@bot.command(aliases=['gm'])
async def gaymeter(ctx, member : discord.Member):
    # Joke percentage bar, rigged for the creator (0%) and the bot (100%).
    FULL = '█'
    EMPTY = '░'
    perc = ra.randint(0, 100)
    if str(member.id) == creator_id:
        perc = 0
    elif member.id == bot.user.id:
        perc = 100
    filled = int(perc / 10)
    bar = FULL * filled + EMPTY * (10 - filled)
    await ctx.channel.send(f'{member.mention} {get_string(ctx, "gay")} {bar} {perc}%\n')
@bot.command()
async def furrymeter(ctx, member : discord.Member):
    # Joke percentage bar, rigged for the creator (0%).
    FULL = '█'
    EMPTY = '░'
    perc = ra.randint(0, 100)
    if str(member.id) == creator_id:
        perc = 0
    filled = int(perc / 10)
    bar = FULL * filled + EMPTY * (10 - filled)
    await ctx.channel.send(f'{member.mention} {get_string(ctx, "furry")} {bar} {perc}%:cat:\n')
@bot.command()
async def coin(ctx):
    # Flip a coin and report heads ('testa') or tails ('croce').
    esito = ra.randint(1, 2)
    if esito == 1:
        faccia = get_string(ctx, 'testa')
    else:
        faccia = get_string(ctx, 'croce')
    await ctx.channel.send(f"{get_string(ctx, 'uscito')}{faccia}")
@bot.command()
async def modify_role(ctx, member : discord.Member, role_input : discord.Role, add_remove : bool):
    # Creator-only: add (True) or remove (False) ``role_input`` on ``member``,
    # then delete the invoking message.
    await check_creator(ctx)
    if not add_remove:
        await member.remove_roles(role_input)
    else:
        await member.add_roles(role_input)
    await ctx.message.delete()
@bot.command(aliases=['grey', 'gray'])
async def grigio(ctx, member : discord.Member = None):
    # Post a greyscale version of the member's avatar (defaults to the author).
    member = member or ctx.author
    file, filename = await opencv.grey(member)
    await ctx.channel.send(file=file)
    os.remove(filename)  # the image is written to disk; clean it up
@bot.command(aliases=['lines'])
async def linee(ctx, member : discord.Member = None):
    # Post a Canny edge-detected version of the member's avatar.
    member = member or ctx.author
    file, filename = await opencv.canny(member)
    await ctx.channel.send(file=file)
    os.remove(filename)  # the image is written to disk; clean it up
@bot.command()
async def buff(ctx, member : discord.Member = None):
    # Post the "rock" edit of the member's avatar.
    member = member or ctx.author
    file, filename = await opencv.rock(member)
    await ctx.channel.send(file=file)
    os.remove(filename)  # the image is written to disk; clean it up
@bot.command(aliases=['pirate'])
async def pirata(ctx, member : discord.Member = None):
    # Post the "pirate" edit of the member's avatar.
    member = member or ctx.author
    file, filename = await opencv.pirate(member)
    await ctx.channel.send(file=file)
    os.remove(filename)  # the image is written to disk; clean it up
@bot.command(aliases=['inspire'])
async def ispira(ctx):
    # Fetch a generated InspiroBot image URL and post it in an embed.
    image_url = get_html('https://inspirobot.me/api?generate=true')
    embed = discord.Embed()
    embed.set_image(url=image_url)
    await ctx.channel.send(get_string(ctx, 'motivante'), embed=embed)
@bot.command(aliases=['mc'])
async def morracinese(ctx, *, scelta : str = None):
    # Rock-paper-scissors where the bot always picks the winning move.
    # Bug fix: the default used to be the ``Ellipsis`` object, so invoking
    # the command without an argument crashed on ``scelta.strip()``, and the
    # "no choice" branch (``scelta.strip().lower() == ...``) was unreachable.
    # ``None`` is now the sentinel for a missing choice.
    if scelta is None:
        msg = 'Siccome non hai messo niente ho vinto io'
    else:
        mossa = scelta.strip().lower()
        if mossa == 'carta':
            msg = 'Ho scelto forbici, ho vinto io'
        elif mossa in ('forbici', 'forbice'):
            msg = 'Ho scelto sasso, ho vinto io'
        elif mossa == 'sasso':
            msg = 'Ho scelto carta, ho vinto io'
        else:
            msg = 'Non ho riconosciuto una opzione valida, ho vinto io'
    await ctx.channel.send(msg)
# Command to download a member's avatar.
@bot.command()
async def avatar(ctx, member : discord.Member):
    # Build an embed whose description links every available avatar size by
    # rewriting the ?size= query parameter of the default (1024px) URL.
    # The embedded newlines are stripped at the end so the links render on
    # one line.
    em = discord.Embed(title=f'Avatar di {member.display_name}', description=f'''{get_string(ctx, 'scaricalo')} [64]({str(member.avatar_url).replace("?size=1024", "?size=64")})
| [128]({str(member.avatar_url).replace("?size=1024", "?size=128")})
| [256]({str(member.avatar_url).replace("?size=1024", "?size=256")})
| [512]({str(member.avatar_url).replace("?size=1024", "?size=512")})
| [1024]({str(member.avatar_url)})
| [2048]({str(member.avatar_url).replace("?size=1024", "?size=2048")})
| [4096]({str(member.avatar_url).replace("?size=1024", "?size=4096")})'''.replace('\n', ""))
    em.set_image(url=str(member.avatar_url))
    await ctx.channel.send(embed=em)
# Ids of currently muted users, preloaded once from the database at startup.
# Idiom fix: build the list with a comprehension instead of the old
# side-effecting ``[silenziati.append(...) for ...]`` pattern.
silenziati = [int(x[0]) for x in use_database('SELECT * FROM silenziati', True)]
@bot.command()
async def mute(ctx, member : discord.Member):
    # Admin-only: mute ``member`` by assigning (creating if needed) the
    # 'Silenziato' role, and record them in the cache and the database.
    global silenziati
    if member.id in set(silenziati):
        await ctx.channel.send(f'{member.display_name} {get_string(ctx, "gia_silenziato")}')
        return
    await check_admin(ctx)
    # Persist the mute in a background thread so the reply is not delayed
    # by the database round-trip.
    Thread(target=lambda:use_database(f"INSERT INTO silenziati VALUES ('{member.id}')", commit=True)).start()
    ROLE_NAME = 'Silenziato'
    guild = ctx.guild
    role = discord.utils.get(ctx.guild.roles, name=ROLE_NAME)
    if not role:
        # First mute on this guild: create the role and deny speaking/sending
        # in every channel.
        perms = discord.Permissions(send_messages=False, speak=False)
        role = await guild.create_role(name=ROLE_NAME, permissions=perms)
        for channel in guild.channels:
            await channel.set_permissions(role, speak=False, send_messages=False)
    await member.add_roles(role)
    silenziati.append(member.id)
    await ctx.channel.send(f'{member.display_name} {get_string(ctx, "silenziato")}')
    await ctx.message.add_reaction('<:evilpepe:837050861586087977>')
@bot.command()
async def unmute(ctx, member : discord.Member):
    # Admin-only: lift the mute from ``member`` — update the cache and the
    # database, drop the 'Silenziato' role, and delete the role itself once
    # nobody is muted any more.
    global silenziati
    if member.id not in set(silenziati):
        await ctx.channel.send(f'{member.display_name} {get_string(ctx, "no_silenziato")}')
        return
    await check_admin(ctx)
    # Remove the member from the persistent mute table in the background:
    Thread(target=lambda:use_database(f"DELETE FROM silenziati WHERE user_id = '{member.id}'", commit=True)).start()
    ROLE_NAME = 'Silenziato'
    role = discord.utils.get(ctx.guild.roles, name=ROLE_NAME)
    # Idiom fix: list.remove() instead of ``del lst[lst.index(x)]``.
    silenziati.remove(member.id)
    if role:
        await member.remove_roles(role)
        if len(silenziati) == 0:
            await role.delete()
    await ctx.channel.send(f'{member.display_name} {get_string(ctx, "ricordato_parlare")}')
    await ctx.message.add_reaction('<:feelsgrugman:837051421102047242>')
@bot.command(aliases=['burn'])
async def brucia(ctx, member : discord.Member = None):
    # Post the "burn" edit of the member's avatar (defaults to the author).
    member = member or ctx.author
    file, filename = await opencv.burn(member)
    await ctx.channel.send(file=file)
    os.remove(filename)  # the image is written to disk; clean it up
@bot.command(aliases=['scegli'])
async def choose(ctx, *, scelte : str = None):
    # Pick one option at random from a comma-separated list.
    opzioni = scelte.split(',') if scelte != None else []
    # With at most one option — or all options identical to the first —
    # there is no real choice to make.
    nessuna_scelta = len(opzioni) <= 1 or all(x.strip() == opzioni[0] for x in opzioni)
    if nessuna_scelta:
        await ctx.channel.send(f'{get_string(ctx, "no_scelta")} <:pepesad:806184708655808543>')
        return
    indice = ra.randint(0, len(opzioni) - 1)
    await ctx.channel.send(opzioni[indice])
@bot.command(aliases=['impersonate'])
async def impersona(ctx, member, *, message):
    """Repost `message` via webhook as if `member` had sent it."""
    await ctx.message.delete()
    try:
        member = await commands.MemberConverter().convert(ctx, member) # If a real member exists, use it
        nome = member.display_name # otherwise use the raw string as the name
        avatar = member.avatar_url # and the default avatar
    except:
        # MemberConverter raises when the argument is not a resolvable member.
        nome = member
        avatar = None
    await send_webhook(ctx, message, nome, avatar)
# Command to change the guild language
@bot.command()
async def lang(ctx : discord.Message, language : str):
    """Set this guild's language ('it', 'en' or 'OwO') and confirm it.

    The database update and cache reload run on a background thread so
    the confirmation message is sent promptly.
    """
    # Confirmation message per supported language code. This replaces three
    # copy-pasted branches that differed only in the code and the reply.
    conferme = {
        'it': 'Lingua messa in italiano!',
        'en': 'Language set to english!',
        'OwO': 'Language set to OwO!',
    }
    if language not in conferme:
        await ctx.channel.send(get_string(ctx, 'no_ling'))
        return
    # Replace any previous entry for this guild, then reload the language cache.
    Thread(target=lambda:[
        use_database(f"DELETE FROM lang WHERE ch_id = {ctx.guild.id}", commit=True),
        use_database(f"INSERT INTO lang VALUES({ctx.guild.id}, '{language}')", commit=True),
        reload_lang()]
        ).start()
    await ctx.channel.send(conferme[language])
@bot.command(aliases=['vm', 'sm', 'show_muted'])
async def visualizza_mutati(ctx):
    """List the display names of all currently muted members (admin only)."""
    await check_admin(ctx)
    msg = ''
    if not set(silenziati):
        msg = get_string(ctx, 'ness_silenziato')
    else:
        # Resolve each stored id back to a member via a mention string.
        converter = commands.MemberConverter()
        for user in set(silenziati):
            member = await converter.convert(ctx, f'<@!{user}>')
            msg += f'> {member.display_name}\n'
    await ctx.channel.send(msg)
#region Musica
@bot.command()
async def join(ctx):
    """Connect the bot to the invoking user's voice channel.

    If already connected elsewhere, move to the author's channel.
    """
    # The author must be in a voice channel for the bot to join.
    if ctx.author.voice is None:
        await ctx.send(get_string(ctx, 'no_can_voc'))
        # Bug fix: previously fell through and crashed on `.channel` below.
        return
    canale = ctx.author.voice.channel
    if ctx.voice_client is None:
        await canale.connect()
    else:
        # Bug fix: an `elif ctx.voice_client: return` made this branch
        # unreachable, and the call was misspelled `ctx.voice_client_move_to`.
        await ctx.voice_client.move_to(canale)
@bot.command()
async def disconnect(ctx):
    """Disconnect the bot from its current voice channel, if any."""
    # Bug fix: ctx.voice_client is None when the bot is not connected and
    # calling .disconnect() on it raised AttributeError.
    if ctx.voice_client is not None:
        await ctx.voice_client.disconnect()
@bot.command()
async def play(ctx, *, url):
    """Play a YouTube video (by URL or search query) in the author's channel."""
    # Join the author's voice channel first.
    await join(ctx)
    # Stop the previous song (a proper queue is still to be implemented).
    ctx.voice_client.stop()
    # FFMPEG options: reconnect on dropped streams, audio only (-vn).
    FFMPEG_OPTIONS = {'before_options' : '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5', 'options' : '-vn'}
    # youtube_dl options: cheapest audio-only format.
    YDL_OPTIONS = {'format': 'worstaudio'}
    vc = ctx.voice_client
    with youtube_dl.YoutubeDL(YDL_OPTIONS) as ydl:
        # If the argument is a link, play it directly
        if re.fullmatch(url_pattern, url):
            info = ydl.extract_info(url, download=False)
            url2 = info['formats'][0]['url']
            title = info['title']
        else:
            # Otherwise search YouTube for the song and take the first hit
            info = ydl.extract_info(f'ytsearch:{url}', download=False)
            url2 = info['entries'][0]['formats'][0]['url']
            title = info['entries'][0]['title']
        # `source` is the extracted audio stream
        source = await discord.FFmpegOpusAudio.from_probe(url2, **FFMPEG_OPTIONS)
        vc.play(source)
        await ctx.send(get_string(ctx, 'now_playing') + title)
@bot.command()
async def pause(ctx):
    """Pause audio playback."""
    # NOTE(review): ctx.voice_client is None when the bot is not in a voice
    # channel — this raises AttributeError; confirm it is acceptable to let
    # the command error handler swallow it.
    ctx.voice_client.pause()
    await ctx.send(get_string(ctx, 'pausa'))
@bot.command()
async def resume(ctx):
    """Resume audio playback after a pause."""
    # NOTE(review): ctx.voice_client may be None when not connected — see pause().
    ctx.voice_client.resume()
    await ctx.send(get_string(ctx, 'riprendi'))
@bot.command()
async def stop(ctx):
    """Stop audio playback."""
    # NOTE(review): ctx.voice_client may be None when not connected — see pause().
    ctx.voice_client.stop()
#endregion
#endregion
#region Sezione intercettazione messaggi
@bot.event
async def on_message(message: discord.Message):
    """Intercept every message: drop muted users, answer trigger phrases."""
    # Ignore messages sent by the bot itself or by webhooks.
    if message.author == bot.user or message.webhook_id:
        return
    # Delete messages from muted users (the bot creator is exempt).
    if message.author.id in set(silenziati) and str(message.author.id) != creator_id:
        await message.delete()
        return
    msg = message.content.replace(' ', '').lower()
    messaggio = switch_messaggi(msg)
    # switch_messaggi returns 404 when there is no canned response.
    if messaggio != 404:
        # If the response is a list, send every entry.
        if isinstance(messaggio, list):
            for m in messaggio:
                # URLs (except YouTube links) are sent as embedded images.
                if re.match(url_pattern, m) and not re.match(youtube_url, m):
                    e = discord.Embed()
                    e.set_image(url=m)
                    await message.channel.send(embed=e)
                else:
                    await message.channel.send(m)
        # ('func', code): exec the code and send the `msgg` it defines.
        # NOTE(review): exec on data from switch_messaggi — safe only if that
        # table is fully trusted; confirm it can never contain user input.
        elif messaggio[0] == 'func':
            loc = {}
            exec(messaggio[1], globals(), loc)
            # NOTE(review): `or` binds looser than `and`, so the guild-id check
            # applies only to the second substring — confirm this is intended.
            if loc['msgg'].__contains__('NON SI BESTEMMIA') or loc['msgg'].__contains__('IL MIO ACERRIMO NEMICO') and message.channel.guild.id == 829765996771803157:
                return
            await message.channel.send(loc['msgg'])
        # ('embed', code): exec the code and send the resulting embed.
        elif messaggio[0] == 'embed':
            loc = {}
            exec(messaggio[1], globals(), loc)
            await message.channel.send(embed=loc['msgg'])
        # Emoji response: delete the original and repost via webhook as the author.
        elif re.match(emoji_patterns, messaggio):
            author = message.author
            try:
                await message.delete()
            except:
                pass
            await send_webhook(message, messaggio, author.display_name, author.avatar_url)
        # Otherwise just send the plain-text response.
        else:
            await message.channel.send(messaggio)
    await bot.process_commands(message) # Hand off to the command system afterwards
#endregion
#region Error handler
@somma.error
@dividi.error
@moltiplica.error
@clean.error
@lang.error
async def somma_error(ctx, error):
    """Shared handler for arithmetic/utility commands: hint at missing args."""
    if isinstance(error, commands.MissingRequiredArgument):
        await ctx.send("Hai messo tutti i parametri? :thinking:")
@warn.error
@mostra_infrazioni.error
@pulisci_fedina.error
@insulta.error
@kick.error
@gaymeter.error
@grigio.error
@linee.error
@buff.error
@avatar.error
@pirata.error
@brucia.error
@mute.error
@unmute.error
async def membro_non_trovato(ctx, error):
    """Shared handler for member-targeted commands: bad or missing member."""
    if isinstance(error, commands.MemberNotFound):
        await ctx.send('Persona non trovata! Ma sei ' + genera_insulto() + '?')
    elif isinstance(error, commands.MissingRequiredArgument):
        await ctx.send('Devi indicare una pesona su cui eseguire questo comando ' + genera_insulto() + '!')
    else:
        # Unexpected errors are only printed to the console.
        print(error)
@cancella_insulto_dalla_lista.error
async def cosa_non_trovata(ctx, error):
    """Handler for delete-insult command: the id argument must be numeric."""
    if isinstance(error, commands.CommandInvokeError):
        await ctx.send('L\' id deve essere un numero ' + genera_insulto().lower() + '!')
#endregion
#region help
from bonus import Help, Tris
# Register the help and tic-tac-toe cogs, then start the bot (blocking call).
bot.add_cog(Help(bot, risposte_dic))
bot.add_cog(Tris(bot))
#endregion
bot.run(TOKEN)
|
ServerSide.py | import socket
import threading
def removeTokenandSendMessage(encryptedmsg):
    """Strip the trailing token character and broadcast to every known client."""
    payload = encryptedmsg[:-1]
    data = payload.encode(character)
    # ipA/ipB are parallel lists of client addresses and ports.
    for addr, port in zip(ipA, ipB):
        server.sendto(data, (addr, port))
def serverSide():
    """Receive datagrams forever and relay each one to all registered clients.

    Protocol: the last character of every message is a token —
    '0' = client joining, '2' = client leaving, anything else = chat.
    """
    global ipA, ipB
    while True:
        # Block until a datagram arrives; the bare except ignores transient
        # socket errors so the server keeps running.
        while True:
            try:
                msgBytes, clientIP = server.recvfrom(BUFFSIZE) # Receive message and sender address
                break
            except:
                pass
        msgAnswer = msgBytes.decode(character)
        token = msgAnswer[-1]
        if token == '0':
            ipA.append(clientIP[0])
            ipB.append(clientIP[1]) # Register the client's address and port
            removeTokenandSendMessage(msgAnswer)
        else:
            removeTokenandSendMessage(msgAnswer)
            if token == '2':
                # Find and remove the departing client from the parallel lists.
                for i in range(0, len(ipA)):
                    if ipA[i] == clientIP[0] and ipB[i] == clientIP[1]:
                        del ipA[i]
                        del ipB[i] # Delete the user's address from the lists
                        print('Usuário removido com sucesso')
                        break
        print(msgAnswer)
ipA = []  # client IP addresses, parallel to ipB
ipB = []  # client ports, parallel to ipA
BUFFSIZE = 16384
HOST = ''  # bind on all interfaces
PORT = 12000
ADDR = (HOST, PORT)
character = "utf-8"  # wire encoding for all messages
server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server.bind(ADDR)
print('Aguardando conexões...')
# Bug fix: `target=serverSide()` CALLED the function immediately on the main
# thread (blocking forever before the Thread was even created) instead of
# passing the callable as the thread target.
ServerSideThread = threading.Thread(target=serverSide)
ServerSideThread.start()
ServerSideThread.join()
|
vesper_recorder.py | """Module containing the `VesperRecorder` class."""
from http.server import HTTPServer, BaseHTTPRequestHandler
from logging import FileHandler, Formatter
from threading import Thread
import datetime
import logging
import math
import os
import wave
import pyaudio
import pytz
from vesper.util.audio_recorder import AudioRecorder, AudioRecorderListener
from vesper.util.bunch import Bunch
from vesper.util.schedule import Schedule
import vesper.util.yaml_utils as yaml_utils
# TODO: Review threads involved in recording (schedule, recorder, and server),
# clarify their responsibilities, and improve error handling and shutdown.
# Implement configuration updates and remote logging and control. How does
# recording relate to Vesper job system (as of this writing it's completely
# independent, but perhaps it should not be)? How does it relate to other
# processing, like detection and classification, that we would like to be
# able to schedule?
_HOME_DIR_VAR_NAME = 'VESPER_RECORDER_HOME'
_LOG_FILE_NAME = 'Vesper Recorder Log.txt'
_CONFIG_FILE_NAME = 'Vesper Recorder Config.yaml'
_AUDIO_FILE_NAME_EXTENSION = '.wav'
_AUDIO_FILE_HEADER_SIZE = 44 # bytes, size of .wav file header
_DEFAULT_STATION_NAME = 'Vesper'
_DEFAULT_LATITUDE = None
_DEFAULT_LONGITUDE = None
_DEFAULT_TIME_ZONE = 'UTC'
_DEFAULT_NUM_CHANNELS = 1
_DEFAULT_SAMPLE_RATE = 22050
_DEFAULT_BUFFER_SIZE = .05
_DEFAULT_TOTAL_BUFFER_SIZE = 60
_DEFAULT_RECORDINGS_DIR_PATH = 'Recordings'
_DEFAULT_MAX_AUDIO_FILE_SIZE = 2**31 # bytes
_DEFAULT_PORT_NUM = 8001
_logger = logging.getLogger(__name__)
class VesperRecorder:
    """Records audio to .wav files according to a schedule."""
    VERSION_NUMBER = '0.2.0a0'
    @staticmethod
    def get_input_devices():
        """Return the available audio input devices."""
        return AudioRecorder.get_input_devices()
    @staticmethod
    def create_and_start_recorder(message):
        """Create and start a recorder from the home-dir config; log `message`."""
        return _create_and_start_recorder(message)
    def __init__(self, config):
        # `config` is a Bunch as produced by _parse_config_file.
        self._config = config
    def start(self):
        """Create the recorder, attach listeners, start the web server, record."""
        c = self._config
        self._recorder = AudioRecorder(
            c.input_device_index, c.num_channels, c.sample_rate, c.buffer_size,
            c.total_buffer_size, c.schedule)
        self._recorder.add_listener(_Logger())
        self._recorder.add_listener(_AudioFileWriter(
            c.station_name, c.recordings_dir_path, c.max_audio_file_size))
        # The status web server runs on a daemon thread so it dies with the process.
        server = _HttpServer(
            c.port_num, c.station_name, c.lat, c.lon, c.time_zone,
            self._recorder, c.recordings_dir_path, c.max_audio_file_size)
        Thread(target=server.serve_forever, daemon=True).start()
        self._recorder.start()
    def wait(self, timeout=None):
        """Block until recording finishes or `timeout` seconds elapse."""
        self._recorder.wait(timeout)
    def stop(self):
        """Stop the recorder."""
        self._recorder.stop()
def _create_and_start_recorder(message):
    """Create and start a recorder from the home-directory configuration.

    Logs `message` once file logging is set up. Returns the started
    `VesperRecorder`, or None if any step fails (errors are logged).
    """
    home_dir_path = os.environ.get(_HOME_DIR_VAR_NAME)
    # Check that home directory path environment variable is set.
    if home_dir_path is None:
        _logger.error(
            'Required {} environment variable is not set.'.format(
                _HOME_DIR_VAR_NAME))
        return None
    # Check that home directory exists.
    if not os.path.exists(home_dir_path):
        _logger.error(
            'Recorder home directory "{}" does not exist.'.format(
                home_dir_path))
        return None
    # Now that we know that we have a home directory, and hence a place
    # for a log file, add file logging.
    _add_file_logging(home_dir_path)
    _logger.info(message)
    _logger.info(
        'Recorder version number is {}.'.format(VesperRecorder.VERSION_NUMBER))
    config_file_path = os.path.join(home_dir_path, _CONFIG_FILE_NAME)
    # Check that configuration file exists.
    if not os.path.exists(config_file_path):
        _logger.error(
            'Recorder configuration file "{}" does not exist.'.format(
                config_file_path))
        return None
    # Parse configuration file.
    try:
        config = _parse_config_file(
            config_file_path, home_dir_path)
    except Exception as e:
        _logger.error((
            'Could not parse recorder configuration file "{}". Error '
            'message was: {}').format(config_file_path, str(e)))
        return None
    _logger.info(
        'Starting recorder with HTTP server at port {}.'.format(
            config.port_num))
    # Create recorder.
    try:
        recorder = VesperRecorder(config)
    except Exception as e:
        _logger.error(
            'Could not create recorder. Error message was: {}'.format(str(e)))
        return None
    # Start recorder.
    try:
        recorder.start()
    except Exception as e:
        _logger.error(
            'Could not start recorder. Error message was: {}'.format(str(e)))
        return None
    # Phew. We made it!
    return recorder
def _add_file_logging(home_dir_path):
    """Attach a handler that appends recorder log messages to the log file."""
    log_file_path = os.path.join(home_dir_path, _LOG_FILE_NAME)
    file_handler = FileHandler(log_file_path)
    file_handler.setFormatter(
        Formatter('%(asctime)s %(name)s %(levelname)s %(message)s'))
    # Attach to the root logger so all module loggers write to the file.
    logging.getLogger().addHandler(file_handler)
def _parse_config_file(file_path, home_dir_path):
    """Load the YAML config file and return a `Bunch` of recorder settings.

    Missing keys fall back to the module-level `_DEFAULT_*` values; a
    relative recordings path is resolved against `home_dir_path`.
    """
    with open(file_path) as f:
        config = yaml_utils.load(f)
    station_name = config.get('station', _DEFAULT_STATION_NAME)
    lat = config.get('latitude', _DEFAULT_LATITUDE)
    if lat is not None:
        lat = float(lat)
    lon = config.get('longitude', _DEFAULT_LONGITUDE)
    if lon is not None:
        lon = float(lon)
    time_zone = pytz.timezone(config.get('time_zone', _DEFAULT_TIME_ZONE))
    input_device_index = _get_input_device_index(config.get('input_device'))
    num_channels = int(config.get('num_channels', _DEFAULT_NUM_CHANNELS))
    sample_rate = int(config.get('sample_rate', _DEFAULT_SAMPLE_RATE))
    buffer_size = float(config.get('buffer_size', _DEFAULT_BUFFER_SIZE))
    total_buffer_size = \
        float(config.get('total_buffer_size', _DEFAULT_TOTAL_BUFFER_SIZE))
    # Schedule compilation may use the station location (e.g. for sunset times).
    schedule_dict = config.get('schedule', {})
    schedule = Schedule.compile_dict(
        schedule_dict, latitude=lat, longitude=lon, time_zone=time_zone)
    recordings_dir_path = config.get(
        'recordings_dir_path', _DEFAULT_RECORDINGS_DIR_PATH)
    if not os.path.isabs(recordings_dir_path):
        recordings_dir_path = os.path.join(home_dir_path, recordings_dir_path)
    max_audio_file_size = config.get(
        'max_audio_file_size', _DEFAULT_MAX_AUDIO_FILE_SIZE)
    port_num = int(config.get('port_num', _DEFAULT_PORT_NUM))
    return Bunch(
        station_name=station_name,
        lat=lat,
        lon=lon,
        time_zone=time_zone,
        input_device_index=input_device_index,
        num_channels=num_channels,
        sample_rate=sample_rate,
        buffer_size=buffer_size,
        total_buffer_size=total_buffer_size,
        schedule=schedule,
        recordings_dir_path=recordings_dir_path,
        max_audio_file_size=max_audio_file_size,
        port_num=port_num)
def _get_input_device_index(device):
if device is None:
return _get_default_input_device_index()
else:
try:
return int(device)
except ValueError:
return _get_input_device_index_from_device_name(device)
def _get_default_input_device_index():
    """Return the index of the system's default PyAudio input device.

    Raises ValueError if no default input device is available.
    """
    pa = pyaudio.PyAudio()
    try:
        info = pa.get_default_input_device_info()
    except IOError:
        raise ValueError('Could not get default input device info.')
    finally:
        # Always release the PyAudio instance.
        pa.terminate()
    return info['index']
def _get_input_device_index_from_device_name(name):
    """Return the index of the single input device whose name contains `name`.

    Raises ValueError when no input devices exist, when no name matches,
    or when the match is ambiguous.
    """
    pa = pyaudio.PyAudio()
    # Collect every device info, then release PyAudio.
    all_infos = [
        pa.get_device_info_by_index(i) for i in range(pa.get_device_count())]
    pa.terminate()
    # Keep only devices that can record audio.
    input_infos = [info for info in all_infos if info['maxInputChannels'] != 0]
    if not input_infos:
        raise ValueError('No input devices were found.')
    # Find devices whose names include `name`; require exactly one match.
    matches = [info for info in input_infos if name in info['name']]
    if not matches:
        raise ValueError(
            'No input device name includes "{}".'.format(name))
    if len(matches) > 1:
        raise ValueError(
            'More than one input device name includes "{}".'.format(name))
    return matches[0]['index']
class _Logger(AudioRecorderListener):
    """Recorder listener that logs start/stop and input overflow events.

    Overflow messages are coalesced: one message when an overflow episode
    starts and one summarizing message when it ends, instead of one per
    buffer.
    """
    def __init__(self):
        super().__init__()
        # Number of consecutive buffers with PyAudio-reported overflow.
        self._pyaudio_overflow_buffer_count = 0
        # Number of sample frames zero-filled during a recorder overflow episode.
        self._num_recorder_overflow_frames = 0
    def recording_started(self, recorder, time):
        self._sample_rate = recorder.sample_rate
        _logger.info('Started recording.')
    def input_arrived(
            self, recorder, time, samples, num_frames, pyaudio_overflow):
        self._log_pyaudio_overflow_if_needed(pyaudio_overflow)
        # Input arrived normally, so any recorder overflow episode has ended.
        self._log_recorder_overflow_if_needed(False)
    def _log_pyaudio_overflow_if_needed(self, overflow):
        """Track a PyAudio overflow episode, logging at its start and end."""
        if overflow:
            if self._pyaudio_overflow_buffer_count == 0:
                # overflow has just started
                _logger.error(
                    'PyAudio input overflow: PyAudio has reported that '
                    'an unspecified number of input samples were dropped '
                    'before or during the current buffer. A second message '
                    'will be logged later indicating the number of '
                    'consecutive buffers for which this error occurred.')
            self._pyaudio_overflow_buffer_count += 1
        else:
            if self._pyaudio_overflow_buffer_count > 0:
                # overflow has just ended
                if self._pyaudio_overflow_buffer_count == 1:
                    _logger.error(
                        'PyAudio input overflow: Overflow was reported for '
                        'one buffer.')
                else:
                    _logger.error((
                        'PyAudio input overflow: Overflow was reported for '
                        '{} consecutive buffers.').format(
                            self._pyaudio_overflow_buffer_count))
            self._pyaudio_overflow_buffer_count = 0
    def _log_recorder_overflow_if_needed(self, overflow, num_frames=0):
        """Track a recorder overflow episode, logging at its start and end."""
        if overflow:
            if self._num_recorder_overflow_frames == 0:
                # overflow has just started
                _logger.error(
                    'Recorder input overflow: The recorder has run out of '
                    'buffers for arriving input samples. It will substitute '
                    'zero samples until buffers become available, and then '
                    'log another message to report the duration of the lost '
                    'samples.')
            self._num_recorder_overflow_frames += num_frames
        else:
            if self._num_recorder_overflow_frames > 0:
                # overflow has just ended
                _logger.error((
                    'Recorder input overflow: {:.3f} seconds of zero samples '
                    'were substituted for lost input samples.').format(
                        self._num_recorder_overflow_frames / self._sample_rate)
                )
            self._num_recorder_overflow_frames = 0
    def input_overflowed(self, recorder, time, num_frames, pyaudio_overflow):
        self._log_pyaudio_overflow_if_needed(pyaudio_overflow)
        self._log_recorder_overflow_if_needed(True, num_frames)
    def recording_stopped(self, recorder, time):
        # Flush any overflow episodes that were still in progress.
        self._log_pyaudio_overflow_if_needed(False)
        self._log_recorder_overflow_if_needed(False)
        _logger.info('Stopped recording.')
class _AudioFileWriter(AudioRecorderListener):
    """Recorder listener that writes incoming samples to .wav files.

    Rolls over to a new file whenever the current one reaches the
    configured maximum size; overflowed (lost) input is written as zeros.
    """
    def __init__(self, station_name, recordings_dir_path, max_file_size):
        super().__init__()
        self._station_name = station_name
        self._recordings_dir_path = recordings_dir_path
        self._max_file_size = max_file_size
        # Create recordings directory if needed.
        os.makedirs(self._recordings_dir_path, exist_ok=True)
    def recording_starting(self, recorder, time):
        """Cache audio format info and derive per-file frame limits."""
        self._num_channels = recorder.num_channels
        self._sample_rate = recorder.sample_rate
        self._sample_size = recorder.sample_size
        self._frame_size = self._num_channels * self._sample_size
        # Reusable zero buffer for substituting lost (overflowed) input.
        self._zeros = bytearray(recorder.frames_per_buffer * self._frame_size)
        max_num_audio_bytes = self._max_file_size - _AUDIO_FILE_HEADER_SIZE
        self._max_num_file_frames = \
            int(math.floor(max_num_audio_bytes / self._frame_size))
        self._file_namer = _AudioFileNamer(
            self._station_name, _AUDIO_FILE_NAME_EXTENSION)
        # The first file is opened lazily when the first samples arrive.
        self._file = None
    def input_arrived(
            self, recorder, time, samples, num_frames, pyaudio_overflow):
        self._write_samples(time, samples, num_frames)
    def _write_samples(self, time, samples, num_frames):
        """Write `num_frames` frames, opening/rolling files as size limits hit."""
        num_frames_remaining = num_frames
        buffer_index = 0
        while num_frames_remaining != 0:
            if self._file is None:
                self._file = self._open_audio_file(time)
                self._num_file_frames = 0
            # Write at most what still fits in the current file.
            num_frames = min(
                num_frames_remaining,
                self._max_num_file_frames - self._num_file_frames)
            num_bytes = num_frames * self._frame_size
            # TODO: We assume here that the sample bytes are in
            # little-endian order, but perhaps we shouldn't.
            self._file.writeframes(
                samples[buffer_index:buffer_index + num_bytes])
            num_frames_remaining -= num_frames
            self._num_file_frames += num_frames
            buffer_index += num_bytes
            if self._num_file_frames == self._max_num_file_frames:
                # File is full: close it; the next iteration opens a new one.
                self._file.close()
                self._file = None
    def input_overflowed(self, recorder, time, num_frames, pyaudio_overflow):
        # Substitute zeros for the lost frames so file timing stays correct.
        self._write_samples(time, self._zeros, num_frames)
    def _open_audio_file(self, time):
        """Open a new .wav file named for `time` with the cached audio format."""
        file_name = self._file_namer.create_file_name(time)
        file_path = os.path.join(self._recordings_dir_path, file_name)
        file_ = wave.open(file_path, 'wb')
        file_.setnchannels(self._num_channels)
        file_.setframerate(self._sample_rate)
        file_.setsampwidth(self._sample_size)
        return file_
    def recording_stopped(self, recorder, time):
        if self._file is not None:
            self._file.close()
class _AudioFileNamer:
def __init__(self, station_name, file_name_extension):
self.station_name = station_name
self.file_name_extension = file_name_extension
def create_file_name(self, start_time):
time = start_time.strftime('%Y-%m-%d_%H.%M.%S')
return '{}_{}_Z{}'.format(
self.station_name, time, self.file_name_extension)
class _HttpServer(HTTPServer):
    """HTTP server that serves the recorder status page.

    The recording data are stashed on the server instance so that
    `_HttpRequestHandler` can reach them through `self.server`.
    """
    def __init__(
            self, port_num, station_name, lat, lon, time_zone, recorder,
            recordings_dir_path, max_audio_file_size):
        # Bind on all interfaces at the configured port.
        address = ('', port_num)
        super().__init__(address, _HttpRequestHandler)
        self._recording_data = Bunch(
            station_name=station_name,
            lat=lat,
            lon=lon,
            time_zone=time_zone,
            recorder=recorder,
            recordings_dir_path=recordings_dir_path,
            max_audio_file_size=max_audio_file_size
        )
_PAGE = '''<!DOCTYPE html>
<html>
<head>
<title>Vesper Recorder</title>
{}
</head>
<body>
<h1>Vesper Recorder {}</h1>
<p>
Welcome to the Vesper Recorder! This page displays information regarding
your recorder. Refresh the page to update the information.
</p>
<h2>Recording Status</h2>
{}
<h2>Station Configuration</h2>
{}
<h2>Input Devices</h2>
{}
<h2>Input Configuration</h2>
{}
<h2>Output Configuration</h2>
{}
<h2>Scheduled Recordings</h2>
{}
</body>
</html>
'''
_CSS = '''
<style>
h2 {
margin-top: 30px;
margin-bottom: 5px;
}
table {
border-collapse: collapse;
width: 600px;
}
td, th {
border: 1px solid #a0a0a0;
text-align: left;
padding: 8px;
}
tr:nth-child(even) {
background-color: #d0d0d0;
}
</style>
'''
class _HttpRequestHandler(BaseHTTPRequestHandler):
    """Serves the recorder status page at '/'; everything else is 404."""
    def do_GET(self):
        if self.path == '/':
            body = self._create_status_page_body()
            self.send_response(200, 'OK')
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            self.wfile.write(body)
        else:
            self.send_response(404, 'Not Found')
            self.end_headers()
    def _create_status_page_body(self):
        """Assemble the full status page and return it as UTF-8 bytes."""
        # The recording data Bunch was stashed on the server by _HttpServer.
        data = self.server._recording_data
        recorder = data.recorder
        now = datetime.datetime.now(tz=pytz.utc)
        status_table = self._create_status_table(data, recorder, now)
        station_table = self._create_station_table(data)
        devices = recorder.get_input_devices()
        devices_table = self._create_devices_table(devices)
        input_table = self._create_input_table(devices, recorder)
        output_table = self._create_output_table(data)
        recordings_table = self._create_recordings_table(
            recorder.schedule, data.time_zone, now)
        body = _PAGE.format(
            _CSS, VesperRecorder.VERSION_NUMBER, status_table, station_table,
            devices_table, input_table, output_table, recordings_table)
        return body.encode()
    def _create_status_table(self, data, recorder, now):
        """Render current time, recording flag, and the current/next interval."""
        time_zone = data.time_zone
        time = _format_datetime(now, time_zone)
        recording = 'Yes' if recorder.recording else 'No'
        interval = self._get_status_schedule_interval(recorder.schedule, now)
        if interval is None:
            # No more scheduled recordings.
            prefix = 'Next'
            start_time = 'None'
            end_time = 'None'
        else:
            start_time = _format_datetime(interval.start, time_zone)
            end_time = _format_datetime(interval.end, time_zone)
            prefix = 'Current' if interval.start <= now else 'Next'
        rows = (
            ('Time', time),
            ('Recording', recording),
            (prefix + ' Recording Start Time', start_time),
            (prefix + ' Recording End Time', end_time)
        )
        return _create_table(rows)
    def _get_status_schedule_interval(self, schedule, time):
        """Return the first schedule interval at or after `time`, or None."""
        intervals = schedule.get_intervals(start=time)
        try:
            return next(intervals)
        except StopIteration:
            return None
    def _create_station_table(self, data):
        rows = (
            ('Station Name', data.station_name),
            ('Latitude (degrees north)', data.lat),
            ('Longitude (degrees east)', data.lon),
            ('Time Zone', str(data.time_zone)))
        return _create_table(rows)
    def _create_devices_table(self, devices):
        """Render available input devices, starring the selected one."""
        if len(devices) == 0:
            return '<p>No input devices were found.</p>'
        else:
            recorder = self.server._recording_data.recorder
            selected_device_index = recorder.input_device_index
            rows = [
                self._create_devices_table_row(d, selected_device_index)
                for d in devices]
            header = ('Index', 'Name', 'Number of Channels')
            table = _create_table(rows, header)
            if selected_device_index < len(devices):
                table += '<p>* Selected input device.</p>'
            return table
    def _create_devices_table_row(self, device, selected_device_index):
        # The selected device gets a '*' prefix in the index column.
        prefix = '*' if device.index == selected_device_index else ''
        return (
            prefix + str(device.index), device.name, device.num_input_channels)
    def _create_input_table(self, devices, recorder):
        device_index = recorder.input_device_index
        if device_index < len(devices):
            device_name = devices[device_index].name
        else:
            message = 'There is no input device with index {}.'
            device_name = message.format(device_index)
        rows = (
            ('Device Index', device_index),
            ('Device Name', device_name),
            ('Number of Channels', recorder.num_channels),
            ('Sample Rate (Hz)', recorder.sample_rate),
            ('Buffer Size (seconds)', recorder.buffer_size)
        )
        return _create_table(rows)
    def _create_output_table(self, data):
        recordings_dir_path = os.path.abspath(data.recordings_dir_path)
        rows = (
            ('Recordings Directory', recordings_dir_path),
            ('Max Audio File Size (bytes)', data.max_audio_file_size)
        )
        return _create_table(rows)
    def _create_recordings_table(self, schedule, time_zone, now):
        """Render every scheduled interval with its past/current/future status."""
        rows = [
            self._create_recordings_table_row(index, interval, time_zone, now)
            for index, interval in enumerate(schedule.get_intervals())]
        header = ('Index', 'Start Time', 'End Time', 'Status')
        return _create_table(rows, header)
    def _create_recordings_table_row(self, index, interval, time_zone, now):
        start_time = _format_datetime(interval.start, time_zone)
        end_time = _format_datetime(interval.end, time_zone)
        if now > interval.end:
            status = 'Past'
        elif now < interval.start:
            status = 'Future'
        else:
            status = 'Current'
        return (index, start_time, end_time, status)
def _format_datetime(dt, time_zone=None):
if time_zone is not None:
dt = dt.astimezone(time_zone)
return dt.strftime('%Y-%m-%d %H:%M:%S %Z')
def _create_table(rows, header=None):
    """Render `rows` (and an optional header row) as an HTML table."""
    parts = ['<table>\n', _create_table_header(header)]
    parts.extend(_create_table_row(row) for row in rows)
    parts.append('</table>\n')
    return ''.join(parts)
def _create_table_header(items):
    """Render a header row, or the empty string when there is no header."""
    if items is None:
        return ''
    return _create_table_row(items, 'h')
def _create_table_row(items, tag_letter='d'):
    """Render one <tr>, wrapping each item in a <td> (or <th>) cell."""
    cells = [_create_table_item(item, tag_letter) for item in items]
    return ' <tr>\n' + ''.join(cells) + ' </tr>\n'
def _create_table_item(item, tag_letter):
return ' <t{}>{}</t{}>\n'.format(tag_letter, item, tag_letter)
|
abi_thread.py | #importing threading module
#to create threads
#threads are light weight process used for multi tasking
import threading
def division(a, b):
    """Print the true (floating-point) quotient of a and b."""
    quotient = a / b
    print("Normal as usual division:")
    print(quotient)
def floordiv(a, b):
    """Print the floor (integer-rounded) quotient of a and b."""
    quotient = a // b
    print("Floor rounded off division:")
    print(quotient)
# Read the two operands interactively from stdin.
print("Enter two numbers:")
print("Number 1:")
num1=int(input())
print("Number 2:")
num2=int(input())
# Create two threads; `target` is the function the thread will run and
# `args` its positional arguments.
s1 = threading.Thread(target=division, args=(num1,num2))
s2 = threading.Thread(target=floordiv, args=(num1,num2))
# Start both threads (they run concurrently).
s1.start()
s2.start()
# Wait until s1 completes its task.
s1.join()
# Wait until s2 completes its task.
s2.join()
|
PlayGame.py | import pygame, numpy, threading, timeit
from .SceneBase import SceneBase
from .DrawingUtils import *
from models.game import Game, Board, Move, TimeLimitedBot
from services import ImageService, FontService, SceneManager, SettingsService as Settings
class PlayGame(SceneBase):
"""
This scene shows a graphical representation of a game between two players
If one or both of the players are human, then it allows that player to make moves with a mouse
"""
    def __init__(self, player1, player2):
        """Set up the game model, rendering constants, and the bot-move closure."""
        SceneBase.__init__(self)
        # data needed to play the game
        self.game = Game(player1, player2)
        # game object needs to be locked when the board is being rendered or when Bot players are ready to make a move
        self.game_lock = threading.Lock()
        self.bot_is_thinking = False
        self.bot_start_time = timeit.default_timer()
        self.ghost_move = None # this Move object is used to show human players where their mouse is hovering
        # calculate constants used for rendering
        # (these are all done in the fixed transform space, so we can safely use constants)
        self.MARGIN = 96
        self.CELL_SIZE = 83
        self.CELL_SPACING = 10
        self.LOCAL_BOARD_SPACING = 25
        # Board area is right-aligned within a 1920-wide fixed transform space.
        self.BOARD_AREA_X = 1920 - self.MARGIN - 9*(self.CELL_SIZE + self.CELL_SPACING) - 2*self.LOCAL_BOARD_SPACING
        self.BOARD_AREA_Y = self.MARGIN
        # bounding boxes for player info
        self.P1_BOX = pygame.Rect(self.MARGIN, self.MARGIN, 1920 - 3*self.MARGIN - self.BOARD_AREA_X,
                                  3*(self.CELL_SIZE + self.CELL_SPACING) - self.LOCAL_BOARD_SPACING )
        self.P2_BOX = pygame.Rect(self.MARGIN, self.MARGIN + 6*(self.CELL_SIZE + self.CELL_SPACING) + 2*self.LOCAL_BOARD_SPACING,
                                  1920 - 3*self.MARGIN - self.BOARD_AREA_X, 3*(self.CELL_SIZE + self.CELL_SPACING) - self.LOCAL_BOARD_SPACING )
        # text for player boxes
        self.FONT_SIZE = 48
        font_color = Settings.theme['font']
        self.p1_name = FontService.get_regular_font(self.FONT_SIZE)
        self.p1_name_surface = self.p1_name.render(self.game.player1.name, False, font_color)
        self.p1_name_size = self.p1_name.size(self.game.player1.name)
        self.p1_name_location = (self.P1_BOX.centerx - 0.5 * self.p1_name_size[0], self.P1_BOX.top + 0.5 * self.p1_name_size[1] + 10)
        self.p2_name = FontService.get_regular_font(self.FONT_SIZE)
        self.p2_name_surface = self.p2_name.render(self.game.player2.name, False, font_color)
        self.p2_name_size = self.p2_name.size(self.game.player2.name)
        self.p2_name_location = (self.P2_BOX.centerx - 0.5 * self.p2_name_size[0], self.P2_BOX.top + 0.5 * self.p2_name_size[1] + 10)
        # Pre-scale the cell sprites once to the rendered cell size.
        self.cell_sprites = ImageService.get_board_cell_sprites()
        for key in self.cell_sprites.keys():
            self.cell_sprites[key] = pygame.transform.scale(self.cell_sprites[key], (self.CELL_SIZE, self.CELL_SIZE))
        # compute cell bounding boxes - Each element is a 4-tuple (left, top, right, bottom)
        self.cell_locations = numpy.empty((3, 3, 3, 3), object)
        for i in list(range(0, 9)):
            metarow = i // 3
            row = i % 3
            for j in list(range(0, 9)):
                metacol = j // 3
                col = j % 3
                # compute the location of the cell in the grid and shift it into the board area
                location_x = (metacol * 3 + col)*(self.CELL_SIZE + self.CELL_SPACING) \
                             + self.LOCAL_BOARD_SPACING*metacol \
                             + self.BOARD_AREA_X
                location_y = (metarow * 3 + row) * (self.CELL_SIZE + self.CELL_SPACING) \
                             + self.LOCAL_BOARD_SPACING * metarow \
                             + self.BOARD_AREA_Y
                self.cell_locations[metarow][metacol][row][col] = (location_x, location_y, location_x + self.CELL_SIZE, location_y + self.CELL_SIZE)
        def make_bot_move():
            # Runs on a worker thread (see update()); flags bracket the
            # computation so update() does not spawn a second bot thread.
            self.bot_is_thinking = True
            self.bot_start_time = timeit.default_timer()
            move = self.game.active_player.compute_next_move(self.game.board, self.game.get_valid_moves())
            self.game_lock.acquire()
            self.game.make_move(move)
            self.game_lock.release()
            self.bot_is_thinking = False
        self.make_bot_move = make_bot_move
    def process_input(self, events, pressed_keys):
        """Forward events to widgets; handle mouse hover/click for human moves."""
        for widget in self.widgets:
            widget.process_input(events, pressed_keys)
        # if the current player is a human, then respond to mouse events
        if not self.game.active_player.is_bot():
            for event in events:
                if event.type == pygame.MOUSEMOTION:
                    # highlight the move that's about to be selected if the mouse moves over a cell
                    self.game_lock.acquire() # acquire a lock while reading the board to get valid moves
                    valid_moves = self.game.get_valid_moves()
                    self.game_lock.release()
                    location = event.pos
                    ghost_move_found = False # used to clear ghost marker if ghost move is not found
                    for move in valid_moves:
                        cell_location = self.cell_locations[move.metarow][move.metacol][move.row][move.col]
                        # check if mouse motion is within bounding box of cell
                        if cell_location[0] <= location[0] <= cell_location[2] and cell_location[1] <= location[1] <= cell_location[3]:
                            self.ghost_move = move
                            ghost_move_found = True
                    if not ghost_move_found:
                        self.ghost_move = None
                elif event.type == pygame.MOUSEBUTTONDOWN:
                    # Clicking while hovering a valid cell commits that move.
                    if self.ghost_move:
                        self.game.make_move(self.ghost_move)
def update(self):
    """Per-frame update.

    Transitions to the game-completed scene when the game is over, and
    starts a background bot-move thread when it is a bot's turn and no
    bot computation is already in flight.
    """
    if self.game.is_game_over():
        SceneManager.go_to_game_completed(self, self.game)
    # Read the active player under the lock: the bot thread may be
    # switching turns concurrently via make_move().
    with self.game_lock:
        bots_turn = self.game.active_player.is_bot()
    if bots_turn and not self.bot_is_thinking and not self.game.is_game_over():
        thread = threading.Thread(target=self.make_bot_move)
        # Daemon threads are stopped when the main thread terminates.
        # FIX: Thread.setDaemon() is deprecated since Python 3.10 —
        # assign the `daemon` attribute instead (identical behavior).
        thread.daemon = True
        thread.start()
def render(self, screen):
    """Draw the full game scene onto `screen`.

    Order: background, player-1 info box (with optional bot countdown),
    player-2 info box, the 9x9 board grid, an optional ghost-move marker,
    then the widgets. Board state is read under `game_lock` because the
    bot thread may be mutating the game concurrently.
    """
    bg = ImageService.get_game_bg()
    screen.blit(bg, (0, 0))
    # render the info box for player1
    border_color = Settings.theme['primary'] if self.game.active_player.number == Board.X else Settings.theme['widget_highlight']
    # draw box
    aa_border_rounded_rect(screen, self.P1_BOX, Settings.theme['widget_background'], border_color)
    screen.blit(self.p1_name_surface, self.p1_name_location)  # player name
    # render the timestamp for player 1
    # NOTE(review): the timer only shows for TimeLimitedBot players —
    # humans never display a countdown; presumably intended.
    timestamp = FontService.get_regular_font(self.FONT_SIZE)
    if isinstance(self.game.active_player, TimeLimitedBot) and self.game.active_player.number == Board.X:
        time_left = -1  # negative renders as "Unlimited" (see seconds_to_timestamp)
        if self.game.active_player.is_bot():
            now = timeit.default_timer()
            time_left = self.game.active_player.time_limit - (now - self.bot_start_time)
        time_string = seconds_to_timestamp(time_left)
        p1_time = timestamp.render(time_string, False, Settings.theme['font'])
        p1_time_size = timestamp.size(time_string)
        # horizontally centered in the box, anchored 10px above its bottom
        p1_time_location = (self.P1_BOX.centerx - 0.5 * p1_time_size[0], self.P1_BOX.bottom - p1_time_size[1] - 10)
        screen.blit(p1_time, p1_time_location)
    # render the info box for player2
    border_color = Settings.theme['secondary'] if self.game.active_player.number == Board.O else Settings.theme['widget_highlight']
    # draw box
    aa_border_rounded_rect(screen, self.P2_BOX, Settings.theme['widget_background'], border_color)
    screen.blit(self.p2_name_surface, self.p2_name_location)  # player 2's name
    # render the timestamp for player 2 (mirrors player 1 above)
    if isinstance(self.game.active_player, TimeLimitedBot) and self.game.active_player.number == Board.O:
        time_left = -1
        if self.game.active_player.is_bot():
            now = timeit.default_timer()
            time_left = self.game.active_player.time_limit - (now - self.bot_start_time)
        time_string = seconds_to_timestamp(time_left)
        p2_time = timestamp.render(time_string, False, Settings.theme['font'])
        p2_time_size = timestamp.size(time_string)
        p2_time_location = (self.P2_BOX.centerx - 0.5 * p2_time_size[0], self.P2_BOX.bottom - p2_time_size[1] - 10)
        screen.blit(p2_time, p2_time_location)
    # render the board
    self.game_lock.acquire()  # need to read values from the board while rendering
    valid_moves = self.game.get_valid_moves()
    current_player = self.game.active_player
    current_player_symbol = self.game.active_player.number
    # i/j each enumerate the 9 (meta, local) row/column pairs
    for i in list(range(0, 9)):
        metarow = i // 3
        row = i % 3
        for j in list(range(0, 9)):
            metacol = j // 3
            col = j % 3
            board_winner = self.game.board.check_cell(metarow, metacol)
            cell_owner = self.game.board.check_small_cell(metarow, metacol, row, col)
            # candidate move for membership test against valid_moves
            move_object = Move(current_player_symbol, metarow, metacol, row, col)
            # compute the location of the cell in the grid and shift it into the board area
            location = self.cell_locations[metarow][metacol][row][col]
            location_x, location_y = location[0], location[1]
            # render the correct background for the cell:
            # won local board > valid-move highlight > blank
            if board_winner == Board.X :
                screen.blit(self.cell_sprites['p1_won'], (location_x, location_y))
            elif board_winner == Board.O:
                screen.blit(self.cell_sprites['p2_won'], (location_x, location_y))
            elif move_object in valid_moves:
                if current_player.number == Board.X:
                    screen.blit(self.cell_sprites['p1_highlight'], (location_x, location_y))
                if current_player.number == Board.O:
                    screen.blit(self.cell_sprites['p2_highlight'], (location_x, location_y))
            else:
                screen.blit(self.cell_sprites['blank'], (location_x, location_y))
            # render the cell's owner:
            if cell_owner == Board.X:
                screen.blit(self.cell_sprites['p1_marker'], (location_x, location_y))
            elif cell_owner == Board.O:
                screen.blit(self.cell_sprites['p2_marker'], (location_x, location_y))
    # render a ghost move if there is one:
    if self.ghost_move is not None:
        move_location = self.cell_locations[self.ghost_move.metarow][self.ghost_move.metacol][self.ghost_move.row][self.ghost_move.col]
        if self.ghost_move.player == Board.X:
            screen.blit(self.cell_sprites['p1_marker'], (move_location[0], move_location[1]))
        else:
            screen.blit(self.cell_sprites['p2_marker'], (move_location[0], move_location[1]))
    self.game_lock.release()  # rendering is done
    for widget in self.widgets:
        widget.render(screen)
def seconds_to_timestamp(seconds):
    """Format a duration in seconds as an "M:SS" countdown string.

    Negative durations mean there is no time limit and render as
    "Unlimited". The value is rounded to the nearest whole second.
    """
    if seconds < 0:
        return "Unlimited"
    minutes, secs = divmod(round(seconds), 60)
    return f"{minutes}:{secs:02d}"
|
test_util_test.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.test_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import random
import threading
import unittest
import weakref
from absl.testing import parameterized
import numpy as np
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.compat import compat
from tensorflow.python.eager import context
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops # pylint: disable=unused-import
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class TestUtilTest(test_util.TensorFlowTestCase, parameterized.TestCase):
def test_assert_ops_in_graph(self):
with ops.Graph().as_default():
constant_op.constant(["hello", "taffy"], name="hello")
test_util.assert_ops_in_graph({"hello": "Const"}, ops.get_default_graph())
self.assertRaises(ValueError, test_util.assert_ops_in_graph,
{"bye": "Const"}, ops.get_default_graph())
self.assertRaises(ValueError, test_util.assert_ops_in_graph,
{"hello": "Variable"}, ops.get_default_graph())
@test_util.run_deprecated_v1
def test_session_functions(self):
with self.test_session() as sess:
sess_ref = weakref.ref(sess)
with self.cached_session(graph=None, config=None) as sess2:
# We make sure that sess2 is sess.
assert sess2 is sess
# We make sure we raise an exception if we use cached_session with
# different values.
with self.assertRaises(ValueError):
with self.cached_session(graph=ops.Graph()) as sess2:
pass
with self.assertRaises(ValueError):
with self.cached_session(force_gpu=True) as sess2:
pass
# We make sure that test_session will cache the session even after the
# with scope.
assert not sess_ref()._closed
with self.session() as unique_sess:
unique_sess_ref = weakref.ref(unique_sess)
with self.session() as sess2:
assert sess2 is not unique_sess
# We make sure the session is closed when we leave the with statement.
assert unique_sess_ref()._closed
def test_assert_equal_graph_def(self):
with ops.Graph().as_default() as g:
def_empty = g.as_graph_def()
constant_op.constant(5, name="five")
constant_op.constant(7, name="seven")
def_57 = g.as_graph_def()
with ops.Graph().as_default() as g:
constant_op.constant(7, name="seven")
constant_op.constant(5, name="five")
def_75 = g.as_graph_def()
# Comparing strings is order dependent
self.assertNotEqual(str(def_57), str(def_75))
# assert_equal_graph_def doesn't care about order
test_util.assert_equal_graph_def(def_57, def_75)
# Compare two unequal graphs
with self.assertRaisesRegex(AssertionError,
r"^Found unexpected node '{{node seven}}"):
test_util.assert_equal_graph_def(def_57, def_empty)
def testIsGoogleCudaEnabled(self):
# The test doesn't assert anything. It ensures the py wrapper
# function is generated correctly.
if test_util.IsGoogleCudaEnabled():
print("GoogleCuda is enabled")
else:
print("GoogleCuda is disabled")
def testIsMklEnabled(self):
# This test doesn't assert anything.
# It ensures the py wrapper function is generated correctly.
if test_util.IsMklEnabled():
print("MKL is enabled")
else:
print("MKL is disabled")
@test_util.run_in_graph_and_eager_modes
def testAssertProtoEqualsStr(self):
graph_str = "node { name: 'w1' op: 'params' }"
graph_def = graph_pb2.GraphDef()
text_format.Merge(graph_str, graph_def)
# test string based comparison
self.assertProtoEquals(graph_str, graph_def)
# test original comparison
self.assertProtoEquals(graph_def, graph_def)
@test_util.run_in_graph_and_eager_modes
def testAssertProtoEqualsAny(self):
# Test assertProtoEquals with a protobuf.Any field.
meta_graph_def_str = """
meta_info_def {
meta_graph_version: "outer"
any_info {
[type.googleapis.com/tensorflow.MetaGraphDef] {
meta_info_def {
meta_graph_version: "inner"
}
}
}
}
"""
meta_graph_def_outer = meta_graph_pb2.MetaGraphDef()
meta_graph_def_outer.meta_info_def.meta_graph_version = "outer"
meta_graph_def_inner = meta_graph_pb2.MetaGraphDef()
meta_graph_def_inner.meta_info_def.meta_graph_version = "inner"
meta_graph_def_outer.meta_info_def.any_info.Pack(meta_graph_def_inner)
self.assertProtoEquals(meta_graph_def_str, meta_graph_def_outer)
self.assertProtoEquals(meta_graph_def_outer, meta_graph_def_outer)
# Check if the assertion failure message contains the content of
# the inner proto.
with self.assertRaisesRegex(AssertionError, r'meta_graph_version: "inner"'):
self.assertProtoEquals("", meta_graph_def_outer)
@test_util.run_in_graph_and_eager_modes
def testNDArrayNear(self):
a1 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
a2 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
a3 = np.array([[10.0, 20.0, 30.0], [40.0, 50.0, 60.0]])
self.assertTrue(self._NDArrayNear(a1, a2, 1e-5))
self.assertFalse(self._NDArrayNear(a1, a3, 1e-5))
@test_util.run_in_graph_and_eager_modes
def testCheckedThreadSucceeds(self):
def noop(ev):
ev.set()
event_arg = threading.Event()
self.assertFalse(event_arg.is_set())
t = self.checkedThread(target=noop, args=(event_arg,))
t.start()
t.join()
self.assertTrue(event_arg.is_set())
@test_util.run_in_graph_and_eager_modes
def testCheckedThreadFails(self):
def err_func():
return 1 // 0
t = self.checkedThread(target=err_func)
t.start()
with self.assertRaises(self.failureException) as fe:
t.join()
self.assertTrue("integer division or modulo by zero" in str(fe.exception))
@test_util.run_in_graph_and_eager_modes
def testCheckedThreadWithWrongAssertionFails(self):
x = 37
def err_func():
self.assertTrue(x < 10)
t = self.checkedThread(target=err_func)
t.start()
with self.assertRaises(self.failureException) as fe:
t.join()
self.assertTrue("False is not true" in str(fe.exception))
@test_util.run_in_graph_and_eager_modes
def testMultipleThreadsWithOneFailure(self):
def err_func(i):
self.assertTrue(i != 7)
threads = [
self.checkedThread(
target=err_func, args=(i,)) for i in range(10)
]
for t in threads:
t.start()
for i, t in enumerate(threads):
if i == 7:
with self.assertRaises(self.failureException):
t.join()
else:
t.join()
def _WeMustGoDeeper(self, msg):
with self.assertRaisesOpError(msg):
with ops.Graph().as_default():
node_def = ops._NodeDef("IntOutput", "name")
node_def_orig = ops._NodeDef("IntOutput", "orig")
op_orig = ops.Operation(node_def_orig, ops.get_default_graph())
op = ops.Operation(node_def, ops.get_default_graph(),
original_op=op_orig)
raise errors.UnauthenticatedError(node_def, op, "true_err")
@test_util.run_in_graph_and_eager_modes
def testAssertRaisesOpErrorDoesNotPassMessageDueToLeakedStack(self):
with self.assertRaises(AssertionError):
self._WeMustGoDeeper("this_is_not_the_error_you_are_looking_for")
self._WeMustGoDeeper("true_err")
self._WeMustGoDeeper("name")
self._WeMustGoDeeper("orig")
@test_util.run_in_graph_and_eager_modes
def testAllCloseTensors(self):
a_raw_data = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
a = constant_op.constant(a_raw_data)
b = math_ops.add(1, constant_op.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8]]))
self.assertAllClose(a, b)
self.assertAllClose(a, a_raw_data)
a_dict = {"key": a}
b_dict = {"key": b}
self.assertAllClose(a_dict, b_dict)
x_list = [a, b]
y_list = [a_raw_data, b]
self.assertAllClose(x_list, y_list)
@test_util.run_in_graph_and_eager_modes
def testAllCloseScalars(self):
self.assertAllClose(7, 7 + 1e-8)
with self.assertRaisesRegex(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(7, 7 + 1e-5)
@test_util.run_in_graph_and_eager_modes
def testAllCloseList(self):
with self.assertRaisesRegex(AssertionError, r"not close dif"):
self.assertAllClose([0], [1])
@test_util.run_in_graph_and_eager_modes
def testAllCloseDictToNonDict(self):
with self.assertRaisesRegex(ValueError, r"Can't compare dict to non-dict"):
self.assertAllClose(1, {"a": 1})
with self.assertRaisesRegex(ValueError, r"Can't compare dict to non-dict"):
self.assertAllClose({"a": 1}, 1)
@test_util.run_in_graph_and_eager_modes
def testAllCloseNamedtuples(self):
a = 7
b = (2., 3.)
c = np.ones((3, 2, 4)) * 7.
expected = {"a": a, "b": b, "c": c}
my_named_tuple = collections.namedtuple("MyNamedTuple", ["a", "b", "c"])
# Identity.
self.assertAllClose(expected, my_named_tuple(a=a, b=b, c=c))
self.assertAllClose(
my_named_tuple(a=a, b=b, c=c), my_named_tuple(a=a, b=b, c=c))
@test_util.run_in_graph_and_eager_modes
def testAllCloseDicts(self):
a = 7
b = (2., 3.)
c = np.ones((3, 2, 4)) * 7.
expected = {"a": a, "b": b, "c": c}
# Identity.
self.assertAllClose(expected, expected)
self.assertAllClose(expected, dict(expected))
# With each item removed.
for k in expected:
actual = dict(expected)
del actual[k]
with self.assertRaisesRegex(AssertionError, r"mismatched keys"):
self.assertAllClose(expected, actual)
# With each item changed.
with self.assertRaisesRegex(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(expected, {"a": a + 1e-5, "b": b, "c": c})
with self.assertRaisesRegex(AssertionError, r"Shape mismatch"):
self.assertAllClose(expected, {"a": a, "b": b + (4.,), "c": c})
c_copy = np.array(c)
c_copy[1, 1, 1] += 1e-5
with self.assertRaisesRegex(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(expected, {"a": a, "b": b, "c": c_copy})
@test_util.run_in_graph_and_eager_modes
def testAllCloseListOfNamedtuples(self):
my_named_tuple = collections.namedtuple("MyNamedTuple", ["x", "y"])
l1 = [
my_named_tuple(x=np.array([[2.3, 2.5]]), y=np.array([[0.97, 0.96]])),
my_named_tuple(x=np.array([[3.3, 3.5]]), y=np.array([[0.98, 0.99]]))
]
l2 = [
([[2.3, 2.5]], [[0.97, 0.96]]),
([[3.3, 3.5]], [[0.98, 0.99]]),
]
self.assertAllClose(l1, l2)
@test_util.run_in_graph_and_eager_modes
def testAllCloseNestedStructure(self):
a = {"x": np.ones((3, 2, 4)) * 7, "y": (2, [{"nested": {"m": 3, "n": 4}}])}
self.assertAllClose(a, a)
b = copy.deepcopy(a)
self.assertAllClose(a, b)
# Test mismatched values
b["y"][1][0]["nested"]["n"] = 4.2
with self.assertRaisesRegex(AssertionError,
r"\[y\]\[1\]\[0\]\[nested\]\[n\]"):
self.assertAllClose(a, b)
@test_util.run_in_graph_and_eager_modes
def testArrayNear(self):
a = [1, 2]
b = [1, 2, 5]
with self.assertRaises(AssertionError):
self.assertArrayNear(a, b, 0.001)
a = [1, 2]
b = [[1, 2], [3, 4]]
with self.assertRaises(TypeError):
self.assertArrayNear(a, b, 0.001)
a = [1, 2]
b = [1, 2]
self.assertArrayNear(a, b, 0.001)
@test_util.skip_if(True) # b/117665998
def testForceGPU(self):
with self.assertRaises(errors.InvalidArgumentError):
with self.test_session(force_gpu=True):
# this relies on us not having a GPU implementation for assert, which
# seems sensible
x = constant_op.constant(True)
y = [15]
control_flow_ops.Assert(x, y).run()
@test_util.run_in_graph_and_eager_modes
def testAssertAllCloseAccordingToType(self):
# test plain int
self.assertAllCloseAccordingToType(1, 1, rtol=1e-8, atol=1e-8)
# test float64
self.assertAllCloseAccordingToType(
np.asarray([1e-8], dtype=np.float64),
np.asarray([2e-8], dtype=np.float64),
rtol=1e-8, atol=1e-8
)
self.assertAllCloseAccordingToType(
constant_op.constant([1e-8], dtype=dtypes.float64),
constant_op.constant([2e-8], dtype=dtypes.float64),
rtol=1e-8,
atol=1e-8)
with (self.assertRaises(AssertionError)):
self.assertAllCloseAccordingToType(
np.asarray([1e-7], dtype=np.float64),
np.asarray([2e-7], dtype=np.float64),
rtol=1e-8, atol=1e-8
)
# test float32
self.assertAllCloseAccordingToType(
np.asarray([1e-7], dtype=np.float32),
np.asarray([2e-7], dtype=np.float32),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7
)
self.assertAllCloseAccordingToType(
constant_op.constant([1e-7], dtype=dtypes.float32),
constant_op.constant([2e-7], dtype=dtypes.float32),
rtol=1e-8,
atol=1e-8,
float_rtol=1e-7,
float_atol=1e-7)
with (self.assertRaises(AssertionError)):
self.assertAllCloseAccordingToType(
np.asarray([1e-6], dtype=np.float32),
np.asarray([2e-6], dtype=np.float32),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7
)
# test float16
self.assertAllCloseAccordingToType(
np.asarray([1e-4], dtype=np.float16),
np.asarray([2e-4], dtype=np.float16),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7,
half_rtol=1e-4, half_atol=1e-4
)
self.assertAllCloseAccordingToType(
constant_op.constant([1e-4], dtype=dtypes.float16),
constant_op.constant([2e-4], dtype=dtypes.float16),
rtol=1e-8,
atol=1e-8,
float_rtol=1e-7,
float_atol=1e-7,
half_rtol=1e-4,
half_atol=1e-4)
with (self.assertRaises(AssertionError)):
self.assertAllCloseAccordingToType(
np.asarray([1e-3], dtype=np.float16),
np.asarray([2e-3], dtype=np.float16),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7,
half_rtol=1e-4, half_atol=1e-4
)
@test_util.run_in_graph_and_eager_modes
def testAssertAllEqual(self):
i = variables.Variable([100] * 3, dtype=dtypes.int32, name="i")
j = constant_op.constant([20] * 3, dtype=dtypes.int32, name="j")
k = math_ops.add(i, j, name="k")
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual([100] * 3, i)
self.assertAllEqual([120] * 3, k)
self.assertAllEqual([20] * 3, j)
with self.assertRaisesRegex(AssertionError, r"not equal lhs"):
self.assertAllEqual([0] * 3, k)
@test_util.run_in_graph_and_eager_modes
def testAssertNotAllEqual(self):
i = variables.Variable([100], dtype=dtypes.int32, name="i")
j = constant_op.constant([20], dtype=dtypes.int32, name="j")
k = math_ops.add(i, j, name="k")
self.evaluate(variables.global_variables_initializer())
self.assertNotAllEqual([100] * 3, i)
self.assertNotAllEqual([120] * 3, k)
self.assertNotAllEqual([20] * 3, j)
with self.assertRaisesRegex(
AssertionError, r"two values are equal at all elements.*extra message"):
self.assertNotAllEqual([120], k, msg="extra message")
@test_util.run_in_graph_and_eager_modes
def testAssertNotAllClose(self):
# Test with arrays
self.assertNotAllClose([0.1], [0.2])
with self.assertRaises(AssertionError):
self.assertNotAllClose([-1.0, 2.0], [-1.0, 2.0])
# Test with tensors
x = constant_op.constant([1.0, 1.0], name="x")
y = math_ops.add(x, x)
self.assertAllClose([2.0, 2.0], y)
self.assertNotAllClose([0.9, 1.0], x)
with self.assertRaises(AssertionError):
self.assertNotAllClose([1.0, 1.0], x)
@test_util.run_in_graph_and_eager_modes
def testAssertNotAllCloseRTol(self):
# Test with arrays
with self.assertRaises(AssertionError):
self.assertNotAllClose([1.1, 2.1], [1.0, 2.0], rtol=0.2)
# Test with tensors
x = constant_op.constant([1.0, 1.0], name="x")
y = math_ops.add(x, x)
self.assertAllClose([2.0, 2.0], y)
with self.assertRaises(AssertionError):
self.assertNotAllClose([0.9, 1.0], x, rtol=0.2)
@test_util.run_in_graph_and_eager_modes
def testAssertNotAllCloseATol(self):
# Test with arrays
with self.assertRaises(AssertionError):
self.assertNotAllClose([1.1, 2.1], [1.0, 2.0], atol=0.2)
# Test with tensors
x = constant_op.constant([1.0, 1.0], name="x")
y = math_ops.add(x, x)
self.assertAllClose([2.0, 2.0], y)
with self.assertRaises(AssertionError):
self.assertNotAllClose([0.9, 1.0], x, atol=0.2)
@test_util.run_in_graph_and_eager_modes
def testAssertAllGreaterLess(self):
x = constant_op.constant([100.0, 110.0, 120.0], dtype=dtypes.float32)
y = constant_op.constant([10.0] * 3, dtype=dtypes.float32)
z = math_ops.add(x, y)
self.assertAllClose([110.0, 120.0, 130.0], z)
self.assertAllGreater(x, 95.0)
self.assertAllLess(x, 125.0)
with self.assertRaises(AssertionError):
self.assertAllGreater(x, 105.0)
with self.assertRaises(AssertionError):
self.assertAllGreater(x, 125.0)
with self.assertRaises(AssertionError):
self.assertAllLess(x, 115.0)
with self.assertRaises(AssertionError):
self.assertAllLess(x, 95.0)
@test_util.run_in_graph_and_eager_modes
def testAssertAllGreaterLessEqual(self):
x = constant_op.constant([100.0, 110.0, 120.0], dtype=dtypes.float32)
y = constant_op.constant([10.0] * 3, dtype=dtypes.float32)
z = math_ops.add(x, y)
self.assertAllEqual([110.0, 120.0, 130.0], z)
self.assertAllGreaterEqual(x, 95.0)
self.assertAllLessEqual(x, 125.0)
with self.assertRaises(AssertionError):
self.assertAllGreaterEqual(x, 105.0)
with self.assertRaises(AssertionError):
self.assertAllGreaterEqual(x, 125.0)
with self.assertRaises(AssertionError):
self.assertAllLessEqual(x, 115.0)
with self.assertRaises(AssertionError):
self.assertAllLessEqual(x, 95.0)
def testAssertAllInRangeWithNonNumericValuesFails(self):
s1 = constant_op.constant("Hello, ", name="s1")
c = constant_op.constant([1 + 2j, -3 + 5j], name="c")
b = constant_op.constant([False, True], name="b")
with self.assertRaises(AssertionError):
self.assertAllInRange(s1, 0.0, 1.0)
with self.assertRaises(AssertionError):
self.assertAllInRange(c, 0.0, 1.0)
with self.assertRaises(AssertionError):
self.assertAllInRange(b, 0, 1)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRange(self):
x = constant_op.constant([10.0, 15.0], name="x")
self.assertAllInRange(x, 10, 15)
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 10, 15, open_lower_bound=True)
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 10, 15, open_upper_bound=True)
with self.assertRaises(AssertionError):
self.assertAllInRange(
x, 10, 15, open_lower_bound=True, open_upper_bound=True)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRangeErrorMessageEllipses(self):
x_init = np.array([[10.0, 15.0]] * 12)
x = constant_op.constant(x_init, name="x")
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 5, 10)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRangeDetectsNaNs(self):
x = constant_op.constant(
[[np.nan, 0.0], [np.nan, np.inf], [np.inf, np.nan]], name="x")
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 0.0, 2.0)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRangeWithInfinities(self):
x = constant_op.constant([10.0, np.inf], name="x")
self.assertAllInRange(x, 10, np.inf)
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 10, np.inf, open_upper_bound=True)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInSet(self):
b = constant_op.constant([True, False], name="b")
x = constant_op.constant([13, 37], name="x")
self.assertAllInSet(b, [False, True])
self.assertAllInSet(b, (False, True))
self.assertAllInSet(b, {False, True})
self.assertAllInSet(x, [0, 13, 37, 42])
self.assertAllInSet(x, (0, 13, 37, 42))
self.assertAllInSet(x, {0, 13, 37, 42})
with self.assertRaises(AssertionError):
self.assertAllInSet(b, [False])
with self.assertRaises(AssertionError):
self.assertAllInSet(x, (42,))
def testRandomSeed(self):
# Call setUp again for WithCApi case (since it makes a new default graph
# after setup).
# TODO(skyewm): remove this when C API is permanently enabled.
with context.eager_mode():
self.setUp()
a = random.randint(1, 1000)
a_np_rand = np.random.rand(1)
a_rand = random_ops.random_normal([1])
# ensure that randomness in multiple testCases is deterministic.
self.setUp()
b = random.randint(1, 1000)
b_np_rand = np.random.rand(1)
b_rand = random_ops.random_normal([1])
self.assertEqual(a, b)
self.assertEqual(a_np_rand, b_np_rand)
self.assertAllEqual(a_rand, b_rand)
@test_util.run_in_graph_and_eager_modes
def test_callable_evaluate(self):
def model():
return resource_variable_ops.ResourceVariable(
name="same_name",
initial_value=1) + 1
with context.eager_mode():
self.assertEqual(2, self.evaluate(model))
@test_util.run_in_graph_and_eager_modes
def test_nested_tensors_evaluate(self):
expected = {"a": 1, "b": 2, "nested": {"d": 3, "e": 4}}
nested = {"a": constant_op.constant(1),
"b": constant_op.constant(2),
"nested": {"d": constant_op.constant(3),
"e": constant_op.constant(4)}}
self.assertEqual(expected, self.evaluate(nested))
def test_run_in_graph_and_eager_modes(self):
l = []
def inc(self, with_brackets):
del self # self argument is required by run_in_graph_and_eager_modes.
mode = "eager" if context.executing_eagerly() else "graph"
with_brackets = "with_brackets" if with_brackets else "without_brackets"
l.append((with_brackets, mode))
f = test_util.run_in_graph_and_eager_modes(inc)
f(self, with_brackets=False)
f = test_util.run_in_graph_and_eager_modes()(inc)
f(self, with_brackets=True)
self.assertEqual(len(l), 4)
self.assertEqual(set(l), {
("with_brackets", "graph"),
("with_brackets", "eager"),
("without_brackets", "graph"),
("without_brackets", "eager"),
})
def test_get_node_def_from_graph(self):
graph_def = graph_pb2.GraphDef()
node_foo = graph_def.node.add()
node_foo.name = "foo"
self.assertIs(test_util.get_node_def_from_graph("foo", graph_def), node_foo)
self.assertIsNone(test_util.get_node_def_from_graph("bar", graph_def))
def test_run_in_eager_and_graph_modes_test_class(self):
msg = "`run_in_graph_and_eager_modes` only supports test methods.*"
with self.assertRaisesRegex(ValueError, msg):
@test_util.run_in_graph_and_eager_modes()
class Foo(object):
pass
del Foo # Make pylint unused happy.
def test_run_in_eager_and_graph_modes_skip_graph_runs_eager(self):
modes = []
def _test(self):
if not context.executing_eagerly():
self.skipTest("Skipping in graph mode")
modes.append("eager" if context.executing_eagerly() else "graph")
test_util.run_in_graph_and_eager_modes(_test)(self)
self.assertEqual(modes, ["eager"])
def test_run_in_eager_and_graph_modes_skip_eager_runs_graph(self):
modes = []
def _test(self):
if context.executing_eagerly():
self.skipTest("Skipping in eager mode")
modes.append("eager" if context.executing_eagerly() else "graph")
test_util.run_in_graph_and_eager_modes(_test)(self)
self.assertEqual(modes, ["graph"])
def test_run_in_graph_and_eager_modes_setup_in_same_mode(self):
modes = []
mode_name = lambda: "eager" if context.executing_eagerly() else "graph"
class ExampleTest(test_util.TensorFlowTestCase):
def runTest(self):
pass
def setUp(self):
modes.append("setup_" + mode_name())
@test_util.run_in_graph_and_eager_modes
def testBody(self):
modes.append("run_" + mode_name())
e = ExampleTest()
e.setUp()
e.testBody()
self.assertEqual(modes[1:2], ["run_graph"])
self.assertEqual(modes[2:], ["setup_eager", "run_eager"])
@parameterized.named_parameters(dict(testcase_name="argument",
arg=True))
@test_util.run_in_graph_and_eager_modes
def test_run_in_graph_and_eager_works_with_parameterized_keyword(self, arg):
self.assertEqual(arg, True)
@combinations.generate(combinations.combine(arg=True))
@test_util.run_in_graph_and_eager_modes
def test_run_in_graph_and_eager_works_with_combinations(self, arg):
self.assertEqual(arg, True)
def test_build_as_function_and_v1_graph(self):
class GraphModeAndFunctionTest(parameterized.TestCase):
def __init__(inner_self): # pylint: disable=no-self-argument
super(GraphModeAndFunctionTest, inner_self).__init__()
inner_self.graph_mode_tested = False
inner_self.inside_function_tested = False
def runTest(self):
del self
@test_util.build_as_function_and_v1_graph
def test_modes(inner_self): # pylint: disable=no-self-argument
if ops.inside_function():
self.assertFalse(inner_self.inside_function_tested)
inner_self.inside_function_tested = True
else:
self.assertFalse(inner_self.graph_mode_tested)
inner_self.graph_mode_tested = True
test_object = GraphModeAndFunctionTest()
test_object.test_modes_v1_graph()
test_object.test_modes_function()
self.assertTrue(test_object.graph_mode_tested)
self.assertTrue(test_object.inside_function_tested)
def test_with_forward_compatibility_horizons(self):
tested_codepaths = set()
def some_function_with_forward_compat_behavior():
if compat.forward_compatible(2050, 1, 1):
tested_codepaths.add("future")
else:
tested_codepaths.add("present")
@test_util.with_forward_compatibility_horizons(None, [2051, 1, 1])
def some_test(self):
del self # unused
some_function_with_forward_compat_behavior()
some_test(None)
self.assertEqual(tested_codepaths, set(["present", "future"]))
class SkipTestTest(test_util.TensorFlowTestCase):
  """Tests for the `test_util.skip_if_error` context manager.

  `skip_if_error` should convert a matching raised error into a
  `unittest.SkipTest`, and let non-matching errors propagate unchanged.
  The helper below is also invoked from setUp/tearDown to verify the
  behavior holds outside the test body proper.
  """

  def _verify_test_in_set_up_or_tear_down(self):
    # Matching error (message in the allowed list) -> converted to SkipTest.
    with self.assertRaises(unittest.SkipTest):
      with test_util.skip_if_error(self, ValueError,
                                   ["foo bar", "test message"]):
        raise ValueError("test message")
    # Non-matching message -> the original ValueError must propagate.
    try:
      with self.assertRaisesRegex(ValueError, "foo bar"):
        with test_util.skip_if_error(self, ValueError, "test message"):
          raise ValueError("foo bar")
    except unittest.SkipTest:
      raise RuntimeError("Test is not supposed to skip.")

  def setUp(self):
    super(SkipTestTest, self).setUp()
    self._verify_test_in_set_up_or_tear_down()

  def tearDown(self):
    super(SkipTestTest, self).tearDown()
    self._verify_test_in_set_up_or_tear_down()

  def test_skip_if_error_should_skip(self):
    with self.assertRaises(unittest.SkipTest):
      with test_util.skip_if_error(self, ValueError, "test message"):
        raise ValueError("test message")

  def test_skip_if_error_should_skip_with_list(self):
    # Any message from the provided list should trigger the skip.
    with self.assertRaises(unittest.SkipTest):
      with test_util.skip_if_error(self, ValueError,
                                   ["foo bar", "test message"]):
        raise ValueError("test message")

  def test_skip_if_error_should_skip_without_expected_message(self):
    # With no expected message, any matching error type skips.
    with self.assertRaises(unittest.SkipTest):
      with test_util.skip_if_error(self, ValueError):
        raise ValueError("test message")

  def test_skip_if_error_should_skip_without_error_message(self):
    with self.assertRaises(unittest.SkipTest):
      with test_util.skip_if_error(self, ValueError):
        raise ValueError()

  def test_skip_if_error_should_raise_message_mismatch(self):
    # A matching type but mismatched message must NOT be skipped.
    try:
      with self.assertRaisesRegex(ValueError, "foo bar"):
        with test_util.skip_if_error(self, ValueError, "test message"):
          raise ValueError("foo bar")
    except unittest.SkipTest:
      raise RuntimeError("Test is not supposed to skip.")

  def test_skip_if_error_should_raise_no_message(self):
    # An empty error message does not match a non-empty expected message.
    try:
      with self.assertRaisesRegex(ValueError, ""):
        with test_util.skip_if_error(self, ValueError, "test message"):
          raise ValueError()
    except unittest.SkipTest:
      raise RuntimeError("Test is not supposed to skip.")
# Its own test case to reproduce variable sharing issues which only pop up when
# setUp() is overridden and super() is not called.
class GraphAndEagerNoVariableSharing(test_util.TensorFlowTestCase):
  """Reproduces variable-sharing issues that only appear when setUp()
  is overridden without calling super() (see comment above this class).
  """

  def setUp(self):
    pass  # Intentionally does not call TensorFlowTestCase's super()

  @test_util.run_in_graph_and_eager_modes
  def test_no_variable_sharing(self):
    # Creating the same named variable in both graph and eager runs must
    # not collide even though super().setUp() never reset the state.
    variable_scope.get_variable(
        name="step_size",
        initializer=np.array(1e-5, np.float32),
        use_resource=True,
        trainable=False)
class GarbageCollectionTest(test_util.TensorFlowTestCase):
  """Tests for test_util's leak-detection decorators.

  Each test defines a small throwaway class with one deliberately leaky
  method and one clean method, and checks that the decorator flags only
  the leaky one with an AssertionError.
  """

  def test_no_reference_cycle_decorator(self):

    class ReferenceCycleTest(object):

      def __init__(inner_self):  # pylint: disable=no-self-argument
        inner_self.assertEqual = self.assertEqual  # pylint: disable=invalid-name

      @test_util.assert_no_garbage_created
      def test_has_cycle(self):
        # Self-referencing list: creates a reference cycle for gc.
        a = []
        a.append(a)

      @test_util.assert_no_garbage_created
      def test_has_no_cycle(self):
        pass

    with self.assertRaises(AssertionError):
      ReferenceCycleTest().test_has_cycle()
    ReferenceCycleTest().test_has_no_cycle()

  @test_util.run_in_graph_and_eager_modes
  def test_no_leaked_tensor_decorator(self):

    class LeakedTensorTest(object):

      def __init__(inner_self):  # pylint: disable=no-self-argument
        inner_self.assertEqual = self.assertEqual  # pylint: disable=invalid-name

      @test_util.assert_no_new_tensors
      def test_has_leak(self):
        # Storing the tensor on self keeps it alive past the method call.
        self.a = constant_op.constant([3.], name="leak")

      @test_util.assert_no_new_tensors
      def test_has_no_leak(self):
        constant_op.constant([3.], name="no-leak")

    with self.assertRaisesRegex(AssertionError, "Tensors not deallocated"):
      LeakedTensorTest().test_has_leak()
    LeakedTensorTest().test_has_no_leak()

  def test_no_new_objects_decorator(self):

    class LeakedObjectTest(object):

      def __init__(inner_self):  # pylint: disable=no-self-argument
        inner_self.assertEqual = self.assertEqual  # pylint: disable=invalid-name
        inner_self.accumulation = []

      @test_util.assert_no_new_pyobjects_executing_eagerly
      def test_has_leak(self):
        # Appending to instance state grows the live-object count per call.
        self.accumulation.append([1.])

      @test_util.assert_no_new_pyobjects_executing_eagerly
      def test_has_no_leak(self):
        self.not_accumulating = [1.]

    with self.assertRaises(AssertionError):
      LeakedObjectTest().test_has_leak()
    LeakedObjectTest().test_has_no_leak()
# Allow running this test file directly.
if __name__ == "__main__":
    googletest.main()
# ==== file boundary: runtest.py ====
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import logging
import os
import random
import re
import setproctitle
import shutil
import string
import subprocess
import sys
import tempfile
import threading
import time
from collections import defaultdict, namedtuple, OrderedDict
from concurrent.futures import ThreadPoolExecutor
import numpy as np
import pickle
import pytest
import ray
import ray.test.cluster_utils
import ray.test.test_utils
from ray.utils import _random_string
logger = logging.getLogger(__name__)
def assert_equal(obj1, obj2):
    """Recursively assert that ``obj1`` and ``obj2`` are equal.

    Descends into numpy arrays, objects with a ``__dict__``, dicts, lists,
    tuples and named tuples; raises AssertionError on the first mismatch.
    """
    involves_numpy = (type(obj1).__module__ == np.__name__
                      or type(obj2).__module__ == np.__name__)
    if involves_numpy:
        zero_dim = ((hasattr(obj1, "shape") and obj1.shape == ())
                    or (hasattr(obj2, "shape") and obj2.shape == ()))
        if zero_dim:
            # Special case: np.testing.assert_equal fails on 0-d values of
            # differing numerical types, so compare with plain ==.
            assert obj1 == obj2, ("Objects {} and {} are "
                                  "different.".format(obj1, obj2))
        else:
            np.testing.assert_equal(obj1, obj2)
        return
    if hasattr(obj1, "__dict__") and hasattr(obj2, "__dict__"):
        # Compare attribute dictionaries, ignoring serializer bookkeeping.
        special_keys = ["_pytype_"]
        keys1 = set(list(obj1.__dict__.keys()) + special_keys)
        keys2 = set(list(obj2.__dict__.keys()) + special_keys)
        assert keys1 == keys2, ("Objects {} and {} are "
                                "different.".format(obj1, obj2))
        for attr in obj1.__dict__:
            if attr not in special_keys:
                assert_equal(obj1.__dict__[attr], obj2.__dict__[attr])
    elif type(obj1) is dict or type(obj2) is dict:
        assert_equal(obj1.keys(), obj2.keys())
        for key in obj1:
            assert_equal(obj1[key], obj2[key])
    elif type(obj1) is list or type(obj2) is list:
        assert len(obj1) == len(obj2), ("Objects {} and {} are lists with "
                                        "different lengths.".format(
                                            obj1, obj2))
        for elem1, elem2 in zip(obj1, obj2):
            assert_equal(elem1, elem2)
    elif type(obj1) is tuple or type(obj2) is tuple:
        assert len(obj1) == len(obj2), ("Objects {} and {} are tuples with "
                                        "different lengths.".format(
                                            obj1, obj2))
        for elem1, elem2 in zip(obj1, obj2):
            assert_equal(elem1, elem2)
    elif (ray.serialization.is_named_tuple(type(obj1))
          or ray.serialization.is_named_tuple(type(obj2))):
        assert len(obj1) == len(obj2), ("Objects {} and {} are named tuples "
                                        "with different lengths.".format(
                                            obj1, obj2))
        for elem1, elem2 in zip(obj1, obj2):
            assert_equal(elem1, elem2)
    else:
        assert obj1 == obj2, "Objects {} and {} are different.".format(
            obj1, obj2)
# Version-specific extra values: Python 2 has a distinct ``long`` integer
# type, Python 3 does not, so the py2 branch adds explicit longs.
if sys.version_info >= (3, 0):
    long_extras = [0, np.array([["hi", u"hi"], [1.3, 1]])]
else:
    long_extras = [
        long(0),  # noqa: E501,F821
        np.array([
            ["hi", u"hi"],
            [1.3, long(1)]  # noqa: E501,F821
        ])
    ]
# A broad sample of primitive values (ints of various widths, unicode
# strings, None/bools, empty containers and several numpy scalar and array
# types) used to exercise serialization round-trips.
PRIMITIVE_OBJECTS = [
    0, 0.0, 0.9, 1 << 62, 1 << 100, 1 << 999, [1 << 100, [1 << 100]], "a",
    string.printable, "\u262F", u"hello world", u"\xff\xfe\x9c\x001\x000\x00",
    None, True, False, [], (), {},
    np.int8(3),
    np.int32(4),
    np.int64(5),
    np.uint8(3),
    np.uint32(4),
    np.uint64(5),
    np.float32(1.9),
    np.float64(1.9),
    np.zeros([100, 100]),
    np.random.normal(size=[100, 100]),
    np.array(["hi", 3]),
    np.array(["hi", 3], dtype=object)
] + long_extras
# Deeply nested container values used to stress recursive serialization.
COMPLEX_OBJECTS = [
    [[[[[[[[[[[[]]]]]]]]]]]],
    {"obj{}".format(i): np.random.normal(size=[100, 100])
     for i in range(10)},
    # {(): {(): {(): {(): {(): {(): {(): {(): {(): {(): {
    #     (): {(): {}}}}}}}}}}}}},
    (
        (((((((((), ), ), ), ), ), ), ), ), ),
    {
        "a": {
            "b": {
                "c": {
                    "d": {}
                }
            }
        }
    }
]
class Foo(object):
    """Hashable test object; two Foos compare equal when their values do."""

    def __init__(self, value=0):
        self.value = value

    def __eq__(self, other):
        return other.value == self.value

    def __hash__(self):
        return hash(self.value)
class Bar(object):
    """Test object with one attribute per entry of the module's primitive
    and complex object corpora."""

    def __init__(self):
        for index, value in enumerate(PRIMITIVE_OBJECTS + COMPLEX_OBJECTS):
            setattr(self, "field{}".format(index), value)
class Baz(object):
    """Composite test object holding a Foo and a Bar, plus a no-op method."""

    def __init__(self):
        self.foo, self.bar = Foo(), Bar()

    def method(self, arg):
        # Intentionally a no-op; exists so the class has a bound method.
        pass
class Qux(object):
    """Test object holding one instance of each of Foo, Bar and Baz."""

    def __init__(self):
        self.objs = [factory() for factory in (Foo, Bar, Baz)]
class SubQux(Qux):
    """Subclass of Qux used to test serialization of inheritance."""

    def __init__(self):
        super(SubQux, self).__init__()
class CustomError(Exception):
    """User-defined exception type included in the serialization corpus."""
    pass
# Named-tuple fixtures: a 2-field Point and a 5-field example type.
Point = namedtuple("Point", ["x", "y"])
NamedTupleExample = namedtuple("Example",
                               "field1, field2, field3, field4, field5")
# User-defined objects (exceptions, named tuples and the classes above)
# used to exercise custom-class serialization. Qux/SubQux are currently
# commented out of the corpus.
CUSTOM_OBJECTS = [
    Exception("Test object."),
    CustomError(),
    Point(11, y=22),
    Foo(),
    Bar(),
    Baz(),  # Qux(), SubQux(),
    NamedTupleExample(1, 1.0, "hi", np.zeros([3, 5]), [1, 2, 3])
]
# Aggregate corpora: every base object, and each base object wrapped in a
# singleton list, tuple or dict.
BASE_OBJECTS = PRIMITIVE_OBJECTS + COMPLEX_OBJECTS + CUSTOM_OBJECTS
LIST_OBJECTS = [[obj] for obj in BASE_OBJECTS]
TUPLE_OBJECTS = [(obj, ) for obj in BASE_OBJECTS]
# The check that type(obj).__module__ != "numpy" should be unnecessary, but
# otherwise this seems to fail on Mac OS X on Travis.
DICT_OBJECTS = (
    [{
        obj: obj
    } for obj in PRIMITIVE_OBJECTS
     if (obj.__hash__ is not None and type(obj).__module__ != "numpy")] + [{
         0: obj
     } for obj in BASE_OBJECTS] + [{
         Foo(123): Foo(456)
     }])
RAY_TEST_OBJECTS = BASE_OBJECTS + LIST_OBJECTS + TUPLE_OBJECTS + DICT_OBJECTS
@pytest.fixture
def ray_start():
    """Start a single-CPU Ray instance for a test; shut it down afterwards."""
    # Start the Ray processes.
    ray.init(num_cpus=1)
    yield None
    # The code after the yield will run as teardown code.
    ray.shutdown()
@pytest.fixture
def shutdown_only():
    """Fixture for tests that call ray.init() themselves; only tears down."""
    yield None
    # The code after the yield will run as teardown code.
    ray.shutdown()
def test_passing_arguments_by_value(ray_start):
    """Remote calls must round-trip arbitrary argument values unchanged."""

    @ray.remote
    def f(x):
        return x

    # Check that we can pass arguments by value to remote functions and
    # that they are uncorrupted.
    for obj in RAY_TEST_OBJECTS:
        assert_equal(obj, ray.get(f.remote(obj)))
def test_ray_recursive_objects(ray_start):
    """Putting self-referential structures must raise, not hang or crash."""

    class ClassA(object):
        pass

    # A list that contains itself.
    self_list = []
    self_list.append(self_list)

    # An object whose field points back at itself.
    obj_self = ClassA()
    obj_self.field = obj_self

    # Two objects that reference each other through their fields.
    obj_a = ClassA()
    obj_b = ClassA()
    obj_a.field = obj_b
    obj_b.field = obj_a

    # A dictionary that contains itself.
    self_dict = {}
    self_dict["key"] = self_dict

    # Serializing any of these recursive objects must raise an exception.
    for recursive_obj in (self_list, obj_self, obj_a, obj_b, self_dict):
        with pytest.raises(Exception):
            ray.put(recursive_obj)
def test_passing_arguments_by_value_out_of_the_box(ray_start):
    """Values without dedicated serializers (functions, sets, types,
    custom classes) must still survive a remote-call round trip."""

    @ray.remote
    def f(x):
        return x

    # Test passing lambdas.
    def temp():
        return 1

    assert ray.get(f.remote(temp))() == 1
    assert ray.get(f.remote(lambda x: x + 1))(3) == 4

    # Test sets.
    assert ray.get(f.remote(set())) == set()
    s = {1, (1, 2, "hi")}
    assert ray.get(f.remote(s)) == s

    # Test types.
    assert ray.get(f.remote(int)) == int
    assert ray.get(f.remote(float)) == float
    assert ray.get(f.remote(str)) == str

    class Foo(object):
        def __init__(self):
            pass

    # Make sure that we can put and get a custom type. Note that the result
    # won't be "equal" to Foo.
    ray.get(ray.put(Foo))
def test_putting_object_that_closes_over_object_id(ray_start):
    """Putting an object whose method closes over another object that
    holds an ObjectID must not crash."""
    # This test is here to prevent a regression of
    # https://github.com/ray-project/ray/issues/1317.

    class Foo(object):
        def __init__(self):
            self.val = ray.put(0)

        def method(self):
            # Deliberately references the enclosing ``f`` so the serialized
            # closure captures an object containing an ObjectID.
            f

    f = Foo()
    ray.put(f)
def test_put_get(shutdown_only):
    """Round-trip several families of values through the object store.

    Fix: the original body repeated the identical put/get/compare loop four
    times; the loop body is extracted into a local helper.
    """
    ray.init(num_cpus=0)

    def check_roundtrip(value_before):
        # Put a value into the object store and assert it comes back equal.
        objectid = ray.put(value_before)
        value_after = ray.get(objectid)
        assert value_before == value_after

    for i in range(100):
        check_roundtrip(i * 10**6)        # large integers
    for i in range(100):
        check_roundtrip(i * 10**6 * 1.0)  # floats
    for i in range(100):
        check_roundtrip("h" * i)          # strings, including the empty one
    for i in range(100):
        check_roundtrip([1] * i)          # lists, including the empty one
def test_custom_serializers(shutdown_only):
    """register_custom_serializer hooks must apply on the driver and on
    worker processes alike."""
    ray.init(num_cpus=1)

    class Foo(object):
        def __init__(self):
            self.x = 3

    def custom_serializer(obj):
        return 3, "string1", type(obj).__name__

    def custom_deserializer(serialized_obj):
        return serialized_obj, "string2"

    ray.register_custom_serializer(
        Foo, serializer=custom_serializer, deserializer=custom_deserializer)

    # Driver-side round trip goes through both hooks.
    assert ray.get(ray.put(Foo())) == ((3, "string1", Foo.__name__), "string2")

    class Bar(object):
        def __init__(self):
            self.x = 3

    ray.register_custom_serializer(
        Bar, serializer=custom_serializer, deserializer=custom_deserializer)

    @ray.remote
    def f():
        return Bar()

    # Worker-side creation must use the registered hooks too.
    assert ray.get(f.remote()) == ((3, "string1", Bar.__name__), "string2")
def test_serialization_final_fallback(ray_start):
    """Objects with no registered serializer (a catboost model here) must
    survive put/get via the final fallback serializer."""
    pytest.importorskip("catboost")
    # This test will only run when "catboost" is installed.
    from catboost import CatBoostClassifier

    model = CatBoostClassifier(
        iterations=2,
        depth=2,
        learning_rate=1,
        loss_function="Logloss",
        logging_level="Verbose")
    reconstructed_model = ray.get(ray.put(model))
    assert set(model.get_params().items()) == set(
        reconstructed_model.get_params().items())
def test_register_class(shutdown_only):
    """Classes defined after ray.init must be usable in puts, remote
    arguments and remote return values without explicit registration."""
    ray.init(num_cpus=2)

    # Check that putting an object of a class that has not been registered
    # throws an exception.
    class TempClass(object):
        pass

    ray.get(ray.put(TempClass()))

    # Test subtypes of dictionaries.
    value_before = OrderedDict([("hello", 1), ("world", 2)])
    object_id = ray.put(value_before)
    assert value_before == ray.get(object_id)

    value_before = defaultdict(lambda: 0, [("hello", 1), ("world", 2)])
    object_id = ray.put(value_before)
    assert value_before == ray.get(object_id)

    value_before = defaultdict(lambda: [], [("hello", 1), ("world", 2)])
    object_id = ray.put(value_before)
    assert value_before == ray.get(object_id)

    # Test passing custom classes into remote functions from the driver.
    @ray.remote
    def f(x):
        return x

    foo = ray.get(f.remote(Foo(7)))
    assert foo == Foo(7)

    regex = re.compile(r"\d+\.\d*")
    new_regex = ray.get(f.remote(regex))
    # This seems to fail on the system Python 3 that comes with
    # Ubuntu, so it is commented out for now:
    # assert regex == new_regex
    # Instead, we do this:
    assert regex.pattern == new_regex.pattern

    # Test returning custom classes created on workers.
    @ray.remote
    def g():
        return SubQux(), Qux()

    subqux, qux = ray.get(g.remote())
    assert subqux.objs[2].foo.value == 0

    # Test exporting custom class definitions from one worker to another
    # when the worker is blocked in a get.
    class NewTempClass(object):
        def __init__(self, value):
            self.value = value

    @ray.remote
    def h1(x):
        return NewTempClass(x)

    @ray.remote
    def h2(x):
        return ray.get(h1.remote(x))

    assert ray.get(h2.remote(10)).value == 10

    # Test registering multiple classes with the same name.
    @ray.remote(num_return_vals=3)
    def j():
        class Class0(object):
            def method0(self):
                pass

        c0 = Class0()

        class Class0(object):
            def method1(self):
                pass

        c1 = Class0()

        class Class0(object):
            def method2(self):
                pass

        c2 = Class0()
        return c0, c1, c2

    results = []
    for _ in range(5):
        results += j.remote()
    for i in range(len(results) // 3):
        c0, c1, c2 = ray.get(results[(3 * i):(3 * (i + 1))])
        # Each instance must keep exactly the method of the class version
        # it was created from, even though the names collided.
        c0.method0()
        c1.method1()
        c2.method2()

        assert not hasattr(c0, "method1")
        assert not hasattr(c0, "method2")
        assert not hasattr(c1, "method0")
        assert not hasattr(c1, "method2")
        assert not hasattr(c2, "method0")
        assert not hasattr(c2, "method1")

    @ray.remote
    def k():
        class Class0(object):
            def method0(self):
                pass

        c0 = Class0()

        class Class0(object):
            def method1(self):
                pass

        c1 = Class0()

        class Class0(object):
            def method2(self):
                pass

        c2 = Class0()
        return c0, c1, c2

    results = ray.get([k.remote() for _ in range(5)])
    for c0, c1, c2 in results:
        c0.method0()
        c1.method1()
        c2.method2()

        assert not hasattr(c0, "method1")
        assert not hasattr(c0, "method2")
        assert not hasattr(c1, "method0")
        assert not hasattr(c1, "method2")
        assert not hasattr(c2, "method0")
        assert not hasattr(c2, "method1")
def test_keyword_args(shutdown_only):
    """Remote functions must accept positional, keyword and default
    arguments with normal Python call semantics, and reject bad calls."""

    @ray.remote
    def keyword_fct1(a, b="hello"):
        return "{} {}".format(a, b)

    @ray.remote
    def keyword_fct2(a="hello", b="world"):
        return "{} {}".format(a, b)

    @ray.remote
    def keyword_fct3(a, b, c="hello", d="world"):
        return "{} {} {} {}".format(a, b, c, d)

    ray.init(num_cpus=1)

    x = keyword_fct1.remote(1)
    assert ray.get(x) == "1 hello"
    x = keyword_fct1.remote(1, "hi")
    assert ray.get(x) == "1 hi"
    x = keyword_fct1.remote(1, b="world")
    assert ray.get(x) == "1 world"
    x = keyword_fct1.remote(a=1, b="world")
    assert ray.get(x) == "1 world"

    x = keyword_fct2.remote(a="w", b="hi")
    assert ray.get(x) == "w hi"
    x = keyword_fct2.remote(b="hi", a="w")
    assert ray.get(x) == "w hi"
    x = keyword_fct2.remote(a="w")
    assert ray.get(x) == "w world"
    x = keyword_fct2.remote(b="hi")
    assert ray.get(x) == "hello hi"
    x = keyword_fct2.remote("w")
    assert ray.get(x) == "w world"
    x = keyword_fct2.remote("w", "hi")
    assert ray.get(x) == "w hi"

    x = keyword_fct3.remote(0, 1, c="w", d="hi")
    assert ray.get(x) == "0 1 w hi"
    x = keyword_fct3.remote(0, b=1, c="w", d="hi")
    assert ray.get(x) == "0 1 w hi"
    x = keyword_fct3.remote(a=0, b=1, c="w", d="hi")
    assert ray.get(x) == "0 1 w hi"
    x = keyword_fct3.remote(0, 1, d="hi", c="w")
    assert ray.get(x) == "0 1 w hi"
    x = keyword_fct3.remote(0, 1, c="w")
    assert ray.get(x) == "0 1 w world"
    x = keyword_fct3.remote(0, 1, d="hi")
    assert ray.get(x) == "0 1 hello hi"
    x = keyword_fct3.remote(0, 1)
    assert ray.get(x) == "0 1 hello world"
    x = keyword_fct3.remote(a=0, b=1)
    assert ray.get(x) == "0 1 hello world"

    # Check that we cannot pass invalid keyword arguments to functions.
    @ray.remote
    def f1():
        return

    @ray.remote
    def f2(x, y=0, z=0):
        return

    # Make sure we get an exception if too many arguments are passed in.
    with pytest.raises(Exception):
        f1.remote(3)

    with pytest.raises(Exception):
        f1.remote(x=3)

    with pytest.raises(Exception):
        f2.remote(0, w=0)

    with pytest.raises(Exception):
        f2.remote(3, x=3)

    # Make sure we get an exception if too many arguments are passed in.
    with pytest.raises(Exception):
        f2.remote(1, 2, 3, 4)

    @ray.remote
    def f3(x):
        return x

    assert ray.get(f3.remote(4)) == 4
def test_variable_number_of_args(shutdown_only):
    """Remote functions support *args; **kwargs must be rejected when the
    remote function is defined."""

    @ray.remote
    def varargs_fct1(*a):
        return " ".join(map(str, a))

    @ray.remote
    def varargs_fct2(a, *b):
        return " ".join(map(str, b))

    try:

        @ray.remote
        def kwargs_throw_exception(**c):
            return ()

        kwargs_exception_thrown = False
    except Exception:
        kwargs_exception_thrown = True

    ray.init(num_cpus=1)

    x = varargs_fct1.remote(0, 1, 2)
    assert ray.get(x) == "0 1 2"
    x = varargs_fct2.remote(0, 1, 2)
    assert ray.get(x) == "1 2"

    # Defining a remote function with **kwargs should have raised above.
    assert kwargs_exception_thrown

    @ray.remote
    def f1(*args):
        return args

    @ray.remote
    def f2(x, y, *args):
        return x, y, args

    assert ray.get(f1.remote()) == ()
    assert ray.get(f1.remote(1)) == (1, )
    assert ray.get(f1.remote(1, 2, 3)) == (1, 2, 3)
    with pytest.raises(Exception):
        f2.remote()
    with pytest.raises(Exception):
        f2.remote(1)
    assert ray.get(f2.remote(1, 2)) == (1, 2, ())
    assert ray.get(f2.remote(1, 2, 3)) == (1, 2, (3, ))
    assert ray.get(f2.remote(1, 2, 3, 4)) == (1, 2, (3, 4))
# NOTE(review): this looks like an unconverted unittest-style method left
# over from a class-based test: it takes ``self`` and calls
# ``self.init_ray()``, neither of which exists at module level, so pytest
# cannot run it as written. TODO: convert to a ``shutdown_only`` fixture
# function (like its neighbors) or remove -- confirm intent.
def testNoArgs(self):
    """A no-argument remote no-op should be callable."""

    @ray.remote
    def no_op():
        pass

    self.init_ray()

    ray.get(no_op.remote())
def test_defining_remote_functions(shutdown_only):
    """Remote functions can be defined and redefined interactively, close
    over data and modules, and call other remote functions."""
    ray.init(num_cpus=3)

    # Test that we can define a remote function in the shell.
    @ray.remote
    def f(x):
        return x + 1

    assert ray.get(f.remote(0)) == 1

    # Test that we can redefine the remote function.
    @ray.remote
    def f(x):
        return x + 10

    # Workers may briefly keep executing the old definition; retry until
    # the new one is picked up.
    while True:
        val = ray.get(f.remote(0))
        assert val in [1, 10]
        if val == 10:
            break
        else:
            logger.info("Still using old definition of f, trying again.")

    # Test that we can close over plain old data.
    data = [
        np.zeros([3, 5]), (1, 2, "a"), [0.0, 1.0, 1 << 62], 1 << 60, {
            "a": np.zeros(3)
        }
    ]

    @ray.remote
    def g():
        return data

    ray.get(g.remote())

    # Test that we can close over modules.
    @ray.remote
    def h():
        return np.zeros([3, 5])

    assert_equal(ray.get(h.remote()), np.zeros([3, 5]))

    @ray.remote
    def j():
        return time.time()

    ray.get(j.remote())

    # Test that we can define remote functions that call other remote
    # functions.
    @ray.remote
    def k(x):
        return x + 1

    @ray.remote
    def k2(x):
        return ray.get(k.remote(x))

    @ray.remote
    def m(x):
        return ray.get(k2.remote(x))

    assert ray.get(k.remote(1)) == 2
    assert ray.get(k2.remote(1)) == 2
    assert ray.get(m.remote(1)) == 2
def test_submit_api(shutdown_only):
    """The low-level _remote() API honors args/kwargs, num_return_vals and
    per-call resource requirements for both tasks and actors."""
    ray.init(num_cpus=1, num_gpus=1, resources={"Custom": 1})

    @ray.remote
    def f(n):
        return list(range(n))

    @ray.remote
    def g():
        return ray.get_gpu_ids()

    # With num_return_vals=0 there is no ObjectID to return.
    assert f._remote([0], num_return_vals=0) is None
    id1 = f._remote(args=[1], num_return_vals=1)
    assert ray.get(id1) == [0]
    id1, id2 = f._remote(args=[2], num_return_vals=2)
    assert ray.get([id1, id2]) == [0, 1]
    id1, id2, id3 = f._remote(args=[3], num_return_vals=3)
    assert ray.get([id1, id2, id3]) == [0, 1, 2]
    assert ray.get(
        g._remote(
            args=[], num_cpus=1, num_gpus=1,
            resources={"Custom": 1})) == [0]
    # A task demanding a nonexistent resource can never become ready.
    infeasible_id = g._remote(args=[], resources={"NonexistentCustom": 1})
    ready_ids, remaining_ids = ray.wait([infeasible_id], timeout=0.05)
    assert len(ready_ids) == 0
    assert len(remaining_ids) == 1

    @ray.remote
    class Actor(object):
        def __init__(self, x, y=0):
            self.x = x
            self.y = y

        def method(self, a, b=0):
            return self.x, self.y, a, b

        def gpu_ids(self):
            return ray.get_gpu_ids()

    a = Actor._remote(
        args=[0], kwargs={"y": 1}, num_gpus=1, resources={"Custom": 1})

    id1, id2, id3, id4 = a.method._remote(
        args=["test"], kwargs={"b": 2}, num_return_vals=4)
    assert ray.get([id1, id2, id3, id4]) == [0, 1, "test", 2]
def test_get_multiple(shutdown_only):
    """ray.get on a list of ids returns values in order, duplicates too."""
    ray.init(num_cpus=1)
    object_ids = [ray.put(i) for i in range(10)]
    assert ray.get(object_ids) == list(range(10))

    # Get a random choice of object IDs with duplicates.
    picks = list(np.random.choice(range(10), 5))
    picks = picks + picks
    fetched = ray.get([object_ids[idx] for idx in picks])
    assert fetched == picks
def test_get_multiple_experimental(shutdown_only):
    """ray.experimental.get accepts tuple and ndarray containers of ids."""
    ray.init(num_cpus=1)
    object_ids = [ray.put(i) for i in range(10)]

    object_ids_tuple = tuple(object_ids)
    assert ray.experimental.get(object_ids_tuple) == list(range(10))

    object_ids_nparray = np.array(object_ids)
    assert ray.experimental.get(object_ids_nparray) == list(range(10))
def test_get_dict(shutdown_only):
    """ray.experimental.get resolves ObjectID values inside a dict while
    passing plain values through untouched."""
    ray.init(num_cpus=1)
    mixed = {str(i): ray.put(i) for i in range(5)}
    mixed.update({str(i): i for i in range(5, 10)})
    assert ray.experimental.get(mixed) == {str(i): i for i in range(10)}
def test_wait(shutdown_only):
    """ray.wait semantics: num_returns, timeouts, duplicate ids, empty
    lists, and type validation."""
    ray.init(num_cpus=1)

    @ray.remote
    def f(delay):
        time.sleep(delay)
        return 1

    objectids = [f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
    ready_ids, remaining_ids = ray.wait(objectids)
    assert len(ready_ids) == 1
    assert len(remaining_ids) == 3
    ready_ids, remaining_ids = ray.wait(objectids, num_returns=4)
    assert set(ready_ids) == set(objectids)
    assert remaining_ids == []

    # With a timeout shorter than the slowest task, only three finish.
    objectids = [f.remote(0.5), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
    start_time = time.time()
    ready_ids, remaining_ids = ray.wait(objectids, timeout=1.75, num_returns=4)
    assert time.time() - start_time < 2
    assert len(ready_ids) == 3
    assert len(remaining_ids) == 1
    ray.wait(objectids)
    objectids = [f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
    start_time = time.time()
    ready_ids, remaining_ids = ray.wait(objectids, timeout=5.0)
    assert time.time() - start_time < 5
    assert len(ready_ids) == 1
    assert len(remaining_ids) == 3

    # Verify that calling wait with duplicate object IDs throws an
    # exception.
    x = ray.put(1)
    with pytest.raises(Exception):
        ray.wait([x, x])

    # Make sure it is possible to call wait with an empty list.
    ready_ids, remaining_ids = ray.wait([])
    assert ready_ids == []
    assert remaining_ids == []

    # Test semantics of num_returns with no timeout.
    oids = [ray.put(i) for i in range(10)]
    (found, rest) = ray.wait(oids, num_returns=2)
    assert len(found) == 2
    assert len(rest) == 8

    # Verify that incorrect usage raises a TypeError.
    x = ray.put(1)
    with pytest.raises(TypeError):
        ray.wait(x)
    with pytest.raises(TypeError):
        ray.wait(1)
    with pytest.raises(TypeError):
        ray.wait([1])
def test_wait_iterables(shutdown_only):
    """ray.experimental.wait also accepts tuples and numpy arrays of ids."""
    ray.init(num_cpus=1)

    @ray.remote
    def f(delay):
        time.sleep(delay)
        return 1

    objectids = (f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5))
    ready_ids, remaining_ids = ray.experimental.wait(objectids)
    assert len(ready_ids) == 1
    assert len(remaining_ids) == 3

    objectids = np.array(
        [f.remote(1.0),
         f.remote(0.5),
         f.remote(0.5),
         f.remote(0.5)])
    ready_ids, remaining_ids = ray.experimental.wait(objectids)
    assert len(ready_ids) == 1
    assert len(remaining_ids) == 3
def test_multiple_waits_and_gets(shutdown_only):
    """Concurrent wait/get requests on the same ObjectID must all return."""
    # It is important to use three workers here, so that the three tasks
    # launched in this experiment can run at the same time.
    ray.init(num_cpus=3)

    @ray.remote
    def f(delay):
        time.sleep(delay)
        return 1

    @ray.remote
    def g(l):
        # The argument l should be a list containing one object ID.
        ray.wait([l[0]])

    @ray.remote
    def h(l):
        # The argument l should be a list containing one object ID.
        ray.get(l[0])

    # Make sure that multiple wait requests involving the same object ID
    # all return.
    x = f.remote(1)
    ray.get([g.remote([x]), g.remote([x])])

    # Make sure that multiple get requests involving the same object ID all
    # return.
    x = f.remote(1)
    ray.get([h.remote([x]), h.remote([x])])
def test_caching_functions_to_run(shutdown_only):
    """Functions registered via run_function_on_all_workers before
    ray.init must still run on every worker, in registration order."""

    # Test that we export functions to run on all workers before the driver
    # is connected.
    def f(worker_info):
        sys.path.append(1)

    ray.worker.global_worker.run_function_on_all_workers(f)

    def f(worker_info):
        sys.path.append(2)

    ray.worker.global_worker.run_function_on_all_workers(f)

    def g(worker_info):
        sys.path.append(3)

    ray.worker.global_worker.run_function_on_all_workers(g)

    def f(worker_info):
        sys.path.append(4)

    ray.worker.global_worker.run_function_on_all_workers(f)

    ray.init(num_cpus=1)

    @ray.remote
    def get_state():
        time.sleep(1)
        # The last four sys.path entries are the sentinels appended above.
        return sys.path[-4], sys.path[-3], sys.path[-2], sys.path[-1]

    res1 = get_state.remote()
    res2 = get_state.remote()
    assert ray.get(res1) == (1, 2, 3, 4)
    assert ray.get(res2) == (1, 2, 3, 4)

    # Clean up the path on the workers.
    def f(worker_info):
        sys.path.pop()
        sys.path.pop()
        sys.path.pop()
        sys.path.pop()

    ray.worker.global_worker.run_function_on_all_workers(f)
def test_running_function_on_all_workers(shutdown_only):
    """run_function_on_all_workers after init affects subsequent remote
    calls, and a second registration can undo the first."""
    ray.init(num_cpus=1)

    def f(worker_info):
        sys.path.append("fake_directory")

    ray.worker.global_worker.run_function_on_all_workers(f)

    @ray.remote
    def get_path1():
        return sys.path

    assert "fake_directory" == ray.get(get_path1.remote())[-1]

    def f(worker_info):
        sys.path.pop(-1)

    ray.worker.global_worker.run_function_on_all_workers(f)

    # Create a second remote function to guarantee that when we call
    # get_path2.remote(), the second function to run will have been run on
    # the worker.
    @ray.remote
    def get_path2():
        return sys.path

    assert "fake_directory" not in ray.get(get_path2.remote())
def test_profiling_api(shutdown_only):
    """All expected event categories (including a custom event recorded via
    ray.profile) must eventually appear in the profile table.

    Fix: the polling loop hammered the global-state tracing dump with no
    pause between attempts; it now backs off briefly between polls.
    """
    ray.init(num_cpus=2)

    @ray.remote
    def f():
        with ray.profile(
                "custom_event",
                extra_data={"name": "custom name"}) as ray_prof:
            ray_prof.set_attribute("key", "value")

    ray.put(1)
    object_id = f.remote()
    ray.wait([object_id])
    ray.get(object_id)

    # Wait until all of the profiling information appears in the profile
    # table.
    timeout_seconds = 20
    start_time = time.time()
    while True:
        if time.time() - start_time > timeout_seconds:
            raise Exception("Timed out while waiting for information in "
                            "profile table.")
        profile_data = ray.global_state.chrome_tracing_dump()
        event_types = {event["cat"] for event in profile_data}
        expected_types = [
            "worker_idle",
            "task",
            "task:deserialize_arguments",
            "task:execute",
            "task:store_outputs",
            "wait_for_function",
            "ray.get",
            "ray.put",
            "ray.wait",
            "submit_task",
            "fetch_and_run_function",
            "register_remote_function",
            "custom_event",  # This is the custom one from ray.profile.
        ]
        if all(expected_type in event_types
               for expected_type in expected_types):
            break
        # Back off briefly instead of busy-polling the tracing dump.
        time.sleep(0.1)
@pytest.fixture()
def ray_start_cluster():
    """Yield a fresh multi-node test cluster; tear everything down after."""
    cluster = ray.test.cluster_utils.Cluster()
    yield cluster
    # The code after the yield will run as teardown code.
    ray.shutdown()
    cluster.shutdown()
def test_object_transfer_dump(ray_start_cluster):
    """The chrome-tracing transfer dump must record sends and receives for
    every node pair and be JSON-serializable."""
    cluster = ray_start_cluster

    num_nodes = 3
    # Give each node a private resource label so tasks can be pinned to it.
    for i in range(num_nodes):
        cluster.add_node(resources={str(i): 1}, object_store_memory=10**9)
    ray.init(redis_address=cluster.redis_address)

    @ray.remote
    def f(x):
        return

    # These objects will live on different nodes.
    object_ids = [
        f._remote(args=[1], resources={str(i): 1}) for i in range(num_nodes)
    ]

    # Broadcast each object from each machine to each other machine.
    for object_id in object_ids:
        ray.get([
            f._remote(args=[object_id], resources={str(i): 1})
            for i in range(num_nodes)
        ])

    # The profiling information only flushes once every second.
    time.sleep(1.1)

    transfer_dump = ray.global_state.chrome_tracing_object_transfer_dump()
    # Make sure the transfer dump can be serialized with JSON.
    json.loads(json.dumps(transfer_dump))
    assert len(transfer_dump) >= num_nodes**2
    assert len({
        event["pid"]
        for event in transfer_dump if event["name"] == "transfer_receive"
    }) == num_nodes
    assert len({
        event["pid"]
        for event in transfer_dump if event["name"] == "transfer_send"
    }) == num_nodes
def test_identical_function_names(shutdown_only):
    """Redefining a remote function must not route new calls to a stale
    version."""
    # Define a bunch of remote functions and make sure that we don't
    # accidentally call an older version.
    ray.init(num_cpus=1)

    num_calls = 200

    @ray.remote
    def f():
        return 1

    results1 = [f.remote() for _ in range(num_calls)]

    @ray.remote
    def f():
        return 2

    results2 = [f.remote() for _ in range(num_calls)]

    @ray.remote
    def f():
        return 3

    results3 = [f.remote() for _ in range(num_calls)]

    @ray.remote
    def f():
        return 4

    results4 = [f.remote() for _ in range(num_calls)]

    @ray.remote
    def f():
        return 5

    results5 = [f.remote() for _ in range(num_calls)]

    # Calls submitted between redefinitions must see their own version.
    assert ray.get(results1) == num_calls * [1]
    assert ray.get(results2) == num_calls * [2]
    assert ray.get(results3) == num_calls * [3]
    assert ray.get(results4) == num_calls * [4]
    assert ray.get(results5) == num_calls * [5]

    @ray.remote
    def g():
        return 1

    @ray.remote  # noqa: F811
    def g():
        return 2

    @ray.remote  # noqa: F811
    def g():
        return 3

    @ray.remote  # noqa: F811
    def g():
        return 4

    @ray.remote  # noqa: F811
    def g():
        return 5

    # When all calls happen after the last redefinition, only the final
    # version should ever run.
    result_values = ray.get([g.remote() for _ in range(num_calls)])
    assert result_values == num_calls * [5]
def test_illegal_api_calls(shutdown_only):
    """put() of an ObjectID and get() of a plain value must both raise."""
    ray.init(num_cpus=1)

    # Verify that we cannot call put on an ObjectID.
    oid = ray.put(1)
    with pytest.raises(Exception):
        ray.put(oid)
    # Verify that we cannot call get on a regular value.
    with pytest.raises(Exception):
        ray.get(3)
def test_multithreading(shutdown_only):
    """Exercise the Ray API concurrently from the driver, from a worker
    task, and from background threads inside an actor.

    Fix: the final check ``ray.get(actor.join.remote()) == "ok"`` was a
    bare comparison whose result was discarded, so a failing join could
    never fail the test; it is now asserted.
    """
    # This test requires at least 2 CPUs to finish since the worker does not
    # relase resources when joining the threads.
    ray.init(num_cpus=2)

    def run_test_in_multi_threads(test_case, num_threads=20, num_repeats=50):
        """A helper function that runs test cases in multiple threads."""

        def wrapper():
            for _ in range(num_repeats):
                test_case()
                time.sleep(random.randint(0, 10) / 1000.0)
            return "ok"

        executor = ThreadPoolExecutor(max_workers=num_threads)
        futures = [executor.submit(wrapper) for _ in range(num_threads)]
        for future in futures:
            assert future.result() == "ok"

    @ray.remote
    def echo(value, delay_ms=0):
        if delay_ms > 0:
            time.sleep(delay_ms / 1000.0)
        return value

    @ray.remote
    class Echo(object):
        def echo(self, value):
            return value

    def test_api_in_multi_threads():
        """Test using Ray api in multiple threads."""

        # Test calling remote functions in multiple threads.
        def test_remote_call():
            value = random.randint(0, 1000000)
            result = ray.get(echo.remote(value))
            assert value == result

        run_test_in_multi_threads(test_remote_call)

        # Test multiple threads calling one actor.
        actor = Echo.remote()

        def test_call_actor():
            value = random.randint(0, 1000000)
            result = ray.get(actor.echo.remote(value))
            assert value == result

        run_test_in_multi_threads(test_call_actor)

        # Test put and get.
        def test_put_and_get():
            value = random.randint(0, 1000000)
            result = ray.get(ray.put(value))
            assert value == result

        run_test_in_multi_threads(test_put_and_get)

        # Test multiple threads waiting for objects.
        num_wait_objects = 10
        objects = [
            echo.remote(i, delay_ms=10) for i in range(num_wait_objects)
        ]

        def test_wait():
            ready, _ = ray.wait(
                objects,
                num_returns=len(objects),
                timeout=1000.0,
            )
            assert len(ready) == num_wait_objects
            assert ray.get(ready) == list(range(num_wait_objects))

        run_test_in_multi_threads(test_wait, num_repeats=1)

    # Run tests in a driver.
    test_api_in_multi_threads()

    # Run tests in a worker.
    @ray.remote
    def run_tests_in_worker():
        test_api_in_multi_threads()
        return "ok"

    assert ray.get(run_tests_in_worker.remote()) == "ok"

    # Test actor that runs background threads.
    @ray.remote
    class MultithreadedActor(object):
        def __init__(self):
            self.lock = threading.Lock()
            self.thread_results = []

        def background_thread(self, wait_objects):
            try:
                # Test wait
                ready, _ = ray.wait(
                    wait_objects,
                    num_returns=len(wait_objects),
                    timeout=1000.0,
                )
                assert len(ready) == len(wait_objects)
                for _ in range(50):
                    num = 20
                    # Test remote call
                    results = [echo.remote(i) for i in range(num)]
                    assert ray.get(results) == list(range(num))
                    # Test put and get
                    objects = [ray.put(i) for i in range(num)]
                    assert ray.get(objects) == list(range(num))
                    time.sleep(random.randint(0, 10) / 1000.0)
            except Exception as e:
                with self.lock:
                    self.thread_results.append(e)
            else:
                with self.lock:
                    self.thread_results.append("ok")

        def spawn(self):
            wait_objects = [echo.remote(i, delay_ms=10) for i in range(20)]
            self.threads = [
                threading.Thread(
                    target=self.background_thread, args=(wait_objects, ))
                for _ in range(20)
            ]
            [thread.start() for thread in self.threads]

        def join(self):
            [thread.join() for thread in self.threads]
            assert self.thread_results == ["ok"] * len(self.threads)
            return "ok"

    actor = MultithreadedActor.remote()
    actor.spawn.remote()
    # Bug fix: the result of this comparison was previously discarded.
    assert ray.get(actor.join.remote()) == "ok"
def test_free_objects_multi_node(ray_start_cluster):
    """ray.internal.free with local_only toggles whether objects are
    deleted cluster-wide or only on the caller's node."""
    # This test will do following:
    # 1. Create 3 raylets that each hold an actor.
    # 2. Each actor creates an object which is the deletion target.
    # 3. Invoke 64 methods on each actor to flush plasma client.
    # 4. After flushing, the plasma client releases the targets.
    # 5. Check that the deletion targets have been deleted.
    # Caution: if remote functions are used instead of actor methods,
    # one raylet may create more than one worker to execute the
    # tasks, so the flushing operations may be executed in different
    # workers and the plasma client holding the deletion target
    # may not be flushed.
    cluster = ray_start_cluster
    config = json.dumps({"object_manager_repeated_push_delay_ms": 1000})
    for i in range(3):
        cluster.add_node(
            num_cpus=1,
            resources={"Custom{}".format(i): 1},
            _internal_config=config)
    ray.init(redis_address=cluster.redis_address)

    @ray.remote(resources={"Custom0": 1})
    class ActorOnNode0(object):
        def get(self):
            return ray.worker.global_worker.plasma_client.store_socket_name

    @ray.remote(resources={"Custom1": 1})
    class ActorOnNode1(object):
        def get(self):
            return ray.worker.global_worker.plasma_client.store_socket_name

    @ray.remote(resources={"Custom2": 1})
    class ActorOnNode2(object):
        def get(self):
            return ray.worker.global_worker.plasma_client.store_socket_name

    def create(actors):
        # One object per actor; all three must be ready before returning.
        a = actors[0].get.remote()
        b = actors[1].get.remote()
        c = actors[2].get.remote()
        (l1, l2) = ray.wait([a, b, c], num_returns=3)
        assert len(l1) == 3
        assert len(l2) == 0
        return (a, b, c)

    def flush(actors):
        # Flush the Release History.
        # Current Plasma Client Cache will maintain 64-item list.
        # If the number changed, this will fail.
        logger.info("Start Flush!")
        for i in range(64):
            ray.get([actor.get.remote() for actor in actors])
        logger.info("Flush finished!")

    def run_one_test(actors, local_only):
        (a, b, c) = create(actors)
        # The three objects should be generated on different object stores.
        assert ray.get(a) != ray.get(b)
        assert ray.get(a) != ray.get(c)
        assert ray.get(c) != ray.get(b)
        ray.internal.free([a, b, c], local_only=local_only)
        flush(actors)
        return (a, b, c)

    actors = [
        ActorOnNode0.remote(),
        ActorOnNode1.remote(),
        ActorOnNode2.remote()
    ]
    # Case 1: run this local_only=False. All 3 objects will be deleted.
    (a, b, c) = run_one_test(actors, False)
    (l1, l2) = ray.wait([a, b, c], timeout=0.01, num_returns=1)
    # All the objects are deleted.
    assert len(l1) == 0
    assert len(l2) == 3
    # Case 2: run this local_only=True. Only 1 object will be deleted.
    (a, b, c) = run_one_test(actors, True)
    (l1, l2) = ray.wait([a, b, c], timeout=0.01, num_returns=3)
    # One object is deleted and 2 objects are not.
    assert len(l1) == 2
    assert len(l2) == 1
    # The deleted object will have the same store with the driver.
    local_return = ray.worker.global_worker.plasma_client.store_socket_name
    for object_id in l1:
        assert ray.get(object_id) != local_return
def test_local_mode(shutdown_only):
    """In local mode, remote calls execute inline and return real values.

    Checks that f.remote()/ray.get/ray.put act as identity-style
    operations, that arguments passed to remote functions are copied (so
    callees cannot mutate the caller's objects), and that actors and actor
    handles work without a cluster.
    """
    # These are defined before ray.init to show registration works either way.
    @ray.remote
    def local_mode_f():
        return np.array([0, 0])
    @ray.remote
    def local_mode_g(x):
        x[0] = 1
        return x
    ray.init(local_mode=True)
    @ray.remote
    def f():
        return np.ones([3, 4, 5])
    xref = f.remote()
    # Remote functions should return by value.
    assert_equal(xref, np.ones([3, 4, 5]))
    # Check that ray.get is the identity.
    assert_equal(xref, ray.get(xref))
    y = np.random.normal(size=[11, 12])
    # Check that ray.put is the identity.
    assert_equal(y, ray.put(y))
    # Make sure objects are immutable, this example is why we need to copy
    # arguments before passing them into remote functions in python mode
    aref = local_mode_f.remote()
    assert_equal(aref, np.array([0, 0]))
    bref = local_mode_g.remote(aref)
    # Make sure local_mode_g does not mutate aref.
    assert_equal(aref, np.array([0, 0]))
    assert_equal(bref, np.array([1, 0]))
    # wait should return the first num_returns values passed in as the
    # first list and the remaining values as the second list
    num_returns = 5
    object_ids = [ray.put(i) for i in range(20)]
    ready, remaining = ray.wait(
        object_ids, num_returns=num_returns, timeout=None)
    assert_equal(ready, object_ids[:num_returns])
    assert_equal(remaining, object_ids[num_returns:])
    # Test actors in LOCAL_MODE.
    @ray.remote
    class LocalModeTestClass(object):
        def __init__(self, array):
            self.array = array
        def set_array(self, array):
            self.array = array
        def get_array(self):
            return self.array
        def modify_and_set_array(self, array):
            # Mutates its argument; the caller's copy must stay intact.
            array[0] = -1
            self.array = array
    test_actor = LocalModeTestClass.remote(np.arange(10))
    # Remote actor functions should return by value
    assert_equal(test_actor.get_array.remote(), np.arange(10))
    test_array = np.arange(10)
    # Remote actor functions should not mutate arguments
    test_actor.modify_and_set_array.remote(test_array)
    assert_equal(test_array, np.arange(10))
    # Remote actor functions should keep state
    test_array[0] = -1
    assert_equal(test_array, test_actor.get_array.remote())
    # Check that actor handles work in Python mode.
    @ray.remote
    def use_actor_handle(handle):
        array = np.ones(10)
        handle.set_array.remote(array)
        assert np.alltrue(array == ray.get(handle.get_array.remote()))
    ray.get(use_actor_handle.remote(test_actor))
def test_resource_constraints(shutdown_only):
    """Tasks must be throttled by their num_cpus/num_gpus requirements.

    With 10 CPUs and 2 GPUs, batches whose aggregate demand exceeds
    capacity must run in at least two waves; wall-clock bounds verify the
    scheduler's admission behavior.
    """
    num_workers = 20
    ray.init(num_cpus=10, num_gpus=2)
    @ray.remote(num_cpus=0)
    def get_worker_id():
        time.sleep(0.1)
        return os.getpid()
    # Attempt to wait for all of the workers to start up.
    while True:
        if len(
                set(
                    ray.get([
                        get_worker_id.remote() for _ in range(num_workers)
                    ]))) == num_workers:
            break
    time_buffer = 0.3
    # At most 10 copies of this can run at once.
    @ray.remote(num_cpus=1)
    def f(n):
        time.sleep(n)
    start_time = time.time()
    ray.get([f.remote(0.5) for _ in range(10)])
    duration = time.time() - start_time
    # All 10 tasks fit in a single wave on 10 CPUs.
    assert duration < 0.5 + time_buffer
    assert duration > 0.5
    start_time = time.time()
    ray.get([f.remote(0.5) for _ in range(11)])
    duration = time.time() - start_time
    # The 11th task must wait for a free CPU, forcing a second wave.
    assert duration < 1 + time_buffer
    assert duration > 1
    # NOTE: f is deliberately redefined with a larger CPU demand.
    @ray.remote(num_cpus=3)
    def f(n):
        time.sleep(n)
    start_time = time.time()
    ray.get([f.remote(0.5) for _ in range(3)])
    duration = time.time() - start_time
    # Three 3-CPU tasks (9 CPUs) fit in one wave.
    assert duration < 0.5 + time_buffer
    assert duration > 0.5
    start_time = time.time()
    ray.get([f.remote(0.5) for _ in range(4)])
    duration = time.time() - start_time
    # Four 3-CPU tasks need 12 CPUs, so a second wave is required.
    assert duration < 1 + time_buffer
    assert duration > 1
    # Redefined again: now GPU-bound (only 2 GPUs available).
    @ray.remote(num_gpus=1)
    def f(n):
        time.sleep(n)
    start_time = time.time()
    ray.get([f.remote(0.5) for _ in range(2)])
    duration = time.time() - start_time
    # Two 1-GPU tasks fit in one wave.
    assert duration < 0.5 + time_buffer
    assert duration > 0.5
    start_time = time.time()
    ray.get([f.remote(0.5) for _ in range(3)])
    duration = time.time() - start_time
    # Three 1-GPU tasks need two waves.
    assert duration < 1 + time_buffer
    assert duration > 1
    start_time = time.time()
    ray.get([f.remote(0.5) for _ in range(4)])
    duration = time.time() - start_time
    # Four 1-GPU tasks still complete in two waves of two.
    assert duration < 1 + time_buffer
    assert duration > 1
def test_multi_resource_constraints(shutdown_only):
    """Tasks demanding both CPUs and GPUs must be co-scheduled correctly.

    f needs 1 CPU + 9 GPUs and g needs 9 CPUs + 1 GPU, so one f and one g
    fit together on a 10-CPU/10-GPU node, but two of either kind (or all
    four) must execute in two waves.
    """
    num_workers = 20
    ray.init(num_cpus=10, num_gpus=10)
    @ray.remote(num_cpus=0)
    def get_worker_id():
        time.sleep(0.1)
        return os.getpid()
    # Attempt to wait for all of the workers to start up.
    while True:
        if len(
                set(
                    ray.get([
                        get_worker_id.remote() for _ in range(num_workers)
                    ]))) == num_workers:
            break
    @ray.remote(num_cpus=1, num_gpus=9)
    def f(n):
        time.sleep(n)
    @ray.remote(num_cpus=9, num_gpus=1)
    def g(n):
        time.sleep(n)
    time_buffer = 0.3
    start_time = time.time()
    ray.get([f.remote(0.5), g.remote(0.5)])
    duration = time.time() - start_time
    # f + g together need exactly 10 CPUs and 10 GPUs: one wave.
    assert duration < 0.5 + time_buffer
    assert duration > 0.5
    start_time = time.time()
    ray.get([f.remote(0.5), f.remote(0.5)])
    duration = time.time() - start_time
    # Two f tasks would need 18 GPUs, so they run back to back.
    assert duration < 1 + time_buffer
    assert duration > 1
    start_time = time.time()
    ray.get([g.remote(0.5), g.remote(0.5)])
    duration = time.time() - start_time
    # Two g tasks would need 18 CPUs, so they run back to back.
    assert duration < 1 + time_buffer
    assert duration > 1
    start_time = time.time()
    ray.get([f.remote(0.5), f.remote(0.5), g.remote(0.5), g.remote(0.5)])
    duration = time.time() - start_time
    # Best case: the four tasks pair up (f+g) into exactly two waves.
    assert duration < 1 + time_buffer
    assert duration > 1
def test_gpu_ids(shutdown_only):
    """Check GPU id assignment for tasks and actors.

    Every task asserts that it received exactly the number of GPU ids it
    requested, that CUDA_VISIBLE_DEVICES matches those ids, and that each
    id is within range. Previously this body was copy-pasted six times
    (f0..f5); it is now built once by a parameterized factory.
    """
    num_gpus = 10
    ray.init(num_cpus=10, num_gpus=num_gpus)
    def make_gpu_task(num_gpus_per_task):
        # Factory producing a remote function that requests
        # num_gpus_per_task GPUs and validates its GPU assignment.
        @ray.remote(num_gpus=num_gpus_per_task)
        def gpu_task():
            time.sleep(0.1)
            gpu_ids = ray.get_gpu_ids()
            assert len(gpu_ids) == num_gpus_per_task
            assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
                [str(i) for i in gpu_ids]))
            for gpu_id in gpu_ids:
                assert gpu_id in range(num_gpus)
            return gpu_ids
        return gpu_task
    # fK requests K GPUs (f3 is kept for parity with the original test,
    # which defined it but never invoked it).
    f0, f1, f2, f3, f4, f5 = [make_gpu_task(i) for i in range(6)]
    # Wait for all workers to start up.
    @ray.remote
    def f():
        time.sleep(0.1)
        return os.getpid()
    start_time = time.time()
    while True:
        if len(set(ray.get([f.remote() for _ in range(10)]))) == 10:
            break
        if time.time() > start_time + 10:
            raise Exception("Timed out while waiting for workers to start "
                            "up.")
    # Zero-GPU tasks get an empty id list.
    list_of_ids = ray.get([f0.remote() for _ in range(10)])
    assert list_of_ids == 10 * [[]]
    # Ten 1-GPU tasks must cover all ten GPUs exactly once.
    list_of_ids = ray.get([f1.remote() for _ in range(10)])
    set_of_ids = {tuple(gpu_ids) for gpu_ids in list_of_ids}
    assert set_of_ids == {(i, ) for i in range(10)}
    # 2 + 4 + 4 GPUs together must use every GPU with no overlap.
    list_of_ids = ray.get([f2.remote(), f4.remote(), f4.remote()])
    all_ids = [gpu_id for gpu_ids in list_of_ids for gpu_id in gpu_ids]
    assert set(all_ids) == set(range(10))
    remaining = [f5.remote() for _ in range(20)]
    for _ in range(10):
        t1 = time.time()
        ready, remaining = ray.wait(remaining, num_returns=2)
        t2 = time.time()
        # There are only 10 GPUs, and each task uses 2 GPUs, so there
        # should only be 2 tasks scheduled at a given time, so if we wait
        # for 2 tasks to finish, then it should take at least 0.1 seconds
        # for each pair of tasks to finish.
        assert t2 - t1 > 0.09
        list_of_ids = ray.get(ready)
        all_ids = [gpu_id for gpu_ids in list_of_ids for gpu_id in gpu_ids]
        # Commenting out the below assert because it seems to fail a lot.
        # assert set(all_ids) == set(range(10))
    # Test that actors have CUDA_VISIBLE_DEVICES set properly.
    @ray.remote
    class Actor0(object):
        def __init__(self):
            gpu_ids = ray.get_gpu_ids()
            assert len(gpu_ids) == 0
            assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
                [str(i) for i in gpu_ids]))
            # Set self.x to make sure that we got here.
            self.x = 1
        def test(self):
            gpu_ids = ray.get_gpu_ids()
            assert len(gpu_ids) == 0
            assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
                [str(i) for i in gpu_ids]))
            return self.x
    @ray.remote(num_gpus=1)
    class Actor1(object):
        def __init__(self):
            gpu_ids = ray.get_gpu_ids()
            assert len(gpu_ids) == 1
            assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
                [str(i) for i in gpu_ids]))
            # Set self.x to make sure that we got here.
            self.x = 1
        def test(self):
            gpu_ids = ray.get_gpu_ids()
            assert len(gpu_ids) == 1
            assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
                [str(i) for i in gpu_ids]))
            return self.x
    a0 = Actor0.remote()
    ray.get(a0.test.remote())
    a1 = Actor1.remote()
    ray.get(a1.test.remote())
def test_zero_cpus(shutdown_only):
    """A task that requests zero CPUs must run even on a CPU-less node."""
    ray.init(num_cpus=0)
    @ray.remote(num_cpus=0)
    def no_cpu_task():
        return 1
    # The task should still be able to execute.
    ray.get(no_cpu_task.remote())
def test_zero_cpus_actor(ray_start_cluster):
    """An actor must be placed on a node that actually has CPUs."""
    cluster = ray_start_cluster
    cluster.add_node(num_cpus=0)
    cluster.add_node(num_cpus=2)
    ray.init(redis_address=cluster.redis_address)
    driver_plasma = ray.worker.global_worker.plasma_client.store_socket_name
    @ray.remote
    class StoreReporter(object):
        def method(self):
            # Report which plasma store (node) this actor landed on.
            return ray.worker.global_worker.plasma_client.store_socket_name
    # The actor must not land on the driver's zero-CPU node.
    actor = StoreReporter.remote()
    assert ray.get(actor.method.remote()) != driver_plasma
def test_fractional_resources(shutdown_only):
    """Fractional resource requests must pack correctly and reject values > 1.

    Six half-GPU actors pack two-per-GPU onto three GPUs; fractional
    custom-resource actors schedule only while capacity remains; and a
    fractional request greater than 1 raises ValueError.
    """
    ray.init(num_cpus=6, num_gpus=3, resources={"Custom": 1})
    @ray.remote(num_gpus=0.5)
    class Foo1(object):
        def method(self):
            gpu_ids = ray.get_gpu_ids()
            assert len(gpu_ids) == 1
            return gpu_ids[0]
    # Six half-GPU actors fill the three GPUs exactly: two actors per GPU.
    foos = [Foo1.remote() for _ in range(6)]
    gpu_ids = ray.get([f.method.remote() for f in foos])
    for i in range(3):
        assert gpu_ids.count(i) == 2
    del foos
    @ray.remote
    class Foo2(object):
        def method(self):
            pass
    # Create an actor that requires 0.7 of the custom resource.
    f1 = Foo2._remote([], {}, resources={"Custom": 0.7})
    ray.get(f1.method.remote())
    # Make sure that we cannot create an actor that requires 0.7 of the
    # custom resource. TODO(rkn): Re-enable this once ray.wait is
    # implemented.
    f2 = Foo2._remote([], {}, resources={"Custom": 0.7})
    ready, _ = ray.wait([f2.method.remote()], timeout=0.5)
    assert len(ready) == 0
    # Make sure we can start an actor that requires only 0.3 of the custom
    # resource.
    f3 = Foo2._remote([], {}, resources={"Custom": 0.3})
    ray.get(f3.method.remote())
    del f1, f3
    # Make sure that we get exceptions if we submit tasks that require a
    # fractional number of resources greater than 1.
    @ray.remote(num_cpus=1.5)
    def test():
        pass
    with pytest.raises(ValueError):
        test.remote()
    with pytest.raises(ValueError):
        Foo2._remote([], {}, resources={"Custom": 1.5})
def test_multiple_local_schedulers(ray_start_cluster):
    """Tasks must be assigned only to nodes that satisfy their resources.

    Three nodes with distinct CPU/GPU counts are created; each remote
    function's resource demand makes it runnable only on a known subset of
    nodes, identified by plasma store socket name.
    """
    # This test will define a bunch of tasks that can only be assigned to
    # specific local schedulers, and we will check that they are assigned
    # to the correct local schedulers.
    cluster = ray_start_cluster
    cluster.add_node(num_cpus=11, num_gpus=0)
    cluster.add_node(num_cpus=5, num_gpus=5)
    cluster.add_node(num_cpus=10, num_gpus=1)
    ray.init(redis_address=cluster.redis_address)
    cluster.wait_for_nodes()
    # Define a bunch of remote functions that all return the socket name of
    # the plasma store. Since there is a one-to-one correspondence between
    # plasma stores and local schedulers (at least right now), this can be
    # used to identify which local scheduler the task was assigned to.
    # This must be run on the zeroth local scheduler.
    @ray.remote(num_cpus=11)
    def run_on_0():
        return ray.worker.global_worker.plasma_client.store_socket_name
    # This must be run on the first local scheduler.
    @ray.remote(num_gpus=2)
    def run_on_1():
        return ray.worker.global_worker.plasma_client.store_socket_name
    # This must be run on the second local scheduler.
    @ray.remote(num_cpus=6, num_gpus=1)
    def run_on_2():
        return ray.worker.global_worker.plasma_client.store_socket_name
    # This can be run anywhere.
    @ray.remote(num_cpus=0, num_gpus=0)
    def run_on_0_1_2():
        return ray.worker.global_worker.plasma_client.store_socket_name
    # This must be run on the first or second local scheduler.
    @ray.remote(num_gpus=1)
    def run_on_1_2():
        return ray.worker.global_worker.plasma_client.store_socket_name
    # This must be run on the zeroth or second local scheduler.
    @ray.remote(num_cpus=8)
    def run_on_0_2():
        return ray.worker.global_worker.plasma_client.store_socket_name
    def run_lots_of_tasks():
        # Submit 100 tasks chosen uniformly at random from the six kinds,
        # recording which kind each one was.
        names = []
        results = []
        for i in range(100):
            index = np.random.randint(6)
            if index == 0:
                names.append("run_on_0")
                results.append(run_on_0.remote())
            elif index == 1:
                names.append("run_on_1")
                results.append(run_on_1.remote())
            elif index == 2:
                names.append("run_on_2")
                results.append(run_on_2.remote())
            elif index == 3:
                names.append("run_on_0_1_2")
                results.append(run_on_0_1_2.remote())
            elif index == 4:
                names.append("run_on_1_2")
                results.append(run_on_1_2.remote())
            elif index == 5:
                names.append("run_on_0_2")
                results.append(run_on_0_2.remote())
        return names, results
    # Map each node to its store socket name, ordered by its (unique) GPU
    # count so that store_names[k] is the k-th node added above.
    client_table = ray.global_state.client_table()
    store_names = []
    store_names += [
        client["ObjectStoreSocketName"] for client in client_table
        if client["Resources"]["GPU"] == 0
    ]
    store_names += [
        client["ObjectStoreSocketName"] for client in client_table
        if client["Resources"]["GPU"] == 5
    ]
    store_names += [
        client["ObjectStoreSocketName"] for client in client_table
        if client["Resources"]["GPU"] == 1
    ]
    assert len(store_names) == 3
    def validate_names_and_results(names, results):
        # Each task must have run on a node permitted by its resources,
        # and collectively the tasks must have touched every node.
        for name, result in zip(names, ray.get(results)):
            if name == "run_on_0":
                assert result in [store_names[0]]
            elif name == "run_on_1":
                assert result in [store_names[1]]
            elif name == "run_on_2":
                assert result in [store_names[2]]
            elif name == "run_on_0_1_2":
                assert (result in [
                    store_names[0], store_names[1], store_names[2]
                ])
            elif name == "run_on_1_2":
                assert result in [store_names[1], store_names[2]]
            elif name == "run_on_0_2":
                assert result in [store_names[0], store_names[2]]
            else:
                raise Exception("This should be unreachable.")
            assert set(ray.get(results)) == set(store_names)
    names, results = run_lots_of_tasks()
    validate_names_and_results(names, results)
    # Make sure the same thing works when this is nested inside of a task.
    @ray.remote
    def run_nested1():
        names, results = run_lots_of_tasks()
        return names, results
    @ray.remote
    def run_nested2():
        names, results = ray.get(run_nested1.remote())
        return names, results
    names, results = ray.get(run_nested2.remote())
    validate_names_and_results(names, results)
def test_custom_resources(ray_start_cluster):
    """Tasks requiring a custom resource must go to the node that has it."""
    cluster = ray_start_cluster
    # Only the second node actually provides CustomResource.
    cluster.add_node(num_cpus=3, resources={"CustomResource": 0})
    cluster.add_node(num_cpus=3, resources={"CustomResource": 1})
    ray.init(redis_address=cluster.redis_address)
    @ray.remote
    def f():
        time.sleep(0.001)
        return ray.worker.global_worker.plasma_client.store_socket_name
    @ray.remote(resources={"CustomResource": 1})
    def g():
        time.sleep(0.001)
        return ray.worker.global_worker.plasma_client.store_socket_name
    @ray.remote(resources={"CustomResource": 1})
    def h():
        # Blocks on unconstrained child tasks while holding the resource.
        ray.get([f.remote() for _ in range(5)])
        return ray.worker.global_worker.plasma_client.store_socket_name
    # The f tasks should be scheduled on both local schedulers.
    assert len(set(ray.get([f.remote() for _ in range(50)]))) == 2
    local_plasma = ray.worker.global_worker.plasma_client.store_socket_name
    # The g tasks should be scheduled only on the second local scheduler.
    local_scheduler_ids = set(ray.get([g.remote() for _ in range(50)]))
    assert len(local_scheduler_ids) == 1
    assert list(local_scheduler_ids)[0] != local_plasma
    # Make sure that resource bookkeeping works when a task that uses a
    # custom resources gets blocked.
    ray.get([h.remote() for _ in range(5)])
def test_two_custom_resources(ray_start_cluster):
    """Scheduling must respect combinations of two custom resources.

    Tasks satisfiable on both nodes spread across them; tasks satisfiable
    on one node stick to it; unsatisfiable tasks never run.
    """
    cluster = ray_start_cluster
    cluster.add_node(
        num_cpus=3, resources={
            "CustomResource1": 1,
            "CustomResource2": 2
        })
    cluster.add_node(
        num_cpus=3, resources={
            "CustomResource1": 3,
            "CustomResource2": 4
        })
    ray.init(redis_address=cluster.redis_address)
    # Satisfiable on both nodes.
    @ray.remote(resources={"CustomResource1": 1})
    def f():
        time.sleep(0.001)
        return ray.worker.global_worker.plasma_client.store_socket_name
    # Satisfiable on both nodes.
    @ray.remote(resources={"CustomResource2": 1})
    def g():
        time.sleep(0.001)
        return ray.worker.global_worker.plasma_client.store_socket_name
    # Satisfiable only on the second node (needs CustomResource2 >= 3).
    @ray.remote(resources={"CustomResource1": 1, "CustomResource2": 3})
    def h():
        time.sleep(0.001)
        return ray.worker.global_worker.plasma_client.store_socket_name
    # Unsatisfiable: no node has CustomResource1 >= 4.
    @ray.remote(resources={"CustomResource1": 4})
    def j():
        time.sleep(0.001)
        return ray.worker.global_worker.plasma_client.store_socket_name
    # Unsatisfiable: CustomResource3 does not exist anywhere.
    @ray.remote(resources={"CustomResource3": 1})
    def k():
        time.sleep(0.001)
        return ray.worker.global_worker.plasma_client.store_socket_name
    # The f and g tasks should be scheduled on both local schedulers.
    assert len(set(ray.get([f.remote() for _ in range(50)]))) == 2
    assert len(set(ray.get([g.remote() for _ in range(50)]))) == 2
    local_plasma = ray.worker.global_worker.plasma_client.store_socket_name
    # The h tasks should be scheduled only on the second local scheduler.
    local_scheduler_ids = set(ray.get([h.remote() for _ in range(50)]))
    assert len(local_scheduler_ids) == 1
    assert list(local_scheduler_ids)[0] != local_plasma
    # Make sure that tasks with unsatisfied custom resource requirements do
    # not get scheduled.
    ready_ids, remaining_ids = ray.wait([j.remote(), k.remote()], timeout=0.5)
    assert ready_ids == []
def test_many_custom_resources(shutdown_only):
    """Scheduling must work with thousands of distinct custom resources."""
    num_custom_resources = 10000
    total_resources = {
        str(i): np.random.randint(1, 7)
        for i in range(num_custom_resources)
    }
    ray.init(num_cpus=5, resources=total_resources)
    def f():
        return 1
    # Build 20 variants of f, each demanding a random subset of the
    # cluster's custom resources at full capacity.
    remote_functions = []
    for _ in range(20):
        subset_size = np.random.randint(0, num_custom_resources + 1)
        chosen = np.random.permutation(
            num_custom_resources)[:subset_size]
        demanded = {
            str(i): total_resources[str(i)]
            for i in chosen
        }
        remote_functions.append(ray.remote(resources=demanded)(f))
    # Also add a resource-free variant and one demanding everything.
    remote_functions.append(ray.remote(f))
    remote_functions.append(ray.remote(resources=total_resources)(f))
    # Submit each variant three times and make sure they all complete.
    results = [
        fn.remote() for fn in remote_functions for _ in range(3)
    ]
    ray.get(results)
@pytest.fixture
def save_gpu_ids_shutdown_only():
    """Fixture: shut ray down and restore CUDA_VISIBLE_DEVICES on teardown."""
    # Record the current value of this environment variable so that we can
    # reset it after the test.
    original_gpu_ids = os.environ.get("CUDA_VISIBLE_DEVICES", None)
    yield None
    # The code after the yield will run as teardown code.
    ray.shutdown()
    # Reset the environment variable.
    if original_gpu_ids is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = original_gpu_ids
    else:
        # Use pop() with a default so teardown does not raise KeyError if
        # the test itself already removed the variable.
        os.environ.pop("CUDA_VISIBLE_DEVICES", None)
def test_specific_gpus(save_gpu_ids_shutdown_only):
    """GPU ids handed to tasks must come from CUDA_VISIBLE_DEVICES."""
    allowed_gpu_ids = [4, 5, 6]
    os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(
        [str(i) for i in allowed_gpu_ids])
    ray.init(num_gpus=3)
    @ray.remote(num_gpus=1)
    def one_gpu_task():
        gpu_ids = ray.get_gpu_ids()
        assert len(gpu_ids) == 1
        assert gpu_ids[0] in allowed_gpu_ids
    @ray.remote(num_gpus=2)
    def two_gpu_task():
        gpu_ids = ray.get_gpu_ids()
        assert len(gpu_ids) == 2
        assert gpu_ids[0] in allowed_gpu_ids
        assert gpu_ids[1] in allowed_gpu_ids
    # Run each variant many times; the assertions execute in the workers.
    ray.get([one_gpu_task.remote() for _ in range(100)])
    ray.get([two_gpu_task.remote() for _ in range(100)])
def test_blocking_tasks(shutdown_only):
    """Tasks that block on results of child tasks must not deadlock.

    Only one CPU is available, so a parent task blocked in ray.get or
    ray.wait has to yield its worker so the children can run.
    """
    ray.init(num_cpus=1)
    @ray.remote
    def f(i, j):
        return (i, j)
    @ray.remote
    def g(i):
        # Each instance of g submits and blocks on the result of another
        # remote task.
        object_ids = [f.remote(i, j) for j in range(2)]
        return ray.get(object_ids)
    # BUGFIX: g was previously defined but never invoked, so blocking via
    # ray.get was never actually exercised.
    ray.get([g.remote(i) for i in range(4)])
    @ray.remote
    def h(i):
        # Each instance of h submits and blocks on the result of another
        # remote task using ray.wait.
        object_ids = [f.remote(i, j) for j in range(2)]
        return ray.wait(object_ids, num_returns=len(object_ids))
    ray.get([h.remote(i) for i in range(4)])
    @ray.remote
    def _sleep(i):
        time.sleep(0.01)
        return (i)
    @ray.remote
    def sleep():
        # Each instance of sleep submits and blocks on the result of
        # another remote task, which takes some time to execute.
        ray.get([_sleep.remote(i) for i in range(10)])
    ray.get(sleep.remote())
def test_max_call_tasks(shutdown_only):
    """A worker must exit after executing max_calls tasks."""
    ray.init(num_cpus=1)
    @ray.remote(max_calls=1)
    def single_use():
        return os.getpid()
    # After one call the worker that ran it should shut itself down.
    worker_pid = ray.get(single_use.remote())
    ray.test.test_utils.wait_for_pid_to_exit(worker_pid)
    @ray.remote(max_calls=2)
    def double_use():
        return os.getpid()
    # Two calls fit within max_calls, so both run on the same worker,
    # which then exits.
    first_pid = ray.get(double_use.remote())
    second_pid = ray.get(double_use.remote())
    assert first_pid == second_pid
    ray.test.test_utils.wait_for_pid_to_exit(first_pid)
def attempt_to_load_balance(remote_function,
                            args,
                            total_tasks,
                            num_nodes,
                            minimum_count,
                            num_attempts=100):
    """Resubmit a batch of tasks until they spread across all nodes.

    Raises AssertionError if, after num_attempts rounds, the tasks never
    landed on num_nodes distinct nodes with at least minimum_count tasks
    on each.
    """
    for _ in range(num_attempts):
        locations = ray.get(
            [remote_function.remote(*args) for _ in range(total_tasks)])
        names = set(locations)
        counts = [locations.count(name) for name in names]
        logger.info("Counts are {}.".format(counts))
        if (len(names) == num_nodes
                and all(count >= minimum_count for count in counts)):
            break
    else:
        # Never balanced within the allotted number of attempts.
        assert False
def test_load_balancing(ray_start_cluster):
    """Independent tasks should spread roughly evenly over all nodes."""
    # This test ensures that tasks are being assigned to all local
    # schedulers in a roughly equal manner.
    cluster = ray_start_cluster
    num_nodes = 3
    num_cpus = 7
    for _ in range(num_nodes):
        cluster.add_node(num_cpus=num_cpus)
    ray.init(redis_address=cluster.redis_address)
    @ray.remote
    def report_store():
        # Report which node (plasma store) executed this task.
        time.sleep(0.01)
        return ray.worker.global_worker.plasma_client.store_socket_name
    attempt_to_load_balance(report_store, [], 100, num_nodes, 10)
    attempt_to_load_balance(report_store, [], 1000, num_nodes, 100)
def test_load_balancing_with_dependencies(ray_start_cluster):
    """Tasks sharing one dependency must still spread over all nodes."""
    # This test ensures that tasks are being assigned to all local
    # schedulers in a roughly equal manner even when the tasks have
    # dependencies.
    cluster = ray_start_cluster
    num_nodes = 3
    for _ in range(num_nodes):
        cluster.add_node(num_cpus=1)
    ray.init(redis_address=cluster.redis_address)
    @ray.remote
    def locate(x):
        time.sleep(0.010)
        return ray.worker.global_worker.plasma_client.store_socket_name
    # This object will be local to one of the local schedulers. Make sure
    # this doesn't prevent tasks from being scheduled on other local
    # schedulers.
    shared_dep = ray.put(np.zeros(1000000))
    attempt_to_load_balance(locate, [shared_dep], 100, num_nodes, 25)
def wait_for_num_tasks(num_tasks, timeout=10):
    """Poll the GCS until the task table holds at least num_tasks entries."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if len(ray.global_state.task_table()) >= num_tasks:
            return
        time.sleep(0.1)
    raise Exception("Timed out while waiting for global state.")
def wait_for_num_objects(num_objects, timeout=10):
    """Poll the GCS until the object table holds at least num_objects entries."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if len(ray.global_state.object_table()) >= num_objects:
            return
        time.sleep(0.1)
    raise Exception("Timed out while waiting for global state.")
@pytest.mark.skipif(
    os.environ.get("RAY_USE_NEW_GCS") == "on",
    reason="New GCS API doesn't have a Python API yet.")
def test_global_state_api(shutdown_only):
    """Exercise the ray.global_state API surface.

    Checks that the state accessors raise before ray.init, then verifies
    the contents of the task, client, function, and object tables for the
    driver and one submitted task.
    """
    # All accessors must raise until ray.init() has been called.
    with pytest.raises(Exception):
        ray.global_state.object_table()
    with pytest.raises(Exception):
        ray.global_state.task_table()
    with pytest.raises(Exception):
        ray.global_state.client_table()
    with pytest.raises(Exception):
        ray.global_state.function_table()
    with pytest.raises(Exception):
        ray.global_state.log_files()
    ray.init(num_cpus=5, num_gpus=3, resources={"CustomResource": 1})
    resources = {"CPU": 5, "GPU": 3, "CustomResource": 1}
    assert ray.global_state.cluster_resources() == resources
    assert ray.global_state.object_table() == {}
    driver_id = ray.experimental.state.binary_to_hex(
        ray.worker.global_worker.worker_id)
    driver_task_id = ray.worker.global_worker.current_task_id.hex()
    # One task is put in the task table which corresponds to this driver.
    wait_for_num_tasks(1)
    task_table = ray.global_state.task_table()
    assert len(task_table) == 1
    assert driver_task_id == list(task_table.keys())[0]
    task_spec = task_table[driver_task_id]["TaskSpec"]
    nil_id_hex = ray.ObjectID.nil_id().hex()
    # The driver's pseudo-task has nil actor/function ids and no args.
    assert task_spec["TaskID"] == driver_task_id
    assert task_spec["ActorID"] == nil_id_hex
    assert task_spec["Args"] == []
    assert task_spec["DriverID"] == driver_id
    assert task_spec["FunctionID"] == nil_id_hex
    assert task_spec["ReturnObjectIDs"] == []
    client_table = ray.global_state.client_table()
    node_ip_address = ray.worker.global_worker.node_ip_address
    assert len(client_table) == 1
    assert client_table[0]["NodeManagerAddress"] == node_ip_address
    @ray.remote
    def f(*xs):
        return 1
    x_id = ray.put(1)
    result_id = f.remote(1, "hi", x_id)
    # Wait for one additional task to complete.
    wait_for_num_tasks(1 + 1)
    task_table = ray.global_state.task_table()
    assert len(task_table) == 1 + 1
    task_id_set = set(task_table.keys())
    task_id_set.remove(driver_task_id)
    task_id = list(task_id_set)[0]
    function_table = ray.global_state.function_table()
    task_spec = task_table[task_id]["TaskSpec"]
    assert task_spec["ActorID"] == nil_id_hex
    assert task_spec["Args"] == [1, "hi", x_id]
    assert task_spec["DriverID"] == driver_id
    assert task_spec["ReturnObjectIDs"] == [result_id]
    function_table_entry = function_table[task_spec["FunctionID"]]
    assert function_table_entry["Name"] == "runtest.f"
    assert function_table_entry["DriverID"] == driver_id
    assert function_table_entry["Module"] == "runtest"
    assert task_table[task_id] == ray.global_state.task_table(task_id)
    # Wait for two objects, one for the x_id and one for result_id.
    wait_for_num_objects(2)
    # NOTE(review): wait_for_object_table is defined here but never
    # invoked before the object-table assertions below — confirm whether
    # the call was dropped intentionally.
    def wait_for_object_table():
        timeout = 10
        start_time = time.time()
        while time.time() - start_time < timeout:
            object_table = ray.global_state.object_table()
            tables_ready = (object_table[x_id]["ManagerIDs"] is not None and
                            object_table[result_id]["ManagerIDs"] is not None)
            if tables_ready:
                return
            time.sleep(0.1)
        raise Exception("Timed out while waiting for object table to "
                        "update.")
    object_table = ray.global_state.object_table()
    assert len(object_table) == 2
    assert object_table[x_id]["IsEviction"][0] is False
    assert object_table[result_id]["IsEviction"][0] is False
    # Single-key lookups must agree with the full-table dump.
    assert object_table[x_id] == ray.global_state.object_table(x_id)
    object_table_entry = ray.global_state.object_table(result_id)
    assert object_table[result_id] == object_table_entry
@pytest.mark.skipif(
    os.environ.get("RAY_USE_NEW_GCS") == "on",
    reason="New GCS API doesn't have a Python API yet.")
def test_log_file_api(shutdown_only):
    """A message logged inside a task must show up in global_state.log_files().

    Polls the log files for up to 10 seconds, since worker output is
    flushed to the redirected files asynchronously.
    """
    ray.init(num_cpus=1, redirect_worker_output=True)
    message = "unique message"
    @ray.remote
    def f():
        logger.info(message)
        # The call to sys.stdout.flush() seems to be necessary when using
        # the system Python 2.7 on Ubuntu.
        sys.stdout.flush()
    ray.get(f.remote())
    # Make sure that the message appears in the log files.
    start_time = time.time()
    found_message = False
    while time.time() - start_time < 10:
        log_files = ray.global_state.log_files()
        # Scan every file on every node for the message. (The inner loops
        # deliberately keep scanning after a hit; only the outer while
        # loop exits early.)
        for ip, innerdict in log_files.items():
            for filename, contents in innerdict.items():
                contents_str = "".join(contents)
                if message in contents_str:
                    found_message = True
        if found_message:
            break
        time.sleep(0.1)
    assert found_message is True
@pytest.mark.skipif(
    os.environ.get("RAY_USE_NEW_GCS") == "on",
    reason="New GCS API doesn't have a Python API yet.")
def test_workers(shutdown_only):
    """ray.global_state.workers() must report metadata for every worker."""
    num_workers = 3
    ray.init(redirect_worker_output=True, num_cpus=num_workers)
    @ray.remote
    def identify():
        return id(ray.worker.global_worker), os.getpid()
    # Keep submitting batches until all workers have been observed.
    seen_workers = set()
    while len(seen_workers) != num_workers:
        seen_workers = set(ray.get([identify.remote() for _ in range(10)]))
    worker_info = ray.global_state.workers()
    assert len(worker_info) >= num_workers
    # Every worker entry must carry the expected metadata fields.
    for worker_id, info in worker_info.items():
        assert "node_ip_address" in info
        assert "plasma_store_socket" in info
        assert "stderr_file" in info
        assert "stdout_file" in info
def test_specific_driver_id():
    """ray.init(driver_id=...) must propagate the id to the driver and tasks."""
    dummy_driver_id = ray.ObjectID(b"00112233445566778899")
    ray.init(driver_id=dummy_driver_id)
    @ray.remote
    def report_driver_id():
        return ray.worker.global_worker.task_driver_id.id()
    # The driver process itself carries the requested id.
    assert_equal(dummy_driver_id.id(), ray.worker.global_worker.worker_id)
    # Tasks submitted by this driver observe the same driver id.
    observed_id = ray.get(report_driver_id.remote())
    assert_equal(dummy_driver_id.id(), observed_id)
    ray.shutdown()
def test_object_id_properties():
    """Exercise ObjectID construction, nil ids, length checks, and pickling."""
    id_bytes = b"00112233445566778899"
    object_id = ray.ObjectID(id_bytes)
    assert object_id.id() == id_bytes
    # The nil id must identify itself as such.
    object_id = ray.ObjectID.nil_id()
    assert object_id.is_nil()
    # Ids must be exactly 20 bytes; longer and shorter inputs are rejected.
    with pytest.raises(ValueError, match=r".*needs to have length 20.*"):
        ray.ObjectID(id_bytes + b"1234")
    with pytest.raises(ValueError, match=r".*needs to have length 20.*"):
        ray.ObjectID(b"0123456789")
    random_id = ray.ObjectID(_random_string())
    assert not random_id.is_nil()
    assert random_id.id() != id_bytes
    # ObjectIDs must survive a pickle round trip with equality intact.
    pickled = pickle.dumps(random_id)
    assert pickle.loads(pickled) == random_id
@pytest.fixture
def shutdown_only_with_initialization_check():
    """Teardown-only fixture: stop ray and confirm it reports uninitialized."""
    yield None
    # Teardown: shut ray down and verify the initialized flag clears.
    ray.shutdown()
    assert not ray.is_initialized()
def test_initialized(shutdown_only_with_initialization_check):
    """ray.is_initialized() must flip from False to True across ray.init()."""
    assert not ray.is_initialized()
    ray.init(num_cpus=0)
    assert ray.is_initialized()
def test_initialized_local_mode(shutdown_only_with_initialization_check):
    """ray.is_initialized() must also report True when running in local mode."""
    assert not ray.is_initialized()
    ray.init(num_cpus=0, local_mode=True)
    assert ray.is_initialized()
def test_wait_reconstruction(shutdown_only):
    """ray.wait must succeed on an object that was evicted from plasma."""
    # 100 MB store: two ~60 MB results cannot coexist, forcing eviction.
    ray.init(num_cpus=1, object_store_memory=10**8)
    @ray.remote
    def f():
        # ~60 MB payload, more than half the object store.
        return np.zeros(6 * 10**7, dtype=np.uint8)
    x_id = f.remote()
    ray.wait([x_id])
    # Producing a second large object evicts the first one.
    ray.wait([f.remote()])
    # The first object must be gone from the local plasma store.
    assert not ray.worker.global_worker.plasma_client.contains(
        ray.pyarrow.plasma.ObjectID(x_id.id()))
    # Waiting on the evicted object again must still report it ready.
    ready_ids, _ = ray.wait([x_id])
    assert len(ready_ids) == 1
def test_ray_setproctitle(shutdown_only):
    """Worker process titles must reflect the actor/task being executed."""
    ray.init(num_cpus=2)
    @ray.remote
    class UniqueName(object):
        def __init__(self):
            # Title format for actor creation: ray_<Class>:__init__().
            assert setproctitle.getproctitle() == "ray_UniqueName:__init__()"
        def f(self):
            # Title format for actor method execution: ray_<Class>:<method>().
            assert setproctitle.getproctitle() == "ray_UniqueName:f()"
    @ray.remote
    def unique_1():
        # Title format for plain tasks: ray_worker:<module>.<function>().
        assert setproctitle.getproctitle() == "ray_worker:runtest.unique_1()"
    # The assertions run inside the workers; ray.get surfaces any failure.
    actor = UniqueName.remote()
    ray.get(actor.f.remote())
    ray.get(unique_1.remote())
def test_duplicate_error_messages(shutdown_only):
    """Appending the same error entry to the GCS twice must not fail."""
    ray.init(num_cpus=0)
    driver_id = ray.ObjectID.nil_id()
    error_data = ray.gcs_utils.construct_error_message(driver_id, "test",
                                                       "message", 0)
    r = ray.worker.global_worker.redis_client

    def push_error():
        # Both pushes are byte-identical since no timestamp is included.
        r.execute_command("RAY.TABLE_APPEND",
                          ray.gcs_utils.TablePrefix.ERROR_INFO,
                          ray.gcs_utils.TablePubsub.ERROR_INFO,
                          driver_id.id(), error_data)

    push_error()
    # Before https://github.com/ray-project/ray/pull/3316 the second,
    # duplicate push would give an error.
    push_error()
@pytest.mark.skipif(
    os.getenv("TRAVIS") is None,
    reason="This test should only be run on Travis.")
def test_ray_stack(shutdown_only):
    """`ray stack` output should name the functions currently executing."""
    ray.init(num_cpus=2)

    def unique_name_1():
        time.sleep(1000)

    @ray.remote
    def unique_name_2():
        time.sleep(1000)

    @ray.remote
    def unique_name_3():
        unique_name_1()

    unique_name_2.remote()
    unique_name_3.remote()

    # Poll `ray stack` for up to 30 seconds until all three names appear.
    deadline = time.time() + 30
    while time.time() < deadline:
        output = ray.utils.decode(subprocess.check_output(["ray", "stack"]))
        if all(name in output for name in
               ("unique_name_1", "unique_name_2", "unique_name_3")):
            return
    raise Exception("Failed to find necessary information with "
                    "'ray stack'")
def test_pandas_parquet_serialization():
    """Smoke test: a pandas DataFrame can be written to a parquet file.

    Skipped entirely when pandas is not installed.
    """
    # Only test this if pandas is installed
    pytest.importorskip("pandas")
    import pandas as pd

    # TemporaryDirectory guarantees cleanup even if to_parquet raises;
    # the previous mkdtemp()/rmtree() pair leaked the directory on failure.
    with tempfile.TemporaryDirectory() as tempdir:
        filename = os.path.join(tempdir, "parquet-test")
        pd.DataFrame({"col1": [0, 1], "col2": [0, 1]}).to_parquet(filename)
|
taskrunner.py | import sys
import time
import sys
from threading import Thread
from director import asynctaskqueue
from director.timercallback import TimerCallback
class TaskRunner(object):
    """Dispatches callables onto the main thread or onto worker threads.

    Main-thread work is funneled through an AsyncTaskQueue pumped by a
    TimerCallback; background work runs on plain Python threads.
    NOTE(review): assumes the TaskRunner is constructed on the main thread
    so the QTimer belongs to it -- confirm at call sites.
    """

    def __init__(self):
        # Timer period in seconds (~60 ticks per second).
        self.interval = 1/60.0
        # Coarsen the py2 bytecode check interval; on py3 thread switching
        # is tuned via setswitchinterval below instead.
        sys.setcheckinterval(1000)
        try:
            sys.setswitchinterval(self.interval)
        except AttributeError:
            # sys.setswitchinterval is only python3
            pass
        # Tasks drained into this queue run on the main thread via _onTimer.
        self.taskQueue = asynctaskqueue.AsyncTaskQueue()
        # Callables queued by callOnMain, awaiting transfer to taskQueue.
        self.pendingTasks = []
        # Threads started by callOnThread; pruned each tick in _onTimer.
        self.threads = []
        self.timer = TimerCallback(callback=self._onTimer, targetFps=1/self.interval)
        # call timer.start here to initialize the QTimer now on the main thread
        self.timer.start()

    def _onTimer(self):
        # add all tasks in self.pendingTasks to the AsyncTaskQueue
        if self.pendingTasks:
            while True:
                try:
                    # EAFP pop(0)/IndexError drains the list safely even if
                    # callOnMain appends concurrently from another thread.
                    self.taskQueue.addTask(self.pendingTasks.pop(0))
                except IndexError:
                    break
        # start the AsyncTaskQueue if it's not already running
        if self.taskQueue.tasks and not self.taskQueue.isRunning:
            self.taskQueue.start()
        # only retain the live threads
        self.threads = [t for t in self.threads if t.is_alive()]
        if self.threads:
            # Give up control to other python threads that are running
            time.sleep(self.interval)
        else:
            # return false to stop the timer
            return False

    def callOnMain(self, func, *args, **kwargs):
        # Queue func(*args, **kwargs) for the main thread; restart the timer
        # in case _onTimer previously stopped it by returning False.
        self.pendingTasks.append(lambda: func(*args, **kwargs))
        self.timer.start()

    def callOnThread(self, func, *args, **kwargs):
        # Run func(*args, **kwargs) on a new background thread and return the
        # Thread.  The timer is restarted so _onTimer keeps sleeping each tick
        # (yielding to the worker) while any thread is alive.
        t = Thread(target=lambda: func(*args, **kwargs))
        self.threads.append(t)
        t.start()
        self.timer.start()
        return t
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.